// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/deoptimizer/deoptimizer.h"

#include <memory>

#include "src/ast/prettyprinter.h"
#include "src/builtins/accessors.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/diagnostics/disasm.h"
#include "src/execution/frames-inl.h"
#include "src/execution/v8threads.h"
#include "src/handles/global-handles.h"
#include "src/heap/heap-inl.h"
#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/smi.h"
#include "src/tracing/trace-event.h"

// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"

namespace v8 {
namespace internal {

// {FrameWriter} offers a stack writer abstraction for writing
// FrameDescriptions. The main service the class provides is managing
// {top_offset_}, i.e. the offset of the next slot to write to.
class FrameWriter {
 public:
  static const int NO_INPUT_INDEX = -1;

  FrameWriter(Deoptimizer* deoptimizer, FrameDescription* frame,
              CodeTracer::Scope* trace_scope)
      : deoptimizer_(deoptimizer),
        frame_(frame),
        trace_scope_(trace_scope),
        top_offset_(frame->GetFrameSize()) {}

  void PushRawValue(intptr_t value, const char* debug_hint) {
    PushValue(value);
    if (trace_scope_ != nullptr) {
      DebugPrintOutputValue(value, debug_hint);
    }
  }

  void PushRawObject(Object obj, const char* debug_hint) {
    intptr_t value = obj.ptr();
    PushValue(value);
    if (trace_scope_ != nullptr) {
      DebugPrintOutputObject(obj, top_offset_, debug_hint);
    }
  }

  void PushCallerPc(intptr_t pc) {
    top_offset_ -= kPCOnStackSize;
    frame_->SetCallerPc(top_offset_, pc);
    DebugPrintOutputValue(pc, "caller's pc\n");
  }

  void PushCallerFp(intptr_t fp) {
    top_offset_ -= kFPOnStackSize;
    frame_->SetCallerFp(top_offset_, fp);
    DebugPrintOutputValue(fp, "caller's fp\n");
  }

  void PushCallerConstantPool(intptr_t cp) {
    top_offset_ -= kSystemPointerSize;
    frame_->SetCallerConstantPool(top_offset_, cp);
    DebugPrintOutputValue(cp, "caller's constant_pool\n");
  }

  void PushTranslatedValue(const TranslatedFrame::iterator& iterator,
                           const char* debug_hint = "") {
    Object obj = iterator->GetRawValue();
    PushRawObject(obj, debug_hint);
    if (trace_scope_) {
      PrintF(trace_scope_->file(), " (input #%d)\n", iterator.input_index());
    }
    deoptimizer_->QueueValueForMaterialization(output_address(top_offset_),
                                               obj, iterator);
  }

  unsigned top_offset() const { return top_offset_; }

 private:
  void PushValue(intptr_t value) {
    CHECK_GE(top_offset_, 0);
    top_offset_ -= kSystemPointerSize;
    frame_->SetFrameSlot(top_offset_, value);
  }

  Address output_address(unsigned output_offset) {
    Address output_address =
        static_cast<Address>(frame_->GetTop()) + output_offset;
    return output_address;
  }

  void DebugPrintOutputValue(intptr_t value, const char* debug_hint = "") {
    if (trace_scope_ != nullptr) {
      PrintF(trace_scope_->file(),
             " " V8PRIxPTR_FMT ": [top + %3d] <- " V8PRIxPTR_FMT " ; %s",
             output_address(top_offset_), top_offset_, value, debug_hint);
    }
  }

  void DebugPrintOutputObject(Object obj, unsigned output_offset,
                              const char* debug_hint = "") {
    if (trace_scope_ != nullptr) {
      PrintF(trace_scope_->file(), " " V8PRIxPTR_FMT ": [top + %3d] <- ",
             output_address(output_offset),
             output_offset);
      if (obj.IsSmi()) {
        PrintF(V8PRIxPTR_FMT " <Smi %d>", obj.ptr(), Smi::cast(obj).value());
      } else {
        obj.ShortPrint(trace_scope_->file());
      }
      PrintF(trace_scope_->file(), " ; %s", debug_hint);
    }
  }

  Deoptimizer* deoptimizer_;
  FrameDescription* frame_;
  CodeTracer::Scope* trace_scope_;
  unsigned top_offset_;
};

DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) {
  Code* start = &deopt_entry_code_[0];
  Code* end = &deopt_entry_code_[DeoptimizerData::kLastDeoptimizeKind + 1];
  heap_->RegisterStrongRoots(FullObjectSlot(start), FullObjectSlot(end));
}

DeoptimizerData::~DeoptimizerData() {
  Code* start = &deopt_entry_code_[0];
  heap_->UnregisterStrongRoots(FullObjectSlot(start));
}

Code DeoptimizerData::deopt_entry_code(DeoptimizeKind kind) {
  return deopt_entry_code_[static_cast<int>(kind)];
}

void DeoptimizerData::set_deopt_entry_code(DeoptimizeKind kind, Code code) {
  deopt_entry_code_[static_cast<int>(kind)] = code;
}

Code Deoptimizer::FindDeoptimizingCode(Address addr) {
  if (function_.IsHeapObject()) {
    // Search all deoptimizing code in the native context of the function.
    Isolate* isolate = isolate_;
    NativeContext native_context = function_.context().native_context();
    Object element = native_context.DeoptimizedCodeListHead();
    while (!element.IsUndefined(isolate)) {
      Code code = Code::cast(element);
      CHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
      if (code.contains(addr)) return code;
      element = code.next_code_link();
    }
  }
  return Code();
}

// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
                              unsigned bailout_id, Address from,
                              int fp_to_sp_delta, Isolate* isolate) {
  JSFunction function = JSFunction::cast(Object(raw_function));
  Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, kind,
                                             bailout_id, from, fp_to_sp_delta);
  CHECK_NULL(isolate->deoptimizer_data()->current_);
  isolate->deoptimizer_data()->current_ = deoptimizer;
  return deoptimizer;
}

Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
  Deoptimizer* result = isolate->deoptimizer_data()->current_;
  CHECK_NOT_NULL(result);
  result->DeleteFrameDescriptions();
  isolate->deoptimizer_data()->current_ = nullptr;
  return result;
}

DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
    JavaScriptFrame* frame, int jsframe_index, Isolate* isolate) {
  CHECK(frame->is_optimized());

  TranslatedState translated_values(frame);
  translated_values.Prepare(frame->fp());

  TranslatedState::iterator frame_it = translated_values.end();
  int counter = jsframe_index;
  for (auto it = translated_values.begin(); it != translated_values.end();
       it++) {
    if (it->kind() == TranslatedFrame::kInterpretedFunction ||
        it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
        it->kind() ==
            TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
      if (counter == 0) {
        frame_it = it;
        break;
      }
      counter--;
    }
  }
  CHECK(frame_it != translated_values.end());
  // We only include kJavaScriptBuiltinContinuation frames above to get the
  // counting right.
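  // The frame chosen for inspection itself, however, must be an interpreted
  // frame, which the CHECK below enforces.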
  CHECK_EQ(frame_it->kind(), TranslatedFrame::kInterpretedFunction);

  DeoptimizedFrameInfo* info =
      new DeoptimizedFrameInfo(&translated_values, frame_it, isolate);

  return info;
}

namespace {
class ActivationsFinder : public ThreadVisitor {
 public:
  explicit ActivationsFinder(std::set<Code>* codes, Code topmost_optimized_code,
                             bool safe_to_deopt_topmost_optimized_code)
      : codes_(codes) {
#ifdef DEBUG
    topmost_ = topmost_optimized_code;
    safe_to_deopt_ = safe_to_deopt_topmost_optimized_code;
#endif
  }

  // Find the frames with activations of codes marked for deoptimization,
  // search for the trampoline to the deoptimizer call for each such code, and
  // use it to replace the current pc on the stack.
  void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
    for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
      if (it.frame()->type() == StackFrame::OPTIMIZED) {
        Code code = it.frame()->LookupCode();
        if (code.kind() == Code::OPTIMIZED_FUNCTION &&
            code.marked_for_deoptimization()) {
          codes_->erase(code);
          // Obtain the trampoline to the deoptimizer call.
          SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc());
          int trampoline_pc = safepoint.trampoline_pc();
          DCHECK_IMPLIES(code == topmost_, safe_to_deopt_);
          // Replace the current pc on the stack with the trampoline.
          it.frame()->set_pc(code.raw_instruction_start() + trampoline_pc);
        }
      }
    }
  }

 private:
  std::set<Code>* codes_;

#ifdef DEBUG
  Code topmost_;
  bool safe_to_deopt_;
#endif
};
}  // namespace

// Move marked code from the optimized code list to the deoptimized code list,
// and replace pc on the stack for codes marked for deoptimization.
void Deoptimizer::DeoptimizeMarkedCodeForContext(
    NativeContext native_context) {
  DisallowHeapAllocation no_allocation;

  Isolate* isolate = native_context.GetIsolate();
  Code topmost_optimized_code;
  bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
  // Make sure all activations of optimized code can deopt at their current PC.
  // The topmost optimized code has special handling because it cannot be
  // deoptimized due to weak object dependency.
  for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
       it.Advance()) {
    StackFrame::Type type = it.frame()->type();
    if (type == StackFrame::OPTIMIZED) {
      Code code = it.frame()->LookupCode();
      JSFunction function =
          static_cast<OptimizedFrame*>(it.frame())->function();
      if (FLAG_trace_deopt) {
        CodeTracer::Scope scope(isolate->GetCodeTracer());
        PrintF(scope.file(), "[deoptimizer found activation of function: ");
        function.PrintName(scope.file());
        PrintF(scope.file(), " / %" V8PRIxPTR "]\n", function.ptr());
      }
      SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc());

      // Turbofan deopt is checked when we are patching addresses on stack.
      bool safe_if_deopt_triggered = safepoint.has_deoptimization_index();
      bool is_builtin_code = code.kind() == Code::BUILTIN;
      DCHECK(topmost_optimized_code.is_null() || safe_if_deopt_triggered ||
             is_builtin_code);
      if (topmost_optimized_code.is_null()) {
        topmost_optimized_code = code;
        safe_to_deopt_topmost_optimized_code = safe_if_deopt_triggered;
      }
    }
  }
#endif

  // We will use this set to mark those Code objects that are marked for
  // deoptimization and have not been found in stack frames.
  std::set<Code> codes;

  // Move marked code from the optimized code list to the deoptimized code
  // list.
  // Walk over all optimized code objects in this native context.
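  // The optimized code list is a singly linked list threaded through each
  // Code object's next_code_link slot. {prev} trails {element} so that a
  // marked entry can be spliced out in O(1): either via
  // prev.set_next_code_link(next) or, when the head itself is marked, via
  // SetOptimizedCodeListHead(next).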
  Code prev;
  Object element = native_context.OptimizedCodeListHead();
  while (!element.IsUndefined(isolate)) {
    Code code = Code::cast(element);
    CHECK_EQ(code.kind(), Code::OPTIMIZED_FUNCTION);
    Object next = code.next_code_link();

    if (code.marked_for_deoptimization()) {
      codes.insert(code);

      if (!prev.is_null()) {
        // Skip this code in the optimized code list.
        prev.set_next_code_link(next);
      } else {
        // There was no previous node, the next node is the new head.
        native_context.SetOptimizedCodeListHead(next);
      }

      // Move the code to the _deoptimized_ code list.
      code.set_next_code_link(native_context.DeoptimizedCodeListHead());
      native_context.SetDeoptimizedCodeListHead(code);
    } else {
      // Not marked; preserve this element.
      prev = code;
    }
    element = next;
  }

  ActivationsFinder visitor(&codes, topmost_optimized_code,
                            safe_to_deopt_topmost_optimized_code);
  // Iterate over the stack of this thread.
  visitor.VisitThread(isolate, isolate->thread_local_top());
  // In addition to iterating over the stack of this thread, we also need to
  // consider all the other threads as they may also use the code currently
  // being deoptimized.
  isolate->thread_manager()->IterateArchivedThreads(&visitor);

  // If there's no activation of a code in any stack then we can remove its
  // deoptimization data. We do this to ensure that code objects that are
  // unlinked don't transitively keep objects alive unnecessarily.
  for (Code code : codes) {
    isolate->heap()->InvalidateCodeDeoptimizationData(code);
  }

  native_context.GetOSROptimizedCodeCache().EvictMarkedCode(
      native_context.GetIsolate());
}

void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
  RuntimeCallTimerScope runtimeTimer(isolate,
                                     RuntimeCallCounterId::kDeoptimizeCode);
  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
  if (FLAG_trace_deopt) {
    CodeTracer::Scope scope(isolate->GetCodeTracer());
    PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
  }
  isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock);
  DisallowHeapAllocation no_allocation;
  // For all contexts, mark all code, then deoptimize.
  Object context = isolate->heap()->native_contexts_list();
  while (!context.IsUndefined(isolate)) {
    NativeContext native_context = NativeContext::cast(context);
    MarkAllCodeForContext(native_context);
    OSROptimizedCodeCache::Clear(native_context);
    DeoptimizeMarkedCodeForContext(native_context);
    context = native_context.next_context_link();
  }
}

void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
  RuntimeCallTimerScope runtimeTimer(isolate,
                                     RuntimeCallCounterId::kDeoptimizeCode);
  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
  if (FLAG_trace_deopt) {
    CodeTracer::Scope scope(isolate->GetCodeTracer());
    PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
  }
  DisallowHeapAllocation no_allocation;
  // For all contexts, deoptimize code already marked.
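  // The native contexts themselves form a list threaded through each
  // context's next_context_link slot, starting at the heap's
  // native_contexts_list; this is the same walk DeoptimizeAll does above.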
  Object context = isolate->heap()->native_contexts_list();
  while (!context.IsUndefined(isolate)) {
    NativeContext native_context = NativeContext::cast(context);
    DeoptimizeMarkedCodeForContext(native_context);
    context = native_context.next_context_link();
  }
}

void Deoptimizer::MarkAllCodeForContext(NativeContext native_context) {
  Object element = native_context.OptimizedCodeListHead();
  Isolate* isolate = native_context.GetIsolate();
  while (!element.IsUndefined(isolate)) {
    Code code = Code::cast(element);
    CHECK_EQ(code.kind(), Code::OPTIMIZED_FUNCTION);
    code.set_marked_for_deoptimization(true);
    element = code.next_code_link();
  }
}

void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
  Isolate* isolate = function.GetIsolate();
  RuntimeCallTimerScope runtimeTimer(isolate,
                                     RuntimeCallCounterId::kDeoptimizeCode);
  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
  function.ResetIfBytecodeFlushed();
  if (code.is_null()) code = function.code();

  if (code.kind() == Code::OPTIMIZED_FUNCTION) {
    // Mark the code for deoptimization and unlink any functions that also
    // refer to that code. The code cannot be shared across native contexts,
    // so we only need to search one.
    code.set_marked_for_deoptimization(true);
    // The code in the function's optimized code feedback vector slot might
    // be different from the code on the function - evict it if necessary.
    function.feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
        function.shared(), "unlinking code marked for deopt");
    if (!code.deopt_already_counted()) {
      code.set_deopt_already_counted(true);
    }
    DeoptimizeMarkedCodeForContext(function.context().native_context());
    // TODO(mythria): Ideally EvictMarkedCode should compact the cache without
    // having to explicitly call this. We don't do this currently because
    // compacting causes GC and DeoptimizeMarkedCodeForContext uses raw
    // pointers. Update DeoptimizeMarkedCodeForContext to use handles and
    // remove this call from here.
    OSROptimizedCodeCache::Compact(
        Handle<NativeContext>(function.context().native_context(), isolate));
  }
}

void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
  deoptimizer->DoComputeOutputFrames();
}

const char* Deoptimizer::MessageFor(DeoptimizeKind kind) {
  switch (kind) {
    case DeoptimizeKind::kEager:
      return "eager";
    case DeoptimizeKind::kSoft:
      return "soft";
    case DeoptimizeKind::kLazy:
      return "lazy";
  }
  FATAL("Unsupported deopt kind");
  return nullptr;
}

namespace {
uint16_t InternalFormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
  static constexpr int kTheReceiver = 1;
  return sfi.internal_formal_parameter_count() + kTheReceiver;
}
}  // namespace

Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
                         DeoptimizeKind kind, unsigned bailout_id,
                         Address from, int fp_to_sp_delta)
    : isolate_(isolate),
      function_(function),
      bailout_id_(bailout_id),
      deopt_kind_(kind),
      from_(from),
      fp_to_sp_delta_(fp_to_sp_delta),
      deoptimizing_throw_(false),
      catch_handler_data_(-1),
      catch_handler_pc_offset_(-1),
      input_(nullptr),
      output_count_(0),
      jsframe_count_(0),
      output_(nullptr),
      caller_frame_top_(0),
      caller_fp_(0),
      caller_pc_(0),
      caller_constant_pool_(0),
      input_frame_context_(0),
      stack_fp_(0),
      trace_scope_(nullptr) {
  if (isolate->deoptimizer_lazy_throw()) {
    isolate->set_deoptimizer_lazy_throw(false);
    deoptimizing_throw_ = true;
  }

  DCHECK_NE(from, kNullAddress);
  compiled_code_ = FindOptimizedCode();
  DCHECK(!compiled_code_.is_null());

  DCHECK(function.IsJSFunction());
  trace_scope_ = FLAG_trace_deopt ?
                     new CodeTracer::Scope(isolate->GetCodeTracer()) : nullptr;
#ifdef DEBUG
  DCHECK(AllowHeapAllocation::IsAllowed());
  disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif  // DEBUG
  if ((compiled_code_.kind() != Code::OPTIMIZED_FUNCTION ||
       !compiled_code_.deopt_already_counted()) &&
      deopt_kind_ == DeoptimizeKind::kSoft) {
    isolate->counters()->soft_deopts_executed()->Increment();
  }
  if (compiled_code_.kind() == Code::OPTIMIZED_FUNCTION) {
    compiled_code_.set_deopt_already_counted(true);
    PROFILE(isolate_,
            CodeDeoptEvent(compiled_code_, kind, from_, fp_to_sp_delta_));
  }
  unsigned size = ComputeInputFrameSize();
  const int parameter_count =
      InternalFormalParameterCountWithReceiver(function.shared());
  input_ = new (size) FrameDescription(size, parameter_count);

  if (kSupportsFixedDeoptExitSize) {
    DCHECK_EQ(bailout_id_, kMaxUInt32);
    // Calculate bailout id from return address.
    DCHECK_GT(kDeoptExitSize, 0);
    DeoptimizationData deopt_data =
        DeoptimizationData::cast(compiled_code_.deoptimization_data());
    Address deopt_start = compiled_code_.raw_instruction_start() +
                          deopt_data.DeoptExitStart().value();
    int offset = static_cast<int>(from_ - kDeoptExitSize - deopt_start);
    DCHECK_EQ(0, offset % kDeoptExitSize);
    bailout_id_ = offset / kDeoptExitSize;
  }
}

Code Deoptimizer::FindOptimizedCode() {
  Code compiled_code = FindDeoptimizingCode(from_);
  return !compiled_code.is_null() ? compiled_code
                                  : isolate_->FindCodeObject(from_);
}

void Deoptimizer::PrintFunctionName() {
  if (function_.IsHeapObject() && function_.IsJSFunction()) {
    function_.ShortPrint(trace_scope_->file());
  } else {
    PrintF(trace_scope_->file(), "%s",
           Code::Kind2String(compiled_code_.kind()));
  }
}

Handle<JSFunction> Deoptimizer::function() const {
  return Handle<JSFunction>(function_, isolate());
}

Handle<Code> Deoptimizer::compiled_code() const {
  return Handle<Code>(compiled_code_, isolate());
}

Deoptimizer::~Deoptimizer() {
  DCHECK(input_ == nullptr && output_ == nullptr);
  DCHECK_NULL(disallow_heap_allocation_);
  delete trace_scope_;
}

void Deoptimizer::DeleteFrameDescriptions() {
  delete input_;
  for (int i = 0; i < output_count_; ++i) {
    if (output_[i] != input_) delete output_[i];
  }
  delete[] output_;
  input_ = nullptr;
  output_ = nullptr;
#ifdef DEBUG
  DCHECK(!AllowHeapAllocation::IsAllowed());
  DCHECK_NOT_NULL(disallow_heap_allocation_);
  delete disallow_heap_allocation_;
  disallow_heap_allocation_ = nullptr;
#endif  // DEBUG
}

Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
                                            DeoptimizeKind kind) {
  DeoptimizerData* data = isolate->deoptimizer_data();
  CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
  CHECK(!data->deopt_entry_code(kind).is_null());
  return data->deopt_entry_code(kind).raw_instruction_start();
}

bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
                                        DeoptimizeKind type) {
  DeoptimizerData* data = isolate->deoptimizer_data();
  CHECK_LE(type, DeoptimizerData::kLastDeoptimizeKind);
  Code code = data->deopt_entry_code(type);
  if (code.is_null()) return false;
  return addr == code.raw_instruction_start();
}

bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
                                        DeoptimizeKind* type) {
  if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kEager)) {
    *type = DeoptimizeKind::kEager;
    return true;
  }
  if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kSoft)) {
    *type = DeoptimizeKind::kSoft;
    return true;
  }
  if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kLazy)) {
    *type = DeoptimizeKind::kLazy;
    return true;
  }
  return false;
}

int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
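  // Note that only code objects on a deoptimized code list that are not
  // (re-)marked for deoptimization contribute to the count below.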
  int length = 0;
  // Count all entries in the deoptimizing code list of every context.
  Object context = isolate->heap()->native_contexts_list();
  while (!context.IsUndefined(isolate)) {
    NativeContext native_context = NativeContext::cast(context);
    Object element = native_context.DeoptimizedCodeListHead();
    while (!element.IsUndefined(isolate)) {
      Code code = Code::cast(element);
      DCHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
      if (!code.marked_for_deoptimization()) {
        length++;
      }
      element = code.next_code_link();
    }
    context = Context::cast(context).next_context_link();
  }
  return length;
}

namespace {

int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
  switch (translated_frame->kind()) {
    case TranslatedFrame::kInterpretedFunction: {
      int bytecode_offset = translated_frame->node_id().ToInt();
      HandlerTable table(
          translated_frame->raw_shared_info().GetBytecodeArray());
      return table.LookupRange(bytecode_offset, data_out, nullptr);
    }
    case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch: {
      return 0;
    }
    default:
      break;
  }
  return -1;
}

}  // namespace

// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
  // When we call this function, the return address of the previous frame has
  // been removed from the stack by GenerateDeoptimizationEntries() so the
  // stack is not iterable by the SafeStackFrameIterator.
#if V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK
  DCHECK_EQ(0, isolate()->isolate_data()->stack_is_iterable());
#endif
  base::ElapsedTimer timer;

  // Determine basic deoptimization information. The optimized frame is
  // described by the input data.
  DeoptimizationData input_data =
      DeoptimizationData::cast(compiled_code_.deoptimization_data());

  {
    // Read caller's PC, caller's FP and caller's constant pool values
    // from input frame. Compute caller's frame top address.

    Register fp_reg = JavaScriptFrame::fp_register();
    stack_fp_ = input_->GetRegister(fp_reg.code());

    caller_frame_top_ = stack_fp_ + ComputeInputFrameAboveFpFixedSize();

    Address fp_address = input_->GetFramePointerAddress();
    caller_fp_ = Memory<intptr_t>(fp_address);
    caller_pc_ =
        Memory<intptr_t>(fp_address + CommonFrameConstants::kCallerPCOffset);
    input_frame_context_ = Memory<intptr_t>(
        fp_address + CommonFrameConstants::kContextOrFrameTypeOffset);

    if (FLAG_enable_embedded_constant_pool) {
      caller_constant_pool_ = Memory<intptr_t>(
          fp_address + CommonFrameConstants::kConstantPoolOffset);
    }
  }

  if (trace_scope_ != nullptr) {
    timer.Start();
    PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
           MessageFor(deopt_kind_));
    PrintFunctionName();
    PrintF(trace_scope_->file(),
           " (opt #%d) @%d, FP to SP delta: %d, caller sp: " V8PRIxPTR_FMT
           "]\n",
           input_data.OptimizationId().value(), bailout_id_, fp_to_sp_delta_,
           caller_frame_top_);
    if (deopt_kind_ == DeoptimizeKind::kEager ||
        deopt_kind_ == DeoptimizeKind::kSoft) {
      compiled_code_.PrintDeoptLocation(trace_scope_->file(),
                                        " ;;; deoptimize at ", from_);
    }
  }

  BailoutId node_id = input_data.BytecodeOffset(bailout_id_);
  ByteArray translations = input_data.TranslationByteArray();
  unsigned translation_index =
      input_data.TranslationIndex(bailout_id_).value();

  TranslationIterator state_iterator(translations, translation_index);
  translated_state_.Init(
      isolate_, input_->GetFramePointerAddress(), &state_iterator,
      input_data.LiteralArray(), input_->GetRegisterValues(),
      trace_scope_ == nullptr ? nullptr : trace_scope_->file(),
      function_.IsHeapObject() ?
          function_.shared().internal_formal_parameter_count() : 0);

  // Do the input frame to output frame(s) translation.
  size_t count = translated_state_.frames().size();
  // If we are supposed to go to the catch handler, find the catching frame
  // for the catch and make sure we only deoptimize up to that frame.
  if (deoptimizing_throw_) {
    size_t catch_handler_frame_index = count;
    for (size_t i = count; i-- > 0;) {
      catch_handler_pc_offset_ = LookupCatchHandler(
          &(translated_state_.frames()[i]), &catch_handler_data_);
      if (catch_handler_pc_offset_ >= 0) {
        catch_handler_frame_index = i;
        break;
      }
    }
    CHECK_LT(catch_handler_frame_index, count);
    count = catch_handler_frame_index + 1;
  }

  DCHECK_NULL(output_);
  output_ = new FrameDescription*[count];
  for (size_t i = 0; i < count; ++i) {
    output_[i] = nullptr;
  }
  output_count_ = static_cast<int>(count);

  // Translate each output frame.
  int frame_index = 0;  // output_frame_index
  for (size_t i = 0; i < count; ++i, ++frame_index) {
    // Read the ast node id, function, and frame height for this output frame.
    TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
    bool handle_exception = deoptimizing_throw_ && i == count - 1;
    switch (translated_frame->kind()) {
      case TranslatedFrame::kInterpretedFunction:
        DoComputeInterpretedFrame(translated_frame, frame_index,
                                  handle_exception);
        jsframe_count_++;
        break;
      case TranslatedFrame::kArgumentsAdaptor:
        DoComputeArgumentsAdaptorFrame(translated_frame, frame_index);
        break;
      case TranslatedFrame::kConstructStub:
        DoComputeConstructStubFrame(translated_frame, frame_index);
        break;
      case TranslatedFrame::kBuiltinContinuation:
        DoComputeBuiltinContinuation(translated_frame, frame_index,
                                     BuiltinContinuationMode::STUB);
        break;
      case TranslatedFrame::kJavaScriptBuiltinContinuation:
        DoComputeBuiltinContinuation(translated_frame, frame_index,
                                     BuiltinContinuationMode::JAVASCRIPT);
        break;
      case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch:
        DoComputeBuiltinContinuation(
            translated_frame, frame_index,
            handle_exception
                ? BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION
                : BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH);
        break;
      case TranslatedFrame::kInvalid:
        FATAL("invalid frame");
        break;
    }
  }

  FrameDescription* topmost = output_[count - 1];
  topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
                                            isolate()->isolate_root());

  // Print some helpful diagnostic information.
  if (trace_scope_ != nullptr) {
    double ms = timer.Elapsed().InMillisecondsF();
    int index = output_count_ - 1;  // Index of the topmost frame.
    PrintF(trace_scope_->file(), "[deoptimizing (%s): end ",
           MessageFor(deopt_kind_));
    PrintFunctionName();
    PrintF(trace_scope_->file(),
           " @%d => node=%d, pc=" V8PRIxPTR_FMT ", caller sp=" V8PRIxPTR_FMT
           ", took %0.3f ms]\n",
           bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
           caller_frame_top_, ms);
  }
}

void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
                                            int frame_index,
                                            bool goto_catch_handler) {
  SharedFunctionInfo shared = translated_frame->raw_shared_info();

  TranslatedFrame::iterator value_iterator = translated_frame->begin();
  const bool is_bottommost = (0 == frame_index);
  const bool is_topmost = (output_count_ - 1 == frame_index);

  const int real_bytecode_offset = translated_frame->node_id().ToInt();
  const int bytecode_offset = goto_catch_handler ?
      catch_handler_pc_offset_ : real_bytecode_offset;

  const int parameters_count =
      InternalFormalParameterCountWithReceiver(shared);
  const int locals_count = translated_frame->height();
  InterpretedFrameInfo frame_info = InterpretedFrameInfo::Precise(
      parameters_count, locals_count, is_topmost);
  const uint32_t output_frame_size = frame_info.frame_size_in_bytes();

  TranslatedFrame::iterator function_iterator = value_iterator++;
  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(), " translating interpreted frame ");
    std::unique_ptr<char[]> name = shared.DebugName().ToCString();
    PrintF(trace_scope_->file(), "%s", name.get());
    PrintF(trace_scope_->file(),
           " => bytecode_offset=%d, variable_frame_size=%d, frame_size=%d%s\n",
           real_bytecode_offset,
           frame_info.frame_size_in_bytes_without_fixed(), output_frame_size,
           goto_catch_handler ? " (throw)" : "");
  }

  // Allocate and store the output frame description.
  FrameDescription* output_frame = new (output_frame_size)
      FrameDescription(output_frame_size, parameters_count);
  FrameWriter frame_writer(this, output_frame, trace_scope_);

  CHECK(frame_index >= 0 && frame_index < output_count_);
  CHECK_NULL(output_[frame_index]);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top
  // and this frame's size.
  const intptr_t top_address =
      is_bottommost ? caller_frame_top_ - output_frame_size
                    : output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  ReadOnlyRoots roots(isolate());
  if (ShouldPadArguments(parameters_count)) {
    frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
  }

  for (int i = 0; i < parameters_count; ++i, ++value_iterator) {
    frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
  }

  DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
            frame_writer.top_offset());
  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(), " -------------------------\n");
  }

  // There are no translation commands for the caller's pc and fp, the
  // context, the function and the bytecode offset. Synthesize their values
  // and set them up explicitly.
  //
  // The caller's pc for the bottommost output frame is the same as in the
  // input frame. For all subsequent output frames, it can be read from the
  // previous one. This frame's pc can be computed from the non-optimized
  // function code and AST id of the bailout.
  const intptr_t caller_pc =
      is_bottommost ? caller_pc_ : output_[frame_index - 1]->GetPc();
  frame_writer.PushCallerPc(caller_pc);

  // The caller's frame pointer for the bottommost output frame is the same
  // as in the input frame. For all subsequent output frames, it can be
  // read from the previous one. Also compute and set this frame's frame
  // pointer.
  const intptr_t caller_fp =
      is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp();
  frame_writer.PushCallerFp(caller_fp);

  const intptr_t fp_value = top_address + frame_writer.top_offset();
  output_frame->SetFp(fp_value);
  if (is_topmost) {
    Register fp_reg = InterpretedFrame::fp_register();
    output_frame->SetRegister(fp_reg.code(), fp_value);
  }

  if (FLAG_enable_embedded_constant_pool) {
    // For the bottommost output frame the constant pool pointer can be gotten
    // from the input frame. For subsequent output frames, it can be read from
    // the previous frame.
    const intptr_t caller_cp = is_bottommost ?
        caller_constant_pool_ : output_[frame_index - 1]->GetConstantPool();
    frame_writer.PushCallerConstantPool(caller_cp);
  }

  // For the bottommost output frame the context can be gotten from the input
  // frame. For all subsequent output frames it can be gotten from the
  // function so long as we don't inline functions that need local contexts.

  // When deoptimizing into a catch block, we need to take the context
  // from a register that was specified in the handler table.
  TranslatedFrame::iterator context_pos = value_iterator++;
  if (goto_catch_handler) {
    // Skip to the translated value of the register specified
    // in the handler table.
    for (int i = 0; i < catch_handler_data_ + 1; ++i) {
      context_pos++;
    }
  }
  // Read the context from the translations.
  Object context = context_pos->GetRawValue();
  output_frame->SetContext(static_cast<intptr_t>(context.ptr()));
  frame_writer.PushTranslatedValue(context_pos, "context");

  // The function was mentioned explicitly in the BEGIN_FRAME.
  frame_writer.PushTranslatedValue(function_iterator, "function");

  // Set the bytecode array pointer.
  Object bytecode_array = shared.HasBreakInfo()
                              ? shared.GetDebugInfo().DebugBytecodeArray()
                              : shared.GetBytecodeArray();
  frame_writer.PushRawObject(bytecode_array, "bytecode array\n");

  // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
  const int raw_bytecode_offset =
      BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
  Smi smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
  frame_writer.PushRawObject(smi_bytecode_offset, "bytecode offset\n");

  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(), " -------------------------\n");
  }

  // Translate the rest of the interpreter registers in the frame.
  // The return_value_offset is counted from the top. Here, we compute the
  // register index (counted from the start).
  const int return_value_first_reg =
      locals_count - translated_frame->return_value_offset();
  const int return_value_count = translated_frame->return_value_count();
  for (int i = 0; i < locals_count; ++i, ++value_iterator) {
    // Ensure we write the return value if we have one and we are returning
    // normally to a lazy deopt point.
    if (is_topmost && !goto_catch_handler &&
        deopt_kind_ == DeoptimizeKind::kLazy &&
        i >= return_value_first_reg &&
        i < return_value_first_reg + return_value_count) {
      const int return_index = i - return_value_first_reg;
      if (return_index == 0) {
        frame_writer.PushRawValue(
            input_->GetRegister(kReturnRegister0.code()),
            "return value 0\n");
        // We do not handle the situation when one return value should go into
        // the accumulator and another one into an ordinary register. Since
        // the interpreter should never create such a situation, just assert
        // this does not happen.
        CHECK_LE(return_value_first_reg + return_value_count, locals_count);
      } else {
        CHECK_EQ(return_index, 1);
        frame_writer.PushRawValue(
            input_->GetRegister(kReturnRegister1.code()),
            "return value 1\n");
      }
    } else {
      // This is not a return value, just write the value from the
      // translations.
      frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
    }
  }

  uint32_t register_slots_written = static_cast<uint32_t>(locals_count);
  DCHECK_LE(register_slots_written, frame_info.register_stack_slot_count());
  // Some architectures must pad the stack frame with extra stack slots
  // to ensure the stack frame is aligned. Do this now.
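  // (For example, on architectures that require the stack pointer to stay
  // 16-byte aligned, an odd number of register slots gets one extra padding
  // slot. The hole value serves as the filler because it is a valid tagged
  // value, so the GC can safely scan the padded slot.)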
  while (register_slots_written < frame_info.register_stack_slot_count()) {
    register_slots_written++;
    frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
  }

  // Translate the accumulator register (depending on frame position).
  if (is_topmost) {
    if (kPadArguments) {
      frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
    }
    // For topmost frame, put the accumulator on the stack. The
    // {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
    // after materialization).
    if (goto_catch_handler) {
      // If we are lazy deopting to a catch handler, we set the accumulator to
      // the exception (which lives in the result register).
      intptr_t accumulator_value =
          input_->GetRegister(kInterpreterAccumulatorRegister.code());
      frame_writer.PushRawObject(Object(accumulator_value), "accumulator\n");
    } else {
      // If we are lazily deoptimizing make sure we store the deopt
      // return value into the appropriate slot.
      if (deopt_kind_ == DeoptimizeKind::kLazy &&
          translated_frame->return_value_offset() == 0 &&
          translated_frame->return_value_count() > 0) {
        CHECK_EQ(translated_frame->return_value_count(), 1);
        frame_writer.PushRawValue(
            input_->GetRegister(kReturnRegister0.code()),
            "return value 0\n");
      } else {
        frame_writer.PushTranslatedValue(value_iterator, "accumulator");
      }
    }
    ++value_iterator;  // Move over the accumulator.
  } else {
    // For non-topmost frames, skip the accumulator translation. For those
    // frames, the return value from the callee will become the accumulator.
    ++value_iterator;
  }
  CHECK_EQ(translated_frame->end(), value_iterator);
  CHECK_EQ(0u, frame_writer.top_offset());

  // Compute this frame's PC and state. The PC will be a special builtin that
  // continues the bytecode dispatch. Note that non-topmost and lazy-style
  // bailout handlers also advance the bytecode offset before dispatch, hence
  // simulating what normal handlers do upon completion of the operation.
  Builtins* builtins = isolate_->builtins();
  Code dispatch_builtin =
      (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
              !goto_catch_handler
          ? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
          : builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
  output_frame->SetPc(
      static_cast<intptr_t>(dispatch_builtin.InstructionStart()));

  // Update constant pool.
  if (FLAG_enable_embedded_constant_pool) {
    intptr_t constant_pool_value =
        static_cast<intptr_t>(dispatch_builtin.constant_pool());
    output_frame->SetConstantPool(constant_pool_value);
    if (is_topmost) {
      Register constant_pool_reg =
          InterpretedFrame::constant_pool_pointer_register();
      output_frame->SetRegister(constant_pool_reg.code(),
                                constant_pool_value);
    }
  }

  // Clear the context register. The context might be a de-materialized object
  // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
  // safety we use Smi(0) instead of the potential {arguments_marker} here.
  if (is_topmost) {
    intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr());
    Register context_reg = JavaScriptFrame::context_register();
    output_frame->SetRegister(context_reg.code(), context_value);
    // Set the continuation for the topmost frame.
    Code continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
    output_frame->SetContinuation(
        static_cast<intptr_t>(continuation.InstructionStart()));
  }
}

void Deoptimizer::DoComputeArgumentsAdaptorFrame(
    TranslatedFrame* translated_frame, int frame_index) {
  TranslatedFrame::iterator value_iterator = translated_frame->begin();
  const bool is_bottommost = (0 == frame_index);

  const int parameters_count = translated_frame->height();
  ArgumentsAdaptorFrameInfo frame_info =
      ArgumentsAdaptorFrameInfo::Precise(parameters_count);
  const uint32_t output_frame_size = frame_info.frame_size_in_bytes();

  TranslatedFrame::iterator function_iterator = value_iterator++;
  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(),
           " translating arguments adaptor => variable_frame_size=%d, "
           "frame_size=%d\n",
           frame_info.frame_size_in_bytes_without_fixed(), output_frame_size);
  }

  // Allocate and store the output frame description.
  FrameDescription* output_frame = new (output_frame_size)
      FrameDescription(output_frame_size, parameters_count);
  FrameWriter frame_writer(this, output_frame, trace_scope_);

  // Arguments adaptor can not be topmost.
  CHECK(frame_index < output_count_ - 1);
  CHECK_NULL(output_[frame_index]);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top
  // and this frame's size.
  const intptr_t top_address =
      is_bottommost ? caller_frame_top_ - output_frame_size
                    : output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  ReadOnlyRoots roots(isolate());
  if (ShouldPadArguments(parameters_count)) {
    frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
  }

  // Compute the incoming parameter translation.
  for (int i = 0; i < parameters_count; ++i, ++value_iterator) {
    frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
  }

  DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
            frame_writer.top_offset());

  // Read caller's PC from the previous frame.
  const intptr_t caller_pc =
      is_bottommost ? caller_pc_ : output_[frame_index - 1]->GetPc();
  frame_writer.PushCallerPc(caller_pc);

  // Read caller's FP from the previous frame, and set this frame's FP.
  const intptr_t caller_fp =
      is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp();
  frame_writer.PushCallerFp(caller_fp);

  intptr_t fp_value = top_address + frame_writer.top_offset();
  output_frame->SetFp(fp_value);

  if (FLAG_enable_embedded_constant_pool) {
    // Read the caller's constant pool from the previous frame.
    const intptr_t caller_cp =
        is_bottommost ? caller_constant_pool_
                      : output_[frame_index - 1]->GetConstantPool();
    frame_writer.PushCallerConstantPool(caller_cp);
  }

  // A marker value is used in place of the context.
  intptr_t marker = StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
  frame_writer.PushRawValue(marker, "context (adaptor sentinel)\n");

  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
  frame_writer.PushTranslatedValue(function_iterator, "function\n");

  // Number of incoming arguments.
  const uint32_t parameters_count_without_receiver = parameters_count - 1;
  frame_writer.PushRawObject(Smi::FromInt(parameters_count_without_receiver),
                             "argc\n");

  frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");

  CHECK_EQ(translated_frame->end(), value_iterator);
  DCHECK_EQ(0, frame_writer.top_offset());

  Builtins* builtins = isolate_->builtins();
  Code adaptor_trampoline =
      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
  intptr_t pc_value = static_cast<intptr_t>(
      adaptor_trampoline.InstructionStart() +
      isolate_->heap()->arguments_adaptor_deopt_pc_offset().value());
  output_frame->SetPc(pc_value);
  if (FLAG_enable_embedded_constant_pool) {
    intptr_t constant_pool_value =
        static_cast<intptr_t>(adaptor_trampoline.constant_pool());
    output_frame->SetConstantPool(constant_pool_value);
  }
}

void Deoptimizer::DoComputeConstructStubFrame(
    TranslatedFrame* translated_frame, int frame_index) {
  TranslatedFrame::iterator value_iterator = translated_frame->begin();
  const bool is_topmost = (output_count_ - 1 == frame_index);
  // The construct frame could become topmost only if we inlined a constructor
  // call which does a tail call (otherwise the tail callee's frame would be
  // the topmost one). So it could only be the DeoptimizeKind::kLazy case.
  CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy);

  Builtins* builtins = isolate_->builtins();
  Code construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
  BailoutId bailout_id = translated_frame->node_id();

  const int parameters_count = translated_frame->height();
  ConstructStubFrameInfo frame_info =
      ConstructStubFrameInfo::Precise(parameters_count, is_topmost);
  const uint32_t output_frame_size = frame_info.frame_size_in_bytes();

  TranslatedFrame::iterator function_iterator = value_iterator++;
  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(),
           " translating construct stub => bailout_id=%d (%s), "
           "variable_frame_size=%d, frame_size=%d\n",
           bailout_id.ToInt(),
           bailout_id == BailoutId::ConstructStubCreate() ? "create"
                                                          : "invoke",
           frame_info.frame_size_in_bytes_without_fixed(), output_frame_size);
  }

  // Allocate and store the output frame description.
  FrameDescription* output_frame = new (output_frame_size)
      FrameDescription(output_frame_size, parameters_count);
  FrameWriter frame_writer(this, output_frame, trace_scope_);

  // Construct stub can not be topmost.
  DCHECK(frame_index > 0 && frame_index < output_count_);
  DCHECK_NULL(output_[frame_index]);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top
  // and this frame's size.
  const intptr_t top_address =
      output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  ReadOnlyRoots roots(isolate());
  if (ShouldPadArguments(parameters_count)) {
    frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
  }

  // The allocated receiver of a construct stub frame is passed as the
  // receiver parameter through the translation. It might be encoding
  // a captured object, so we need to save it for later.
  TranslatedFrame::iterator receiver_iterator = value_iterator;

  // Compute the incoming parameter translation.
  for (int i = 0; i < parameters_count; ++i, ++value_iterator) {
    frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
  }

  DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
            frame_writer.top_offset());

  // Read caller's PC from the previous frame.
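  // There is no is_bottommost case here, unlike the interpreted and adaptor
  // frames: a construct stub frame always has an output frame below it (see
  // the DCHECK on frame_index above), so the caller's values always come
  // from output_[frame_index - 1].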
  const intptr_t caller_pc = output_[frame_index - 1]->GetPc();
  frame_writer.PushCallerPc(caller_pc);

  // Read caller's FP from the previous frame, and set this frame's FP.
  const intptr_t caller_fp = output_[frame_index - 1]->GetFp();
  frame_writer.PushCallerFp(caller_fp);

  const intptr_t fp_value = top_address + frame_writer.top_offset();
  output_frame->SetFp(fp_value);
  if (is_topmost) {
    Register fp_reg = JavaScriptFrame::fp_register();
    output_frame->SetRegister(fp_reg.code(), fp_value);
  }

  if (FLAG_enable_embedded_constant_pool) {
    // Read the caller's constant pool from the previous frame.
    const intptr_t caller_cp = output_[frame_index - 1]->GetConstantPool();
    frame_writer.PushCallerConstantPool(caller_cp);
  }

  // A marker value is used to mark the frame.
  intptr_t marker = StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
  frame_writer.PushRawValue(marker, "context (construct stub sentinel)\n");

  frame_writer.PushTranslatedValue(value_iterator++, "context");

  // Number of incoming arguments.
  const uint32_t parameters_count_without_receiver = parameters_count - 1;
  frame_writer.PushRawObject(Smi::FromInt(parameters_count_without_receiver),
                             "argc\n");

  // The constructor function was mentioned explicitly in the
  // CONSTRUCT_STUB_FRAME.
  frame_writer.PushTranslatedValue(function_iterator,
                                   "constructor function\n");

  // The deopt info contains the implicit receiver or the new target at the
  // position of the receiver. Copy it to the top of stack, with the hole
  // value as padding to maintain alignment.
  frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");

  CHECK(bailout_id == BailoutId::ConstructStubCreate() ||
        bailout_id == BailoutId::ConstructStubInvoke());
  const char* debug_hint = bailout_id == BailoutId::ConstructStubCreate()
                               ? "new target\n"
                               : "allocated receiver\n";
  frame_writer.PushTranslatedValue(receiver_iterator, debug_hint);

  if (is_topmost) {
    if (kPadArguments) {
      frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
    }
    // Ensure the result is restored back when we return to the stub.
    Register result_reg = kReturnRegister0;
    intptr_t result = input_->GetRegister(result_reg.code());
    frame_writer.PushRawValue(result, "subcall result\n");
  }

  CHECK_EQ(translated_frame->end(), value_iterator);
  CHECK_EQ(0u, frame_writer.top_offset());

  // Compute this frame's PC.
  DCHECK(bailout_id.IsValidForConstructStub());
  Address start = construct_stub.InstructionStart();
  const int pc_offset =
      bailout_id == BailoutId::ConstructStubCreate()
          ? isolate_->heap()->construct_stub_create_deopt_pc_offset().value()
          : isolate_->heap()->construct_stub_invoke_deopt_pc_offset().value();
  intptr_t pc_value = static_cast<intptr_t>(start + pc_offset);
  output_frame->SetPc(pc_value);

  // Update constant pool.
  if (FLAG_enable_embedded_constant_pool) {
    intptr_t constant_pool_value =
        static_cast<intptr_t>(construct_stub.constant_pool());
    output_frame->SetConstantPool(constant_pool_value);
    if (is_topmost) {
      Register constant_pool_reg =
          JavaScriptFrame::constant_pool_pointer_register();
      output_frame->SetRegister(constant_pool_reg.code(),
                                constant_pool_value);
    }
  }

  // Clear the context register. The context might be a de-materialized object
  // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
  // safety we use Smi(0) instead of the potential {arguments_marker} here.
  if (is_topmost) {
    intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr());
    Register context_reg = JavaScriptFrame::context_register();
    output_frame->SetRegister(context_reg.code(), context_value);
  }

  // Set the continuation for the topmost frame.
  if (is_topmost) {
    Builtins* builtins = isolate_->builtins();
    DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_);
    Code continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
    output_frame->SetContinuation(
        static_cast<intptr_t>(continuation.InstructionStart()));
  }
}

namespace {

bool BuiltinContinuationModeIsJavaScript(BuiltinContinuationMode mode) {
  switch (mode) {
    case BuiltinContinuationMode::STUB:
      return false;
    case BuiltinContinuationMode::JAVASCRIPT:
    case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
    case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
      return true;
  }
  UNREACHABLE();
}

StackFrame::Type BuiltinContinuationModeToFrameType(
    BuiltinContinuationMode mode) {
  switch (mode) {
    case BuiltinContinuationMode::STUB:
      return StackFrame::BUILTIN_CONTINUATION;
    case BuiltinContinuationMode::JAVASCRIPT:
      return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION;
    case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
      return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
    case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
      return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
  }
  UNREACHABLE();
}

}  // namespace

Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
    BuiltinContinuationMode mode, bool must_handle_result) {
  switch (mode) {
    case BuiltinContinuationMode::STUB:
      return must_handle_result
                 ? Builtins::kContinueToCodeStubBuiltinWithResult
                 : Builtins::kContinueToCodeStubBuiltin;
    case BuiltinContinuationMode::JAVASCRIPT:
    case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
    case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
      return must_handle_result
                 ? Builtins::kContinueToJavaScriptBuiltinWithResult
                 : Builtins::kContinueToJavaScriptBuiltin;
  }
  UNREACHABLE();
}

// BuiltinContinuationFrames capture the machine state that is expected as
// input to a builtin, including both input register values and stack
// parameters. When the frame is reactivated (i.e. the frame below it
// returns), a ContinueToBuiltin stub restores the register state from the
// frame and tail calls to the actual target builtin, making it appear that
// the stub had been directly called by the frame above it. The input values
// to populate the frame are taken from the deopt's FrameState.
//
// Frame translation happens in two modes, EAGER and LAZY. In EAGER mode, all
// of the parameters to the Builtin are explicitly specified in the TurboFan
// FrameState node. In LAZY mode, there is always one fewer parameter
// specified in the FrameState than expected by the Builtin. In that case,
// construction of BuiltinContinuationFrame adds the final missing parameter
// during deoptimization, and that parameter is always on the stack and
// contains the value returned from the callee of the call site triggering
// the LAZY deopt (e.g. rax on x64). This requires that continuation Builtins
// for LAZY deopts must have at least one stack parameter.
//
//                                  TO
//    |          ....           |
//    +-------------------------+
//    | arg padding (arch dept) |<- at most 1*kSystemPointerSize
//    +-------------------------+
//    |     builtin param 0     |<- FrameState input value n becomes
//    +-------------------------+
//    |           ...           |
//    +-------------------------+
//    |     builtin param m     |<- FrameState input value n+m-1, or in
//    +-----needs-alignment-----+   the LAZY case, return LAZY result value
//    | ContinueToBuiltin entry |
//    +-------------------------+
//  | |    saved frame (FP)     |
//  | +=====needs=alignment=====+<- fpreg
//  | |constant pool (if ool_cp)|
//  v +-------------------------+
//    |BUILTIN_CONTINUATION mark|
//    +-------------------------+
//    |  JSFunction (or zero)   |<- only if JavaScript builtin
//    +-------------------------+
//    |  frame height above FP  |
//    +-------------------------+
//    |         context         |<- this non-standard context slot contains
//    +-------------------------+   the context, even for non-JS builtins.
//    |      builtin index      |
//    +-------------------------+
//    | builtin input GPR reg0  |<- populated from deopt FrameState using
//    +-------------------------+   the builtin's CallInterfaceDescriptor
//    |          ...            |   to map a FrameState's 0..n-1 inputs to
//    +-------------------------+   the builtin's n input register params.
//    | builtin input GPR regn  |
//    +-------------------------+
//    | reg padding (arch dept) |
//    +-----needs--alignment----+
//    | res padding (arch dept) |<- only if {is_topmost}; result is pop'd by
//    +-------------------------+<- kNotifyDeopt ASM stub and moved to acc
//    |      result value       |<- reg, as ContinueToBuiltin stub expects.
//    +-----needs-alignment-----+<- spreg
//
void Deoptimizer::DoComputeBuiltinContinuation(
    TranslatedFrame* translated_frame, int frame_index,
    BuiltinContinuationMode mode) {
  TranslatedFrame::iterator value_iterator = translated_frame->begin();

  const BailoutId bailout_id = translated_frame->node_id();
  Builtins::Name builtin_name = Builtins::GetBuiltinFromBailoutId(bailout_id);
  CallInterfaceDescriptor continuation_descriptor =
      Builtins::CallInterfaceDescriptorFor(builtin_name);

  const RegisterConfiguration* config = RegisterConfiguration::Default();

  const bool is_bottommost = (0 == frame_index);
  const bool is_topmost = (output_count_ - 1 == frame_index);

  const int parameters_count = translated_frame->height();
  BuiltinContinuationFrameInfo frame_info =
      BuiltinContinuationFrameInfo::Precise(parameters_count,
                                            continuation_descriptor, config,
                                            is_topmost, deopt_kind_, mode);

  const unsigned output_frame_size = frame_info.frame_size_in_bytes();
  const unsigned output_frame_size_above_fp =
      frame_info.frame_size_in_bytes_above_fp();

  // Validate types of parameters. They must all be tagged except for argc
  // for JS builtins.
  bool has_argc = false;
  const int register_parameter_count =
      continuation_descriptor.GetRegisterParameterCount();
  for (int i = 0; i < register_parameter_count; ++i) {
    MachineType type = continuation_descriptor.GetParameterType(i);
    int code = continuation_descriptor.GetRegisterParameter(i).code();
    // Only tagged and int32 arguments are supported, and int32 only for the
    // arguments count on JavaScript builtins.
    if (type == MachineType::Int32()) {
      CHECK_EQ(code, kJavaScriptCallArgCountRegister.code());
      has_argc = true;
    } else {
      // Any other argument must be a tagged value.
      CHECK(IsAnyTagged(type.representation()));
    }
  }
  CHECK_EQ(BuiltinContinuationModeIsJavaScript(mode), has_argc);

  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(),
           " translating BuiltinContinuation to %s,"
           " => register_param_count=%d,"
           " stack_param_count=%d, frame_size=%d\n",
           Builtins::name(builtin_name), register_parameter_count,
           frame_info.stack_parameter_count(), output_frame_size);
  }

  FrameDescription* output_frame = new (output_frame_size)
      FrameDescription(output_frame_size, frame_info.stack_parameter_count());
  output_[frame_index] = output_frame;
  FrameWriter frame_writer(this, output_frame, trace_scope_);

  // The top address of the frame is computed from the previous frame's top
  // and this frame's size.
  const intptr_t top_address =
      is_bottommost ? caller_frame_top_ - output_frame_size
                    : output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  // Get the possible JSFunction for the case that this is a
  // JavaScriptBuiltinContinuationFrame, which needs the JSFunction pointer
  // like a normal JavaScriptFrame.
  const intptr_t maybe_function = value_iterator->GetRawValue().ptr();
  ++value_iterator;

  ReadOnlyRoots roots(isolate());
  if (ShouldPadArguments(frame_info.stack_parameter_count())) {
    frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
  }

  for (uint32_t i = 0; i < frame_info.translated_stack_parameter_count();
       ++i, ++value_iterator) {
    frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
  }

  switch (mode) {
    case BuiltinContinuationMode::STUB:
      break;
    case BuiltinContinuationMode::JAVASCRIPT:
      break;
    case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: {
      frame_writer.PushRawObject(roots.the_hole_value(),
                                 "placeholder for exception on lazy deopt\n");
    } break;
    case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: {
      intptr_t accumulator_value =
          input_->GetRegister(kInterpreterAccumulatorRegister.code());
      frame_writer.PushRawObject(Object(accumulator_value),
                                 "exception (from accumulator)\n");
    } break;
  }

  if (frame_info.frame_has_result_stack_slot()) {
    frame_writer.PushRawObject(
        roots.the_hole_value(),
        "placeholder for return result on lazy deopt\n");
  }

  DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
            frame_writer.top_offset());

  std::vector<TranslatedFrame::iterator> register_values;
  int total_registers = config->num_general_registers();
  register_values.resize(total_registers, {value_iterator});

  for (int i = 0; i < register_parameter_count; ++i, ++value_iterator) {
    int code = continuation_descriptor.GetRegisterParameter(i).code();
    register_values[code] = value_iterator;
  }

  // The context register is always implicit in the CallInterfaceDescriptor
  // but its register must be explicitly set when continuing to the builtin.
  // Make sure that it's harvested from the translation and copied into the
  // register set (it was automatically added at the end of the FrameState by
  // the instruction selector).
  Object context = value_iterator->GetRawValue();
  const intptr_t value = context.ptr();
  TranslatedFrame::iterator context_register_value = value_iterator++;
  register_values[kContextRegister.code()] = context_register_value;
  output_frame->SetContext(value);
  output_frame->SetRegister(kContextRegister.code(), value);

  // Set caller's PC (JSFunction continuation).
  const intptr_t caller_pc =
      is_bottommost ? caller_pc_ : output_[frame_index - 1]->GetPc();
  frame_writer.PushCallerPc(caller_pc);

  // Read caller's FP from the previous frame, and set this frame's FP.
  const intptr_t caller_fp = is_bottommost ?
          caller_fp_ : output_[frame_index - 1]->GetFp();
  frame_writer.PushCallerFp(caller_fp);

  const intptr_t fp_value = top_address + frame_writer.top_offset();
  output_frame->SetFp(fp_value);

  DCHECK_EQ(output_frame_size_above_fp, frame_writer.top_offset());

  if (FLAG_enable_embedded_constant_pool) {
    // Read the caller's constant pool from the previous frame.
    const intptr_t caller_cp =
        is_bottommost ? caller_constant_pool_
                      : output_[frame_index - 1]->GetConstantPool();
    frame_writer.PushCallerConstantPool(caller_cp);
  }

  // A marker value is used in place of the context.
  const intptr_t marker =
      StackFrame::TypeToMarker(BuiltinContinuationModeToFrameType(mode));
  frame_writer.PushRawValue(marker,
                            "context (builtin continuation sentinel)\n");

  if (BuiltinContinuationModeIsJavaScript(mode)) {
    frame_writer.PushRawValue(maybe_function, "JSFunction\n");
  } else {
    frame_writer.PushRawValue(0, "unused\n");
  }

  // The delta from the SP to the FP; used to reconstruct SP in
  // Isolate::UnwindAndFindHandler.
  frame_writer.PushRawObject(Smi::FromInt(output_frame_size_above_fp),
                             "frame height at deoptimization\n");

  // The context, even if this is a stub continuation frame. We can't use the
  // usual context slot, because we must store the frame marker there.
  frame_writer.PushTranslatedValue(context_register_value,
                                   "builtin JavaScript context\n");

  // The builtin to continue to.
  frame_writer.PushRawObject(Smi::FromInt(builtin_name), "builtin index\n");

  const int allocatable_register_count =
      config->num_allocatable_general_registers();
  for (int i = 0; i < allocatable_register_count; ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    ScopedVector<char> str(128);
    if (trace_scope_ != nullptr) {
      if (BuiltinContinuationModeIsJavaScript(mode) &&
          code == kJavaScriptCallArgCountRegister.code()) {
        SNPrintF(
            str,
            "tagged argument count %s (will be untagged by continuation)\n",
            RegisterName(Register::from_code(code)));
      } else {
        SNPrintF(str, "builtin register argument %s\n",
                 RegisterName(Register::from_code(code)));
      }
    }
    frame_writer.PushTranslatedValue(
        register_values[code], trace_scope_ != nullptr ? str.begin() : "");
  }

  // Some architectures must pad the stack frame with extra stack slots
  // to ensure the stack frame is aligned.
  const int padding_slot_count =
      BuiltinContinuationFrameConstants::PaddingSlotCount(
          allocatable_register_count);
  for (int i = 0; i < padding_slot_count; ++i) {
    frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
  }

  if (is_topmost) {
    if (kPadArguments) {
      frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
    }

    // Ensure the result is restored when we return to the stub.
    if (frame_info.frame_has_result_stack_slot()) {
      Register result_reg = kReturnRegister0;
      frame_writer.PushRawValue(input_->GetRegister(result_reg.code()),
                                "callback result\n");
    } else {
      frame_writer.PushRawObject(roots.undefined_value(), "callback result\n");
    }
  }

  CHECK_EQ(translated_frame->end(), value_iterator);
  CHECK_EQ(0u, frame_writer.top_offset());

  // Clear the context register. The context might be a de-materialized object
  // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
  // safety we use Smi(0) instead of the potential {arguments_marker} here.
  if (is_topmost) {
    intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr());
    Register context_reg = JavaScriptFrame::context_register();
    output_frame->SetRegister(context_reg.code(), context_value);
  }

  // Ensure the frame pointer register points to the callee's frame. The
  // builtin will build its own frame once we continue to it.
  Register fp_reg = JavaScriptFrame::fp_register();
  output_frame->SetRegister(fp_reg.code(), fp_value);

  Code continue_to_builtin =
      isolate()->builtins()->builtin(TrampolineForBuiltinContinuation(
          mode, frame_info.frame_has_result_stack_slot()));
  output_frame->SetPc(
      static_cast<intptr_t>(continue_to_builtin.InstructionStart()));

  Code continuation =
      isolate()->builtins()->builtin(Builtins::kNotifyDeoptimized);
  output_frame->SetContinuation(
      static_cast<intptr_t>(continuation.InstructionStart()));
}

void Deoptimizer::MaterializeHeapObjects() {
  translated_state_.Prepare(static_cast<Address>(stack_fp_));
  if (FLAG_deopt_every_n_times > 0) {
    // Doing a GC here will find problems with the deoptimized frames.
    isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
                                        GarbageCollectionReason::kTesting);
  }

  for (auto& materialization : values_to_materialize_) {
    Handle<Object> value = materialization.value_->GetValue();

    if (trace_scope_ != nullptr) {
      PrintF(trace_scope_->file(),
             "Materialization [" V8PRIxPTR_FMT "] <- " V8PRIxPTR_FMT " ; ",
             static_cast<intptr_t>(materialization.output_slot_address_),
             value->ptr());
      value->ShortPrint(trace_scope_->file());
      PrintF(trace_scope_->file(), "\n");
    }

    *(reinterpret_cast<Address*>(materialization.output_slot_address_)) =
        value->ptr();
  }

  translated_state_.VerifyMaterializedObjects();

  bool feedback_updated = translated_state_.DoUpdateFeedback();
  if (trace_scope_ != nullptr && feedback_updated) {
    PrintF(trace_scope_->file(), "Feedback updated");
    compiled_code_.PrintDeoptLocation(trace_scope_->file(),
                                      " from deoptimization at ", from_);
  }

  isolate_->materialized_object_store()->Remove(
      static_cast<Address>(stack_fp_));
}

void Deoptimizer::QueueValueForMaterialization(
    Address output_address, Object obj,
    const TranslatedFrame::iterator& iterator) {
  if (obj == ReadOnlyRoots(isolate_).arguments_marker()) {
    values_to_materialize_.push_back({output_address, iterator});
  }
}

unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const {
  unsigned fixed_size = CommonFrameConstants::kFixedFrameSizeAboveFp;
  // TODO(jkummerow): If {function_->IsSmi()} can indeed be true, then
  // {function_} should not have type {JSFunction}.
  if (!function_.IsSmi()) {
    fixed_size += ComputeIncomingArgumentSize(function_.shared());
  }
  return fixed_size;
}

unsigned Deoptimizer::ComputeInputFrameSize() const {
  // The fp-to-sp delta already takes the context, constant pool pointer and
  // the function into account, so we have to avoid double-counting them.
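  // Illustrative arithmetic (a sketch; the figures are made up rather than
  // taken from a real build): for a function with two formal parameters on a
  // 64-bit target, ComputeIncomingArgumentSize() contributes
  // (2 + 1 /* receiver */) * kSystemPointerSize, plus one extra slot where
  // ShouldPadArguments() holds, and the frame size computed below adds
  // fp_to_sp_delta_ on top of that fixed portion above the fp.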
unsigned fixed_size_above_fp = ComputeInputFrameAboveFpFixedSize(); unsigned result = fixed_size_above_fp + fp_to_sp_delta_; if (compiled_code_.kind() == Code::OPTIMIZED_FUNCTION) { unsigned stack_slots = compiled_code_.stack_slots(); unsigned outgoing_size = 0; // ComputeOutgoingArgumentSize(compiled_code_, bailout_id_); CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) - CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size, result); } return result; } // static unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) { int parameter_slots = InternalFormalParameterCountWithReceiver(shared); if (ShouldPadArguments(parameter_slots)) parameter_slots++; return parameter_slots * kSystemPointerSize; } void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate, DeoptimizeKind kind) { CHECK(kind == DeoptimizeKind::kEager || kind == DeoptimizeKind::kSoft || kind == DeoptimizeKind::kLazy); DeoptimizerData* data = isolate->deoptimizer_data(); if (!data->deopt_entry_code(kind).is_null()) return; MacroAssembler masm(isolate, CodeObjectRequired::kYes, NewAssemblerBuffer(16 * KB)); masm.set_emit_debug_code(false); GenerateDeoptimizationEntries(&masm, masm.isolate(), kind); CodeDesc desc; masm.GetCode(isolate, &desc); DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc)); // Allocate the code as immovable since the entry addresses will be used // directly and there is no support for relocating them. Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).set_immovable().Build(); CHECK(isolate->heap()->IsImmovable(*code)); CHECK(data->deopt_entry_code(kind).is_null()); data->set_deopt_entry_code(kind, *code); } void Deoptimizer::EnsureCodeForDeoptimizationEntries(Isolate* isolate) { EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kEager); EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kLazy); EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kSoft); } FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count) : frame_size_(frame_size), parameter_count_(parameter_count), top_(kZapUint32), pc_(kZapUint32), fp_(kZapUint32), context_(kZapUint32), constant_pool_(kZapUint32) { // Zap all the registers. for (int r = 0; r < Register::kNumRegisters; r++) { // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register // isn't used before the next safepoint, the GC will try to scan it as a // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't. #if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64) // x18 is reserved as platform register on Windows arm64 platform const int kPlatformRegister = 18; if (r != kPlatformRegister) { SetRegister(r, kZapUint32); } #else SetRegister(r, kZapUint32); #endif } // Zap all the slots. for (unsigned o = 0; o < frame_size; o += kSystemPointerSize) { SetFrameSlot(o, kZapUint32); } } void TranslationBuffer::Add(int32_t value) { // This wouldn't handle kMinInt correctly if it ever encountered it. DCHECK_NE(value, kMinInt); // Encode the sign bit in the least significant bit. bool is_negative = (value < 0); uint32_t bits = (static_cast<uint32_t>(is_negative ? -value : value) << 1) | static_cast<uint32_t>(is_negative); // Encode the individual bytes using the least significant bit of // each byte to indicate whether or not more bytes follow. 
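  // Worked example of this encoding: Add(-3) maps -3 to
  // bits = (3 << 1) | 1 = 7 and emits the single byte
  // ((7 << 1) & 0xFF) | 0 = 0x0E, because 7 >> 7 == 0 means nothing follows.
  // Add(200) maps to bits = 400 and emits two bytes, 0x21 (payload 0x10 in
  // bits 7..1, continuation bit set) followed by 0x06 (payload 0x03,
  // continuation bit clear); the decoder reassembles
  // 0x10 | (0x03 << 7) == 400 and strips the sign bit to recover 200.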
do { uint32_t next = bits >> 7; contents_.push_back(((bits << 1) & 0xFF) | (next != 0)); bits = next; } while (bits != 0); } TranslationIterator::TranslationIterator(ByteArray buffer, int index) : buffer_(buffer), index_(index) { DCHECK(index >= 0 && index < buffer.length()); } int32_t TranslationIterator::Next() { // Run through the bytes until we reach one with a least significant // bit of zero (marks the end). uint32_t bits = 0; for (int i = 0; true; i += 7) { DCHECK(HasNext()); uint8_t next = buffer_.get(index_++); bits |= (next >> 1) << i; if ((next & 1) == 0) break; } // The bits encode the sign in the least significant bit. bool is_negative = (bits & 1) == 1; int32_t result = bits >> 1; return is_negative ? -result : result; } bool TranslationIterator::HasNext() const { return index_ < buffer_.length(); } Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) { Handle<ByteArray> result = factory->NewByteArray(CurrentIndex(), AllocationType::kOld); contents_.CopyTo(result->GetDataStartAddress()); return result; } void Translation::BeginBuiltinContinuationFrame(BailoutId bailout_id, int literal_id, unsigned height) { buffer_->Add(BUILTIN_CONTINUATION_FRAME); buffer_->Add(bailout_id.ToInt()); buffer_->Add(literal_id); buffer_->Add(height); } void Translation::BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id, int literal_id, unsigned height) { buffer_->Add(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME); buffer_->Add(bailout_id.ToInt()); buffer_->Add(literal_id); buffer_->Add(height); } void Translation::BeginJavaScriptBuiltinContinuationWithCatchFrame( BailoutId bailout_id, int literal_id, unsigned height) { buffer_->Add(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME); buffer_->Add(bailout_id.ToInt()); buffer_->Add(literal_id); buffer_->Add(height); } void Translation::BeginConstructStubFrame(BailoutId bailout_id, int literal_id, unsigned height) { buffer_->Add(CONSTRUCT_STUB_FRAME); buffer_->Add(bailout_id.ToInt()); buffer_->Add(literal_id); buffer_->Add(height); } void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) { buffer_->Add(ARGUMENTS_ADAPTOR_FRAME); buffer_->Add(literal_id); buffer_->Add(height); } void Translation::BeginInterpretedFrame(BailoutId bytecode_offset, int literal_id, unsigned height, int return_value_offset, int return_value_count) { buffer_->Add(INTERPRETED_FRAME); buffer_->Add(bytecode_offset.ToInt()); buffer_->Add(literal_id); buffer_->Add(height); buffer_->Add(return_value_offset); buffer_->Add(return_value_count); } void Translation::ArgumentsElements(CreateArgumentsType type) { buffer_->Add(ARGUMENTS_ELEMENTS); buffer_->Add(static_cast<uint8_t>(type)); } void Translation::ArgumentsLength(CreateArgumentsType type) { buffer_->Add(ARGUMENTS_LENGTH); buffer_->Add(static_cast<uint8_t>(type)); } void Translation::BeginCapturedObject(int length) { buffer_->Add(CAPTURED_OBJECT); buffer_->Add(length); } void Translation::DuplicateObject(int object_index) { buffer_->Add(DUPLICATED_OBJECT); buffer_->Add(object_index); } void Translation::StoreRegister(Register reg) { buffer_->Add(REGISTER); buffer_->Add(reg.code()); } void Translation::StoreInt32Register(Register reg) { buffer_->Add(INT32_REGISTER); buffer_->Add(reg.code()); } void Translation::StoreInt64Register(Register reg) { buffer_->Add(INT64_REGISTER); buffer_->Add(reg.code()); } void Translation::StoreUint32Register(Register reg) { buffer_->Add(UINT32_REGISTER); buffer_->Add(reg.code()); } void Translation::StoreBoolRegister(Register reg) { 
buffer_->Add(BOOL_REGISTER); buffer_->Add(reg.code()); } void Translation::StoreFloatRegister(FloatRegister reg) { buffer_->Add(FLOAT_REGISTER); buffer_->Add(reg.code()); } void Translation::StoreDoubleRegister(DoubleRegister reg) { buffer_->Add(DOUBLE_REGISTER); buffer_->Add(reg.code()); } void Translation::StoreStackSlot(int index) { buffer_->Add(STACK_SLOT); buffer_->Add(index); } void Translation::StoreInt32StackSlot(int index) { buffer_->Add(INT32_STACK_SLOT); buffer_->Add(index); } void Translation::StoreInt64StackSlot(int index) { buffer_->Add(INT64_STACK_SLOT); buffer_->Add(index); } void Translation::StoreUint32StackSlot(int index) { buffer_->Add(UINT32_STACK_SLOT); buffer_->Add(index); } void Translation::StoreBoolStackSlot(int index) { buffer_->Add(BOOL_STACK_SLOT); buffer_->Add(index); } void Translation::StoreFloatStackSlot(int index) { buffer_->Add(FLOAT_STACK_SLOT); buffer_->Add(index); } void Translation::StoreDoubleStackSlot(int index) { buffer_->Add(DOUBLE_STACK_SLOT); buffer_->Add(index); } void Translation::StoreLiteral(int literal_id) { buffer_->Add(LITERAL); buffer_->Add(literal_id); } void Translation::AddUpdateFeedback(int vector_literal, int slot) { buffer_->Add(UPDATE_FEEDBACK); buffer_->Add(vector_literal); buffer_->Add(slot); } void Translation::StoreJSFrameFunction() { StoreStackSlot((StandardFrameConstants::kCallerPCOffset - StandardFrameConstants::kFunctionOffset) / kSystemPointerSize); } int Translation::NumberOfOperandsFor(Opcode opcode) { switch (opcode) { case DUPLICATED_OBJECT: case ARGUMENTS_ELEMENTS: case ARGUMENTS_LENGTH: case CAPTURED_OBJECT: case REGISTER: case INT32_REGISTER: case INT64_REGISTER: case UINT32_REGISTER: case BOOL_REGISTER: case FLOAT_REGISTER: case DOUBLE_REGISTER: case STACK_SLOT: case INT32_STACK_SLOT: case INT64_STACK_SLOT: case UINT32_STACK_SLOT: case BOOL_STACK_SLOT: case FLOAT_STACK_SLOT: case DOUBLE_STACK_SLOT: case LITERAL: return 1; case ARGUMENTS_ADAPTOR_FRAME: case UPDATE_FEEDBACK: return 2; case BEGIN: case CONSTRUCT_STUB_FRAME: case BUILTIN_CONTINUATION_FRAME: case JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: case JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: return 3; case INTERPRETED_FRAME: return 5; } FATAL("Unexpected translation type"); return -1; } #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER) const char* Translation::StringFor(Opcode opcode) { #define TRANSLATION_OPCODE_CASE(item) \ case item: \ return #item; switch (opcode) { TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE) } #undef TRANSLATION_OPCODE_CASE UNREACHABLE(); } #endif Handle<FixedArray> MaterializedObjectStore::Get(Address fp) { int index = StackIdToIndex(fp); if (index == -1) { return Handle<FixedArray>::null(); } Handle<FixedArray> array = GetStackEntries(); CHECK_GT(array->length(), index); return Handle<FixedArray>::cast(Handle<Object>(array->get(index), isolate())); } void MaterializedObjectStore::Set(Address fp, Handle<FixedArray> materialized_objects) { int index = StackIdToIndex(fp); if (index == -1) { index = static_cast<int>(frame_fps_.size()); frame_fps_.push_back(fp); } Handle<FixedArray> array = EnsureStackEntries(index + 1); array->set(index, *materialized_objects); } bool MaterializedObjectStore::Remove(Address fp) { auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp); if (it == frame_fps_.end()) return false; int index = static_cast<int>(std::distance(frame_fps_.begin(), it)); frame_fps_.erase(it); FixedArray array = isolate()->heap()->materialized_objects(); CHECK_LT(index, array.length()); int fps_size = 
static_cast<int>(frame_fps_.size()); for (int i = index; i < fps_size; i++) { array.set(i, array.get(i + 1)); } array.set(fps_size, ReadOnlyRoots(isolate()).undefined_value()); return true; } int MaterializedObjectStore::StackIdToIndex(Address fp) { auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp); return it == frame_fps_.end() ? -1 : static_cast<int>(std::distance(frame_fps_.begin(), it)); } Handle<FixedArray> MaterializedObjectStore::GetStackEntries() { return Handle<FixedArray>(isolate()->heap()->materialized_objects(), isolate()); } Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) { Handle<FixedArray> array = GetStackEntries(); if (array->length() >= length) { return array; } int new_length = length > 10 ? length : 10; if (new_length < 2 * array->length()) { new_length = 2 * array->length(); } Handle<FixedArray> new_array = isolate()->factory()->NewFixedArray(new_length, AllocationType::kOld); for (int i = 0; i < array->length(); i++) { new_array->set(i, array->get(i)); } HeapObject undefined_value = ReadOnlyRoots(isolate()).undefined_value(); for (int i = array->length(); i < length; i++) { new_array->set(i, undefined_value); } isolate()->heap()->SetRootMaterializedObjects(*new_array); return new_array; } namespace { Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it, Isolate* isolate) { if (it->GetRawValue() == ReadOnlyRoots(isolate).arguments_marker()) { if (!it->IsMaterializableByDebugger()) { return isolate->factory()->optimized_out(); } } return it->GetValue(); } } // namespace DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state, TranslatedState::iterator frame_it, Isolate* isolate) { int parameter_count = frame_it->shared_info()->internal_formal_parameter_count(); TranslatedFrame::iterator stack_it = frame_it->begin(); // Get the function. Note that this might materialize the function. // In case the debugger mutates this value, we should deoptimize // the function and remember the value in the materialized value store. function_ = Handle<JSFunction>::cast(stack_it->GetValue()); stack_it++; // Skip the function. stack_it++; // Skip the receiver. DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind()); source_position_ = Deoptimizer::ComputeSourcePositionFromBytecodeArray( *frame_it->shared_info(), frame_it->node_id()); DCHECK_EQ(parameter_count, function_->shared().internal_formal_parameter_count()); parameters_.resize(static_cast<size_t>(parameter_count)); for (int i = 0; i < parameter_count; i++) { Handle<Object> parameter = GetValueForDebugger(stack_it, isolate); SetParameter(i, parameter); stack_it++; } // Get the context. context_ = GetValueForDebugger(stack_it, isolate); stack_it++; // Get the expression stack. DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind()); const int stack_height = frame_it->height(); // Accumulator *not* included. expression_stack_.resize(static_cast<size_t>(stack_height)); for (int i = 0; i < stack_height; i++) { Handle<Object> expression = GetValueForDebugger(stack_it, isolate); SetExpression(i, expression); stack_it++; } DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind()); stack_it++; // Skip the accumulator. 
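  // By this point the iterator must have consumed the frame's values in
  // exactly the order the translation lays them out: function, receiver,
  // parameters, context, expression stack, and finally the accumulator.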
CHECK(stack_it == frame_it->end()); } Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) { CHECK(code.InstructionStart() <= pc && pc <= code.InstructionEnd()); SourcePosition last_position = SourcePosition::Unknown(); DeoptimizeReason last_reason = DeoptimizeReason::kUnknown; int last_deopt_id = kNoDeoptimizationId; int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) | RelocInfo::ModeMask(RelocInfo::DEOPT_ID) | RelocInfo::ModeMask(RelocInfo::DEOPT_SCRIPT_OFFSET) | RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID); for (RelocIterator it(code, mask); !it.done(); it.next()) { RelocInfo* info = it.rinfo(); if (info->pc() >= pc) break; if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) { int script_offset = static_cast<int>(info->data()); it.next(); DCHECK(it.rinfo()->rmode() == RelocInfo::DEOPT_INLINING_ID); int inlining_id = static_cast<int>(it.rinfo()->data()); last_position = SourcePosition(script_offset, inlining_id); } else if (info->rmode() == RelocInfo::DEOPT_ID) { last_deopt_id = static_cast<int>(info->data()); } else if (info->rmode() == RelocInfo::DEOPT_REASON) { last_reason = static_cast<DeoptimizeReason>(info->data()); } } return DeoptInfo(last_position, last_reason, last_deopt_id); } // static int Deoptimizer::ComputeSourcePositionFromBytecodeArray( SharedFunctionInfo shared, BailoutId node_id) { DCHECK(shared.HasBytecodeArray()); return AbstractCode::cast(shared.GetBytecodeArray()) .SourcePosition(node_id.ToInt()); } // static TranslatedValue TranslatedValue::NewDeferredObject(TranslatedState* container, int length, int object_index) { TranslatedValue slot(container, kCapturedObject); slot.materialization_info_ = {object_index, length}; return slot; } // static TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container, int id) { TranslatedValue slot(container, kDuplicatedObject); slot.materialization_info_ = {id, -1}; return slot; } // static TranslatedValue TranslatedValue::NewFloat(TranslatedState* container, Float32 value) { TranslatedValue slot(container, kFloat); slot.float_value_ = value; return slot; } // static TranslatedValue TranslatedValue::NewDouble(TranslatedState* container, Float64 value) { TranslatedValue slot(container, kDouble); slot.double_value_ = value; return slot; } // static TranslatedValue TranslatedValue::NewInt32(TranslatedState* container, int32_t value) { TranslatedValue slot(container, kInt32); slot.int32_value_ = value; return slot; } // static TranslatedValue TranslatedValue::NewInt64(TranslatedState* container, int64_t value) { TranslatedValue slot(container, kInt64); slot.int64_value_ = value; return slot; } // static TranslatedValue TranslatedValue::NewUInt32(TranslatedState* container, uint32_t value) { TranslatedValue slot(container, kUInt32); slot.uint32_value_ = value; return slot; } // static TranslatedValue TranslatedValue::NewBool(TranslatedState* container, uint32_t value) { TranslatedValue slot(container, kBoolBit); slot.uint32_value_ = value; return slot; } // static TranslatedValue TranslatedValue::NewTagged(TranslatedState* container, Object literal) { TranslatedValue slot(container, kTagged); slot.raw_literal_ = literal; return slot; } // static TranslatedValue TranslatedValue::NewInvalid(TranslatedState* container) { return TranslatedValue(container, kInvalid); } Isolate* TranslatedValue::isolate() const { return container_->isolate(); } Object TranslatedValue::raw_literal() const { DCHECK_EQ(kTagged, kind()); return raw_literal_; } int32_t TranslatedValue::int32_value() const { 
DCHECK_EQ(kInt32, kind()); return int32_value_; } int64_t TranslatedValue::int64_value() const { DCHECK_EQ(kInt64, kind()); return int64_value_; } uint32_t TranslatedValue::uint32_value() const { DCHECK(kind() == kUInt32 || kind() == kBoolBit); return uint32_value_; } Float32 TranslatedValue::float_value() const { DCHECK_EQ(kFloat, kind()); return float_value_; } Float64 TranslatedValue::double_value() const { DCHECK_EQ(kDouble, kind()); return double_value_; } int TranslatedValue::object_length() const { DCHECK_EQ(kind(), kCapturedObject); return materialization_info_.length_; } int TranslatedValue::object_index() const { DCHECK(kind() == kCapturedObject || kind() == kDuplicatedObject); return materialization_info_.id_; } Object TranslatedValue::GetRawValue() const { // If we have a value, return it. if (materialization_state() == kFinished) { return *storage_; } // Otherwise, do a best effort to get the value without allocation. switch (kind()) { case kTagged: return raw_literal(); case kInt32: { bool is_smi = Smi::IsValid(int32_value()); if (is_smi) { return Smi::FromInt(int32_value()); } break; } case kInt64: { bool is_smi = (int64_value() >= static_cast<int64_t>(Smi::kMinValue) && int64_value() <= static_cast<int64_t>(Smi::kMaxValue)); if (is_smi) { return Smi::FromIntptr(static_cast<intptr_t>(int64_value())); } break; } case kUInt32: { bool is_smi = (uint32_value() <= static_cast<uintptr_t>(Smi::kMaxValue)); if (is_smi) { return Smi::FromInt(static_cast<int32_t>(uint32_value())); } break; } case kBoolBit: { if (uint32_value() == 0) { return ReadOnlyRoots(isolate()).false_value(); } else { CHECK_EQ(1U, uint32_value()); return ReadOnlyRoots(isolate()).true_value(); } } default: break; } // If we could not get the value without allocation, return the arguments // marker. return ReadOnlyRoots(isolate()).arguments_marker(); } void TranslatedValue::set_initialized_storage(Handle<Object> storage) { DCHECK_EQ(kUninitialized, materialization_state()); storage_ = storage; materialization_state_ = kFinished; } Handle<Object> TranslatedValue::GetValue() { // If we already have a value, then get it. if (materialization_state() == kFinished) return storage_; // Otherwise we have to materialize. switch (kind()) { case TranslatedValue::kTagged: case TranslatedValue::kInt32: case TranslatedValue::kInt64: case TranslatedValue::kUInt32: case TranslatedValue::kBoolBit: case TranslatedValue::kFloat: case TranslatedValue::kDouble: { MaterializeSimple(); return storage_; } case TranslatedValue::kCapturedObject: case TranslatedValue::kDuplicatedObject: { // We need to materialize the object (or possibly even object graphs). // To make the object verifier happy, we materialize in two steps. // 1. Allocate storage for reachable objects. This makes sure that for // each object we have allocated space on heap. The space will be // a byte array that will be later initialized, or a fully // initialized object if it is safe to allocate one that will // pass the verifier. container_->EnsureObjectAllocatedAt(this); // 2. Initialize the objects. If we have allocated only byte arrays // for some objects, we now overwrite the byte arrays with the // correct object fields. Note that this phase does not allocate // any new objects, so it does not trigger the object verifier. 
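      //    For example (a sketch): two captured objects that reference each
      //    other both receive backing storage in step 1, so that step 2 can
      //    patch their mutual fields without allocating, and hence without a
      //    GC ever observing a half-initialized object graph.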
return container_->InitializeObjectAt(this); } case TranslatedValue::kInvalid: FATAL("unexpected case"); return Handle<Object>::null(); } FATAL("internal error: value missing"); return Handle<Object>::null(); } void TranslatedValue::MaterializeSimple() { // If we already have materialized, return. if (materialization_state() == kFinished) return; Object raw_value = GetRawValue(); if (raw_value != ReadOnlyRoots(isolate()).arguments_marker()) { // We can get the value without allocation, just return it here. set_initialized_storage(Handle<Object>(raw_value, isolate())); return; } switch (kind()) { case kInt32: set_initialized_storage( Handle<Object>(isolate()->factory()->NewNumber(int32_value()))); return; case kInt64: set_initialized_storage(Handle<Object>( isolate()->factory()->NewNumber(static_cast<double>(int64_value())))); return; case kUInt32: set_initialized_storage( Handle<Object>(isolate()->factory()->NewNumber(uint32_value()))); return; case kFloat: { double scalar_value = float_value().get_scalar(); set_initialized_storage( Handle<Object>(isolate()->factory()->NewNumber(scalar_value))); return; } case kDouble: { double scalar_value = double_value().get_scalar(); set_initialized_storage( Handle<Object>(isolate()->factory()->NewNumber(scalar_value))); return; } case kCapturedObject: case kDuplicatedObject: case kInvalid: case kTagged: case kBoolBit: FATAL("internal error: unexpected materialization."); break; } } bool TranslatedValue::IsMaterializedObject() const { switch (kind()) { case kCapturedObject: case kDuplicatedObject: return true; default: return false; } } bool TranslatedValue::IsMaterializableByDebugger() const { // At the moment, we only allow materialization of doubles. return (kind() == kDouble); } int TranslatedValue::GetChildrenCount() const { if (kind() == kCapturedObject) { return object_length(); } else { return 0; } } uint64_t TranslatedState::GetUInt64Slot(Address fp, int slot_offset) { #if V8_TARGET_ARCH_32_BIT return ReadUnalignedValue<uint64_t>(fp + slot_offset); #else return Memory<uint64_t>(fp + slot_offset); #endif } uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) { Address address = fp + slot_offset; #if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT return Memory<uint32_t>(address + kIntSize); #else return Memory<uint32_t>(address); #endif } Float32 TranslatedState::GetFloatSlot(Address fp, int slot_offset) { #if !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 return Float32::FromBits(GetUInt32Slot(fp, slot_offset)); #else return Float32::FromBits(Memory<uint32_t>(fp + slot_offset)); #endif } Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) { return Float64::FromBits(GetUInt64Slot(fp, slot_offset)); } void TranslatedValue::Handlify() { if (kind() == kTagged) { set_initialized_storage(Handle<Object>(raw_literal(), isolate())); raw_literal_ = Object(); } } TranslatedFrame TranslatedFrame::InterpretedFrame( BailoutId bytecode_offset, SharedFunctionInfo shared_info, int height, int return_value_offset, int return_value_count) { TranslatedFrame frame(kInterpretedFunction, shared_info, height, return_value_offset, return_value_count); frame.node_id_ = bytecode_offset; return frame; } TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame( SharedFunctionInfo shared_info, int height) { return TranslatedFrame(kArgumentsAdaptor, shared_info, height); } TranslatedFrame TranslatedFrame::ConstructStubFrame( BailoutId bailout_id, SharedFunctionInfo shared_info, int height) { TranslatedFrame frame(kConstructStub, shared_info, 
height); frame.node_id_ = bailout_id; return frame; } TranslatedFrame TranslatedFrame::BuiltinContinuationFrame( BailoutId bailout_id, SharedFunctionInfo shared_info, int height) { TranslatedFrame frame(kBuiltinContinuation, shared_info, height); frame.node_id_ = bailout_id; return frame; } TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationFrame( BailoutId bailout_id, SharedFunctionInfo shared_info, int height) { TranslatedFrame frame(kJavaScriptBuiltinContinuation, shared_info, height); frame.node_id_ = bailout_id; return frame; } TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame( BailoutId bailout_id, SharedFunctionInfo shared_info, int height) { TranslatedFrame frame(kJavaScriptBuiltinContinuationWithCatch, shared_info, height); frame.node_id_ = bailout_id; return frame; } int TranslatedFrame::GetValueCount() { // The function is added to all frame state descriptors in // InstructionSelector::AddInputsToFrameStateDescriptor. static constexpr int kTheFunction = 1; switch (kind()) { case kInterpretedFunction: { int parameter_count = InternalFormalParameterCountWithReceiver(raw_shared_info_); static constexpr int kTheContext = 1; static constexpr int kTheAccumulator = 1; return height() + parameter_count + kTheContext + kTheFunction + kTheAccumulator; } case kArgumentsAdaptor: return height() + kTheFunction; case kConstructStub: case kBuiltinContinuation: case kJavaScriptBuiltinContinuation: case kJavaScriptBuiltinContinuationWithCatch: { static constexpr int kTheContext = 1; return height() + kTheContext + kTheFunction; } case kInvalid: UNREACHABLE(); } UNREACHABLE(); } void TranslatedFrame::Handlify() { if (!raw_shared_info_.is_null()) { shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_, raw_shared_info_.GetIsolate()); raw_shared_info_ = SharedFunctionInfo(); } for (auto& value : values_) { value.Handlify(); } } TranslatedFrame TranslatedState::CreateNextTranslatedFrame( TranslationIterator* iterator, FixedArray literal_array, Address fp, FILE* trace_file) { Translation::Opcode opcode = static_cast<Translation::Opcode>(iterator->Next()); switch (opcode) { case Translation::INTERPRETED_FRAME: { BailoutId bytecode_offset = BailoutId(iterator->Next()); SharedFunctionInfo shared_info = SharedFunctionInfo::cast(literal_array.get(iterator->Next())); int height = iterator->Next(); int return_value_offset = iterator->Next(); int return_value_count = iterator->Next(); if (trace_file != nullptr) { std::unique_ptr<char[]> name = shared_info.DebugName().ToCString(); PrintF(trace_file, " reading input frame %s", name.get()); int arg_count = InternalFormalParameterCountWithReceiver(shared_info); PrintF(trace_file, " => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); " "inputs:\n", bytecode_offset.ToInt(), arg_count, height, return_value_offset, return_value_count); } return TranslatedFrame::InterpretedFrame(bytecode_offset, shared_info, height, return_value_offset, return_value_count); } case Translation::ARGUMENTS_ADAPTOR_FRAME: { SharedFunctionInfo shared_info = SharedFunctionInfo::cast(literal_array.get(iterator->Next())); int height = iterator->Next(); if (trace_file != nullptr) { std::unique_ptr<char[]> name = shared_info.DebugName().ToCString(); PrintF(trace_file, " reading arguments adaptor frame %s", name.get()); PrintF(trace_file, " => height=%d; inputs:\n", height); } return TranslatedFrame::ArgumentsAdaptorFrame(shared_info, height); } case Translation::CONSTRUCT_STUB_FRAME: { BailoutId bailout_id = BailoutId(iterator->Next()); 
SharedFunctionInfo shared_info = SharedFunctionInfo::cast(literal_array.get(iterator->Next())); int height = iterator->Next(); if (trace_file != nullptr) { std::unique_ptr<char[]> name = shared_info.DebugName().ToCString(); PrintF(trace_file, " reading construct stub frame %s", name.get()); PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n", bailout_id.ToInt(), height); } return TranslatedFrame::ConstructStubFrame(bailout_id, shared_info, height); } case Translation::BUILTIN_CONTINUATION_FRAME: { BailoutId bailout_id = BailoutId(iterator->Next()); SharedFunctionInfo shared_info = SharedFunctionInfo::cast(literal_array.get(iterator->Next())); int height = iterator->Next(); if (trace_file != nullptr) { std::unique_ptr<char[]> name = shared_info.DebugName().ToCString(); PrintF(trace_file, " reading builtin continuation frame %s", name.get()); PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n", bailout_id.ToInt(), height); } return TranslatedFrame::BuiltinContinuationFrame(bailout_id, shared_info, height); } case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: { BailoutId bailout_id = BailoutId(iterator->Next()); SharedFunctionInfo shared_info = SharedFunctionInfo::cast(literal_array.get(iterator->Next())); int height = iterator->Next(); if (trace_file != nullptr) { std::unique_ptr<char[]> name = shared_info.DebugName().ToCString(); PrintF(trace_file, " reading JavaScript builtin continuation frame %s", name.get()); PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n", bailout_id.ToInt(), height); } return TranslatedFrame::JavaScriptBuiltinContinuationFrame( bailout_id, shared_info, height); } case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: { BailoutId bailout_id = BailoutId(iterator->Next()); SharedFunctionInfo shared_info = SharedFunctionInfo::cast(literal_array.get(iterator->Next())); int height = iterator->Next(); if (trace_file != nullptr) { std::unique_ptr<char[]> name = shared_info.DebugName().ToCString(); PrintF(trace_file, " reading JavaScript builtin continuation frame with catch %s", name.get()); PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n", bailout_id.ToInt(), height); } return TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame( bailout_id, shared_info, height); } case Translation::UPDATE_FEEDBACK: case Translation::BEGIN: case Translation::DUPLICATED_OBJECT: case Translation::ARGUMENTS_ELEMENTS: case Translation::ARGUMENTS_LENGTH: case Translation::CAPTURED_OBJECT: case Translation::REGISTER: case Translation::INT32_REGISTER: case Translation::INT64_REGISTER: case Translation::UINT32_REGISTER: case Translation::BOOL_REGISTER: case Translation::FLOAT_REGISTER: case Translation::DOUBLE_REGISTER: case Translation::STACK_SLOT: case Translation::INT32_STACK_SLOT: case Translation::INT64_STACK_SLOT: case Translation::UINT32_STACK_SLOT: case Translation::BOOL_STACK_SLOT: case Translation::FLOAT_STACK_SLOT: case Translation::DOUBLE_STACK_SLOT: case Translation::LITERAL: break; } FATAL("We should never get here - unexpected deopt info."); return TranslatedFrame::InvalidFrame(); } // static void TranslatedFrame::AdvanceIterator( std::deque<TranslatedValue>::iterator* iter) { int values_to_skip = 1; while (values_to_skip > 0) { // Consume the current element. values_to_skip--; // Add all the children. 
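    // For example, a kCapturedObject slot with three fields adds three more
    // values to skip, and captured objects among those fields add their own
    // children in turn, so the whole subtree is skipped.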
    values_to_skip += (*iter)->GetChildrenCount();

    (*iter)++;
  }
}

Address TranslatedState::ComputeArgumentsPosition(Address input_frame_pointer,
                                                  CreateArgumentsType type,
                                                  int* length) {
  Address parent_frame_pointer = *reinterpret_cast<Address*>(
      input_frame_pointer + StandardFrameConstants::kCallerFPOffset);
  intptr_t parent_frame_type = Memory<intptr_t>(
      parent_frame_pointer + CommonFrameConstants::kContextOrFrameTypeOffset);

  Address arguments_frame;
  if (parent_frame_type ==
      StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)) {
    if (length)
      *length = Smi::cast(*FullObjectSlot(
                              parent_frame_pointer +
                              ArgumentsAdaptorFrameConstants::kLengthOffset))
                    .value();
    arguments_frame = parent_frame_pointer;
  } else {
    if (length) *length = formal_parameter_count_;
    arguments_frame = input_frame_pointer;
  }

  if (type == CreateArgumentsType::kRestParameter) {
    // If the actual number of arguments is less than the number of formal
    // parameters, we have zero rest parameters.
    if (length) *length = std::max(0, *length - formal_parameter_count_);
  }

  return arguments_frame;
}

// Creates translated values for an arguments backing store, or the backing
// store for rest parameters, depending on the given {type}. The
// TranslatedValue objects for the fields are not read from the
// TranslationIterator, but instead created on-the-fly based on dynamic
// information in the optimized frame.
void TranslatedState::CreateArgumentsElementsTranslatedValues(
    int frame_index, Address input_frame_pointer, CreateArgumentsType type,
    FILE* trace_file) {
  TranslatedFrame& frame = frames_[frame_index];

  int length;
  Address arguments_frame =
      ComputeArgumentsPosition(input_frame_pointer, type, &length);

  int object_index = static_cast<int>(object_positions_.size());
  int value_index = static_cast<int>(frame.values_.size());
  if (trace_file != nullptr) {
    PrintF(trace_file,
           "arguments elements object #%d (type = %d, length = %d)",
           object_index, static_cast<uint8_t>(type), length);
  }

  object_positions_.push_back({frame_index, value_index});
  frame.Add(TranslatedValue::NewDeferredObject(
      this, length + FixedArray::kHeaderSize / kTaggedSize, object_index));

  ReadOnlyRoots roots(isolate_);
  frame.Add(TranslatedValue::NewTagged(this, roots.fixed_array_map()));
  frame.Add(TranslatedValue::NewInt32(this, length));

  int number_of_holes = 0;
  if (type == CreateArgumentsType::kMappedArguments) {
    // If the actual number of arguments is less than the number of formal
    // parameters, we have fewer holes to fill, so that we do not overshoot
    // the length.
    number_of_holes = Min(formal_parameter_count_, length);
  }
  for (int i = 0; i < number_of_holes; ++i) {
    frame.Add(TranslatedValue::NewTagged(this, roots.the_hole_value()));
  }
  for (int i = length - number_of_holes - 1; i >= 0; --i) {
    Address argument_slot = arguments_frame +
                            CommonFrameConstants::kFixedFrameSizeAboveFp +
                            i * kSystemPointerSize;
    frame.Add(TranslatedValue::NewTagged(this, *FullObjectSlot(argument_slot)));
  }
}

// We can't intermix stack decoding and allocations because the deoptimization
// infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
// The TranslatedValue objects created correspond to the static translation
// instructions from the TranslationIterator, except for
// Translation::ARGUMENTS_ELEMENTS, where the number and values of the
// FixedArray elements depend on dynamic information from the optimized frame.
// Returns the number of expected nested translations from the
// TranslationIterator.
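// For scalar translations the result is 0. A CAPTURED_OBJECT translation
// returns its field count, while a DUPLICATED_OBJECT returns 0 because the
// duplicate's children are stored with the original object.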
int TranslatedState::CreateNextTranslatedValue( int frame_index, TranslationIterator* iterator, FixedArray literal_array, Address fp, RegisterValues* registers, FILE* trace_file) { disasm::NameConverter converter; TranslatedFrame& frame = frames_[frame_index]; int value_index = static_cast<int>(frame.values_.size()); Translation::Opcode opcode = static_cast<Translation::Opcode>(iterator->Next()); switch (opcode) { case Translation::BEGIN: case Translation::INTERPRETED_FRAME: case Translation::ARGUMENTS_ADAPTOR_FRAME: case Translation::CONSTRUCT_STUB_FRAME: case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: case Translation::BUILTIN_CONTINUATION_FRAME: case Translation::UPDATE_FEEDBACK: // Peeled off before getting here. break; case Translation::DUPLICATED_OBJECT: { int object_id = iterator->Next(); if (trace_file != nullptr) { PrintF(trace_file, "duplicated object #%d", object_id); } object_positions_.push_back(object_positions_[object_id]); TranslatedValue translated_value = TranslatedValue::NewDuplicateObject(this, object_id); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::ARGUMENTS_ELEMENTS: { CreateArgumentsType arguments_type = static_cast<CreateArgumentsType>(iterator->Next()); CreateArgumentsElementsTranslatedValues(frame_index, fp, arguments_type, trace_file); return 0; } case Translation::ARGUMENTS_LENGTH: { CreateArgumentsType arguments_type = static_cast<CreateArgumentsType>(iterator->Next()); int length; ComputeArgumentsPosition(fp, arguments_type, &length); if (trace_file != nullptr) { PrintF(trace_file, "arguments length field (type = %d, length = %d)", static_cast<uint8_t>(arguments_type), length); } frame.Add(TranslatedValue::NewInt32(this, length)); return 0; } case Translation::CAPTURED_OBJECT: { int field_count = iterator->Next(); int object_index = static_cast<int>(object_positions_.size()); if (trace_file != nullptr) { PrintF(trace_file, "captured object #%d (length = %d)", object_index, field_count); } object_positions_.push_back({frame_index, value_index}); TranslatedValue translated_value = TranslatedValue::NewDeferredObject(this, field_count, object_index); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::REGISTER: { int input_reg = iterator->Next(); if (registers == nullptr) { TranslatedValue translated_value = TranslatedValue::NewInvalid(this); frame.Add(translated_value); return translated_value.GetChildrenCount(); } intptr_t value = registers->GetRegister(input_reg); Address uncompressed_value = DecompressIfNeeded(value); if (trace_file != nullptr) { PrintF(trace_file, V8PRIxPTR_FMT " ; %s ", uncompressed_value, converter.NameOfCPURegister(input_reg)); Object(uncompressed_value).ShortPrint(trace_file); } TranslatedValue translated_value = TranslatedValue::NewTagged(this, Object(uncompressed_value)); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::INT32_REGISTER: { int input_reg = iterator->Next(); if (registers == nullptr) { TranslatedValue translated_value = TranslatedValue::NewInvalid(this); frame.Add(translated_value); return translated_value.GetChildrenCount(); } intptr_t value = registers->GetRegister(input_reg); if (trace_file != nullptr) { PrintF(trace_file, "%" V8PRIdPTR " ; %s (int32)", value, converter.NameOfCPURegister(input_reg)); } TranslatedValue translated_value = TranslatedValue::NewInt32(this, static_cast<int32_t>(value)); 
frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::INT64_REGISTER: { int input_reg = iterator->Next(); if (registers == nullptr) { TranslatedValue translated_value = TranslatedValue::NewInvalid(this); frame.Add(translated_value); return translated_value.GetChildrenCount(); } intptr_t value = registers->GetRegister(input_reg); if (trace_file != nullptr) { PrintF(trace_file, "%" V8PRIdPTR " ; %s (int64)", value, converter.NameOfCPURegister(input_reg)); } TranslatedValue translated_value = TranslatedValue::NewInt64(this, static_cast<int64_t>(value)); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::UINT32_REGISTER: { int input_reg = iterator->Next(); if (registers == nullptr) { TranslatedValue translated_value = TranslatedValue::NewInvalid(this); frame.Add(translated_value); return translated_value.GetChildrenCount(); } intptr_t value = registers->GetRegister(input_reg); if (trace_file != nullptr) { PrintF(trace_file, "%" V8PRIuPTR " ; %s (uint32)", value, converter.NameOfCPURegister(input_reg)); } TranslatedValue translated_value = TranslatedValue::NewUInt32(this, static_cast<uint32_t>(value)); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::BOOL_REGISTER: { int input_reg = iterator->Next(); if (registers == nullptr) { TranslatedValue translated_value = TranslatedValue::NewInvalid(this); frame.Add(translated_value); return translated_value.GetChildrenCount(); } intptr_t value = registers->GetRegister(input_reg); if (trace_file != nullptr) { PrintF(trace_file, "%" V8PRIdPTR " ; %s (bool)", value, converter.NameOfCPURegister(input_reg)); } TranslatedValue translated_value = TranslatedValue::NewBool(this, static_cast<uint32_t>(value)); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::FLOAT_REGISTER: { int input_reg = iterator->Next(); if (registers == nullptr) { TranslatedValue translated_value = TranslatedValue::NewInvalid(this); frame.Add(translated_value); return translated_value.GetChildrenCount(); } Float32 value = registers->GetFloatRegister(input_reg); if (trace_file != nullptr) { PrintF(trace_file, "%e ; %s (float)", value.get_scalar(), RegisterName(FloatRegister::from_code(input_reg))); } TranslatedValue translated_value = TranslatedValue::NewFloat(this, value); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::DOUBLE_REGISTER: { int input_reg = iterator->Next(); if (registers == nullptr) { TranslatedValue translated_value = TranslatedValue::NewInvalid(this); frame.Add(translated_value); return translated_value.GetChildrenCount(); } Float64 value = registers->GetDoubleRegister(input_reg); if (trace_file != nullptr) { PrintF(trace_file, "%e ; %s (double)", value.get_scalar(), RegisterName(DoubleRegister::from_code(input_reg))); } TranslatedValue translated_value = TranslatedValue::NewDouble(this, value); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::STACK_SLOT: { int slot_offset = OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset)); Address uncompressed_value = DecompressIfNeeded(value); if (trace_file != nullptr) { PrintF(trace_file, V8PRIxPTR_FMT " ; [fp %c %3d] ", uncompressed_value, slot_offset < 0 ? 
'-' : '+', std::abs(slot_offset)); Object(uncompressed_value).ShortPrint(trace_file); } TranslatedValue translated_value = TranslatedValue::NewTagged(this, Object(uncompressed_value)); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::INT32_STACK_SLOT: { int slot_offset = OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); uint32_t value = GetUInt32Slot(fp, slot_offset); if (trace_file != nullptr) { PrintF(trace_file, "%d ; (int32) [fp %c %3d] ", static_cast<int32_t>(value), slot_offset < 0 ? '-' : '+', std::abs(slot_offset)); } TranslatedValue translated_value = TranslatedValue::NewInt32(this, value); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::INT64_STACK_SLOT: { int slot_offset = OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); uint64_t value = GetUInt64Slot(fp, slot_offset); if (trace_file != nullptr) { PrintF(trace_file, "%" V8PRIdPTR " ; (int64) [fp %c %3d] ", static_cast<intptr_t>(value), slot_offset < 0 ? '-' : '+', std::abs(slot_offset)); } TranslatedValue translated_value = TranslatedValue::NewInt64(this, value); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::UINT32_STACK_SLOT: { int slot_offset = OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); uint32_t value = GetUInt32Slot(fp, slot_offset); if (trace_file != nullptr) { PrintF(trace_file, "%u ; (uint32) [fp %c %3d] ", value, slot_offset < 0 ? '-' : '+', std::abs(slot_offset)); } TranslatedValue translated_value = TranslatedValue::NewUInt32(this, value); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::BOOL_STACK_SLOT: { int slot_offset = OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); uint32_t value = GetUInt32Slot(fp, slot_offset); if (trace_file != nullptr) { PrintF(trace_file, "%u ; (bool) [fp %c %3d] ", value, slot_offset < 0 ? '-' : '+', std::abs(slot_offset)); } TranslatedValue translated_value = TranslatedValue::NewBool(this, value); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::FLOAT_STACK_SLOT: { int slot_offset = OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); Float32 value = GetFloatSlot(fp, slot_offset); if (trace_file != nullptr) { PrintF(trace_file, "%e ; (float) [fp %c %3d] ", value.get_scalar(), slot_offset < 0 ? '-' : '+', std::abs(slot_offset)); } TranslatedValue translated_value = TranslatedValue::NewFloat(this, value); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::DOUBLE_STACK_SLOT: { int slot_offset = OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); Float64 value = GetDoubleSlot(fp, slot_offset); if (trace_file != nullptr) { PrintF(trace_file, "%e ; (double) [fp %c %d] ", value.get_scalar(), slot_offset < 0 ? 
'-' : '+', std::abs(slot_offset)); } TranslatedValue translated_value = TranslatedValue::NewDouble(this, value); frame.Add(translated_value); return translated_value.GetChildrenCount(); } case Translation::LITERAL: { int literal_index = iterator->Next(); Object value = literal_array.get(literal_index); if (trace_file != nullptr) { PrintF(trace_file, V8PRIxPTR_FMT " ; (literal %2d) ", value.ptr(), literal_index); value.ShortPrint(trace_file); } TranslatedValue translated_value = TranslatedValue::NewTagged(this, value); frame.Add(translated_value); return translated_value.GetChildrenCount(); } } FATAL("We should never get here - unexpected deopt info."); } Address TranslatedState::DecompressIfNeeded(intptr_t value) { if (COMPRESS_POINTERS_BOOL) { return DecompressTaggedAny(isolate()->isolate_root(), static_cast<uint32_t>(value)); } else { return value; } } TranslatedState::TranslatedState(const JavaScriptFrame* frame) { int deopt_index = Safepoint::kNoDeoptimizationIndex; DeoptimizationData data = static_cast<const OptimizedFrame*>(frame)->GetDeoptimizationData( &deopt_index); DCHECK(!data.is_null() && deopt_index != Safepoint::kNoDeoptimizationIndex); TranslationIterator it(data.TranslationByteArray(), data.TranslationIndex(deopt_index).value()); Init(frame->isolate(), frame->fp(), &it, data.LiteralArray(), nullptr /* registers */, nullptr /* trace file */, frame->function().shared().internal_formal_parameter_count()); } void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer, TranslationIterator* iterator, FixedArray literal_array, RegisterValues* registers, FILE* trace_file, int formal_parameter_count) { DCHECK(frames_.empty()); formal_parameter_count_ = formal_parameter_count; isolate_ = isolate; // Read out the 'header' translation. Translation::Opcode opcode = static_cast<Translation::Opcode>(iterator->Next()); CHECK(opcode == Translation::BEGIN); int count = iterator->Next(); frames_.reserve(count); iterator->Next(); // Drop JS frames count. int update_feedback_count = iterator->Next(); CHECK_GE(update_feedback_count, 0); CHECK_LE(update_feedback_count, 1); if (update_feedback_count == 1) { ReadUpdateFeedback(iterator, literal_array, trace_file); } std::stack<int> nested_counts; // Read the frames for (int frame_index = 0; frame_index < count; frame_index++) { // Read the frame descriptor. frames_.push_back(CreateNextTranslatedFrame( iterator, literal_array, input_frame_pointer, trace_file)); TranslatedFrame& frame = frames_.back(); // Read the values. int values_to_process = frame.GetValueCount(); while (values_to_process > 0 || !nested_counts.empty()) { if (trace_file != nullptr) { if (nested_counts.empty()) { // For top level values, print the value number. PrintF(trace_file, " %3i: ", frame.GetValueCount() - values_to_process); } else { // Take care of indenting for nested values. PrintF(trace_file, " "); for (size_t j = 0; j < nested_counts.size(); j++) { PrintF(trace_file, " "); } } } int nested_count = CreateNextTranslatedValue(frame_index, iterator, literal_array, input_frame_pointer, registers, trace_file); if (trace_file != nullptr) { PrintF(trace_file, "\n"); } // Update the value count and resolve the nesting. 
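      // Worked example: a frame with GetValueCount() == 4 whose second value
      // is a captured object with two fields is processed in the order
      //   v0, obj, obj.field0, obj.field1, v2, v3;
      // the outer remaining count (2) is pushed while the object's children
      // are consumed and popped again once they are done.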
values_to_process--; if (nested_count > 0) { nested_counts.push(values_to_process); values_to_process = nested_count; } else { while (values_to_process == 0 && !nested_counts.empty()) { values_to_process = nested_counts.top(); nested_counts.pop(); } } } } CHECK(!iterator->HasNext() || static_cast<Translation::Opcode>( iterator->Next()) == Translation::BEGIN); } void TranslatedState::Prepare(Address stack_frame_pointer) { for (auto& frame : frames_) frame.Handlify(); if (!feedback_vector_.is_null()) { feedback_vector_handle_ = Handle<FeedbackVector>(feedback_vector_, isolate()); feedback_vector_ = FeedbackVector(); } stack_frame_pointer_ = stack_frame_pointer; UpdateFromPreviouslyMaterializedObjects(); } TranslatedValue* TranslatedState::GetValueByObjectIndex(int object_index) { CHECK_LT(static_cast<size_t>(object_index), object_positions_.size()); TranslatedState::ObjectPosition pos = object_positions_[object_index]; return &(frames_[pos.frame_index_].values_[pos.value_index_]); } Handle<Object> TranslatedState::InitializeObjectAt(TranslatedValue* slot) { slot = ResolveCapturedObject(slot); DisallowHeapAllocation no_allocation; if (slot->materialization_state() != TranslatedValue::kFinished) { std::stack<int> worklist; worklist.push(slot->object_index()); slot->mark_finished(); while (!worklist.empty()) { int index = worklist.top(); worklist.pop(); InitializeCapturedObjectAt(index, &worklist, no_allocation); } } return slot->GetStorage(); } void TranslatedState::InitializeCapturedObjectAt( int object_index, std::stack<int>* worklist, const DisallowHeapAllocation& no_allocation) { CHECK_LT(static_cast<size_t>(object_index), object_positions_.size()); TranslatedState::ObjectPosition pos = object_positions_[object_index]; int value_index = pos.value_index_; TranslatedFrame* frame = &(frames_[pos.frame_index_]); TranslatedValue* slot = &(frame->values_[value_index]); value_index++; CHECK_EQ(TranslatedValue::kFinished, slot->materialization_state()); CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind()); // Ensure all fields are initialized. int children_init_index = value_index; for (int i = 0; i < slot->GetChildrenCount(); i++) { // If the field is an object that has not been initialized yet, queue it // for initialization (and mark it as such). TranslatedValue* child_slot = frame->ValueAt(children_init_index); if (child_slot->kind() == TranslatedValue::kCapturedObject || child_slot->kind() == TranslatedValue::kDuplicatedObject) { child_slot = ResolveCapturedObject(child_slot); if (child_slot->materialization_state() != TranslatedValue::kFinished) { DCHECK_EQ(TranslatedValue::kAllocated, child_slot->materialization_state()); worklist->push(child_slot->object_index()); child_slot->mark_finished(); } } SkipSlots(1, frame, &children_init_index); } // Read the map. // The map should never be materialized, so let us check we already have // an existing object here. CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged); Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue()); CHECK(map->IsMap()); value_index++; // Handle the special cases. 
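  // Three classes of maps are distinguished below: heap numbers and fixed
  // double arrays carry no tagged fields and were already fully materialized
  // during the allocation phase, so they return early; fixed-array-shaped
  // objects get plain tagged-field initialization; everything else must be a
  // JSObject and may mix tagged and double fields.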
switch (map->instance_type()) { case HEAP_NUMBER_TYPE: case FIXED_DOUBLE_ARRAY_TYPE: return; case FIXED_ARRAY_TYPE: case AWAIT_CONTEXT_TYPE: case BLOCK_CONTEXT_TYPE: case CATCH_CONTEXT_TYPE: case DEBUG_EVALUATE_CONTEXT_TYPE: case EVAL_CONTEXT_TYPE: case FUNCTION_CONTEXT_TYPE: case MODULE_CONTEXT_TYPE: case NATIVE_CONTEXT_TYPE: case SCRIPT_CONTEXT_TYPE: case WITH_CONTEXT_TYPE: case OBJECT_BOILERPLATE_DESCRIPTION_TYPE: case HASH_TABLE_TYPE: case ORDERED_HASH_MAP_TYPE: case ORDERED_HASH_SET_TYPE: case NAME_DICTIONARY_TYPE: case GLOBAL_DICTIONARY_TYPE: case NUMBER_DICTIONARY_TYPE: case SIMPLE_NUMBER_DICTIONARY_TYPE: case STRING_TABLE_TYPE: case PROPERTY_ARRAY_TYPE: case SCRIPT_CONTEXT_TABLE_TYPE: InitializeObjectWithTaggedFieldsAt(frame, &value_index, slot, map, no_allocation); break; default: CHECK(map->IsJSObjectMap()); InitializeJSObjectAt(frame, &value_index, slot, map, no_allocation); break; } CHECK_EQ(value_index, children_init_index); } void TranslatedState::EnsureObjectAllocatedAt(TranslatedValue* slot) { slot = ResolveCapturedObject(slot); if (slot->materialization_state() == TranslatedValue::kUninitialized) { std::stack<int> worklist; worklist.push(slot->object_index()); slot->mark_allocated(); while (!worklist.empty()) { int index = worklist.top(); worklist.pop(); EnsureCapturedObjectAllocatedAt(index, &worklist); } } } void TranslatedState::MaterializeFixedDoubleArray(TranslatedFrame* frame, int* value_index, TranslatedValue* slot, Handle<Map> map) { int length = Smi::cast(frame->values_[*value_index].GetRawValue()).value(); (*value_index)++; Handle<FixedDoubleArray> array = Handle<FixedDoubleArray>::cast( isolate()->factory()->NewFixedDoubleArray(length)); CHECK_GT(length, 0); for (int i = 0; i < length; i++) { CHECK_NE(TranslatedValue::kCapturedObject, frame->values_[*value_index].kind()); Handle<Object> value = frame->values_[*value_index].GetValue(); if (value->IsNumber()) { array->set(i, value->Number()); } else { CHECK(value.is_identical_to(isolate()->factory()->the_hole_value())); array->set_the_hole(isolate(), i); } (*value_index)++; } slot->set_storage(array); } void TranslatedState::MaterializeHeapNumber(TranslatedFrame* frame, int* value_index, TranslatedValue* slot) { CHECK_NE(TranslatedValue::kCapturedObject, frame->values_[*value_index].kind()); Handle<Object> value = frame->values_[*value_index].GetValue(); CHECK(value->IsNumber()); Handle<HeapNumber> box = isolate()->factory()->NewHeapNumber(value->Number()); (*value_index)++; slot->set_storage(box); } namespace { enum DoubleStorageKind : uint8_t { kStoreTagged, kStoreUnboxedDouble, kStoreMutableHeapNumber, }; } // namespace void TranslatedState::SkipSlots(int slots_to_skip, TranslatedFrame* frame, int* value_index) { while (slots_to_skip > 0) { TranslatedValue* slot = &(frame->values_[*value_index]); (*value_index)++; slots_to_skip--; if (slot->kind() == TranslatedValue::kCapturedObject) { slots_to_skip += slot->GetChildrenCount(); } } } void TranslatedState::EnsureCapturedObjectAllocatedAt( int object_index, std::stack<int>* worklist) { CHECK_LT(static_cast<size_t>(object_index), object_positions_.size()); TranslatedState::ObjectPosition pos = object_positions_[object_index]; int value_index = pos.value_index_; TranslatedFrame* frame = &(frames_[pos.frame_index_]); TranslatedValue* slot = &(frame->values_[value_index]); value_index++; CHECK_EQ(TranslatedValue::kAllocated, slot->materialization_state()); CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind()); // Read the map. 
// The map should never be materialized, so let us check we already have // an existing object here. CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged); Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue()); CHECK(map->IsMap()); value_index++; // Handle the special cases. switch (map->instance_type()) { case FIXED_DOUBLE_ARRAY_TYPE: // Materialize (i.e. allocate&initialize) the array and return since // there is no need to process the children. return MaterializeFixedDoubleArray(frame, &value_index, slot, map); case HEAP_NUMBER_TYPE: // Materialize (i.e. allocate&initialize) the heap number and return. // There is no need to process the children. return MaterializeHeapNumber(frame, &value_index, slot); case FIXED_ARRAY_TYPE: case SCRIPT_CONTEXT_TABLE_TYPE: case AWAIT_CONTEXT_TYPE: case BLOCK_CONTEXT_TYPE: case CATCH_CONTEXT_TYPE: case DEBUG_EVALUATE_CONTEXT_TYPE: case EVAL_CONTEXT_TYPE: case FUNCTION_CONTEXT_TYPE: case MODULE_CONTEXT_TYPE: case NATIVE_CONTEXT_TYPE: case SCRIPT_CONTEXT_TYPE: case WITH_CONTEXT_TYPE: case HASH_TABLE_TYPE: case ORDERED_HASH_MAP_TYPE: case ORDERED_HASH_SET_TYPE: case NAME_DICTIONARY_TYPE: case GLOBAL_DICTIONARY_TYPE: case NUMBER_DICTIONARY_TYPE: case SIMPLE_NUMBER_DICTIONARY_TYPE: case STRING_TABLE_TYPE: { // Check we have the right size. int array_length = Smi::cast(frame->values_[value_index].GetRawValue()).value(); int instance_size = FixedArray::SizeFor(array_length); CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize); // Canonicalize empty fixed array. if (*map == ReadOnlyRoots(isolate()).empty_fixed_array().map() && array_length == 0) { slot->set_storage(isolate()->factory()->empty_fixed_array()); } else { slot->set_storage(AllocateStorageFor(slot)); } // Make sure all the remaining children (after the map) are allocated. return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame, &value_index, worklist); } case PROPERTY_ARRAY_TYPE: { // Check we have the right size. int length_or_hash = Smi::cast(frame->values_[value_index].GetRawValue()).value(); int array_length = PropertyArray::LengthField::decode(length_or_hash); int instance_size = PropertyArray::SizeFor(array_length); CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize); slot->set_storage(AllocateStorageFor(slot)); // Make sure all the remaining children (after the map) are allocated. return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame, &value_index, worklist); } default: CHECK(map->IsJSObjectMap()); EnsureJSObjectAllocated(slot, map); TranslatedValue* properties_slot = &(frame->values_[value_index]); value_index++; if (properties_slot->kind() == TranslatedValue::kCapturedObject) { // If we are materializing the property array, make sure we put // the mutable heap numbers at the right places. EnsurePropertiesAllocatedAndMarked(properties_slot, map); EnsureChildrenAllocated(properties_slot->GetChildrenCount(), frame, &value_index, worklist); } // Make sure all the remaining children (after the map and properties) are // allocated. return EnsureChildrenAllocated(slot->GetChildrenCount() - 2, frame, &value_index, worklist); } UNREACHABLE(); } void TranslatedState::EnsureChildrenAllocated(int count, TranslatedFrame* frame, int* value_index, std::stack<int>* worklist) { // Ensure all children are allocated. for (int i = 0; i < count; i++) { // If the field is an object that has not been allocated yet, queue it // for initialization (and mark it as such). 
TranslatedValue* child_slot = frame->ValueAt(*value_index); if (child_slot->kind() == TranslatedValue::kCapturedObject || child_slot->kind() == TranslatedValue::kDuplicatedObject) { child_slot = ResolveCapturedObject(child_slot); if (child_slot->materialization_state() == TranslatedValue::kUninitialized) { worklist->push(child_slot->object_index()); child_slot->mark_allocated(); } } else { // Make sure the simple values (heap numbers, etc.) are properly // initialized. child_slot->MaterializeSimple(); } SkipSlots(1, frame, value_index); } } void TranslatedState::EnsurePropertiesAllocatedAndMarked( TranslatedValue* properties_slot, Handle<Map> map) { CHECK_EQ(TranslatedValue::kUninitialized, properties_slot->materialization_state()); Handle<ByteArray> object_storage = AllocateStorageFor(properties_slot); properties_slot->mark_allocated(); properties_slot->set_storage(object_storage); // Set markers for the double properties. Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate()); int field_count = map->NumberOfOwnDescriptors(); for (int i = 0; i < field_count; i++) { FieldIndex index = FieldIndex::ForDescriptor(*map, i); if (descriptors->GetDetails(i).representation().IsDouble() && !index.is_inobject()) { CHECK(!map->IsUnboxedDoubleField(index)); int outobject_index = index.outobject_array_index(); int array_index = outobject_index * kTaggedSize; object_storage->set(array_index, kStoreMutableHeapNumber); } } } Handle<ByteArray> TranslatedState::AllocateStorageFor(TranslatedValue* slot) { int allocate_size = ByteArray::LengthFor(slot->GetChildrenCount() * kTaggedSize); // It is important to allocate all the objects tenured so that the marker // does not visit them. Handle<ByteArray> object_storage = isolate()->factory()->NewByteArray(allocate_size, AllocationType::kOld); for (int i = 0; i < object_storage->length(); i++) { object_storage->set(i, kStoreTagged); } return object_storage; } void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot, Handle<Map> map) { CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kTaggedSize); Handle<ByteArray> object_storage = AllocateStorageFor(slot); // Now we handle the interesting (JSObject) case. Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate()); int field_count = map->NumberOfOwnDescriptors(); // Set markers for the double properties. for (int i = 0; i < field_count; i++) { FieldIndex index = FieldIndex::ForDescriptor(*map, i); if (descriptors->GetDetails(i).representation().IsDouble() && index.is_inobject()) { CHECK_GE(index.index(), FixedArray::kHeaderSize / kTaggedSize); int array_index = index.index() * kTaggedSize - FixedArray::kHeaderSize; uint8_t marker = map->IsUnboxedDoubleField(index) ? 
kStoreUnboxedDouble : kStoreMutableHeapNumber; object_storage->set(array_index, marker); } } slot->set_storage(object_storage); } Handle<Object> TranslatedState::GetValueAndAdvance(TranslatedFrame* frame, int* value_index) { TranslatedValue* slot = frame->ValueAt(*value_index); SkipSlots(1, frame, value_index); if (slot->kind() == TranslatedValue::kDuplicatedObject) { slot = ResolveCapturedObject(slot); } CHECK_NE(TranslatedValue::kUninitialized, slot->materialization_state()); return slot->GetStorage(); } void TranslatedState::InitializeJSObjectAt( TranslatedFrame* frame, int* value_index, TranslatedValue* slot, Handle<Map> map, const DisallowHeapAllocation& no_allocation) { Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_); DCHECK_EQ(TranslatedValue::kCapturedObject, slot->kind()); // The object should have at least a map and some payload. CHECK_GE(slot->GetChildrenCount(), 2); // Notify the concurrent marker about the layout change. isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation); // Fill the property array field. { Handle<Object> properties = GetValueAndAdvance(frame, value_index); WRITE_FIELD(*object_storage, JSObject::kPropertiesOrHashOffset, *properties); WRITE_BARRIER(*object_storage, JSObject::kPropertiesOrHashOffset, *properties); } // For all the other fields we first look at the fixed array and check the // marker to see if we store an unboxed double. DCHECK_EQ(kTaggedSize, JSObject::kPropertiesOrHashOffset); for (int i = 2; i < slot->GetChildrenCount(); i++) { // Initialize and extract the value from its slot. Handle<Object> field_value = GetValueAndAdvance(frame, value_index); // Read out the marker and ensure the field is consistent with // what the markers in the storage say (note that all heap numbers // should be fully initialized by now). int offset = i * kTaggedSize; uint8_t marker = object_storage->ReadField<uint8_t>(offset); if (marker == kStoreUnboxedDouble) { double double_field_value; if (field_value->IsSmi()) { double_field_value = Smi::cast(*field_value).value(); } else { CHECK(field_value->IsHeapNumber()); double_field_value = HeapNumber::cast(*field_value).value(); } object_storage->WriteField<double>(offset, double_field_value); } else if (marker == kStoreMutableHeapNumber) { CHECK(field_value->IsHeapNumber()); WRITE_FIELD(*object_storage, offset, *field_value); WRITE_BARRIER(*object_storage, offset, *field_value); } else { CHECK_EQ(kStoreTagged, marker); WRITE_FIELD(*object_storage, offset, *field_value); WRITE_BARRIER(*object_storage, offset, *field_value); } } object_storage->synchronized_set_map(*map); } void TranslatedState::InitializeObjectWithTaggedFieldsAt( TranslatedFrame* frame, int* value_index, TranslatedValue* slot, Handle<Map> map, const DisallowHeapAllocation& no_allocation) { Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_); // Skip the writes if we already have the canonical empty fixed array. if (*object_storage == ReadOnlyRoots(isolate()).empty_fixed_array()) { CHECK_EQ(2, slot->GetChildrenCount()); Handle<Object> length_value = GetValueAndAdvance(frame, value_index); CHECK_EQ(*length_value, Smi::FromInt(0)); return; } // Notify the concurrent marker about the layout change. isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation); // Write the fields to the object. 
for (int i = 1; i < slot->GetChildrenCount(); i++) {
    Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
    int offset = i * kTaggedSize;
    uint8_t marker = object_storage->ReadField<uint8_t>(offset);
    if (i > 1 && marker == kStoreMutableHeapNumber) {
      CHECK(field_value->IsHeapNumber());
    } else {
      CHECK(marker == kStoreTagged || i == 1);
    }
    WRITE_FIELD(*object_storage, offset, *field_value);
    WRITE_BARRIER(*object_storage, offset, *field_value);
  }
  object_storage->synchronized_set_map(*map);
}

TranslatedValue* TranslatedState::ResolveCapturedObject(TranslatedValue* slot) {
  while (slot->kind() == TranslatedValue::kDuplicatedObject) {
    slot = GetValueByObjectIndex(slot->object_index());
  }
  CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
  return slot;
}

TranslatedFrame* TranslatedState::GetFrameFromJSFrameIndex(int jsframe_index) {
  for (size_t i = 0; i < frames_.size(); i++) {
    if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction ||
        frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
        frames_[i].kind() ==
            TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
      if (jsframe_index > 0) {
        jsframe_index--;
      } else {
        return &(frames_[i]);
      }
    }
  }
  return nullptr;
}

TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
    int jsframe_index, int* args_count) {
  for (size_t i = 0; i < frames_.size(); i++) {
    if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction ||
        frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
        frames_[i].kind() ==
            TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
      if (jsframe_index > 0) {
        jsframe_index--;
      } else {
        // We have the JS function frame, now check if it has arguments
        // adaptor.
        if (i > 0 &&
            frames_[i - 1].kind() == TranslatedFrame::kArgumentsAdaptor) {
          *args_count = frames_[i - 1].height();
          return &(frames_[i - 1]);
        }

        // JavaScriptBuiltinContinuation frames that are not preceded by an
        // arguments adaptor frame are currently only used by C++ API calls
        // from TurboFan. Calls to C++ API functions from TurboFan need
        // a special marker frame state, otherwise the API call wouldn't
        // be shown in a stack trace.
        if (frames_[i].kind() ==
                TranslatedFrame::kJavaScriptBuiltinContinuation &&
            frames_[i].shared_info()->internal_formal_parameter_count() ==
                SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
          DCHECK(frames_[i].shared_info()->IsApiFunction());

          // The argument count for this special case is always the second
          // to last value in the TranslatedFrame. It should also always be
          // {1}, as the GenericLazyDeoptContinuation builtin only has one
          // argument (the receiver).
          static constexpr int kTheContext = 1;
          const int height = frames_[i].height() + kTheContext;
          Object argc_object = frames_[i].ValueAt(height - 1)->GetRawValue();
          CHECK(argc_object.IsSmi());
          *args_count = Smi::ToInt(argc_object);

          DCHECK_EQ(*args_count, 1);
        } else {
          *args_count = InternalFormalParameterCountWithReceiver(
              *frames_[i].shared_info());
        }
        return &(frames_[i]);
      }
    }
  }
  return nullptr;
}

void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
  MaterializedObjectStore* materialized_store =
      isolate_->materialized_object_store();
  Handle<FixedArray> previously_materialized_objects =
      materialized_store->Get(stack_frame_pointer_);

  Handle<Object> marker = isolate_->factory()->arguments_marker();

  int length = static_cast<int>(object_positions_.size());
  bool new_store = false;
  if (previously_materialized_objects.is_null()) {
    previously_materialized_objects =
        isolate_->factory()->NewFixedArray(length, AllocationType::kOld);
    for (int i = 0; i < length; i++) {
      previously_materialized_objects->set(i, *marker);
    }
    new_store = true;
  }

  CHECK_EQ(length, previously_materialized_objects->length());

  bool value_changed = false;
  for (int i = 0; i < length; i++) {
    TranslatedState::ObjectPosition pos = object_positions_[i];
    TranslatedValue* value_info =
        &(frames_[pos.frame_index_].values_[pos.value_index_]);

    CHECK(value_info->IsMaterializedObject());

    // Skip duplicate objects (i.e., those that point to some other object id).
    if (value_info->object_index() != i) continue;

    Handle<Object> value(value_info->GetRawValue(), isolate_);

    if (!value.is_identical_to(marker)) {
      if (previously_materialized_objects->get(i) == *marker) {
        previously_materialized_objects->set(i, *value);
        value_changed = true;
      } else {
        CHECK(previously_materialized_objects->get(i) == *value);
      }
    }
  }

  if (new_store && value_changed) {
    materialized_store->Set(stack_frame_pointer_,
                            previously_materialized_objects);
    CHECK_EQ(frames_[0].kind(), TranslatedFrame::kInterpretedFunction);
    CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
    Deoptimizer::DeoptimizeFunction(frame->function(), frame->LookupCode());
  }
}

void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
  MaterializedObjectStore* materialized_store =
      isolate_->materialized_object_store();
  Handle<FixedArray> previously_materialized_objects =
      materialized_store->Get(stack_frame_pointer_);

  // If we have no previously materialized objects, there is nothing to do.
  if (previously_materialized_objects.is_null()) return;

  Handle<Object> marker = isolate_->factory()->arguments_marker();

  int length = static_cast<int>(object_positions_.size());
  CHECK_EQ(length, previously_materialized_objects->length());

  for (int i = 0; i < length; i++) {
    // For previously materialized objects, inject their values into the
    // translated values.
    if (previously_materialized_objects->get(i) != *marker) {
      TranslatedState::ObjectPosition pos = object_positions_[i];
      TranslatedValue* value_info =
          &(frames_[pos.frame_index_].values_[pos.value_index_]);

      CHECK(value_info->IsMaterializedObject());

      if (value_info->kind() == TranslatedValue::kCapturedObject) {
        value_info->set_initialized_storage(
            Handle<Object>(previously_materialized_objects->get(i), isolate_));
      }
    }
  }
}

void TranslatedState::VerifyMaterializedObjects() {
#ifdef VERIFY_HEAP
  int length = static_cast<int>(object_positions_.size());
  for (int i = 0; i < length; i++) {
    TranslatedValue* slot = GetValueByObjectIndex(i);
    if (slot->kind() == TranslatedValue::kCapturedObject) {
      CHECK_EQ(slot, GetValueByObjectIndex(slot->object_index()));
      if (slot->materialization_state() == TranslatedValue::kFinished) {
        slot->GetStorage()->ObjectVerify(isolate());
      } else {
        CHECK_EQ(slot->materialization_state(),
                 TranslatedValue::kUninitialized);
      }
    }
  }
#endif
}

bool TranslatedState::DoUpdateFeedback() {
  if (!feedback_vector_handle_.is_null()) {
    CHECK(!feedback_slot_.IsInvalid());
    isolate()->CountUsage(v8::Isolate::kDeoptimizerDisableSpeculation);
    FeedbackNexus nexus(feedback_vector_handle_, feedback_slot_);
    nexus.SetSpeculationMode(SpeculationMode::kDisallowSpeculation);
    return true;
  }
  return false;
}

void TranslatedState::ReadUpdateFeedback(TranslationIterator* iterator,
                                         FixedArray literal_array,
                                         FILE* trace_file) {
  CHECK_EQ(Translation::UPDATE_FEEDBACK, iterator->Next());
  feedback_vector_ = FeedbackVector::cast(literal_array.get(iterator->Next()));
  feedback_slot_ = FeedbackSlot(iterator->Next());
  if (trace_file != nullptr) {
    PrintF(trace_file, "  reading FeedbackVector (slot %d)\n",
           feedback_slot_.ToInt());
  }
}

}  // namespace internal
}  // namespace v8

// Undefine the heap manipulation macros.
#include "src/objects/object-macros-undef.h"
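// Editor's note: the two-phase materialization above (EnsureObjectAllocatedAt
// followed by InitializeObjectAt) walks the captured-object graph with an
// explicit worklist instead of recursion, so cyclic or deeply nested object
// graphs cannot overflow the C++ stack. Below is a minimal standalone sketch
// of that traversal pattern; Node and VisitAll are illustrative names, not
// V8 API.
#include <stack>
#include <vector>

namespace {

struct Node {
  bool marked = false;
  std::vector<int> children;  // indices into the node table
};

// Visits every node reachable from `root` exactly once. A node is marked
// *before* it is pushed, mirroring mark_allocated()/mark_finished() above;
// that is what makes cycles and shared subobjects safe.
void VisitAll(std::vector<Node>& nodes, int root) {
  std::stack<int> worklist;
  nodes[root].marked = true;
  worklist.push(root);
  while (!worklist.empty()) {
    int index = worklist.top();
    worklist.pop();
    for (int child : nodes[index].children) {
      if (!nodes[child].marked) {
        nodes[child].marked = true;
        worklist.push(child);
      }
    }
  }
}

}  // namespace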
//===----------------------------------------------------------------------===//
// llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/ConvertToLoops.cpp
// (smit-hinsu/iree, Apache-2.0)
//===----------------------------------------------------------------------===//
// Copyright 2021 The IREE Authors // // Licensed under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception #include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtDialect.h" #include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.h" #include "iree-dialects/Dialect/LinalgExt/Transforms/PassDetail.h" #include "iree-dialects/Dialect/LinalgExt/Transforms/Passes.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "mlir/Dialect/Linalg/IR/Linalg.h" #include "mlir/Dialect/Math/IR/Math.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/SCF/SCF.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Pass/Pass.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" using namespace mlir; namespace IREE = mlir::iree_compiler::IREE; using namespace IREE::LinalgExt; /// Recursive method that lowers one dimension of the `TiledOpInterface` to /// scalar loops at a time. static LogicalResult lowerToLoopsImpl(OpBuilder &builder, TiledOpInterface tilableOp, ArrayRef<Range> loopRanges, unsigned loopDepth, SmallVectorImpl<Value> &ivs) { Location loc = tilableOp.getLoc(); if (loopDepth == loopRanges.size()) { return tilableOp.generateScalarImplementation(builder, loc, ivs); } LogicalResult status = success(); builder.create<scf::ForOp>( loc, loopRanges[loopDepth].offset, loopRanges[loopDepth].size, loopRanges[loopDepth].stride, ValueRange{}, [&](OpBuilder &b, Location loc, Value iv, ValueRange args) { ivs.push_back(iv); status = lowerToLoopsImpl(b, tilableOp, loopRanges, loopDepth + 1, ivs); b.create<scf::YieldOp>(loc); }); return status; } /// Main entry point for lowering `TiledOpInterface` op to loops. static LogicalResult lowerToLoops(OpBuilder &builder, TiledOpInterface tilableOp) { SmallVector<Range> loopBounds = tilableOp.getIterationDomain(builder); SmallVector<Value> ivs; return lowerToLoopsImpl(builder, tilableOp, loopBounds, 0, ivs); } /// Pattern rewriter hook to lower a `TiledOpInterface` to loops. 
namespace { struct TiledOpInterfaceLowerToLoopsPattern : public RewritePattern { TiledOpInterfaceLowerToLoopsPattern(MLIRContext *context, PatternBenefit benefit = 1) : RewritePattern(MatchAnyOpTypeTag(), benefit, context) {} LogicalResult matchAndRewrite(Operation *op, PatternRewriter &rewriter) const override { auto tilableOp = dyn_cast<TiledOpInterface>(op); if (!tilableOp) { return failure(); } if (llvm::any_of(tilableOp->getResults(), [&](Value v) { return v.getType().isa<ShapedType>(); })) { return rewriter.notifyMatchFailure( tilableOp, "lower to loops needs to have tensor semantics"); } if (failed(lowerToLoops(rewriter, tilableOp))) { return failure(); } rewriter.eraseOp(op); return success(); } }; } // namespace //===----------------------------------------------------------------------===// // Pass //===----------------------------------------------------------------------===// namespace { struct LinalgExtToLoopsPass : public LinalgExtToLoopsBase<LinalgExtToLoopsPass> { void getDependentDialects(DialectRegistry &registry) const override { registry.insert<linalg::LinalgDialect, StandardOpsDialect, mlir::arith::ArithmeticDialect, math::MathDialect, memref::MemRefDialect, scf::SCFDialect>(); } void runOnOperation() override { MLIRContext *context = &getContext(); RewritePatternSet patterns(context); patterns.insert<TiledOpInterfaceLowerToLoopsPattern>(context); if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)))) { return signalPassFailure(); } } }; } // namespace std::unique_ptr<OperationPass<FuncOp>> IREE::LinalgExt::createLinalgExtToLoopsPass() { return std::make_unique<LinalgExtToLoopsPass>(); }
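// Editor's note: a minimal sketch of driving this pass from a pass manager,
// assuming an MLIR of the same vintage as this file (FuncOp still at the top
// level). `lowerLinalgExtToLoops` is an illustrative helper name, not part of
// this file.
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"

static LogicalResult lowerLinalgExtToLoops(ModuleOp module) {
  PassManager pm(module.getContext());
  // Run the lowering on every function nested in the module.
  pm.addNestedPass<FuncOp>(IREE::LinalgExt::createLinalgExtToLoopsPass());
  return pm.run(module);
}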
//===----------------------------------------------------------------------===//
// SimpleProxy/dispatcher/ibusiness_event.hh (svcx8/SimpleProxy, MIT)
//===----------------------------------------------------------------------===//
#ifndef IBUSINESS_EVENT_HEADER
#define IBUSINESS_EVENT_HEADER

#include <cstdint>

#include <absl/status/status.h>

class IPoller;

class IBusinessEvent {
public:
    virtual ~IBusinessEvent() {}

    virtual absl::Status OnAcceptable(int) { return absl::OkStatus(); }
    virtual absl::Status OnCloseable(int) { return absl::OkStatus(); }
    virtual absl::Status OnReadable(int) { return absl::OkStatus(); }
    virtual absl::Status OnWritable(int) { return absl::OkStatus(); }

    IPoller* poller_ = nullptr;
};

#endif // IBUSINESS_EVENT_HEADER
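// Editor's note: a minimal sketch of a concrete business event, assuming the
// dispatcher hands these callbacks a ready socket descriptor. EchoEvent and
// its recv()/send() handling are illustrative and not part of SimpleProxy.
#include <sys/socket.h>
#include <unistd.h>

class EchoEvent : public IBusinessEvent {
public:
    absl::Status OnReadable(int fd) override {
        char buffer[4096];
        ssize_t n = recv(fd, buffer, sizeof(buffer), 0);
        if (n < 0)
            return absl::InternalError("recv failed");
        if (n == 0)
            return OnCloseable(fd);  // peer closed the connection
        send(fd, buffer, static_cast<size_t>(n), 0);
        return absl::OkStatus();
    }

    absl::Status OnCloseable(int fd) override {
        close(fd);
        return absl::OkStatus();
    }
};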
//===----------------------------------------------------------------------===//
// 1_Games/[C++] Chess/chess/KillerList.cpp (Team-on/works, MIT)
//===----------------------------------------------------------------------===//
#include "precompiledHeaders.h" #include "KillerList.h" KillerList::KillerList(){ killers = new figureBasic*[16]; killerSize = 0; } KillerList::KillerList(const KillerList &rhs) { killerSize = rhs.killerSize; killers = new figureBasic*[16]; for (char i = 0; i < killerSize; ++i) killers[i] = rhs.killers[i]; } KillerList::~KillerList(){ delete[] killers; } void KillerList::AddKiller(figureBasic *fig) { killers[killerSize++] = fig; } void KillerList::operator=(const KillerList &rhs) { killerSize = rhs.killerSize; for (char i = 0; i < killerSize; ++i) killers[i] = rhs.killers[i]; }
//===----------------------------------------------------------------------===//
// plugins/kyotocabinet/nb_db_kyotocabinet.cc (rtsisyk/mininb, BSD-2-Clause)
//===----------------------------------------------------------------------===//
/*
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the
 *    following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "../../nb_plugin_api.h"

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h> /* added: errno is used below but was only included transitively */

#include <sys/stat.h>
#include <sys/types.h>

#include <kcpolydb.h>

struct nb_db_kyotocabinet {
	struct nb_db base;
	kyotocabinet::TreeDB instance;
};

static struct nb_db *
nb_db_kyotocabinet_open(const struct nb_db_opts *opts)
{
	struct nb_db_kyotocabinet *kyotocabinet =
		new struct nb_db_kyotocabinet();
	assert (kyotocabinet != NULL);

	int r;
	r = mkdir(opts->path, 0777);
	if (r != 0 && errno != EEXIST) {
		fprintf(stderr, "mkdir: %d\n", r);
		delete kyotocabinet; /* fixed: this early return used to leak the object */
		return NULL;
	}

	char path[FILENAME_MAX];
	snprintf(path, FILENAME_MAX - 4, "%s/db", opts->path);
	path[FILENAME_MAX - 1] = 0;

	int open_options = kyotocabinet::PolyDB::OWRITER |
			   kyotocabinet::PolyDB::OCREATE;
	int tune_options = kyotocabinet::TreeDB::TSMALL |
			   kyotocabinet::TreeDB::TLINEAR;
	kyotocabinet->instance.tune_options(tune_options);
	//kyotocabinet->instance.tune_page(1024);

	if (!kyotocabinet->instance.open(path, open_options)) {
		fprintf(stderr, "db->open failed: %s\n",
			kyotocabinet->instance.error().name());
		goto error_2;
	}

	kyotocabinet->base.opts = opts;
	return &kyotocabinet->base;

error_2:
	delete kyotocabinet;
	return NULL;
}

static void
nb_db_kyotocabinet_close(struct nb_db *db)
{
	struct nb_db_kyotocabinet *kyotocabinet =
		(struct nb_db_kyotocabinet *) db;

	if (!kyotocabinet->instance.close()) {
		fprintf(stderr, "db->close failed: %s\n",
			kyotocabinet->instance.error().name());
	}

	delete kyotocabinet;
}

static int
nb_db_kyotocabinet_replace(struct nb_db *db, const void *key, size_t key_len,
			   const void *val, size_t val_len)
{
	struct nb_db_kyotocabinet *kc = (struct nb_db_kyotocabinet *) db;

	if (!kc->instance.set((const char *) key, key_len,
			      (const char *) val, val_len)) {
		fprintf(stderr, "db->set() failed\n");
		return -1;
	}

	return 0;
}

static int
nb_db_kyotocabinet_remove(struct nb_db *db, const void *key, size_t key_len)
{
	struct nb_db_kyotocabinet *kc = (struct nb_db_kyotocabinet *) db;

	if (!kc->instance.remove((const char *) key, key_len)) {
		fprintf(stderr, "db->remove() failed\n");
		return -1;
	}

	return 0;
}

static int
nb_db_kyotocabinet_select(struct nb_db *db, const void *key, size_t key_len,
			  void **pval, size_t *pval_len)
{
	struct nb_db_kyotocabinet *kc = (struct nb_db_kyotocabinet *) db;

	assert (pval
== NULL); (void) pval; (void) pval_len; if (!kc->instance.get((const char *) key, key_len, NULL, 0)) { fprintf(stderr, "db->select() failed\n"); return -1; } return 0; } static void nb_db_kyotocabinet_valfree(struct nb_db *db, void *val) { (void) db; free(val); } static struct nb_db_if plugin = { .name = "kyotocabinet", .open = nb_db_kyotocabinet_open, .close = nb_db_kyotocabinet_close, .replace = nb_db_kyotocabinet_replace, .remove = nb_db_kyotocabinet_remove, .select = nb_db_kyotocabinet_select, .valfree = nb_db_kyotocabinet_valfree, }; extern "C" NB_DB_PLUGIN const struct nb_db_if * nb_db_kyotocabinet_plugin(void) { return &plugin; }
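// Editor's note: a sketch of how a benchmark driver might exercise this plugin
// through the nb_db_if vtable. The exact nb_db_opts layout lives in
// nb_plugin_api.h, so the `opts.path` assignment below assumes a plain string
// pointer; treat this as pseudocode, not a verified harness.
//
//   const struct nb_db_if *api = nb_db_kyotocabinet_plugin();
//   struct nb_db_opts opts = {};
//   opts.path = "/tmp/nb-kc-test";
//   struct nb_db *db = api->open(&opts);
//   if (db != NULL) {
//       api->replace(db, "key", 3, "value", 5);
//       api->select(db, "key", 3, NULL, NULL);   /* existence check only */
//       api->close(db);
//   }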
//===----------------------------------------------------------------------===//
// level_zero/api/core/ze_core_loader.cpp (mattcarter2017/compute-runtime,
// Intel/MIT)
//===----------------------------------------------------------------------===//
/* * Copyright (C) 2020-2022 Intel Corporation * * SPDX-License-Identifier: MIT * */ #include "level_zero/experimental/source/tracing/tracing_imp.h" #include "level_zero/source/inc/ze_intel_gpu.h" #include <level_zero/ze_api.h> #include <level_zero/ze_ddi.h> #include <level_zero/zet_api.h> #include <level_zero/zet_ddi.h> #include "ze_ddi_tables.h" ze_gpu_driver_dditable_t driver_ddiTable; ZE_APIEXPORT ze_result_t ZE_APICALL zeGetDriverProcAddrTable( ze_api_version_t version, ze_driver_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnGet = zeDriverGet; pDdiTable->pfnGetApiVersion = zeDriverGetApiVersion; pDdiTable->pfnGetProperties = zeDriverGetProperties; pDdiTable->pfnGetIpcProperties = zeDriverGetIpcProperties; pDdiTable->pfnGetExtensionProperties = zeDriverGetExtensionProperties; pDdiTable->pfnGetExtensionFunctionAddress = zeDriverGetExtensionFunctionAddress; driver_ddiTable.core_ddiTable.Driver = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnGet = zeDriverGet_Tracing; pDdiTable->pfnGetApiVersion = zeDriverGetApiVersion_Tracing; pDdiTable->pfnGetProperties = zeDriverGetProperties_Tracing; pDdiTable->pfnGetIpcProperties = zeDriverGetIpcProperties_Tracing; pDdiTable->pfnGetExtensionProperties = zeDriverGetExtensionProperties_Tracing; } return result; } ZE_DLLEXPORT ze_result_t ZE_APICALL zeGetMemProcAddrTable( ze_api_version_t version, ze_mem_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnAllocShared = zeMemAllocShared; pDdiTable->pfnAllocDevice = zeMemAllocDevice; pDdiTable->pfnAllocHost = zeMemAllocHost; pDdiTable->pfnFree = zeMemFree; pDdiTable->pfnFreeExt = zeMemFreeExt; pDdiTable->pfnGetAllocProperties = zeMemGetAllocProperties; pDdiTable->pfnGetAddressRange = zeMemGetAddressRange; pDdiTable->pfnGetIpcHandle = zeMemGetIpcHandle; pDdiTable->pfnOpenIpcHandle = zeMemOpenIpcHandle; pDdiTable->pfnCloseIpcHandle = zeMemCloseIpcHandle; driver_ddiTable.core_ddiTable.Mem = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnAllocShared = zeMemAllocShared_Tracing; pDdiTable->pfnAllocDevice = zeMemAllocDevice_Tracing; pDdiTable->pfnAllocHost = zeMemAllocHost_Tracing; pDdiTable->pfnFree = zeMemFree_Tracing; pDdiTable->pfnGetAllocProperties = zeMemGetAllocProperties_Tracing; pDdiTable->pfnGetAddressRange = zeMemGetAddressRange_Tracing; pDdiTable->pfnGetIpcHandle = zeMemGetIpcHandle_Tracing; pDdiTable->pfnOpenIpcHandle = zeMemOpenIpcHandle_Tracing; pDdiTable->pfnCloseIpcHandle = zeMemCloseIpcHandle_Tracing; } return result; } ZE_DLLEXPORT ze_result_t ZE_APICALL zeGetContextProcAddrTable( ze_api_version_t version, ze_context_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > 
ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnCreate = zeContextCreate; pDdiTable->pfnCreateEx = zeContextCreateEx; pDdiTable->pfnDestroy = zeContextDestroy; pDdiTable->pfnGetStatus = zeContextGetStatus; pDdiTable->pfnSystemBarrier = zeContextSystemBarrier; pDdiTable->pfnMakeMemoryResident = zeContextMakeMemoryResident; pDdiTable->pfnEvictMemory = zeContextEvictMemory; pDdiTable->pfnMakeImageResident = zeContextMakeImageResident; pDdiTable->pfnEvictImage = zeContextEvictImage; driver_ddiTable.core_ddiTable.Context = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnCreate = zeContextCreate_Tracing; pDdiTable->pfnDestroy = zeContextDestroy_Tracing; pDdiTable->pfnGetStatus = zeContextGetStatus_Tracing; pDdiTable->pfnSystemBarrier = zeContextSystemBarrier_Tracing; pDdiTable->pfnMakeMemoryResident = zeContextMakeMemoryResident_Tracing; pDdiTable->pfnEvictMemory = zeContextEvictMemory_Tracing; pDdiTable->pfnMakeImageResident = zeContextMakeImageResident_Tracing; pDdiTable->pfnEvictImage = zeContextEvictImage_Tracing; } return result; } ZE_DLLEXPORT ze_result_t ZE_APICALL zeGetPhysicalMemProcAddrTable( ze_api_version_t version, ze_physical_mem_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnCreate = zePhysicalMemCreate; pDdiTable->pfnDestroy = zePhysicalMemDestroy; driver_ddiTable.core_ddiTable.PhysicalMem = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnCreate = zePhysicalMemCreate_Tracing; pDdiTable->pfnDestroy = zePhysicalMemDestroy_Tracing; } return result; } ZE_DLLEXPORT ze_result_t ZE_APICALL zeGetVirtualMemProcAddrTable( ze_api_version_t version, ze_virtual_mem_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnReserve = zeVirtualMemReserve; pDdiTable->pfnFree = zeVirtualMemFree; pDdiTable->pfnQueryPageSize = zeVirtualMemQueryPageSize; pDdiTable->pfnMap = zeVirtualMemMap; pDdiTable->pfnUnmap = zeVirtualMemUnmap; pDdiTable->pfnSetAccessAttribute = zeVirtualMemSetAccessAttribute; pDdiTable->pfnGetAccessAttribute = zeVirtualMemGetAccessAttribute; driver_ddiTable.core_ddiTable.VirtualMem = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnReserve = zeVirtualMemReserve_Tracing; pDdiTable->pfnFree = zeVirtualMemFree_Tracing; pDdiTable->pfnQueryPageSize = zeVirtualMemQueryPageSize_Tracing; pDdiTable->pfnMap = zeVirtualMemMap_Tracing; pDdiTable->pfnUnmap = zeVirtualMemUnmap_Tracing; pDdiTable->pfnSetAccessAttribute = zeVirtualMemSetAccessAttribute_Tracing; pDdiTable->pfnGetAccessAttribute = zeVirtualMemGetAccessAttribute_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetGlobalProcAddrTable( ze_api_version_t version, ze_global_dditable_t *pDdiTable) { if (nullptr == 
pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnInit = zeInit; driver_ddiTable.core_ddiTable.Global = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnInit = zeInit_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetDeviceProcAddrTable( ze_api_version_t version, ze_device_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnGet = zeDeviceGet; pDdiTable->pfnGetCommandQueueGroupProperties = zeDeviceGetCommandQueueGroupProperties; pDdiTable->pfnGetSubDevices = zeDeviceGetSubDevices; pDdiTable->pfnGetProperties = zeDeviceGetProperties; pDdiTable->pfnGetComputeProperties = zeDeviceGetComputeProperties; pDdiTable->pfnGetModuleProperties = zeDeviceGetModuleProperties; pDdiTable->pfnGetMemoryProperties = zeDeviceGetMemoryProperties; pDdiTable->pfnGetMemoryAccessProperties = zeDeviceGetMemoryAccessProperties; pDdiTable->pfnGetCacheProperties = zeDeviceGetCacheProperties; pDdiTable->pfnGetImageProperties = zeDeviceGetImageProperties; pDdiTable->pfnGetP2PProperties = zeDeviceGetP2PProperties; pDdiTable->pfnCanAccessPeer = zeDeviceCanAccessPeer; pDdiTable->pfnGetStatus = zeDeviceGetStatus; pDdiTable->pfnGetExternalMemoryProperties = zeDeviceGetExternalMemoryProperties; pDdiTable->pfnGetGlobalTimestamps = zeDeviceGetGlobalTimestamps; pDdiTable->pfnReserveCacheExt = zeDeviceReserveCacheExt; pDdiTable->pfnSetCacheAdviceExt = zeDeviceSetCacheAdviceExt; pDdiTable->pfnPciGetPropertiesExt = zeDevicePciGetPropertiesExt; driver_ddiTable.core_ddiTable.Device = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnGet = zeDeviceGet_Tracing; pDdiTable->pfnGetCommandQueueGroupProperties = zeDeviceGetCommandQueueGroupProperties_Tracing; pDdiTable->pfnGetSubDevices = zeDeviceGetSubDevices_Tracing; pDdiTable->pfnGetProperties = zeDeviceGetProperties_Tracing; pDdiTable->pfnGetComputeProperties = zeDeviceGetComputeProperties_Tracing; pDdiTable->pfnGetModuleProperties = zeDeviceGetModuleProperties_Tracing; pDdiTable->pfnGetMemoryProperties = zeDeviceGetMemoryProperties_Tracing; pDdiTable->pfnGetMemoryAccessProperties = zeDeviceGetMemoryAccessProperties_Tracing; pDdiTable->pfnGetCacheProperties = zeDeviceGetCacheProperties_Tracing; pDdiTable->pfnGetImageProperties = zeDeviceGetImageProperties_Tracing; pDdiTable->pfnGetP2PProperties = zeDeviceGetP2PProperties_Tracing; pDdiTable->pfnCanAccessPeer = zeDeviceCanAccessPeer_Tracing; pDdiTable->pfnGetStatus = zeDeviceGetStatus_Tracing; pDdiTable->pfnGetExternalMemoryProperties = zeDeviceGetExternalMemoryProperties_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetCommandQueueProcAddrTable( ze_api_version_t version, ze_command_queue_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > 
ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnCreate = zeCommandQueueCreate; pDdiTable->pfnDestroy = zeCommandQueueDestroy; pDdiTable->pfnExecuteCommandLists = zeCommandQueueExecuteCommandLists; pDdiTable->pfnSynchronize = zeCommandQueueSynchronize; driver_ddiTable.core_ddiTable.CommandQueue = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnCreate = zeCommandQueueCreate_Tracing; pDdiTable->pfnDestroy = zeCommandQueueDestroy_Tracing; pDdiTable->pfnExecuteCommandLists = zeCommandQueueExecuteCommandLists_Tracing; pDdiTable->pfnSynchronize = zeCommandQueueSynchronize_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetCommandListProcAddrTable( ze_api_version_t version, ze_command_list_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnAppendBarrier = zeCommandListAppendBarrier; pDdiTable->pfnAppendMemoryRangesBarrier = zeCommandListAppendMemoryRangesBarrier; pDdiTable->pfnCreate = zeCommandListCreate; pDdiTable->pfnCreateImmediate = zeCommandListCreateImmediate; pDdiTable->pfnDestroy = zeCommandListDestroy; pDdiTable->pfnClose = zeCommandListClose; pDdiTable->pfnReset = zeCommandListReset; pDdiTable->pfnAppendMemoryCopy = zeCommandListAppendMemoryCopy; pDdiTable->pfnAppendMemoryCopyRegion = zeCommandListAppendMemoryCopyRegion; pDdiTable->pfnAppendMemoryFill = zeCommandListAppendMemoryFill; pDdiTable->pfnAppendImageCopy = zeCommandListAppendImageCopy; pDdiTable->pfnAppendImageCopyRegion = zeCommandListAppendImageCopyRegion; pDdiTable->pfnAppendImageCopyToMemory = zeCommandListAppendImageCopyToMemory; pDdiTable->pfnAppendImageCopyFromMemory = zeCommandListAppendImageCopyFromMemory; pDdiTable->pfnAppendMemoryPrefetch = zeCommandListAppendMemoryPrefetch; pDdiTable->pfnAppendMemAdvise = zeCommandListAppendMemAdvise; pDdiTable->pfnAppendSignalEvent = zeCommandListAppendSignalEvent; pDdiTable->pfnAppendWaitOnEvents = zeCommandListAppendWaitOnEvents; pDdiTable->pfnAppendEventReset = zeCommandListAppendEventReset; pDdiTable->pfnAppendLaunchKernel = zeCommandListAppendLaunchKernel; pDdiTable->pfnAppendLaunchCooperativeKernel = zeCommandListAppendLaunchCooperativeKernel; pDdiTable->pfnAppendLaunchKernelIndirect = zeCommandListAppendLaunchKernelIndirect; pDdiTable->pfnAppendLaunchMultipleKernelsIndirect = zeCommandListAppendLaunchMultipleKernelsIndirect; pDdiTable->pfnAppendWriteGlobalTimestamp = zeCommandListAppendWriteGlobalTimestamp; pDdiTable->pfnAppendMemoryCopyFromContext = zeCommandListAppendMemoryCopyFromContext; pDdiTable->pfnAppendQueryKernelTimestamps = zeCommandListAppendQueryKernelTimestamps; driver_ddiTable.core_ddiTable.CommandList = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnAppendBarrier = zeCommandListAppendBarrier_Tracing; pDdiTable->pfnAppendMemoryRangesBarrier = zeCommandListAppendMemoryRangesBarrier_Tracing; pDdiTable->pfnCreate = zeCommandListCreate_Tracing; pDdiTable->pfnCreateImmediate = zeCommandListCreateImmediate_Tracing; pDdiTable->pfnDestroy = zeCommandListDestroy_Tracing; pDdiTable->pfnClose = 
zeCommandListClose_Tracing; pDdiTable->pfnReset = zeCommandListReset_Tracing; pDdiTable->pfnAppendMemoryCopy = zeCommandListAppendMemoryCopy_Tracing; pDdiTable->pfnAppendMemoryCopyRegion = zeCommandListAppendMemoryCopyRegion_Tracing; pDdiTable->pfnAppendMemoryFill = zeCommandListAppendMemoryFill_Tracing; pDdiTable->pfnAppendImageCopy = zeCommandListAppendImageCopy_Tracing; pDdiTable->pfnAppendImageCopyRegion = zeCommandListAppendImageCopyRegion_Tracing; pDdiTable->pfnAppendImageCopyToMemory = zeCommandListAppendImageCopyToMemory_Tracing; pDdiTable->pfnAppendImageCopyFromMemory = zeCommandListAppendImageCopyFromMemory_Tracing; pDdiTable->pfnAppendMemoryPrefetch = zeCommandListAppendMemoryPrefetch_Tracing; pDdiTable->pfnAppendMemAdvise = zeCommandListAppendMemAdvise_Tracing; pDdiTable->pfnAppendSignalEvent = zeCommandListAppendSignalEvent_Tracing; pDdiTable->pfnAppendWaitOnEvents = zeCommandListAppendWaitOnEvents_Tracing; pDdiTable->pfnAppendEventReset = zeCommandListAppendEventReset_Tracing; pDdiTable->pfnAppendLaunchKernel = zeCommandListAppendLaunchKernel_Tracing; pDdiTable->pfnAppendLaunchCooperativeKernel = zeCommandListAppendLaunchCooperativeKernel_Tracing; pDdiTable->pfnAppendLaunchKernelIndirect = zeCommandListAppendLaunchKernelIndirect_Tracing; pDdiTable->pfnAppendLaunchMultipleKernelsIndirect = zeCommandListAppendLaunchMultipleKernelsIndirect_Tracing; pDdiTable->pfnAppendWriteGlobalTimestamp = zeCommandListAppendWriteGlobalTimestamp_Tracing; pDdiTable->pfnAppendMemoryCopyFromContext = zeCommandListAppendMemoryCopyFromContext_Tracing; pDdiTable->pfnAppendQueryKernelTimestamps = zeCommandListAppendQueryKernelTimestamps_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetFenceProcAddrTable( ze_api_version_t version, ze_fence_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnCreate = zeFenceCreate; pDdiTable->pfnDestroy = zeFenceDestroy; pDdiTable->pfnHostSynchronize = zeFenceHostSynchronize; pDdiTable->pfnQueryStatus = zeFenceQueryStatus; pDdiTable->pfnReset = zeFenceReset; driver_ddiTable.core_ddiTable.Fence = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnCreate = zeFenceCreate_Tracing; pDdiTable->pfnDestroy = zeFenceDestroy_Tracing; pDdiTable->pfnHostSynchronize = zeFenceHostSynchronize_Tracing; pDdiTable->pfnQueryStatus = zeFenceQueryStatus_Tracing; pDdiTable->pfnReset = zeFenceReset_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetEventPoolProcAddrTable( ze_api_version_t version, ze_event_pool_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnCreate = zeEventPoolCreate; pDdiTable->pfnDestroy = zeEventPoolDestroy; pDdiTable->pfnGetIpcHandle = zeEventPoolGetIpcHandle; pDdiTable->pfnOpenIpcHandle = zeEventPoolOpenIpcHandle; pDdiTable->pfnCloseIpcHandle = zeEventPoolCloseIpcHandle; 
driver_ddiTable.core_ddiTable.EventPool = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnCreate = zeEventPoolCreate_Tracing; pDdiTable->pfnDestroy = zeEventPoolDestroy_Tracing; pDdiTable->pfnGetIpcHandle = zeEventPoolGetIpcHandle_Tracing; pDdiTable->pfnOpenIpcHandle = zeEventPoolOpenIpcHandle_Tracing; pDdiTable->pfnCloseIpcHandle = zeEventPoolCloseIpcHandle_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetEventProcAddrTable( ze_api_version_t version, ze_event_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnCreate = zeEventCreate; pDdiTable->pfnDestroy = zeEventDestroy; pDdiTable->pfnHostSignal = zeEventHostSignal; pDdiTable->pfnHostSynchronize = zeEventHostSynchronize; pDdiTable->pfnQueryStatus = zeEventQueryStatus; pDdiTable->pfnHostReset = zeEventHostReset; pDdiTable->pfnQueryKernelTimestamp = zeEventQueryKernelTimestamp; driver_ddiTable.core_ddiTable.Event = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnCreate = zeEventCreate_Tracing; pDdiTable->pfnDestroy = zeEventDestroy_Tracing; pDdiTable->pfnHostSignal = zeEventHostSignal_Tracing; pDdiTable->pfnHostSynchronize = zeEventHostSynchronize_Tracing; pDdiTable->pfnQueryStatus = zeEventQueryStatus_Tracing; pDdiTable->pfnHostReset = zeEventHostReset_Tracing; pDdiTable->pfnQueryKernelTimestamp = zeEventQueryKernelTimestamp_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetEventExpProcAddrTable( ze_api_version_t version, ze_event_exp_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnQueryTimestampsExp = zeEventQueryTimestampsExp; return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetImageProcAddrTable( ze_api_version_t version, ze_image_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnGetProperties = zeImageGetProperties; pDdiTable->pfnCreate = zeImageCreate; pDdiTable->pfnDestroy = zeImageDestroy; pDdiTable->pfnGetAllocPropertiesExt = zeImageGetAllocPropertiesExt; driver_ddiTable.core_ddiTable.Image = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnGetProperties = zeImageGetProperties_Tracing; pDdiTable->pfnCreate = zeImageCreate_Tracing; pDdiTable->pfnDestroy = zeImageDestroy_Tracing; pDdiTable->pfnGetAllocPropertiesExt = zeImageGetAllocPropertiesExt; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetModuleProcAddrTable( ze_api_version_t version, ze_module_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || 
ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnCreate = zeModuleCreate; pDdiTable->pfnDestroy = zeModuleDestroy; pDdiTable->pfnDynamicLink = zeModuleDynamicLink; pDdiTable->pfnGetNativeBinary = zeModuleGetNativeBinary; pDdiTable->pfnGetGlobalPointer = zeModuleGetGlobalPointer; pDdiTable->pfnGetKernelNames = zeModuleGetKernelNames; pDdiTable->pfnGetFunctionPointer = zeModuleGetFunctionPointer; pDdiTable->pfnGetProperties = zeModuleGetProperties; driver_ddiTable.core_ddiTable.Module = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnCreate = zeModuleCreate_Tracing; pDdiTable->pfnDestroy = zeModuleDestroy_Tracing; pDdiTable->pfnGetNativeBinary = zeModuleGetNativeBinary_Tracing; pDdiTable->pfnDynamicLink = zeModuleDynamicLink_Tracing; pDdiTable->pfnGetGlobalPointer = zeModuleGetGlobalPointer_Tracing; pDdiTable->pfnGetFunctionPointer = zeModuleGetFunctionPointer_Tracing; pDdiTable->pfnGetKernelNames = zeModuleGetKernelNames_Tracing; pDdiTable->pfnGetProperties = zeModuleGetProperties_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetModuleBuildLogProcAddrTable( ze_api_version_t version, ze_module_build_log_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnDestroy = zeModuleBuildLogDestroy; pDdiTable->pfnGetString = zeModuleBuildLogGetString; driver_ddiTable.core_ddiTable.ModuleBuildLog = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnDestroy = zeModuleBuildLogDestroy_Tracing; pDdiTable->pfnGetString = zeModuleBuildLogGetString_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetKernelProcAddrTable( ze_api_version_t version, ze_kernel_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnCreate = zeKernelCreate; pDdiTable->pfnDestroy = zeKernelDestroy; pDdiTable->pfnSetGroupSize = zeKernelSetGroupSize; pDdiTable->pfnSuggestGroupSize = zeKernelSuggestGroupSize; pDdiTable->pfnSuggestMaxCooperativeGroupCount = zeKernelSuggestMaxCooperativeGroupCount; pDdiTable->pfnSetArgumentValue = zeKernelSetArgumentValue; pDdiTable->pfnSetIndirectAccess = zeKernelSetIndirectAccess; pDdiTable->pfnGetIndirectAccess = zeKernelGetIndirectAccess; pDdiTable->pfnGetSourceAttributes = zeKernelGetSourceAttributes; pDdiTable->pfnGetProperties = zeKernelGetProperties; pDdiTable->pfnSetCacheConfig = zeKernelSetCacheConfig; pDdiTable->pfnGetName = zeKernelGetName; driver_ddiTable.core_ddiTable.Kernel = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnCreate = zeKernelCreate_Tracing; pDdiTable->pfnDestroy = zeKernelDestroy_Tracing; pDdiTable->pfnSetGroupSize = zeKernelSetGroupSize_Tracing; pDdiTable->pfnSuggestGroupSize = 
zeKernelSuggestGroupSize_Tracing; pDdiTable->pfnSuggestMaxCooperativeGroupCount = zeKernelSuggestMaxCooperativeGroupCount_Tracing; pDdiTable->pfnSetArgumentValue = zeKernelSetArgumentValue_Tracing; pDdiTable->pfnSetIndirectAccess = zeKernelSetIndirectAccess_Tracing; pDdiTable->pfnGetIndirectAccess = zeKernelGetIndirectAccess_Tracing; pDdiTable->pfnGetSourceAttributes = zeKernelGetSourceAttributes_Tracing; pDdiTable->pfnGetProperties = zeKernelGetProperties_Tracing; pDdiTable->pfnSetCacheConfig = zeKernelSetCacheConfig_Tracing; pDdiTable->pfnGetName = zeKernelGetName_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetSamplerProcAddrTable( ze_api_version_t version, ze_sampler_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; driver_ddiTable.enableTracing = getenv_tobool("ZET_ENABLE_API_TRACING_EXP"); ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnCreate = zeSamplerCreate; pDdiTable->pfnDestroy = zeSamplerDestroy; driver_ddiTable.core_ddiTable.Sampler = *pDdiTable; if (driver_ddiTable.enableTracing) { pDdiTable->pfnCreate = zeSamplerCreate_Tracing; pDdiTable->pfnDestroy = zeSamplerDestroy_Tracing; } return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetKernelExpProcAddrTable( ze_api_version_t version, ze_kernel_exp_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnSetGlobalOffsetExp = zeKernelSetGlobalOffsetExp; pDdiTable->pfnSchedulingHintExp = zeKernelSchedulingHintExp; driver_ddiTable.core_ddiTable.KernelExp = *pDdiTable; return result; } ZE_APIEXPORT ze_result_t ZE_APICALL zeGetImageExpProcAddrTable( ze_api_version_t version, ze_image_exp_dditable_t *pDdiTable) { if (nullptr == pDdiTable) return ZE_RESULT_ERROR_INVALID_ARGUMENT; if (ZE_MAJOR_VERSION(driver_ddiTable.version) != ZE_MAJOR_VERSION(version) || ZE_MINOR_VERSION(driver_ddiTable.version) > ZE_MINOR_VERSION(version)) return ZE_RESULT_ERROR_UNSUPPORTED_VERSION; ze_result_t result = ZE_RESULT_SUCCESS; pDdiTable->pfnGetMemoryPropertiesExp = zeImageGetMemoryPropertiesExp; pDdiTable->pfnViewCreateExp = zeImageViewCreateExp; driver_ddiTable.core_ddiTable.ImageExp = *pDdiTable; return result; }
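// Editor's note: a minimal sketch of how a caller might fetch one of these
// dispatch tables and call through it. The version constant and the zero init
// flag are illustrative; whether the direct or the _Tracing entry points land
// in the table depends on ZET_ENABLE_API_TRACING_EXP at query time.
static ze_result_t exampleDispatchInit() {
    ze_global_dditable_t global = {};
    ze_result_t res = zeGetGlobalProcAddrTable(ZE_API_VERSION_1_0, &global);
    if (res != ZE_RESULT_SUCCESS)
        return res;
    // pfnInit now points at either zeInit or zeInit_Tracing.
    return global.pfnInit(0);
}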
//===----------------------------------------------------------------------===//
// COSC1076/week5/question02.cpp (davidkevork/RMIT, MIT)
//===----------------------------------------------------------------------===//
#include <iostream>

// Walks the string until the NUL terminator and counts the characters.
// Takes const char* since the string is only read, never modified.
int stringLength(const char* string) {
	int length = 0;
	char lastChar = string[0];
	while (lastChar != '\0') {
		length += 1;
		lastChar = string[length];
	}
	return length;
}

int main() {
	char string[20] = "hello world";
	std::cout << string << std::endl;
	std::cout << stringLength(string) << std::endl;
	return 0;
}
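// Editor's note: a quick cross-check of the hand-rolled loop against the
// standard library's strlen, added for illustration only.
#include <cassert>
#include <cstring>

void checkAgainstStrlen() {
	char sample[] = "hello world";
	assert(stringLength(sample) == static_cast<int>(std::strlen(sample)));
}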
18.777778
49
0.600592
davidkevork
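The stringLength function above walks the array until it hits the NUL terminator, which is exactly what std::strlen does. A hedged sketch (not part of the original exercise) with a const-correct signature, checked against the standard library:

#include <cstring>
#include <iostream>

int stringLength(const char *s) {
    int length = 0;
    while (s[length] != '\0') ++length;  // count up to, but not including, the NUL
    return length;
}

int main() {
    const char *s = "hello world";
    // both print 11
    std::cout << stringLength(s) << " " << std::strlen(s) << std::endl;
}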
4fa5e3f37b31c1a52391f2d82cca7cf4c44f1d12
2,396
cpp
C++
src/common/file_buffer.cpp
shenyunlong/duckdb
ecb90f22b36a50b051fdd8e0d681bade3365c430
[ "MIT" ]
null
null
null
src/common/file_buffer.cpp
shenyunlong/duckdb
ecb90f22b36a50b051fdd8e0d681bade3365c430
[ "MIT" ]
7
2020-08-25T22:24:16.000Z
2020-09-06T00:16:49.000Z
src/common/file_buffer.cpp
shenyunlong/duckdb
ecb90f22b36a50b051fdd8e0d681bade3365c430
[ "MIT" ]
null
null
null
#include "duckdb/common/file_buffer.hpp" #include "duckdb/common/file_system.hpp" #include "duckdb/common/helper.hpp" #include "duckdb/common/checksum.hpp" #include "duckdb/common/exception.hpp" #include <cstring> namespace duckdb { using namespace std; FileBuffer::FileBuffer(FileBufferType type, uint64_t bufsiz) : type(type) { const int SECTOR_SIZE = Storage::SECTOR_SIZE; // round up to the nearest SECTOR_SIZE, thi sis only really necessary if the file buffer will be used for Direct IO if (bufsiz % SECTOR_SIZE != 0) { bufsiz += SECTOR_SIZE - (bufsiz % SECTOR_SIZE); } assert(bufsiz % SECTOR_SIZE == 0); assert(bufsiz >= SECTOR_SIZE); // we add (SECTOR_SIZE - 1) to ensure that we can align the buffer to SECTOR_SIZE malloced_buffer = (data_ptr_t)malloc(bufsiz + (SECTOR_SIZE - 1)); if (!malloced_buffer) { throw std::bad_alloc(); } // round to multiple of SECTOR_SIZE uint64_t num = (uint64_t)malloced_buffer; uint64_t remainder = num % SECTOR_SIZE; if (remainder != 0) { num = num + SECTOR_SIZE - remainder; } assert(num % SECTOR_SIZE == 0); assert(num + bufsiz <= ((uint64_t)malloced_buffer + bufsiz + (SECTOR_SIZE - 1))); assert(num >= (uint64_t)malloced_buffer); // construct the FileBuffer object internal_buffer = (data_ptr_t)num; internal_size = bufsiz; buffer = internal_buffer + Storage::BLOCK_HEADER_SIZE; size = internal_size - Storage::BLOCK_HEADER_SIZE; } FileBuffer::~FileBuffer() { free(malloced_buffer); } void FileBuffer::Read(FileHandle &handle, uint64_t location) { // read the buffer from disk handle.Read(internal_buffer, internal_size, location); // compute the checksum uint64_t stored_checksum = *((uint64_t *)internal_buffer); uint64_t computed_checksum = Checksum(buffer, size); // verify the checksum if (stored_checksum != computed_checksum) { throw IOException("Corrupt database file: computed checksum %llu does not match stored checksum %llu in block", computed_checksum, stored_checksum); } } void FileBuffer::Write(FileHandle &handle, uint64_t location) { // compute the checksum and write it to the start of the buffer uint64_t checksum = Checksum(buffer, size); *((uint64_t *)internal_buffer) = checksum; // now write the buffer handle.Write(internal_buffer, internal_size, location); } void FileBuffer::Clear() { memset(internal_buffer, 0, internal_size); } } // namespace duckdb
34.228571
116
0.738314
shenyunlong
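FileBuffer's constructor over-allocates by SECTOR_SIZE - 1 bytes and rounds the malloc'd address up to the next sector boundary, so the resulting buffer is usable for Direct IO. A standalone sketch of that align-up arithmetic (the 4096-byte sector size is illustrative, not duckdb's constant):

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
    const uintptr_t SECTOR_SIZE = 4096;                 // illustrative sector size
    void *raw = std::malloc(1000 + (SECTOR_SIZE - 1));  // slack so we can shift forward
    uintptr_t num = (uintptr_t)raw;
    if (num % SECTOR_SIZE != 0)
        num += SECTOR_SIZE - (num % SECTOR_SIZE);       // round up to the boundary
    assert(num % SECTOR_SIZE == 0);                     // aligned start
    assert(num - (uintptr_t)raw < SECTOR_SIZE);         // wasted slack under one sector
    std::free(raw);
    return 0;
}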
4fa6098a8545c15491b9629bfd8f756c5152f9f9
7,546
hpp
C++
library/src/auxiliary/rocauxiliary_orm2l_unm2l.hpp
YvanMokwinski/rocSOLVER
3ff9ad6e60da6dd5f6d3fe5ab02cf0d5bed9aa5f
[ "BSD-2-Clause" ]
null
null
null
library/src/auxiliary/rocauxiliary_orm2l_unm2l.hpp
YvanMokwinski/rocSOLVER
3ff9ad6e60da6dd5f6d3fe5ab02cf0d5bed9aa5f
[ "BSD-2-Clause" ]
null
null
null
library/src/auxiliary/rocauxiliary_orm2l_unm2l.hpp
YvanMokwinski/rocSOLVER
3ff9ad6e60da6dd5f6d3fe5ab02cf0d5bed9aa5f
[ "BSD-2-Clause" ]
null
null
null
/************************************************************************ * Derived from the BSD3-licensed * LAPACK routine (version 3.7.0) -- * Univ. of Tennessee, Univ. of California Berkeley, * Univ. of Colorado Denver and NAG Ltd.. * December 2016 * Copyright (c) 2019-2021 Advanced Micro Devices, Inc. * ***********************************************************************/ #pragma once #include "rocauxiliary_lacgv.hpp" #include "rocauxiliary_larf.hpp" #include "rocblas.hpp" #include "rocsolver.h" template <typename T, bool BATCHED> void rocsolver_orm2l_unm2l_getMemorySize(const rocblas_side side, const rocblas_int m, const rocblas_int n, const rocblas_int k, const rocblas_int batch_count, size_t* size_scalars, size_t* size_Abyx, size_t* size_diag, size_t* size_workArr) { // if quick return no workspace needed if(m == 0 || n == 0 || k == 0 || batch_count == 0) { *size_scalars = 0; *size_Abyx = 0; *size_diag = 0; *size_workArr = 0; return; } // size of temporary array for diagonal elements *size_diag = sizeof(T) * batch_count; // memory requirements to call larf rocsolver_larf_getMemorySize<T, BATCHED>(side, m, n, batch_count, size_scalars, size_Abyx, size_workArr); } template <bool COMPLEX, typename T, typename U> rocblas_status rocsolver_orm2l_ormql_argCheck(rocblas_handle handle, const rocblas_side side, const rocblas_operation trans, const rocblas_int m, const rocblas_int n, const rocblas_int k, const rocblas_int lda, const rocblas_int ldc, T A, T C, U ipiv) { // order is important for unit tests: // 1. invalid/non-supported values if(side != rocblas_side_left && side != rocblas_side_right) return rocblas_status_invalid_value; if(trans != rocblas_operation_none && trans != rocblas_operation_transpose && trans != rocblas_operation_conjugate_transpose) return rocblas_status_invalid_value; if((COMPLEX && trans == rocblas_operation_transpose) || (!COMPLEX && trans == rocblas_operation_conjugate_transpose)) return rocblas_status_invalid_value; bool left = (side == rocblas_side_left); // 2. invalid size if(m < 0 || n < 0 || k < 0 || ldc < m) return rocblas_status_invalid_size; if(left && (lda < m || k > m)) return rocblas_status_invalid_size; if(!left && (lda < n || k > n)) return rocblas_status_invalid_size; // skip pointer check if querying memory size if(rocblas_is_device_memory_size_query(handle)) return rocblas_status_continue; // 3. 
invalid pointers if((m * n && !C) || (k && !ipiv) || (left && m * k && !A) || (!left && n * k && !A)) return rocblas_status_invalid_pointer; return rocblas_status_continue; } template <typename T, typename U, bool COMPLEX = is_complex<T>> rocblas_status rocsolver_orm2l_unm2l_template(rocblas_handle handle, const rocblas_side side, const rocblas_operation trans, const rocblas_int m, const rocblas_int n, const rocblas_int k, U A, const rocblas_int shiftA, const rocblas_int lda, const rocblas_stride strideA, T* ipiv, const rocblas_stride strideP, U C, const rocblas_int shiftC, const rocblas_int ldc, const rocblas_stride strideC, const rocblas_int batch_count, T* scalars, T* Abyx, T* diag, T** workArr) { ROCSOLVER_ENTER("orm2l_unm2l", "side:", side, "trans:", trans, "m:", m, "n:", n, "k:", k, "shiftA:", shiftA, "lda:", lda, "shiftC:", shiftC, "ldc:", ldc, "bc:", batch_count); // quick return if(!n || !m || !k || !batch_count) return rocblas_status_success; hipStream_t stream; rocblas_get_stream(handle, &stream); // determine limits and indices bool left = (side == rocblas_side_left); bool transpose = (trans != rocblas_operation_none); rocblas_int start, step, nq, ncol, nrow; if(left) { nq = m; ncol = n; if(!transpose) { start = -1; step = 1; } else { start = k; step = -1; } } else { nq = n; nrow = m; if(!transpose) { start = k; step = -1; } else { start = -1; step = 1; } } // conjugate tau if(COMPLEX && transpose) rocsolver_lacgv_template<T>(handle, k, ipiv, 0, 1, strideP, batch_count); rocblas_int i; for(rocblas_int j = 1; j <= k; ++j) { i = start + step * j; // current householder vector if(left) { nrow = m - k + i + 1; } else { ncol = n - k + i + 1; } // insert one in A(nq-k+i,i), i.e. the i-th element of the (nq-k)-th // subdiagonal, to build/apply the householder matrix hipLaunchKernelGGL(set_diag<T>, dim3(batch_count, 1, 1), dim3(1, 1, 1), 0, stream, diag, 0, 1, A, shiftA + idx2D(nq - k + i, i, lda), lda, strideA, 1, true); // Apply current Householder reflector rocsolver_larf_template(handle, side, nrow, ncol, A, shiftA + idx2D(0, i, lda), 1, strideA, (ipiv + i), strideP, C, shiftC, ldc, strideC, batch_count, scalars, Abyx, workArr); // restore original value of A(nq-k+i,i) hipLaunchKernelGGL(restore_diag<T>, dim3(batch_count, 1, 1), dim3(1, 1, 1), 0, stream, diag, 0, 1, A, shiftA + idx2D(nq - k + i, i, lda), lda, strideA, 1); } // restore tau if(COMPLEX && transpose) rocsolver_lacgv_template<T>(handle, k, ipiv, 0, 1, strideP, batch_count); return rocblas_status_success; }
38.111111
100
0.45428
YvanMokwinski
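rocsolver_orm2l_unm2l_template applies the k Householder reflectors of a QL factorization one at a time: it temporarily writes a 1 into the implicit unit entry of each vector, calls larf to apply H_i = I - tau_i * v_i * v_i^H, then restores the matrix entry. A plain CPU sketch of that core rank-1 update for a real vector, purely for illustration:

#include <cstdio>
#include <vector>

static double dot(const std::vector<double> &a, const std::vector<double> &b) {
    double s = 0.0;
    for (size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
    return s;
}

// x <- (I - tau * v * v^T) * x, the update larf performs per reflector
static void applyHouseholder(std::vector<double> &x, const std::vector<double> &v, double tau) {
    const double alpha = tau * dot(v, x);
    for (size_t i = 0; i < x.size(); ++i) x[i] -= alpha * v[i];
}

int main() {
    std::vector<double> x = {1.0, 2.0, 3.0};
    std::vector<double> v = {0.0, 0.0, 1.0};  // the "inserted one" sits in the last slot
    applyHouseholder(x, v, 2.0);              // tau = 2 with a unit v negates that component
    std::printf("%g %g %g\n", x[0], x[1], x[2]);  // prints: 1 2 -3
}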
4fa7f8ce9d56c11e043c9a554c84ced11d52eb2e
13,467
cpp
C++
LUMA/src/ObjectManager.cpp
ElsevierSoftwareX/SOFTX-D-18-00007
15f2319a67ebb8e3ff577bdd0f93890c3c0e8d0b
[ "Apache-2.0" ]
8
2019-06-06T16:28:20.000Z
2022-02-05T16:32:11.000Z
LUMA/src/ObjectManager.cpp
ExuberantWitness/LUMA
15f2319a67ebb8e3ff577bdd0f93890c3c0e8d0b
[ "Apache-2.0" ]
null
null
null
LUMA/src/ObjectManager.cpp
ExuberantWitness/LUMA
15f2319a67ebb8e3ff577bdd0f93890c3c0e8d0b
[ "Apache-2.0" ]
1
2021-04-22T14:37:10.000Z
2021-04-22T14:37:10.000Z
/* * -------------------------------------------------------------- * * ------ Lattice Boltzmann @ The University of Manchester ------ * * -------------------------- L-U-M-A --------------------------- * * Copyright 2018 The University of Manchester * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.* */ #include "../inc/stdafx.h" #include "../inc/ObjectManager.h" #include "../inc/GridObj.h" // Static declarations ObjectManager* ObjectManager::me; // ************************************************************************* // /// Instance creator ObjectManager* ObjectManager::getInstance() { if (!me) me = new ObjectManager; // Private construction return me; // Return pointer to new object } /// \brief Instance creator with grid hierarchy assignment. /// \param g pointer to grid hierarchy. ObjectManager* ObjectManager::getInstance(GridObj* g) { if (!me) me = new ObjectManager(g); // Private construction return me; // Return pointer to new object } /// Instance destructor void ObjectManager::destroyInstance() { if (me) delete me; // Delete pointer from static context not destructor } // ************************************************************************* // /// Default constructor ObjectManager::ObjectManager(void) { _Grids = nullptr; }; /// Default destructor ObjectManager::~ObjectManager(void) { me = nullptr; }; /// \brief Constructor with grid hierarchy assignment. /// \param g pointer to grid hierarchy. ObjectManager::ObjectManager(GridObj* g) : _Grids(g) { // Resize vector of flexible body flags hasIBMBodies.resize(L_NUM_LEVELS+1 ,false); hasFlexibleBodies.resize(L_NUM_LEVELS+1 ,false); // Set sub-iteration loop values timeav_subResidual = 0.0; timeav_subIterations = 0.0; }; // ************************************************************************* // /// \brief Compute forces on a BB rigid object. /// /// Uses momentum exchange to compute forces on rigid bodies. /// Currently working with bounce-back objects only. There is no /// bounding box so if we have walls in the domain they will be counted /// as well. /// /// \param i local i-index of solid site. /// \param j local j-index of solid site. /// \param k local k-index of solid site. /// \param g pointer to grid on which object resides.
void ObjectManager::computeLiftDrag(int i, int j, int k, GridObj *g) { // TODO: Need a bounding box for object if we have walls in the domain otherwise they will also be counted // TODO: Also need to be able to identify which body this site relates to so we can differentiate int N_lim = g->N_lim; int M_lim = g->M_lim; int K_lim = g->K_lim; // For MPI builds, ignore if part of object is in halo region #ifdef L_BUILD_FOR_MPI if (!GridUtils::isOnRecvLayer(g->XPos[i], g->YPos[j], g->ZPos[k])) #endif { #ifdef L_MOMEX_DEBUG // Write position of solid site if (debugstream.is_open()) debugstream << std::endl << g->XPos[i] << "," << g->YPos[j] << "," << g->ZPos[k]; #endif // Declare some local stores double contrib_x = 0.0, contrib_y = 0.0, contrib_z = 0.0; // Loop over directions from solid site for (int n = 0; n < L_NUM_VELS; n++) { // Get incoming direction int n_opp = GridUtils::getOpposite(n); // Compute destination coordinates (does not assume any periodicity) int xdest = i + c[eXDirection][n]; int ydest = j + c[eYDirection][n]; int zdest = k + c[eZDirection][n]; // Reject site on grid edges (like single-cell walls) if (GridUtils::isOffGrid(xdest, ydest, zdest, g)) return; // Only apply if streams to a fluid site if (g->LatTyp(xdest, ydest, zdest, M_lim, K_lim) == eFluid) { /* For HWBB: * * Force = * (pre-stream population toward wall + * post-stream population away from wall) * * since population is simply bounced-back, we can write as: * * Force = * (2 * pre-stream population toward wall) * * Multiplication by c unit vector resolves the result in * appropriate direction. */ // Store contribution in this direction contrib_x += 2.0 * c[eXDirection][n_opp] * g->f(xdest, ydest, zdest, n_opp, M_lim, K_lim, L_NUM_VELS); contrib_y += 2.0 * c[eYDirection][n_opp] * g->f(xdest, ydest, zdest, n_opp, M_lim, K_lim, L_NUM_VELS); contrib_z += 2.0 * c[eZDirection][n_opp] * g->f(xdest, ydest, zdest, n_opp, M_lim, K_lim, L_NUM_VELS); } // Add the total contribution of every direction of this site to the body forces bbbForceOnObjectX += contrib_x; bbbForceOnObjectY += contrib_y; bbbForceOnObjectZ += contrib_z; #ifdef L_MOMEX_DEBUG // Write contribution to file for this site if (debugstream.is_open()) debugstream << "," << std::to_string(contrib_x) << "," << std::to_string(contrib_y) << "," << std::to_string(contrib_z); #endif } } } // ************************************************************************* // /// \brief Compute forces on a BFL rigid object. /// /// Uses momentum exchange to compute forces on a marker that makes up /// a BFL body. Currently only works with a single BFL body but can /// easily be upgraded. /// /// \param v lattice direction of link being considered. /// \param id collapsed ijk index for site on which BFL BC is being applied. /// \param g pointer to grid on which marker resides. /// \param markerID id of marker on which force is to be updated.
void ObjectManager::computeLiftDrag(int v, int id, GridObj *g, int markerID) { // Get opposite once int v_opp = GridUtils::getOpposite(v); // Similar to BBB but we cannot assume that bounced-back population is the same anymore pBody[0].markers[markerID].forceX += c[eXDirection][v_opp] * (g->f[v_opp + id * L_NUM_VELS] + g->fNew[v + id * L_NUM_VELS]); pBody[0].markers[markerID].forceY += c[eYDirection][v_opp] * (g->f[v_opp + id * L_NUM_VELS] + g->fNew[v + id * L_NUM_VELS]); pBody[0].markers[markerID].forceZ += c[eZDirection][v_opp] * (g->f[v_opp + id * L_NUM_VELS] + g->fNew[v + id * L_NUM_VELS]); } // ************************************************************************* // /// \brief Resets the body force members prior to a new force calculation /// using momentum exchange. /// /// \param grid Grid object on which method was called void ObjectManager::resetMomexBodyForces(GridObj * grid) { if (grid->level == bbbOnGridLevel && grid->region_number == bbbOnGridReg) { bbbForceOnObjectX = 0.0; bbbForceOnObjectY = 0.0; bbbForceOnObjectZ = 0.0; #ifdef L_MOMEX_DEBUG // Open file for momentum exchange information toggleDebugStream(grid); #endif } // Reset the BFL body marker forces for (BFLBody& body : pBody) { // Only reset if body on this grid if (body._Owner->level == grid->level && body._Owner->region_number == grid->region_number) { for (BFLMarker& marker : body.markers) { marker.forceX = 0.0; marker.forceY = 0.0; marker.forceZ = 0.0; } } } } // ************************************************************************* // /// \brief Adds a bounce-back body to the grid by labelling sites. /// /// Override of the usual method which tries to place the object on the /// finest grid it can rather than a given grid. This will allow objects /// to span multiple levels. /// /// \param geom pointer to structure containing object information read from config file. /// \param _PCpts pointer to point cloud information. void ObjectManager::addBouncebackObject(GeomPacked *geom, PCpts *_PCpts) { // Store information about the body in the Object Manager bbbOnGridLevel = geom->onGridLev; bbbOnGridReg = geom->onGridReg; // Declarations std::vector<int> ijk; eLocationOnRank loc = eNone; GridObj *g = nullptr; bool bPointAdded = false; eType localType; // Loop over the points for (int a = 0; a < static_cast<int>(_PCpts->x.size()); a++) { // Reset flag bPointAdded = false; // Loop over possible grids from bottom up for (int lev = L_NUM_LEVELS; lev >= 0; lev--) { for (int reg = 0; reg < L_NUM_REGIONS; reg++) { GridUtils::getGrid(lev, reg, g); // Skip if cannot find grid if (!g) continue; // If found grid then check in range if (GridUtils::isOnThisRank(_PCpts->x[a], _PCpts->y[a], _PCpts->z[a], &loc, g, &ijk)) { localType = g->LatTyp(ijk[0], ijk[1], ijk[2], g->M_lim, g->K_lim); /* Update Typing Matrix and correct macroscopic. * We must allow labelling on TL but recall that TL2C sites * which pull from a refined region may need to have BB applied * so also label all the refined sites behind the fine grids * with the solid shape to make sure this is consistent. 
*/ if (localType != eVelocity) { // Change type g->LatTyp(ijk[0], ijk[1], ijk[2], g->M_lim, g->K_lim) = eSolid; // Change macro g->u(ijk[0], ijk[1], ijk[2], 0, g->M_lim, g->K_lim, L_DIMS) = 0.0; g->u(ijk[0], ijk[1], ijk[2], 1, g->M_lim, g->K_lim, L_DIMS) = 0.0; #if (L_DIMS == 3) g->u(ijk[0], ijk[1], ijk[2], 2, g->M_lim, g->K_lim, L_DIMS) = 0.0; #endif g->rho(ijk[0], ijk[1], ijk[2], g->M_lim, g->K_lim) = L_RHOIN; } } /* Do not break but try to add the solid site on every grid behind * the finest grid -- see comment above as to why */ } } } } // ************************************************************************* // /// \brief Adds a bounce-back body to the grid by labelling sites. /// \param g pointer to grid on which object resides. /// \param geom pointer to structure containing object information read from config file. /// \param _PCpts pointer to point cloud information. void ObjectManager::addBouncebackObject(GridObj *g, GeomPacked *geom, PCpts *_PCpts) { // Store information about the body in the Object Manager bbbOnGridLevel = geom->onGridLev; bbbOnGridReg = geom->onGridReg; // Declarations std::vector<int> ijk; eLocationOnRank loc = eNone; // Label the grid sites for (int a = 0; a < static_cast<int>(_PCpts->x.size()); a++) { // Get indices if on this rank if (GridUtils::isOnThisRank(_PCpts->x[a], _PCpts->y[a], _PCpts->z[a], &loc, g, &ijk)) { // Update Typing Matrix and correct macroscopic if (g->LatTyp(ijk[0], ijk[1], ijk[2], g->M_lim, g->K_lim) == eFluid) { // Change type g->LatTyp(ijk[0], ijk[1], ijk[2], g->M_lim, g->K_lim) = eSolid; // Change macro g->u(ijk[0], ijk[1], ijk[2], 0, g->M_lim, g->K_lim, L_DIMS) = 0.0; g->u(ijk[0], ijk[1], ijk[2], 1, g->M_lim, g->K_lim, L_DIMS) = 0.0; #if (L_DIMS == 3) g->u(ijk[0], ijk[1], ijk[2], 2, g->M_lim, g->K_lim, L_DIMS) = 0.0; #endif g->rho(ijk[0], ijk[1], ijk[2], g->M_lim, g->K_lim) = L_RHOIN; } } } } // ************************************************************************* // /// Private method for opening/closing a debugging file /// \param g pointer to grid toggling the stream void ObjectManager::toggleDebugStream(GridObj *g) { // Only do this if on correct time interval if (g->t == 0 || (g->t + 1) % static_cast<int>(L_EXTRA_OUT_FREQ * (1.0 / g->refinement_ratio)) != 0) return; // Open file if not open, close if already open if (!debugstream.is_open()) { debugstream.open(GridUtils::path_str + "/momex_debug_" + std::to_string(static_cast<int>((g->t + 1) * g->refinement_ratio)) + "_Rnk" + std::to_string(GridUtils::safeGetRank()) + ".csv", std::ios::out); // Add header for MomEx debug debugstream << "X Position,Y Position,Z Position"; for (int v = 0; v < L_NUM_VELS; ++v) { debugstream << ",F" + std::to_string(v) + "X,F" + std::to_string(v) + "Y,F" + std::to_string(v) + "Z"; } } else { debugstream.close(); } } // ************************************************************************* // /// Geometry data structure container constructor. ObjectManager::GeomPacked::GeomPacked() { } /// Geometry data structure container destructor. ObjectManager::GeomPacked::~GeomPacked() { } /// Geometry data structure container custom constructor.
ObjectManager::GeomPacked::GeomPacked( eObjectType objtype, int bodyID, std::string fileName, int onGridLev, int onGridReg, bool isCentreX, double refX, bool isCentreY, double refY, bool isCentreZ, double refZ, double bodyLength, eCartesianDirection scaleDirection, eMoveableType moveProperty, bool isClamped ) : objtype(objtype), bodyID(bodyID), fileName(fileName), onGridLev(onGridLev), onGridReg(onGridReg), isRefXCentre(isCentreX), bodyRefX(refX), isRefYCentre(isCentreY), bodyRefY(refY), isRefZCentre(isCentreZ), bodyRefZ(refZ), bodyLength(bodyLength), scaleDirection(scaleDirection), moveProperty(moveProperty), isClamped(isClamped) { } /// Method to interpret the reference type read in from the geometry file bool ObjectManager::GeomPacked::interpretRef(std::string refType) { if (refType == "CENTRE") return true; else if (refType != "START") L_ERROR("Unknown reference type in geometry file. Exiting.", GridUtils::logfile); return false; }
32.294964
124
0.632509
ElsevierSoftwareX
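The HWBB comment inside computeLiftDrag reduces the momentum exchange on each solid-fluid link to twice the pre-stream population travelling toward the wall, resolved along the lattice vector. A toy single-link check of that formula (every number below is made up):

#include <iostream>

int main() {
    const int c_x = -1, c_y = 0;    // unit vector of the bounced-back direction
    const double f_opp = 0.02;      // pre-stream population heading into the wall
    double fx = 2.0 * c_x * f_opp;  // x-force contribution of this link
    double fy = 2.0 * c_y * f_opp;  // y-force contribution
    std::cout << fx << " " << fy << std::endl;  // prints: -0.04 0
}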
4fa87be11855da78c6587ab7fcfc031daca74187
1,037
cpp
C++
tests/in_place.cpp
NathanSWard/optional
71de89349f982e7a6dd02b87a5f3d083161f8eca
[ "CC0-1.0" ]
2
2019-09-03T13:45:03.000Z
2019-09-03T13:45:21.000Z
tests/in_place.cpp
e-schumann/optional
e165e3554c888e7969513e1c5cdf3d9ffce642f7
[ "CC0-1.0" ]
null
null
null
tests/in_place.cpp
e-schumann/optional
e165e3554c888e7969513e1c5cdf3d9ffce642f7
[ "CC0-1.0" ]
null
null
null
#include "catch.hpp" #include <tl/optional.hpp> #include <tuple> #include <vector> struct takes_init_and_variadic { std::vector<int> v; std::tuple<int, int> t; template <class... Args> takes_init_and_variadic(std::initializer_list<int> l, Args &&... args) : v(l), t(std::forward<Args>(args)...) {} }; TEST_CASE("In place", "[in_place]") { tl::optional<int> o1{tl::in_place}; tl::optional<int> o2(tl::in_place); REQUIRE(o1); REQUIRE(o1 == 0); REQUIRE(o2); REQUIRE(o2 == 0); tl::optional<int> o3(tl::in_place, 42); REQUIRE(o3 == 42); tl::optional<std::tuple<int, int>> o4(tl::in_place, 0, 1); REQUIRE(o4); REQUIRE(std::get<0>(*o4) == 0); REQUIRE(std::get<1>(*o4) == 1); tl::optional<std::vector<int>> o5(tl::in_place, {0, 1}); REQUIRE(o5); REQUIRE((*o5)[0] == 0); REQUIRE((*o5)[1] == 1); tl::optional<takes_init_and_variadic> o6(tl::in_place, {0, 1}, 2, 3); REQUIRE(o6->v[0] == 0); REQUIRE(o6->v[1] == 1); REQUIRE(std::get<0>(o6->t) == 2); REQUIRE(std::get<1>(o6->t) == 3); }
24.690476
72
0.594021
NathanSWard
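tl::in_place forwards the constructor arguments straight into the contained object and value-initializes it when no arguments are given, which is why o1 == 0 in the test above. The same semantics with C++17 std::optional, whose interface tl::optional mirrors:

#include <cassert>
#include <optional>
#include <tuple>

int main() {
    std::optional<int> o1(std::in_place);  // value-initialized int -> 0
    assert(o1 == 0);
    std::optional<std::tuple<int, int>> o2(std::in_place, 0, 1);  // args forwarded
    assert(std::get<1>(*o2) == 1);
    return 0;
}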
4fa9ab50fe9a773e9b70b04fa4da831c37e5f1f7
4,480
cpp
C++
ColliderBit/src/analyses/Analysis_Covariance.cpp
GambitBSM/gambit_2.0
a4742ac94a0352585a3b9dcb9b222048a5959b91
[ "Unlicense" ]
1
2021-09-17T22:53:26.000Z
2021-09-17T22:53:26.000Z
ColliderBit/src/analyses/Analysis_Covariance.cpp
GambitBSM/gambit_2.0
a4742ac94a0352585a3b9dcb9b222048a5959b91
[ "Unlicense" ]
3
2021-07-22T11:23:48.000Z
2021-08-22T17:24:41.000Z
ColliderBit/src/analyses/Analysis_Covariance.cpp
GambitBSM/gambit_2.0
a4742ac94a0352585a3b9dcb9b222048a5959b91
[ "Unlicense" ]
1
2021-08-14T10:31:41.000Z
2021-08-14T10:31:41.000Z
#include "gambit/ColliderBit/analyses/Analysis.hpp" #include "gambit/ColliderBit/ATLASEfficiencies.hpp" namespace Gambit { namespace ColliderBit { using namespace std; /// bjf> Experimental! But already useful for helping me convert the key /// numbers from these analyses to Python for the p-value calculuations. /// This is a dumb place to define this, but there is no cpp file for /// AnalysisData and I can't be bothered making one. void AnalysisData::pythonize_me() const { static std::set<std::string> done; // Only want this printed out once for each analysis if(done.find(analysis_name)==done.end()) { done.insert(analysis_name); std::ostringstream SR_names; std::ostringstream SR_n; std::ostringstream SR_b; std::ostringstream SR_b_sys; std::ostringstream SR_s_sys; std::ostringstream SR_s; SR_names << "a.SR_names = ["; SR_n << "a.SR_n = ["; SR_b << "a.SR_b = ["; SR_b_sys << "a.SR_b_sys = ["; //SR_s_sys << "a.SR_s_sys = ["; //SR_s << "a.SR_s = ["; int i = 0; for (auto srd = begin(); srd != end(); ++srd,++i) { SR_names << "\"" << srd->sr_label << "__i"<<i << "\", "; SR_n << srd->n_obs << ", "; SR_b << srd->n_bkg << ", "; SR_b_sys << srd->n_bkg_err << ", "; //SR_s_sys << srd->n_sig_MC_sys << ", "; //SR_s << srd->n_sig_MC << ", "; } SR_names << "]"; SR_n << "]"; SR_b << "]"; SR_b_sys << "]"; //SR_s_sys << "]"; //SR_s << "]"; std::ostringstream full; full << "a = Analysis(\""<<analysis_name<<"\")"<<std::endl; full << SR_names.str() << std::endl; full << SR_n.str() << std::endl; full << SR_b.str() << std::endl; full << SR_b_sys.str() << std::endl; //full << SR_s_sys.str() << std::endl; //full << SR_s.str() << std::endl; if(hasCorrs()) { full << "a.cov = "; Eigen::IOFormat PythonFmt(Eigen::FullPrecision, 0, ", ", ",\n", "[", "]", "[", "]"); full << srcov.format(PythonFmt) << std::endl; } full << "a.N_SR = len(a.SR_names)" << std::endl; if(hasCorrs()) { full << "if allow_corr: "; } full << "analyses += [a]" << std::endl << std::endl; /// Could record or something, but for now just dump to stdout std::cout << full.str(); } } /// Dummy analysis code with a hard-coded return including a SR covariance matrix class Analysis_Covariance : public Analysis{ private: // Variables that holds the number of events passing // signal region cuts double _numSR; public: // Required detector sim static constexpr const char* detector = "ATLAS"; Analysis_Covariance() { set_analysis_name("Covariance"); set_luminosity(30.); // fb } void run(const HEPUtils::Event*) {} /// Combine the variables of another copy of this analysis (typically on another thread) into this one. void combine(const Analysis*) {} void collect_results() { // Now fill a results object with the result for two signal regions // add_result(SignalRegionData("SR label", n_obs, {n_sig_MC, n_sig_MC_sys}, {n_bkg, n_bkg_err})); // Note: n_sig_MC is usually incremented in the analysis code -- here we just hardcoded a value add_result(SignalRegionData("SR1", 100., {120, 0.}, {95., 9.5})); add_result(SignalRegionData("SR2", 10., {15, 0.}, {9., 4.})); // Hard-code a covariance matrix between these (representing the bkg sys values above, rotated by 30 deg) Eigen::MatrixXd cov(2,2); cov << 71.6875, 32.1512, 32.1512, 34.5625; set_covariance(cov); } protected: void analysis_specific_reset() { _numSR = 0; } /////////////////// }; DEFINE_ANALYSIS_FACTORY(Covariance) } }
34.461538
114
0.497991
GambitBSM
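The hard-coded matrix in collect_results is stated to be the background systematics diag(9.5^2, 4^2) rotated by 30 degrees, i.e. R * diag(90.25, 16) * R^T. A quick numerical check that reproduces 71.6875, 32.1512 and 34.5625:

#include <cmath>
#include <cstdio>

int main() {
    const double pi = 3.14159265358979323846;
    const double s1 = 9.5 * 9.5, s2 = 4.0 * 4.0;  // 90.25 and 16
    const double th = 30.0 * pi / 180.0;
    const double c = std::cos(th), s = std::sin(th);
    std::printf("%.4f %.4f %.4f\n",
                c * c * s1 + s * s * s2,   // 71.6875
                c * s * (s1 - s2),         // 32.1512
                s * s * s1 + c * c * s2);  // 34.5625
}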
4fabc4c1aa0861e2d0b9e4f93f0b6e742710578d
10,287
cc
C++
ui/views/widget/window_reorderer_unittest.cc
chromium/chromium
df46e572c3449a4b108d6e02fbe4f6d24cf98381
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
14,668
2015-01-01T01:57:10.000Z
2022-03-31T23:33:32.000Z
ui/views/widget/window_reorderer_unittest.cc
chromium/chromium
df46e572c3449a4b108d6e02fbe4f6d24cf98381
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
86
2015-10-21T13:02:42.000Z
2022-03-14T07:50:50.000Z
ui/views/widget/window_reorderer_unittest.cc
chromium/chromium
df46e572c3449a4b108d6e02fbe4f6d24cf98381
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
5,941
2015-01-02T11:32:21.000Z
2022-03-31T16:35:46.000Z
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <memory> #include "base/memory/raw_ptr.h" #include "ui/aura/test/test_windows.h" #include "ui/aura/window.h" #include "ui/aura/window_event_dispatcher.h" #include "ui/compositor/layer.h" #include "ui/compositor/test/test_layers.h" #include "ui/views/test/views_test_base.h" #include "ui/views/view.h" #include "ui/views/view_constants_aura.h" #include "ui/views/widget/widget.h" namespace views { namespace { // Sets the name of |window| and |window|'s layer to |name|. void SetWindowAndLayerName(aura::Window* window, const std::string& name) { window->SetName(name); window->layer()->SetName(name); } // Returns a string containing the name of each of the child windows (bottommost // first) of |parent|. The format of the string is "name1 name2 name3 ...". std::string ChildWindowNamesAsString(const aura::Window& parent) { std::string names; for (const auto* child : parent.children()) { if (!names.empty()) names += " "; names += child->GetName(); } return names; } class WindowReordererTest : public ViewsTestBase { public: Widget::InitParams CreateParams(Widget::InitParams::Type type) override { Widget::InitParams params = ViewsTestBase::CreateParams(type); params.parent = parent_; return params; } std::unique_ptr<Widget> CreateControlWidget(aura::Window* parent) { parent_ = parent; return CreateTestWidget(Widget::InitParams::TYPE_CONTROL); } private: raw_ptr<aura::Window> parent_ = nullptr; }; // Test that views with layers and views with associated windows are reordered // according to the view hierarchy. TEST_F(WindowReordererTest, Basic) { std::unique_ptr<Widget> parent = CreateControlWidget(root_window()); parent->Show(); aura::Window* parent_window = parent->GetNativeWindow(); View* contents_view = parent->SetContentsView(std::make_unique<View>()); // 1) Test that layers for views and layers for windows associated to a host // view are stacked below the layers for any windows not associated to a host // view. View* v = new View(); v->SetPaintToLayer(); v->layer()->SetName("v"); contents_view->AddChildView(v); std::unique_ptr<Widget> w1 = CreateControlWidget(parent_window); SetWindowAndLayerName(w1->GetNativeView(), "w1"); w1->Show(); std::unique_ptr<Widget> w2 = CreateControlWidget(parent_window); SetWindowAndLayerName(w2->GetNativeView(), "w2"); w2->Show(); EXPECT_EQ("w1 w2", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("v w1 w2", ui::test::ChildLayerNamesAsString(*parent_window->layer())); View* host_view2 = new View(); contents_view->AddChildView(host_view2); w2->GetNativeView()->SetProperty(kHostViewKey, host_view2); EXPECT_EQ("w2 w1", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("v w2 w1", ui::test::ChildLayerNamesAsString(*parent_window->layer())); View* host_view1 = new View(); w1->GetNativeView()->SetProperty(kHostViewKey, host_view1); contents_view->AddChildViewAt(host_view1, 0); EXPECT_EQ("w1 w2", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("w1 v w2", ui::test::ChildLayerNamesAsString(*parent_window->layer())); // 2) Test the z-order of the windows and layers as a result of reordering the // views. 
contents_view->ReorderChildView(host_view1, -1); EXPECT_EQ("w2 w1", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("v w2 w1", ui::test::ChildLayerNamesAsString(*parent_window->layer())); contents_view->ReorderChildView(host_view2, -1); EXPECT_EQ("w1 w2", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("v w1 w2", ui::test::ChildLayerNamesAsString(*parent_window->layer())); // 3) Test the z-order of the windows and layers as a result of reordering the // views in situations where the window order remains unchanged. contents_view->ReorderChildView(v, -1); EXPECT_EQ("w1 w2", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("w1 w2 v", ui::test::ChildLayerNamesAsString(*parent_window->layer())); contents_view->ReorderChildView(host_view2, -1); EXPECT_EQ("w1 w2", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("w1 v w2", ui::test::ChildLayerNamesAsString(*parent_window->layer())); } // Test that different orderings of: // - adding a window to a parent widget // - adding a "host" view to a parent widget // - associating the "host" view and window // all correctly reorder the child windows and layers. TEST_F(WindowReordererTest, Association) { std::unique_ptr<Widget> parent = CreateControlWidget(root_window()); parent->Show(); aura::Window* parent_window = parent->GetNativeWindow(); View* contents_view = parent->SetContentsView(std::make_unique<View>()); aura::Window* w1 = aura::test::CreateTestWindowWithId(0, parent->GetNativeWindow()); SetWindowAndLayerName(w1, "w1"); aura::Window* w2 = aura::test::CreateTestWindowWithId(0, nullptr); SetWindowAndLayerName(w2, "w2"); View* host_view2 = new View(); // 1) Test that parenting the window to the parent widget last results in a // correct ordering of child windows and layers. contents_view->AddChildView(host_view2); w2->SetProperty(views::kHostViewKey, host_view2); EXPECT_EQ("w1", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("w1", ui::test::ChildLayerNamesAsString(*parent_window->layer())); parent_window->AddChild(w2); EXPECT_EQ("w2 w1", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("w2 w1", ui::test::ChildLayerNamesAsString(*parent_window->layer())); // 2) Test that associating the window and "host" view last results in a // correct ordering of child windows and layers. View* host_view1 = new View(); contents_view->AddChildViewAt(host_view1, 0); EXPECT_EQ("w2 w1", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("w2 w1", ui::test::ChildLayerNamesAsString(*parent_window->layer())); w1->SetProperty(views::kHostViewKey, host_view1); EXPECT_EQ("w1 w2", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("w1 w2", ui::test::ChildLayerNamesAsString(*parent_window->layer())); // 3) Test that parenting the "host" view to the parent widget last results // in a correct ordering of child windows and layers. contents_view->RemoveChildView(host_view2); contents_view->AddChildViewAt(host_view2, 0); EXPECT_EQ("w2 w1", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("w2 w1", ui::test::ChildLayerNamesAsString(*parent_window->layer())); } // It is possible to associate a window to a view which has a parent layer // (other than the widget layer). In this case, the parent layer of the host // view and the parent layer of the associated window are different. Test that // the layers and windows are properly reordered in this case. 
TEST_F(WindowReordererTest, HostViewParentHasLayer) { std::unique_ptr<Widget> parent = CreateControlWidget(root_window()); parent->Show(); aura::Window* parent_window = parent->GetNativeWindow(); View* contents_view = parent->SetContentsView(std::make_unique<View>()); // Create the following view hierarchy. (*) denotes views which paint to a // layer. // // contents_view // +-- v1 // +-- v11* // +-- v12 (attached window) // +-- v13* // +--v2* View* v1 = new View(); contents_view->AddChildView(v1); View* v11 = new View(); v11->SetPaintToLayer(); v11->layer()->SetName("v11"); v1->AddChildView(v11); std::unique_ptr<Widget> w = CreateControlWidget(parent_window); SetWindowAndLayerName(w->GetNativeView(), "w"); w->Show(); View* v12 = new View(); v1->AddChildView(v12); w->GetNativeView()->SetProperty(kHostViewKey, v12); View* v13 = new View(); v13->SetPaintToLayer(); v13->layer()->SetName("v13"); v1->AddChildView(v13); View* v2 = new View(); v2->SetPaintToLayer(); v2->layer()->SetName("v2"); contents_view->AddChildView(v2); // Test initial state. EXPECT_EQ("w", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("v11 w v13 v2", ui::test::ChildLayerNamesAsString(*parent_window->layer())); // |w|'s layer should be stacked above |v1|'s layer. v1->SetPaintToLayer(); v1->layer()->SetName("v1"); EXPECT_EQ("w", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("v1 w v2", ui::test::ChildLayerNamesAsString(*parent_window->layer())); // Test moving the host view from one view with a layer to another. v1->RemoveChildView(v12); v2->AddChildView(v12); EXPECT_EQ("w", ChildWindowNamesAsString(*parent_window)); EXPECT_EQ("v1 v2 w", ui::test::ChildLayerNamesAsString(*parent_window->layer())); } // Test that a layer added beneath a view is restacked correctly. TEST_F(WindowReordererTest, ViewWithLayerBeneath) { std::unique_ptr<Widget> parent = CreateControlWidget(root_window()); parent->Show(); aura::Window* parent_window = parent->GetNativeWindow(); View* contents_view = parent->SetContentsView(std::make_unique<View>()); View* view_with_layer_beneath = contents_view->AddChildView(std::make_unique<View>()); ui::Layer layer_beneath; view_with_layer_beneath->AddLayerBeneathView(&layer_beneath); ASSERT_NE(nullptr, view_with_layer_beneath->layer()); view_with_layer_beneath->layer()->SetName("view"); layer_beneath.SetName("beneath"); // Verify that the initial ordering is correct. EXPECT_EQ("beneath view", ui::test::ChildLayerNamesAsString(*parent_window->layer())); // Add a hosted window to make WindowReorderer::ReorderChildWindows() restack // layers. std::unique_ptr<Widget> child_widget = CreateControlWidget(parent_window); SetWindowAndLayerName(child_widget->GetNativeView(), "child_widget"); child_widget->Show(); View* host_view = contents_view->AddChildView(std::make_unique<View>()); child_widget->GetNativeView()->SetProperty(kHostViewKey, host_view); // Verify the new order is correct. EXPECT_EQ("beneath view child_widget", ui::test::ChildLayerNamesAsString(*parent_window->layer())); } } // namespace } // namespace views
36.478723
80
0.713522
chromium
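The tests above check that WindowReorderer keeps the stacking order of child windows and layers in sync with the order of their associated "host" views. A generic sketch of that restack-to-match-host idea, with hypothetical names and no Views/Aura dependencies:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> hosts = {"w1", "v", "w2"};  // desired (view) order
    std::vector<std::string> stack = {"w2", "v", "w1"};  // current (layer) order
    auto pos = [&](const std::string &x) {
        return std::find(hosts.begin(), hosts.end(), x) - hosts.begin();
    };
    // restack so the layer order follows the host order
    std::sort(stack.begin(), stack.end(),
              [&](const std::string &a, const std::string &b) { return pos(a) < pos(b); });
    assert((stack == std::vector<std::string>{"w1", "v", "w2"}));
}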
4faea4ab3ce93c24e81efae27f6558f7c174b84b
47,171
cpp
C++
src/bdap/linkmanager.cpp
AmirAbrams/dynamic-swap
a102b9750f023b8617dcb4447025503307812959
[ "MIT" ]
76
2017-04-06T13:58:15.000Z
2022-01-04T16:36:58.000Z
src/bdap/linkmanager.cpp
AmirAbrams/dynamic-swap
a102b9750f023b8617dcb4447025503307812959
[ "MIT" ]
181
2016-11-19T21:09:35.000Z
2021-08-21T02:57:23.000Z
src/bdap/linkmanager.cpp
duality-solutions/Dynamic-2
432c340140307340d1babced012d0de51dbf64ae
[ "MIT" ]
61
2017-01-08T11:30:24.000Z
2021-08-13T07:06:46.000Z
// Copyright (c) 2019-2021 Duality Blockchain Solutions Developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "bdap/linkmanager.h" #include "bdap/domainentry.h" #include "bdap/domainentrydb.h" #include "bdap/linking.h" #include "bdap/utils.h" #include "bdap/vgp/include/encryption.h" // for VGP DecryptBDAPData #include "dht/ed25519.h" #include "pubkey.h" #include "wallet/wallet.h" CLinkManager* pLinkManager = NULL; //#ifdef ENABLE_WALLET std::string CLink::LinkState() const { if (nLinkState == 0) { return "Unknown"; } else if (nLinkState == 1) { return "Pending"; } else if (nLinkState == 2) { return "Complete"; } else if (nLinkState == 3) { return "Deleted"; } return "Undefined"; } std::string CLink::ToString() const { return strprintf( "CLink(\n" " nVersion = %d\n" " LinkID = %s\n" " fRequestFromMe = %s\n" " fAcceptFromMe = %s\n" " LinkState = %s\n" " RequestorFullObjectPath = %s\n" " RecipientFullObjectPath = %s\n" " RequestorPubKey = %s\n" " RecipientPubKey = %s\n" " SharedRequestPubKey = %s\n" " SharedAcceptPubKey = %s\n" " LinkMessage = %s\n" " nHeightRequest = %d\n" " nExpireTimeRequest = %d\n" " txHashRequest = %s\n" " nHeightAccept = %d\n" " nExpireTimeAccept = %d\n" " txHashAccept = %s\n" " SubjectID = %s\n" " RequestorWalletAddress = %s\n" " RecipientWalletAddress = %s\n" ")\n", nVersion, LinkID.ToString(), fRequestFromMe ? "true" : "false", fAcceptFromMe ? "true" : "false", LinkState(), stringFromVch(RequestorFullObjectPath), stringFromVch(RecipientFullObjectPath), stringFromVch(RequestorPubKey), stringFromVch(RecipientPubKey), stringFromVch(SharedRequestPubKey), stringFromVch(SharedAcceptPubKey), stringFromVch(LinkMessage), nHeightRequest, nExpireTimeRequest, txHashRequest.ToString(), nHeightAccept, nExpireTimeAccept, txHashAccept.ToString(), SubjectID.ToString(), stringFromVch(RequestorWalletAddress), stringFromVch(RecipientWalletAddress) ); } std::string CLink::RequestorFQDN() const { return stringFromVch(RequestorFullObjectPath); } std::string CLink::RecipientFQDN() const { return stringFromVch(RecipientFullObjectPath); } std::string CLink::RequestorPubKeyString() const { return stringFromVch(RequestorPubKey); } std::string CLink::RecipientPubKeyString() const { return stringFromVch(RecipientPubKey); } #ifdef ENABLE_WALLET bool CLinkManager::IsLinkFromMe(const std::vector<unsigned char>& vchLinkPubKey) { if (!pwalletMain) return false; CKeyID keyID(Hash160(vchLinkPubKey.begin(), vchLinkPubKey.end())); CKeyEd25519 keyOut; if (pwalletMain->GetDHTKey(keyID, keyOut)) return true; return false; } bool CLinkManager::IsLinkForMe(const std::vector<unsigned char>& vchLinkPubKey, const std::vector<unsigned char>& vchSharedPubKey) { if (!pwalletMain) return false; std::vector<std::vector<unsigned char>> vvchMyDHTPubKeys; if (!pwalletMain->GetDHTPubKeys(vvchMyDHTPubKeys)) return false; if (vvchMyDHTPubKeys.size() == 0) return false; for (const std::vector<unsigned char>& vchMyDHTPubKey : vvchMyDHTPubKeys) { CKeyID keyID(Hash160(vchMyDHTPubKey.begin(), vchMyDHTPubKey.end())); CKeyEd25519 dhtKey; if (pwalletMain->GetDHTKey(keyID, dhtKey)) { std::vector<unsigned char> vchGetSharedPubKey = GetLinkSharedPubKey(dhtKey, vchLinkPubKey); if (vchGetSharedPubKey == vchSharedPubKey) return true; } } return false; } bool CLinkManager::GetLinkPrivateKey(const std::vector<unsigned char>& vchSenderPubKey, const std::vector<unsigned char>& vchSharedPubKey, std::array<char, 32>& sharedSeed, 
std::string& strErrorMessage) { if (!pwalletMain) return false; std::vector<std::vector<unsigned char>> vvchDHTPubKeys; if (!pwalletMain->GetDHTPubKeys(vvchDHTPubKeys)) { strErrorMessage = "Failed to get DHT key vector."; return false; } // loop through each account key to check if it matches the shared key for (const std::vector<unsigned char>& vchPubKey : vvchDHTPubKeys) { CDomainEntry entry; if (pDomainEntryDB->ReadDomainEntryPubKey(vchPubKey, entry)) { CKeyEd25519 dhtKey; CKeyID keyID(Hash160(vchPubKey.begin(), vchPubKey.end())); if (pwalletMain->GetDHTKey(keyID, dhtKey)) { if (vchSharedPubKey == GetLinkSharedPubKey(dhtKey, vchSenderPubKey)) { sharedSeed = GetLinkSharedPrivateKey(dhtKey, vchSenderPubKey); return true; } } else { strErrorMessage = strErrorMessage + "Error getting DHT private key.\n"; } } } return false; } #endif // ENABLE_WALLET bool CLinkManager::FindLink(const uint256& id, CLink& link) { if (m_Links.count(id) > 0) { link = m_Links.at(id); return true; } return false; } bool CLinkManager::FindLinkBySubjectID(const uint256& subjectID, CLink& getLink) { for (const std::pair<uint256, CLink>& link : m_Links) { if (link.second.SubjectID == subjectID) // pending request { getLink = link.second; return true; } } return false; } #ifdef ENABLE_WALLET void CLinkManager::ProcessQueue() { if (!pwalletMain) return; if (pwalletMain->IsLocked()) return; // make sure we are not stuck in an infinite loop size_t size = QueueSize(); size_t counter = 0; LogPrintf("CLinkManager::%s -- Start links in queue = %d\n", __func__, size); while (!linkQueue.empty() && size > counter) { // TODO (BDAP): Do we need to lock the queue while processing? CLinkStorage storage = linkQueue.front(); ProcessLink(storage); linkQueue.pop(); counter++; } LogPrintf("CLinkManager::%s -- Finished links in queue = %d\n", __func__, QueueSize()); } #else void CLinkManager::ProcessQueue() { return; } #endif // ENABLE_WALLET bool CLinkManager::ListMyPendingRequests(std::vector<CLink>& vchLinks) { for (const std::pair<uint256, CLink>& link : m_Links) { if (link.second.nLinkState == 1 && link.second.fRequestFromMe) // pending request { vchLinks.push_back(link.second); } } return true; } bool CLinkManager::ListMyPendingAccepts(std::vector<CLink>& vchLinks) { for (const std::pair<uint256, CLink>& link : m_Links) { //LogPrintf("%s -- link:\n%s\n", __func__, link.second.ToString()); if (link.second.nLinkState == 1 && (!link.second.fRequestFromMe || (link.second.fRequestFromMe && link.second.fAcceptFromMe))) // pending accept { vchLinks.push_back(link.second); } } return true; } bool CLinkManager::ListMyCompleted(std::vector<CLink>& vchLinks) { for (const std::pair<uint256, CLink>& link : m_Links) { if (link.second.nLinkState == 2 && !link.second.txHashRequest.IsNull()) // completed link { vchLinks.push_back(link.second); } } return true; } bool CLinkManager::ProcessLink(const CLinkStorage& storage, const bool fStoreInQueueOnly) { #ifndef ENABLE_WALLET linkQueue.push(storage); return true; #else if (!pwalletMain) { linkQueue.push(storage); return true; } if (fStoreInQueueOnly || pwalletMain->IsLocked()) { linkQueue.push(storage); return true; } int nDataVersion = -1; if (!storage.Encrypted()) { if (storage.nType == 1) // Clear text link request { std::vector<unsigned char> vchData = RemoveVersionFromLinkData(storage.vchRawData, nDataVersion); CLinkRequest link(vchData, storage.txHash); LogPrint("bdap", "%s -- %s\n", __func__, link.ToString()); link.nHeight = storage.nHeight; link.txHash = storage.txHash; link.nExpireTime = 
storage.nExpireTime; CDomainEntry entry; if (GetDomainEntry(link.RequestorFullObjectPath, entry)) { if (SignatureProofIsValid(entry.GetWalletAddress(), link.RecipientFQDN(), link.SignatureProof)) { bool fIsLinkFromMe = IsLinkFromMe(storage.vchLinkPubKey); LogPrint("bdap", "%s -- Link request from me found with a valid signature proof. Link requestor = %s, recipient = %s, pubkey = %s\n", __func__, link.RequestorFQDN(), link.RecipientFQDN(), stringFromVch(storage.vchLinkPubKey)); uint256 linkID = GetLinkID(link); CLink record; std::map<uint256, CLink>::iterator it = m_Links.find(linkID); if (it != m_Links.end()) { record = it->second; } record.LinkID = linkID; record.fRequestFromMe = fIsLinkFromMe; if (record.nHeightAccept > 0) { record.nLinkState = 2; } else { record.nLinkState = 1; } record.RequestorFullObjectPath = link.RequestorFullObjectPath; record.RecipientFullObjectPath = link.RecipientFullObjectPath; record.RequestorPubKey = link.RequestorPubKey; record.SharedRequestPubKey = link.SharedPubKey; record.LinkMessage = link.LinkMessage; record.nHeightRequest = link.nHeight; record.nExpireTimeRequest = link.nExpireTime; record.txHashRequest = link.txHash; record.RequestorWalletAddress = entry.WalletAddress; if (record.SharedAcceptPubKey.size() > 0 && record.SharedRequestPubKey.size() > 0) { std::string strErrorMessage = ""; if (!GetMessageInfo(record, strErrorMessage)) { LogPrintf("%s -- Error getting message info %s\n", __func__, strErrorMessage); } else { pwalletMain->WriteLinkMessageInfo(record.SubjectID, record.vchSecretPubKeyBytes); m_LinkMessageInfo[record.SubjectID] = record.vchSecretPubKeyBytes; } //LogPrintf("%s -- link request = %s\n", __func__, record.ToString()); } LogPrint("bdap", "%s -- Clear text link request added to map id = %s\n", __func__, linkID.ToString()); m_Links[linkID] = record; } else LogPrintf("%s ***** Warning. Link request found with an invalid signature proof! Link requestor = %s, recipient = %s, pubkey = %s\n", __func__, link.RequestorFQDN(), link.RecipientFQDN(), stringFromVch(storage.vchLinkPubKey)); } else { LogPrintf("%s -- Link request GetDomainEntry failed.\n", __func__); return false; } } else if (storage.nType == 2) // Clear text accept { std::vector<unsigned char> vchData = RemoveVersionFromLinkData(storage.vchRawData, nDataVersion); CLinkAccept link(vchData, storage.txHash); LogPrint("bdap", "%s -- %s\n", __func__, link.ToString()); link.nHeight = storage.nHeight; link.txHash = storage.txHash; link.nExpireTime = storage.nExpireTime; CDomainEntry entry; if (GetDomainEntry(link.RecipientFullObjectPath, entry)) { if (SignatureProofIsValid(entry.GetWalletAddress(), link.RequestorFQDN(), link.SignatureProof)) { bool fIsLinkFromMe = IsLinkFromMe(storage.vchLinkPubKey); //bool fIsLinkForMe = IsLinkForMe(storage.vchLinkPubKey, storage.vchSharedPubKey); LogPrint("bdap", "%s -- Link accept from me found with a valid signature proof. 
Link requestor = %s, recipient = %s, pubkey = %s\n", __func__, link.RequestorFQDN(), link.RecipientFQDN(), stringFromVch(storage.vchLinkPubKey)); uint256 linkID = GetLinkID(link); CLink record; std::map<uint256, CLink>::iterator it = m_Links.find(linkID); if (it != m_Links.end()) { record = it->second; } record.LinkID = linkID; record.fAcceptFromMe = fIsLinkFromMe; record.nLinkState = 2; record.RequestorFullObjectPath = link.RequestorFullObjectPath; record.RecipientFullObjectPath = link.RecipientFullObjectPath; record.RecipientPubKey = link.RecipientPubKey; record.SharedAcceptPubKey = link.SharedPubKey; record.nHeightAccept = link.nHeight; record.nExpireTimeAccept = link.nExpireTime; record.txHashAccept = link.txHash; record.RecipientWalletAddress = entry.WalletAddress; if (record.SharedAcceptPubKey.size() > 0 && record.SharedRequestPubKey.size() > 0) { std::string strErrorMessage = ""; if (!GetMessageInfo(record, strErrorMessage)) { LogPrintf("%s -- Error getting message info %s\n", __func__, strErrorMessage); } else { pwalletMain->WriteLinkMessageInfo(record.SubjectID, record.vchSecretPubKeyBytes); m_LinkMessageInfo[record.SubjectID] = record.vchSecretPubKeyBytes; } //LogPrintf("%s -- link accept = %s\n", __func__, record.ToString()); } LogPrint("bdap", "%s -- Clear text accept added to map id = %s, %s\n", __func__, linkID.ToString(), record.ToString()); m_Links[linkID] = record; } else LogPrintf("%s -- Warning! Link accept found with an invalid signature proof! Link requestor = %s, recipient = %s, pubkey = %s\n", __func__, link.RequestorFQDN(), link.RecipientFQDN(), stringFromVch(storage.vchLinkPubKey)); } else { LogPrintf("%s -- Link accept GetDomainEntry failed.\n", __func__); return false; } } } else if (storage.Encrypted() && !pwalletMain->IsLocked()) { bool fIsLinkFromMe = IsLinkFromMe(storage.vchLinkPubKey); bool fIsLinkForMe = IsLinkForMe(storage.vchLinkPubKey, storage.vchSharedPubKey); if (!fIsLinkFromMe && !fIsLinkForMe) { // This happens if you lose your DHT private key but have the BDAP account link wallet private key. LogPrintf("%s -- ** Warning: Encrypted link received but can not process it: TxID = %s\n", __func__, storage.txHash.ToString()); return false; } if (storage.nType == 1 && fIsLinkFromMe) // Encrypted link request from me { //LogPrintf("%s -- Version 1 link request from me found! vchLinkPubKey = %s\n", __func__, stringFromVch(storage.vchLinkPubKey)); CKeyEd25519 privDHTKey; CKeyID keyID(Hash160(storage.vchLinkPubKey.begin(), storage.vchLinkPubKey.end())); if (pwalletMain->GetDHTKey(keyID, privDHTKey)) { std::vector<unsigned char> vchData = RemoveVersionFromLinkData(storage.vchRawData, nDataVersion); std::string strMessage = ""; std::vector<unsigned char> dataDecrypted; if (DecryptBDAPData(privDHTKey.GetPrivSeedBytes(), vchData, dataDecrypted, strMessage)) { std::vector<unsigned char> vchData, vchHash; CScript scriptData; scriptData << OP_RETURN << dataDecrypted; if (GetBDAPData(scriptData, vchData, vchHash)) { CLinkRequest link(dataDecrypted, storage.txHash); LogPrint("bdap", "%s -- %s\n", __func__, link.ToString()); CDomainEntry entry; if (!GetDomainEntry(link.RequestorFullObjectPath, entry)) { LogPrintf("%s -- Failed to get link requestor %s\n", __func__, stringFromVch(link.RequestorFullObjectPath)); return false; } if (!SignatureProofIsValid(entry.GetWalletAddress(), link.RecipientFQDN(), link.SignatureProof)) { LogPrintf("%s ***** Warning. Link request found with an invalid signature proof! 
Link requestor = %s, recipient = %s, pubkey = %s\n", __func__, link.RequestorFQDN(), link.RecipientFQDN(), stringFromVch(storage.vchLinkPubKey)); return false; } link.nHeight = storage.nHeight; link.nExpireTime = storage.nExpireTime; uint256 linkID = GetLinkID(link); CLink record; std::map<uint256, CLink>::iterator it = m_Links.find(linkID); if (it != m_Links.end()) { record = it->second; } record.LinkID = linkID; record.fRequestFromMe = fIsLinkFromMe; record.fAcceptFromMe = (fIsLinkFromMe && fIsLinkForMe); if (record.nHeightAccept > 0) { record.nLinkState = 2; } else { record.nLinkState = 1; } record.RequestorFullObjectPath = link.RequestorFullObjectPath; record.RecipientFullObjectPath = link.RecipientFullObjectPath; record.RequestorPubKey = link.RequestorPubKey; record.SharedRequestPubKey = link.SharedPubKey; record.LinkMessage = link.LinkMessage; record.nHeightRequest = link.nHeight; record.nExpireTimeRequest = link.nExpireTime; record.txHashRequest = link.txHash; record.RequestorWalletAddress = entry.WalletAddress; if (record.SharedAcceptPubKey.size() > 0 && record.SharedRequestPubKey.size() > 0) { std::string strErrorMessage = ""; if (!GetMessageInfo(record, strErrorMessage)) { LogPrintf("%s -- Error getting message info %s\n", __func__, strErrorMessage); } else { pwalletMain->WriteLinkMessageInfo(record.SubjectID, record.vchSecretPubKeyBytes); m_LinkMessageInfo[record.SubjectID] = record.vchSecretPubKeyBytes; } //LogPrintf("%s -- link request = %s\n", __func__, record.ToString()); } LogPrint("bdap", "%s -- Encrypted link request from me added to map id = %s\n%s\n", __func__, linkID.ToString(), record.ToString()); m_Links[linkID] = record; } else { LogPrintf("%s -- Link request GetBDAPData failed.\n", __func__); return false; } } else { LogPrintf("%s -- Link request DecryptBDAPData failed.\n", __func__); return false; } } else { LogPrintf("%s -- Link request GetDHTKey failed.\n", __func__); return false; } } else if (storage.nType == 1 && !fIsLinkFromMe && fIsLinkForMe) // Encrypted link request for me { //LogPrintf("%s -- Version 1 link request for me found! vchLinkPubKey = %s\n", __func__, stringFromVch(storage.vchLinkPubKey)); CKeyEd25519 sharedDHTKey; std::array<char, 32> sharedSeed; std::string strErrorMessage; if (GetLinkPrivateKey(storage.vchLinkPubKey, storage.vchSharedPubKey, sharedSeed, strErrorMessage)) { CKeyEd25519 sharedKey(sharedSeed); std::vector<unsigned char> vchData = RemoveVersionFromLinkData(storage.vchRawData, nDataVersion); std::string strMessage = ""; std::vector<unsigned char> dataDecrypted; if (DecryptBDAPData(sharedKey.GetPrivSeedBytes(), vchData, dataDecrypted, strMessage)) { std::vector<unsigned char> vchData, vchHash; CScript scriptData; scriptData << OP_RETURN << dataDecrypted; if (GetBDAPData(scriptData, vchData, vchHash)) { CLinkRequest link(dataDecrypted, storage.txHash); LogPrint("bdap", "%s -- %s\n", __func__, link.ToString()); CDomainEntry entry; if (!GetDomainEntry(link.RequestorFullObjectPath, entry)) { LogPrintf("%s -- Failed to get link requestor %s\n", __func__, stringFromVch(link.RequestorFullObjectPath)); return false; } if (!SignatureProofIsValid(entry.GetWalletAddress(), link.RecipientFQDN(), link.SignatureProof)) { LogPrintf("%s ***** Warning. Link request found with an invalid signature proof! 
Link requestor = %s, recipient = %s, pubkey = %s\n", __func__, link.RequestorFQDN(), link.RecipientFQDN(), stringFromVch(storage.vchLinkPubKey)); return false; } link.nHeight = storage.nHeight; link.nExpireTime = storage.nExpireTime; uint256 linkID = GetLinkID(link); CLink record; std::map<uint256, CLink>::iterator it = m_Links.find(linkID); if (it != m_Links.end()) { record = it->second; } record.LinkID = linkID; record.fRequestFromMe = fIsLinkFromMe; if (record.nHeightAccept > 0) { record.nLinkState = 2; } else { record.nLinkState = 1; } record.RequestorFullObjectPath = link.RequestorFullObjectPath; record.RecipientFullObjectPath = link.RecipientFullObjectPath; record.RequestorPubKey = link.RequestorPubKey; record.SharedRequestPubKey = link.SharedPubKey; record.LinkMessage = link.LinkMessage; record.nHeightRequest = link.nHeight; record.nExpireTimeRequest = link.nExpireTime; record.txHashRequest = link.txHash; record.RequestorWalletAddress = entry.WalletAddress; if (record.SharedAcceptPubKey.size() > 0 && record.SharedRequestPubKey.size() > 0) { std::string strErrorMessage = ""; if (!GetMessageInfo(record, strErrorMessage)) { LogPrintf("%s -- Error getting message info %s\n", __func__, strErrorMessage); } else { pwalletMain->WriteLinkMessageInfo(record.SubjectID, record.vchSecretPubKeyBytes); m_LinkMessageInfo[record.SubjectID] = record.vchSecretPubKeyBytes; } //LogPrintf("%s -- link request = %s\n", __func__, record.ToString()); } LogPrint("bdap", "%s -- Encrypted link request for me added to map id = %s\n%s\n", __func__, linkID.ToString(), record.ToString()); m_Links[linkID] = record; } else { LogPrintf("%s -- Link request GetBDAPData failed.\n", __func__); return false; } } else { LogPrintf("%s -- Link request DecryptBDAPData failed.\n", __func__); return false; } } else { LogPrintf("%s -- Link request GetLinkPrivateKey failed.\n", __func__); return false; } } else if (storage.nType == 2 && fIsLinkFromMe) // Link accept from me { //LogPrintf("%s -- Version 1 encrypted link accept from me found! vchLinkPubKey = %s\n", __func__, stringFromVch(storage.vchLinkPubKey)); CKeyEd25519 privDHTKey; CKeyID keyID(Hash160(storage.vchLinkPubKey.begin(), storage.vchLinkPubKey.end())); if (pwalletMain->GetDHTKey(keyID, privDHTKey)) { std::vector<unsigned char> vchData = RemoveVersionFromLinkData(storage.vchRawData, nDataVersion); std::string strMessage = ""; std::vector<unsigned char> dataDecrypted; if (DecryptBDAPData(privDHTKey.GetPrivSeedBytes(), vchData, dataDecrypted, strMessage)) { std::vector<unsigned char> vchData, vchHash; CScript scriptData; scriptData << OP_RETURN << dataDecrypted; if (GetBDAPData(scriptData, vchData, vchHash)) { CLinkAccept link(dataDecrypted, storage.txHash); LogPrint("bdap", "%s -- %s\n", __func__, link.ToString()); CDomainEntry entry; if (!GetDomainEntry(link.RecipientFullObjectPath, entry)) { LogPrintf("%s -- Failed to get link recipient %s\n", __func__, stringFromVch(link.RecipientFullObjectPath)); return false; } if (!SignatureProofIsValid(entry.GetWalletAddress(), link.RequestorFQDN(), link.SignatureProof)) { LogPrintf("%s ***** Warning. Link accept found with an invalid signature proof! 
Link requestor = %s, recipient = %s, pubkey = %s\n", __func__, link.RequestorFQDN(), link.RecipientFQDN(), stringFromVch(storage.vchLinkPubKey)); return false; } link.nHeight = storage.nHeight; link.nExpireTime = storage.nExpireTime; uint256 linkID = GetLinkID(link); CLink record; std::map<uint256, CLink>::iterator it = m_Links.find(linkID); if (it != m_Links.end()) { record = it->second; } record.LinkID = linkID; record.fRequestFromMe = (fIsLinkFromMe && fIsLinkForMe); record.fAcceptFromMe = fIsLinkFromMe; record.nLinkState = 2; record.RequestorFullObjectPath = link.RequestorFullObjectPath; record.RecipientFullObjectPath = link.RecipientFullObjectPath; record.RecipientPubKey = link.RecipientPubKey; record.SharedAcceptPubKey = link.SharedPubKey; record.nHeightAccept = link.nHeight; record.nExpireTimeAccept = link.nExpireTime; record.txHashAccept = link.txHash; record.RecipientWalletAddress = entry.WalletAddress; if (record.SharedAcceptPubKey.size() > 0 && record.SharedRequestPubKey.size() > 0) { std::string strErrorMessage = ""; if (!GetMessageInfo(record, strErrorMessage)) { LogPrintf("%s -- Error getting message info %s\n", __func__, strErrorMessage); } else { pwalletMain->WriteLinkMessageInfo(record.SubjectID, record.vchSecretPubKeyBytes); m_LinkMessageInfo[record.SubjectID] = record.vchSecretPubKeyBytes; } //LogPrintf("%s -- accept request = %s\n", __func__, record.ToString()); } LogPrint("bdap", "%s -- Encrypted link accept from me added to map id = %s\n%s\n", __func__, linkID.ToString(), record.ToString()); m_Links[linkID] = record; } else { LogPrintf("%s -- Link accept GetBDAPData failed.\n", __func__); return false; } } else { LogPrintf("%s -- Link accept DecryptBDAPData failed.\n", __func__); return false; } } else { LogPrintf("%s -- Link accept GetDHTKey failed.\n", __func__); return false; } } else if (storage.nType == 2 && !fIsLinkFromMe && fIsLinkForMe) // Link accept for me { //LogPrintf("%s -- Version 1 link accept for me found! vchLinkPubKey = %s\n", __func__, stringFromVch(storage.vchLinkPubKey)); CKeyEd25519 sharedDHTKey; std::array<char, 32> sharedSeed; std::string strErrorMessage; if (GetLinkPrivateKey(storage.vchLinkPubKey, storage.vchSharedPubKey, sharedSeed, strErrorMessage)) { CKeyEd25519 sharedKey(sharedSeed); std::vector<unsigned char> vchData = RemoveVersionFromLinkData(storage.vchRawData, nDataVersion); std::string strMessage = ""; std::vector<unsigned char> dataDecrypted; if (DecryptBDAPData(sharedKey.GetPrivSeedBytes(), vchData, dataDecrypted, strMessage)) { std::vector<unsigned char> vchData, vchHash; CScript scriptData; scriptData << OP_RETURN << dataDecrypted; if (GetBDAPData(scriptData, vchData, vchHash)) { CLinkAccept link(dataDecrypted, storage.txHash); LogPrint("bdap", "%s -- %s\n", __func__, link.ToString()); CDomainEntry entry; if (!GetDomainEntry(link.RecipientFullObjectPath, entry)) { LogPrintf("%s -- Failed to get link recipient %s\n", __func__, stringFromVch(link.RecipientFullObjectPath)); return false; } if (!SignatureProofIsValid(entry.GetWalletAddress(), link.RequestorFQDN(), link.SignatureProof)) { LogPrintf("%s ***** Warning. Link accept found with an invalid signature proof! 
Link requestor = %s, recipient = %s, pubkey = %s\n", __func__, link.RequestorFQDN(), link.RecipientFQDN(), stringFromVch(storage.vchLinkPubKey)); return false; } link.nHeight = storage.nHeight; link.nExpireTime = storage.nExpireTime; uint256 linkID = GetLinkID(link); CLink record; std::map<uint256, CLink>::iterator it = m_Links.find(linkID); if (it != m_Links.end()) { record = it->second; } record.LinkID = linkID; record.fAcceptFromMe = fIsLinkFromMe; record.nLinkState = 2; record.RequestorFullObjectPath = link.RequestorFullObjectPath; record.RecipientFullObjectPath = link.RecipientFullObjectPath; record.RecipientPubKey = link.RecipientPubKey; record.SharedAcceptPubKey = link.SharedPubKey; record.nHeightAccept = link.nHeight; record.nExpireTimeAccept = link.nExpireTime; record.txHashAccept = link.txHash; record.RecipientWalletAddress = entry.WalletAddress; if (record.SharedAcceptPubKey.size() > 0 && record.SharedRequestPubKey.size() > 0) { std::string strErrorMessage = ""; if (!GetMessageInfo(record, strErrorMessage)) { LogPrintf("%s -- Error getting message info %s\n", __func__, strErrorMessage); } else { pwalletMain->WriteLinkMessageInfo(record.SubjectID, record.vchSecretPubKeyBytes); m_LinkMessageInfo[record.SubjectID] = record.vchSecretPubKeyBytes; } //LogPrintf("%s -- accept request = %s\n", __func__, record.ToString()); } LogPrint("bdap", "%s -- Encrypted link accept for me added to map id = %s\n%s\n", __func__, linkID.ToString(), record.ToString()); m_Links[linkID] = record; } else { LogPrintf("%s -- Link accept GetBDAPData failed.\n", __func__); return false; } } else { LogPrintf("%s -- Link accept DecryptBDAPData failed.\n", __func__); return false; } } else { LogPrintf("%s -- Link accept GetLinkPrivateKey failed.\n", __func__); return false; } } else { linkQueue.push(storage); } } return true; #endif // ENABLE_WALLET } std::vector<CLinkInfo> CLinkManager::GetCompletedLinkInfo(const std::vector<unsigned char>& vchFullObjectPath) { std::vector<CLinkInfo> vchLinkInfo; for(const std::pair<uint256, CLink>& link : m_Links) { if (link.second.nLinkState == 2) // completed link { if (link.second.RequestorFullObjectPath == vchFullObjectPath) { CLinkInfo linkInfo(link.second.RecipientFullObjectPath, link.second.RecipientPubKey, link.second.RequestorPubKey); vchLinkInfo.push_back(linkInfo); } else if (link.second.RecipientFullObjectPath == vchFullObjectPath) { CLinkInfo linkInfo(link.second.RequestorFullObjectPath, link.second.RequestorPubKey, link.second.RecipientPubKey); vchLinkInfo.push_back(linkInfo); } } } return vchLinkInfo; } int CLinkManager::IsMyMessage(const uint256& subjectID, const uint256& messageID, const int64_t& timestamp) { std::vector<unsigned char> vchPubKey; if (GetLinkMessageInfo(subjectID, vchPubKey)) { if (messageID != GetMessageID(vchPubKey, timestamp)) { // Incorrect message id. Might be spoofed. 
return -100; } return 1; } return 0; } void CLinkManager::LoadLinkMessageInfo(const uint256& subjectID, const std::vector<unsigned char>& vchPubKey) { if (m_LinkMessageInfo.count(subjectID) == 0) m_LinkMessageInfo[subjectID] = vchPubKey; } bool CLinkManager::GetLinkMessageInfo(const uint256& subjectID, std::vector<unsigned char>& vchPubKey) { std::map<uint256, std::vector<unsigned char>>::iterator it = m_LinkMessageInfo.find(subjectID); if (it != m_LinkMessageInfo.end()) { vchPubKey = it->second; return true; // found subjectID } return false; // doesn't exist } uint256 GetLinkID(const CLinkRequest& request) { std::vector<unsigned char> vchLinkPath = request.LinkPath(); return Hash(vchLinkPath.begin(), vchLinkPath.end()); } uint256 GetLinkID(const CLinkAccept& accept) { std::vector<unsigned char> vchLinkPath = accept.LinkPath(); return Hash(vchLinkPath.begin(), vchLinkPath.end()); } uint256 GetLinkID(const std::string& account1, const std::string& account2) { if (account1 != account2) { std::vector<unsigned char> vchSeparator = {':'}; std::set<std::string> sorted; sorted.insert(account1); sorted.insert(account2); std::set<std::string>::iterator it = sorted.begin(); std::vector<unsigned char> vchLink1 = vchFromString(*it); std::advance(it, 1); std::vector<unsigned char> vchLink2 = vchFromString(*it); vchLink1.insert(vchLink1.end(), vchSeparator.begin(), vchSeparator.end()); vchLink1.insert(vchLink1.end(), vchLink2.begin(), vchLink2.end()); return Hash(vchLink1.begin(), vchLink1.end()); } return uint256(); } #ifdef ENABLE_WALLET bool GetSharedPrivateSeed(const CLink& link, std::array<char, 32>& seed, std::string& strErrorMessage) { if (!pwalletMain) return false; if (link.nLinkState != 2) return false; //LogPrint("bdap", "%s -- %s\n", __func__, link.ToString()); std::array<char, 32> sharedSeed1; std::array<char, 32> sharedSeed2; CDomainEntry entry; if (pDomainEntryDB->GetDomainEntryInfo(link.RecipientFullObjectPath, entry)) { if (link.fRequestFromMe) // Requestor { // first key exchange: requestor link pubkey + recipient account pubkey std::vector<unsigned char> vchRecipientPubKey = entry.DHTPublicKey; std::vector<unsigned char> vchRequestorPubKey = link.RequestorPubKey; CKeyEd25519 reqKey; CKeyID reqKeyID(Hash160(vchRequestorPubKey.begin(), vchRequestorPubKey.end())); if (pwalletMain->GetDHTKey(reqKeyID, reqKey)) { std::vector<unsigned char> vchGetLinkSharedPubKey = GetLinkSharedPubKey(reqKey, vchRecipientPubKey); if (link.SharedRequestPubKey == vchGetLinkSharedPubKey) { sharedSeed1 = GetLinkSharedPrivateKey(reqKey, vchRecipientPubKey); } else { strErrorMessage = strprintf("Requestor SharedRequestPubKey (%s) does not match derived shared request public key (%s).", stringFromVch(link.SharedRequestPubKey), stringFromVch(vchGetLinkSharedPubKey)); return false; } } else { strErrorMessage = strprintf("Failed to get reqKey %s DHT private key.", stringFromVch(vchRequestorPubKey)); return false; } // second key exchange: recipient link pubkey + requestor account pubkey CDomainEntry entryRequestor; if (pDomainEntryDB->GetDomainEntryInfo(link.RequestorFullObjectPath, entryRequestor)) { std::vector<unsigned char> vchReqPubKey = entryRequestor.DHTPublicKey; std::vector<unsigned char> vchLinkPubKey = link.RecipientPubKey; CKeyEd25519 linkKey; CKeyID linkKeyID(Hash160(vchReqPubKey.begin(), vchReqPubKey.end())); if (pwalletMain->GetDHTKey(linkKeyID, linkKey)) { std::vector<unsigned char> vchGetLinkSharedPubKey = GetLinkSharedPubKey(linkKey, vchLinkPubKey); if (link.SharedAcceptPubKey == 
vchGetLinkSharedPubKey) { sharedSeed2 = GetLinkSharedPrivateKey(linkKey, vchLinkPubKey); } else { strErrorMessage = strprintf("Requestor SharedAcceptPubKey (%s) does not match derived shared link public key (%s).", stringFromVch(link.SharedAcceptPubKey), stringFromVch(vchGetLinkSharedPubKey)); return false; } } else { strErrorMessage = strprintf("Failed to get requestor link Key %s DHT private key.", stringFromVch(vchLinkPubKey)); return false; } } else { strErrorMessage = strprintf("Can not find %s link requestor record.", stringFromVch(link.RequestorFullObjectPath)); return false; } } else // Recipient { // first key exchange: requestor link pubkey + recipient account pubkey std::vector<unsigned char> vchRecipientPubKey = entry.DHTPublicKey; std::vector<unsigned char> vchRequestorPubKey = link.RequestorPubKey; CKeyEd25519 recKey; CKeyID recKeyID(Hash160(vchRecipientPubKey.begin(), vchRecipientPubKey.end())); if (pwalletMain->GetDHTKey(recKeyID, recKey)) { std::vector<unsigned char> vchGetLinkSharedPubKey = GetLinkSharedPubKey(recKey, vchRequestorPubKey); if (link.SharedRequestPubKey == vchGetLinkSharedPubKey) { sharedSeed1 = GetLinkSharedPrivateKey(recKey, vchRequestorPubKey); } else { strErrorMessage = strprintf("Recipient SharedRequestPubKey (%s) does not match derived shared request public key (%s).", stringFromVch(link.SharedRequestPubKey), stringFromVch(vchGetLinkSharedPubKey)); return false; } } else { strErrorMessage = strprintf("Failed to get recKey %s DHT private key.", stringFromVch(vchRecipientPubKey)); return false; } // second key exchange: recipient link pubkey + requestor account pubkey CDomainEntry entryRequestor; if (pDomainEntryDB->GetDomainEntryInfo(link.RequestorFullObjectPath, entryRequestor)) { std::vector<unsigned char> vchLinkPubKey = link.RecipientPubKey; std::vector<unsigned char> vchReqPubKey = entryRequestor.DHTPublicKey; CKeyEd25519 linkKey; CKeyID linkKeyID(Hash160(vchLinkPubKey.begin(), vchLinkPubKey.end())); if (pwalletMain->GetDHTKey(linkKeyID, linkKey)) { std::vector<unsigned char> vchGetLinkSharedPubKey = GetLinkSharedPubKey(linkKey, vchReqPubKey); if (link.SharedAcceptPubKey == vchGetLinkSharedPubKey) { sharedSeed2 = GetLinkSharedPrivateKey(linkKey, vchReqPubKey); } else { strErrorMessage = strprintf("Recipient SharedAcceptPubKey (%s) does not match derived shared link public key (%s).", stringFromVch(link.SharedAcceptPubKey), stringFromVch(vchGetLinkSharedPubKey)); return false; } } else { strErrorMessage = strprintf("Failed to get recipient linkKey %s DHT private key.", stringFromVch(vchLinkPubKey)); return false; } } else { strErrorMessage = strprintf("Can not find %s link requestor record.", stringFromVch(link.RequestorFullObjectPath)); return false; } } } else { strErrorMessage = strprintf("Can not find %s link recipient record.", stringFromVch(link.RecipientFullObjectPath)); return false; } CKeyEd25519 sharedKey1(sharedSeed1); CKeyEd25519 sharedKey2(sharedSeed2); // third key exchange: shared link request pubkey + shared link accept pubkey // Only the link recipient and requestor can derive this secret key. // the third shared public key is not on the blockchain and should only be known by the participants. 
    seed = GetLinkSharedPrivateKey(sharedKey1, sharedKey2.GetPubKey());
    return true;
}

bool GetMessageInfo(CLink& link, std::string& strErrorMessage)
{
    std::array<char, 32> seed;
    if (!GetSharedPrivateSeed(link, seed, strErrorMessage)) {
        return false;
    }
    CKeyEd25519 key(seed);
    link.vchSecretPubKeyBytes = key.GetPubKeyBytes();
    link.SubjectID = Hash(link.vchSecretPubKeyBytes.begin(), link.vchSecretPubKeyBytes.end());
    return true;
}
#endif // ENABLE_WALLET

uint256 GetMessageID(const std::vector<unsigned char>& vchPubKey, const int64_t& timestamp)
{
    CScript scriptMessage;
    scriptMessage << vchPubKey << timestamp;
    return Hash(scriptMessage.begin(), scriptMessage.end());
}

uint256 GetMessageID(const CKeyEd25519& key, const int64_t& timestamp)
{
    return GetMessageID(key.GetPubKeyBytes(), timestamp);
}

//#endif // ENABLE_WALLET
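// The link-ID derivation above must be order-independent: both participants
// have to compute the same uint256 from the pair of account FQDNs, so the two
// names are sorted before being joined with ':' and hashed. Below is a minimal
// standalone sketch of the same idea; std::hash stands in for the codebase's
// double-SHA256 Hash() purely for illustration, and DeriveLinkId is a
// hypothetical name, not part of the BDAP API. GetMessageID follows the same
// pattern, hashing the serialized public key together with a timestamp.

#include <functional>
#include <iostream>
#include <string>
#include <utility>

std::size_t DeriveLinkId(std::string a, std::string b)
{
    if (a == b) return 0;       // mirrors the empty uint256() for self-links
    if (b < a) std::swap(a, b); // canonical ordering of the two accounts
    return std::hash<std::string>{}(a + ":" + b);
}

int main()
{
    // Both sides of the link derive the same identifier:
    std::cout << (DeriveLinkId("alice@public.bdap.io", "bob@public.bdap.io") ==
                  DeriveLinkId("bob@public.bdap.io", "alice@public.bdap.io"))
              << std::endl; // prints 1
}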
47.265531
254
0.545271
AmirAbrams
4fafd512581b5e05309b43067b1f821888bf3612
14,177
cc
C++
pkgs/libs/mesa/src/src/glu/sgi/libnurbs/internals/ccw.cc
manggoguy/parsec-modified
d14edfb62795805c84a4280d67b50cca175b95af
[ "BSD-3-Clause" ]
64
2015-03-06T00:30:56.000Z
2022-03-24T13:26:53.000Z
pkgs/libs/mesa/src/src/glu/sgi/libnurbs/internals/ccw.cc
manggoguy/parsec-modified
d14edfb62795805c84a4280d67b50cca175b95af
[ "BSD-3-Clause" ]
12
2020-12-15T08:30:19.000Z
2022-03-13T03:54:24.000Z
pkgs/libs/mesa/src/src/glu/sgi/libnurbs/internals/ccw.cc
manggoguy/parsec-modified
d14edfb62795805c84a4280d67b50cca175b95af
[ "BSD-3-Clause" ]
40
2015-02-26T15:31:16.000Z
2022-03-03T23:23:37.000Z
/* ** License Applicability. Except to the extent portions of this file are ** made subject to an alternative license as permitted in the SGI Free ** Software License B, Version 1.1 (the "License"), the contents of this ** file are subject only to the provisions of the License. You may not use ** this file except in compliance with the License. You may obtain a copy ** of the License at Silicon Graphics, Inc., attn: Legal Services, 1600 ** Amphitheatre Parkway, Mountain View, CA 94043-1351, or at: ** ** http://oss.sgi.com/projects/FreeB ** ** Note that, as provided in the License, the Software is distributed on an ** "AS IS" basis, with ALL EXPRESS AND IMPLIED WARRANTIES AND CONDITIONS ** DISCLAIMED, INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTIES AND ** CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY, FITNESS FOR A ** PARTICULAR PURPOSE, AND NON-INFRINGEMENT. ** ** Original Code. The Original Code is: OpenGL Sample Implementation, ** Version 1.2.1, released January 26, 2000, developed by Silicon Graphics, ** Inc. The Original Code is Copyright (c) 1991-2000 Silicon Graphics, Inc. ** Copyright in any portions created by third parties is as indicated ** elsewhere herein. All Rights Reserved. ** ** Additional Notice Provisions: The application programming interfaces ** established by SGI in conjunction with the Original Code are The ** OpenGL(R) Graphics System: A Specification (Version 1.2.1), released ** April 1, 1999; The OpenGL(R) Graphics System Utility Library (Version ** 1.3), released November 4, 1998; and OpenGL(R) Graphics with the X ** Window System(R) (Version 1.3), released October 19, 1998. This software ** was created using the OpenGL(R) version 1.2.1 Sample Implementation ** published by SGI, but has not been independently verified as being ** compliant with the OpenGL(R) version 1.2.1 Specification. 
*/ /* * ccw.c++ * * $Date: 2012/03/29 17:22:18 $ $Revision: 1.1.1.1 $ * $Header: /cvs/bao-parsec/pkgs/libs/mesa/src/src/glu/sgi/libnurbs/internals/ccw.cc,v 1.1.1.1 2012/03/29 17:22:18 uid42307 Exp $ */ #include "glimports.h" #include "mystdio.h" #include "myassert.h" #include "subdivider.h" #include "types.h" #include "arc.h" #include "trimvertex.h" #include "simplemath.h" inline int Subdivider::bbox( TrimVertex *a, TrimVertex *b, TrimVertex *c, int p ) { return bbox( a->param[p], b->param[p], c->param[p], a->param[1-p], b->param[1-p], c->param[1-p] ); } int Subdivider::ccwTurn_sr( Arc_ptr j1, Arc_ptr j2 ) // dir = 1 { register TrimVertex *v1 = &j1->pwlArc->pts[j1->pwlArc->npts-1]; register TrimVertex *v1last = &j1->pwlArc->pts[0]; register TrimVertex *v2 = &j2->pwlArc->pts[0]; register TrimVertex *v2last = &j2->pwlArc->pts[j2->pwlArc->npts-1]; register TrimVertex *v1next = v1-1; register TrimVertex *v2next = v2+1; int sgn; assert( v1 != v1last ); assert( v2 != v2last ); #ifndef NDEBUG _glu_dprintf( "arc_ccw_turn, p = %d\n", 0 ); #endif // the arcs lie on the line (0 == v1->param[0]) if( v1->param[0] == v1next->param[0] && v2->param[0] == v2next->param[0] ) return 0; if( v2next->param[0] < v2->param[0] || v1next->param[0] < v1->param[0] ) ::mylongjmp( jumpbuffer, 28 ); if( v1->param[1] < v2->param[1] ) return 0; else if( v1->param[1] > v2->param[1] ) return 1; while( 1 ) { if( v1next->param[0] < v2next->param[0] ) { #ifndef NDEBUG _glu_dprintf( "case a\n" ); #endif assert( v1->param[0] <= v1next->param[0] ); assert( v2->param[0] <= v1next->param[0] ); switch( bbox( v2, v2next, v1next, 1 ) ) { case -1: return 0; case 0: sgn = ccw( v1next, v2, v2next ); if( sgn != -1 ) { return sgn; } else { #ifdef DEBUG _glu_dprintf( "decr\n" ); #endif v1 = v1next--; if( v1 == v1last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } break; case 1: return 1; } } else if( v1next->param[0] > v2next->param[0] ) { #ifndef NDEBUG _glu_dprintf( "case b\n" ); #endif assert( v1->param[0] <= v2next->param[0] ); assert( v2->param[0] <= v2next->param[0] ); switch( bbox( v1, v1next, v2next, 1 ) ) { case -1: return 1; case 0: sgn = ccw( v1next, v1, v2next ); if( sgn != -1 ) { return sgn; } else { #ifdef DEBUG _glu_dprintf( "incr\n" ); #endif v2 = v2next++; if( v2 == v2last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } break; case 1: return 0; } } else { #ifndef NDEBUG _glu_dprintf( "case ab\n" ); #endif if( v1next->param[1] < v2next->param[1] ) return 0; else if( v1next->param[1] > v2next->param[1] ) return 1; else { #ifdef DEBUG _glu_dprintf( "incr\n" ); #endif v2 = v2next++; if( v2 == v2last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } } } } int Subdivider::ccwTurn_sl( Arc_ptr j1, Arc_ptr j2 ) // dir = 0 { register TrimVertex *v1 = &j1->pwlArc->pts[j1->pwlArc->npts-1]; register TrimVertex *v1last = &j1->pwlArc->pts[0]; register TrimVertex *v2 = &j2->pwlArc->pts[0]; register TrimVertex *v2last = &j2->pwlArc->pts[j2->pwlArc->npts-1]; register TrimVertex *v1next = v1-1; register TrimVertex *v2next = v2+1; int sgn; assert( v1 != v1last ); assert( v2 != v2last ); #ifndef NDEBUG _glu_dprintf( "arc_ccw_turn, p = %d\n", 0 ); #endif // the arcs lie on the line (0 == v1->param[0]) if( v1->param[0] == v1next->param[0] && v2->param[0] == v2next->param[0] ) return 0; if( v2next->param[0] > v2->param[0] || v1next->param[0] > v1->param[0] ) ::mylongjmp( 
jumpbuffer, 28 ); if( v1->param[1] < v2->param[1] ) return 1; else if( v1->param[1] > v2->param[1] ) return 0; while( 1 ) { if( v1next->param[0] > v2next->param[0] ) { #ifndef NDEBUG _glu_dprintf( "case c\n" ); #endif assert( v1->param[0] >= v1next->param[0] ); assert( v2->param[0] >= v1next->param[0] ); switch( bbox( v2next, v2, v1next, 1 ) ) { case -1: return 1; case 0: sgn = ccw( v1next, v2, v2next ); if( sgn != -1 ) return sgn; else { v1 = v1next--; #ifdef DEBUG _glu_dprintf( "decr\n" ); #endif if( v1 == v1last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } break; case 1: return 0; } } else if( v1next->param[0] < v2next->param[0] ) { #ifndef NDEBUG _glu_dprintf( "case d\n" ); #endif assert( v1->param[0] >= v2next->param[0] ); assert( v2->param[0] >= v2next->param[0] ); switch( bbox( v1next, v1, v2next, 1 ) ) { case -1: return 0; case 0: sgn = ccw( v1next, v1, v2next ); if( sgn != -1 ) return sgn; else { v2 = v2next++; #ifdef DEBUG _glu_dprintf( "incr\n" ); #endif if( v2 == v2last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } break; case 1: return 1; } } else { #ifdef DEBUG _glu_dprintf( "case cd\n" ); #endif if( v1next->param[1] < v2next->param[1] ) return 1; else if( v1next->param[1] > v2next->param[1] ) return 0; else { v2 = v2next++; #ifdef DEBUG _glu_dprintf( "incr\n" ); #endif if( v2 == v2last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } } } } int Subdivider::ccwTurn_tr( Arc_ptr j1, Arc_ptr j2 ) // dir = 1 { register TrimVertex *v1 = &j1->pwlArc->pts[j1->pwlArc->npts-1]; register TrimVertex *v1last = &j1->pwlArc->pts[0]; register TrimVertex *v2 = &j2->pwlArc->pts[0]; register TrimVertex *v2last = &j2->pwlArc->pts[j2->pwlArc->npts-1]; register TrimVertex *v1next = v1-1; register TrimVertex *v2next = v2+1; int sgn; assert( v1 != v1last ); assert( v2 != v2last ); #ifndef NDEBUG _glu_dprintf( "arc_ccw_turn, p = %d\n", 1 ); #endif // the arcs lie on the line (1 == v1->param[1]) if( v1->param[1] == v1next->param[1] && v2->param[1] == v2next->param[1] ) return 0; if( v2next->param[1] < v2->param[1] || v1next->param[1] < v1->param[1] ) ::mylongjmp( jumpbuffer, 28 ); if( v1->param[0] < v2->param[0] ) return 1; else if( v1->param[0] > v2->param[0] ) return 0; while( 1 ) { if( v1next->param[1] < v2next->param[1] ) { #ifndef NDEBUG _glu_dprintf( "case a\n" ); #endif assert( v1->param[1] <= v1next->param[1] ); assert( v2->param[1] <= v1next->param[1] ); switch( bbox( v2, v2next, v1next, 0 ) ) { case -1: return 1; case 0: sgn = ccw( v1next, v2, v2next ); if( sgn != -1 ) { return sgn; } else { #ifdef DEBUG _glu_dprintf( "decr\n" ); #endif v1 = v1next--; if( v1 == v1last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } break; case 1: return 0; } } else if( v1next->param[1] > v2next->param[1] ) { #ifndef NDEBUG _glu_dprintf( "case b\n" ); #endif assert( v1->param[1] <= v2next->param[1] ); assert( v2->param[1] <= v2next->param[1] ); switch( bbox( v1, v1next, v2next, 0 ) ) { case -1: return 0; case 0: sgn = ccw( v1next, v1, v2next ); if( sgn != -1 ) { return sgn; } else { #ifdef DEBUG _glu_dprintf( "incr\n" ); #endif v2 = v2next++; if( v2 == v2last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } break; case 1: return 1; } } else { #ifdef DEBUG _glu_dprintf( "case ab\n" ); #endif if( v1next->param[0] 
< v2next->param[0] ) return 1; else if( v1next->param[0] > v2next->param[0] ) return 0; else { #ifdef DEBUG _glu_dprintf( "incr\n" ); #endif v2 = v2next++; if( v2 == v2last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } } } } int Subdivider::ccwTurn_tl( Arc_ptr j1, Arc_ptr j2 ) { register TrimVertex *v1 = &j1->pwlArc->pts[j1->pwlArc->npts-1]; register TrimVertex *v1last = &j1->pwlArc->pts[0]; register TrimVertex *v2 = &j2->pwlArc->pts[0]; register TrimVertex *v2last = &j2->pwlArc->pts[j2->pwlArc->npts-1]; register TrimVertex *v1next = v1-1; register TrimVertex *v2next = v2+1; int sgn; assert( v1 != v1last ); assert( v2 != v2last ); #ifndef NDEBUG _glu_dprintf( "arc_ccw_turn, p = %d\n", 1 ); #endif // the arcs lie on the line (1 == v1->param[1]) if( v1->param[1] == v1next->param[1] && v2->param[1] == v2next->param[1] ) return 0; if( v2next->param[1] > v2->param[1] || v1next->param[1] > v1->param[1] ) ::mylongjmp( jumpbuffer, 28 ); if( v1->param[0] < v2->param[0] ) return 0; else if( v1->param[0] > v2->param[0] ) return 1; while( 1 ) { if( v1next->param[1] > v2next->param[1] ) { #ifndef NDEBUG _glu_dprintf( "case c\n" ); #endif assert( v1->param[1] >= v1next->param[1] ); assert( v2->param[1] >= v1next->param[1] ); switch( bbox( v2next, v2, v1next, 0 ) ) { case -1: return 0; case 0: sgn = ccw( v1next, v2, v2next ); if( sgn != -1 ) return sgn; else { v1 = v1next--; #ifdef DEBUG _glu_dprintf( "decr\n" ); #endif if( v1 == v1last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } break; case 1: return 1; } } else if( v1next->param[1] < v2next->param[1] ) { #ifndef NDEBUG _glu_dprintf( "case d\n" ); assert( v1->param[1] >= v2next->param[1] ); assert( v2->param[1] >= v2next->param[1] ); #endif switch( bbox( v1next, v1, v2next, 0 ) ) { case -1: return 1; case 0: sgn = ccw( v1next, v1, v2next ); if( sgn != -1 ) return sgn; else { v2 = v2next++; #ifdef DEBUG _glu_dprintf( "incr\n" ); #endif if( v2 == v2last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } break; case 1: return 0; } } else { #ifdef DEBUG _glu_dprintf( "case cd\n" ); #endif if( v1next->param[0] < v2next->param[0] ) return 0; else if( v1next->param[0] > v2next->param[0] ) return 1; else { v2 = v2next++; #ifdef DEBUG _glu_dprintf( "incr\n" ); #endif if( v2 == v2last ) { #ifdef DEBUG _glu_dprintf( "no good results\n" ); #endif return 0; // ill-conditioned, guess answer } } } } } #ifndef NDEBUG int Subdivider::bbox( register REAL sa, register REAL sb, register REAL sc, register REAL ta, register REAL tb, register REAL tc ) #else int Subdivider::bbox( register REAL sa, register REAL sb, register REAL sc, register REAL , register REAL , register REAL ) #endif { #ifndef NDEBUG assert( tc >= ta ); assert( tc <= tb ); #endif if( sa < sb ) { if( sc <= sa ) { return -1; } else if( sb <= sc ) { return 1; } else { return 0; } } else if( sa > sb ) { if( sc >= sa ) { return 1; } else if( sb >= sc ) { return -1; } else { return 0; } } else { if( sc > sa ) { return 1; } else if( sb > sc ) { return -1; } else { return 0; } } } /*---------------------------------------------------------------------------- * ccw - determine how three points are oriented by computing their * determinant. * Return 1 if the vertices are ccw oriented, * 0 if they are cw oriented, or * -1 if the computation is ill-conditioned. 
 *----------------------------------------------------------------------------
 */
int
Subdivider::ccw( TrimVertex *a, TrimVertex *b, TrimVertex *c )
{
    REAL d = det3( a, b, c );
    if( glu_abs(d) < 0.0001 ) return -1;
    return (d < 0.0) ? 0 : 1;
}
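// The ccw() test above reduces orientation to the sign of a 2-D determinant
// (assuming det3 computes the standard signed area of the parameter-space
// triangle). Here is a self-contained sketch of the same classification on
// plain coordinate pairs, reusing the 0.0001 ill-conditioning guard from the
// code above; orient2d is an illustrative name, not part of the GLU sources.

#include <cmath>
#include <cstdio>

static int orient2d(const double a[2], const double b[2], const double c[2])
{
    // Signed area determinant of (b - a) and (c - a).
    double det = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0]);
    if (std::fabs(det) < 0.0001) return -1; // nearly collinear: ill-conditioned
    return (det < 0.0) ? 0 : 1;             // 0 = clockwise, 1 = counterclockwise
}

int main(void)
{
    double a[2] = {0.0, 0.0}, b[2] = {1.0, 0.0}, c[2] = {0.0, 1.0};
    std::printf("%d %d\n", orient2d(a, b, c), orient2d(a, c, b)); // "1 0"
}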
24.959507
129
0.578402
manggoguy
4fb1d855e1e0c068ba2c79e87167dfc3c6db8683
3,833
cc
C++
src/eckit/system/SystemInfo.cc
dvuckovic/eckit
58a918e7be8fe073f37683abf639374ab1ad3e4f
[ "Apache-2.0" ]
10
2018-03-01T22:11:10.000Z
2021-05-17T14:13:58.000Z
src/eckit/system/SystemInfo.cc
dvuckovic/eckit
58a918e7be8fe073f37683abf639374ab1ad3e4f
[ "Apache-2.0" ]
43
2018-04-11T11:13:44.000Z
2022-03-31T15:28:03.000Z
src/eckit/system/SystemInfo.cc
dvuckovic/eckit
58a918e7be8fe073f37683abf639374ab1ad3e4f
[ "Apache-2.0" ]
20
2018-03-07T21:36:50.000Z
2022-03-30T13:25:25.000Z
/*
 * (C) Copyright 1996- ECMWF.
 *
 * This software is licensed under the terms of the Apache Licence Version 2.0
 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
 * In applying this licence, ECMWF does not waive the privileges and immunities
 * granted to it by virtue of its status as an intergovernmental organisation nor
 * does it submit to any jurisdiction.
 */

/// @author Baudouin Raoult
/// @author Tiago Quintino
/// @date   May 2016

#include "SystemInfo.h"

#include <pwd.h>
#include <sys/types.h>
#include <unistd.h>

#include <memory>

#include "eckit/eckit.h"
#include "eckit/utils/StringTools.h"
#include "eckit/exception/Exceptions.h"
#include "eckit/filesystem/LocalPathName.h"
#include "eckit/memory/MMap.h"
#include "eckit/memory/Shmget.h"

#if defined(__APPLE__) && defined(__MACH__)
#include "eckit/system/SystemInfoMacOSX.h"
#endif

#if defined(__linux__)
#include "eckit/system/SystemInfoLinux.h"
#endif

#if defined(__FreeBSD__)
#include "eckit/system/SystemInfoFreeBSD.h"
#endif

namespace eckit {
namespace system {

//----------------------------------------------------------------------------------------------------------------------

SystemInfo* makeSystemInfo(const std::string& system) {

    ///< @todo add a factory?

#if defined(__APPLE__) && defined(__MACH__)
    if (StringTools::startsWith(ECKIT_OS_NAME, "Darwin")) {  // double check with ecbuild name
        return new SystemInfoMacOSX();
    }
#endif
#if defined(__linux__)
    if (StringTools::startsWith(ECKIT_OS_NAME, "Linux")) {
        return new SystemInfoLinux();
    }
#endif
#if defined(__FreeBSD__)
    if (StringTools::startsWith(ECKIT_OS_NAME, "FreeBSD")) {
        return new SystemInfoFreeBSD();
    }
#endif
    NOTIMP;
}

static pthread_once_t once = PTHREAD_ONCE_INIT;
static std::unique_ptr<SystemInfo> systemInfoPtr;

static void createInstance() {
    ASSERT(!systemInfoPtr);
    systemInfoPtr.reset(makeSystemInfo(ECKIT_OS_NAME));
}

//----------------------------------------------------------------------------------------------------------------------

const SystemInfo& SystemInfo::instance() {
    pthread_once(&once, createInstance);
    ASSERT(systemInfoPtr);
    return *systemInfoPtr;
}

std::string SystemInfo::userName() const {
    char buf[4096];
    struct passwd pwbuf;
    struct passwd* pwbufp = nullptr;
    SYSCALL(::getpwuid_r(::getuid(), &pwbuf, buf, sizeof(buf), &pwbufp));
    ASSERT(pwbufp);
    return std::string(pwbuf.pw_name);
}

void SystemInfo::dumpProcMemInfo(std::ostream& os, const char* prepend) const {
    if (prepend)
        os << prepend;
    os << " SystemInfo::dumpProcMemInfo() NOT IMPLEMENTED FOR SYSTEM " << ECKIT_OS_NAME << std::endl;
}

void SystemInfo::dumpSysMemInfo(std::ostream& os, const char* prepend) const {
    if (prepend)
        os << prepend;
    os << " SystemInfo::dumpSysMemInfo() NOT IMPLEMENTED FOR SYSTEM " << ECKIT_OS_NAME << std::endl;
}

SystemInfo::~SystemInfo() {}

void SystemInfo::print(std::ostream& out) const {
    out << "SystemInfo(" << "executablePath=" << executablePath() << ")";
}

//----------------------------------------------------------------------------------------------------------------------

bool SystemInfo::isBigEndian() {
#if ECKIT_BIG_ENDIAN
    return true;
#elif ECKIT_LITTLE_ENDIAN
    return false;
#else
    throw SeriousBug("Unsupported endianness -- neither BIG nor LITTLE detected");
#endif
}

bool SystemInfo::isLittleEndian() {
#if ECKIT_BIG_ENDIAN
    return false;
#elif ECKIT_LITTLE_ENDIAN
    return true;
#else
    throw SeriousBug("Unsupported endianness -- neither BIG nor LITTLE detected");
#endif
}
//----------------------------------------------------------------------------------------------------------------------

}  // namespace system
}  // namespace eckit
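// SystemInfo::instance() above lazily builds the platform-specific singleton
// through pthread_once, which guarantees createInstance() runs exactly once
// even under concurrent first use. A minimal standalone sketch of that
// pattern follows (Config and makeConfig are hypothetical placeholders, not
// eckit APIs). Note that since C++11 a function-local static gives the same
// once-only guarantee with less machinery; the pthread form predates that.

#include <pthread.h>
#include <cstdio>
#include <memory>

struct Config { int value = 42; };                    // hypothetical payload
static Config* makeConfig() { return new Config(); }  // stands in for makeSystemInfo

static pthread_once_t once = PTHREAD_ONCE_INIT;
static std::unique_ptr<Config> configPtr;

static void createConfig() { configPtr.reset(makeConfig()); }

const Config& instance()
{
    pthread_once(&once, createConfig); // runs createConfig at most once, ever
    return *configPtr;
}

int main() { std::printf("%d\n", instance().value); }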
26.434483
120
0.615706
dvuckovic
4fb200e44ebaa6efdcfc1efaec83b8ffcc78b838
2,997
hpp
C++
examples/record_printer/client_hello_record.hpp
pioneer19/libcornet
9eb91629d8f9a6793b28af10a3535bfba0cc24ca
[ "Apache-2.0" ]
1
2020-07-25T06:39:24.000Z
2020-07-25T06:39:24.000Z
examples/record_printer/client_hello_record.hpp
pioneer19/libcornet
9eb91629d8f9a6793b28af10a3535bfba0cc24ca
[ "Apache-2.0" ]
1
2020-07-25T05:32:10.000Z
2020-07-25T05:32:10.000Z
examples/record_printer/client_hello_record.hpp
pioneer19/libcornet
9eb91629d8f9a6793b28af10a3535bfba0cc24ca
[ "Apache-2.0" ]
1
2020-07-25T05:28:54.000Z
2020-07-25T05:28:54.000Z
/*
 * Copyright 2020 Alex Syrnikov <pioneer19@post.cz>
 * SPDX-License-Identifier: Apache-2.0
 *
 * This file is part of libcornet (https://github.com/pioneer19/libcornet).
 */

const uint8_t tls13_client_hello_record[] = {
        0x16, 0x03, 0x01, 0x00, 0xea, // TlsPlaintext handshake, legacy version, length
        0x01, 0x00, 0x00, 0xe6,       // ClientHello(1), length(24 bit)
        0x03, 0x03,                   // ProtocolVersion
        0xe9, 0x53, 0xc0, 0xde, 0x38, 0x8c, 0x75, 0x82, // Random 32 bytes
        0xbc, 0x49, 0xd5, 0xb2, 0xec, 0x46, 0x7c, 0x99,
        0x21, 0xc5, 0xdb, 0x64, 0x3c, 0x66, 0x07, 0xa4,
        0x18, 0x0e, 0x4d, 0x2a, 0x1a, 0x23, 0x2b, 0x08,
        0x20,                         // legacy session vector length
        0x99, 0x57, 0x6c, 0xce, 0x6e, 0x83, 0xc0, 0x69,
        0xdc, 0xd9, 0x98, 0x43, 0x07, 0xe2, 0xbe, 0xfc,
        0xb4, 0x38, 0x86, 0x33, 0x00, 0xf5, 0x58, 0x5f,
        0x2b, 0x95, 0xce, 0x6f, 0xfe, 0x42, 0xf5, 0x26,
        0x00, 0x08,                   // CipherSuites vector length
        0x13, 0x02, 0x13, 0x03, 0x13, 0x01, 0x00, 0xff,
        0x01, 0x00,                   // legacy_compression_methods<1..2^8-1>
        // === Extensions ===
        0x00, 0x95,                   // Extension extensions<8..2^16-1>;
        0x00, 0x00, 0x00, 0x14,       // ExtensionType(server_name), data_length(2 bytes)
        0x00, 0x12,                   // ServerNameList vector length
        0x00, 0x00, 0x0f,             // ServerName type(1byte) and length
        0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x70,
        0x6c, 0x69, 0x6e, 0x65, 0x2e, 0x65, 0x75,
        0x00, 0x0b, 0x00, 0x04,       // ExtensionType(unknown), data_length(2 bytes)
        0x03, 0x00, 0x01, 0x02,
        0x00, 0x0a, 0x00, 0x0c,       // supported_groups(10)
        0x00, 0x0a, 0x00, 0x1d, 0x00, 0x17, 0x00, 0x1e,
        0x00, 0x19, 0x00, 0x18,
        0x00, 0x23, 0x00, 0x00,       // unknown extension
        0x00, 0x16, 0x00, 0x00,       // unknown extension
        0x00, 0x17, 0x00, 0x00,       // unknown extension
        0x00, 0x0d, 0x00, 0x1e,       // signature_algorithms(13)
        0x00, 0x1c, 0x04, 0x03, 0x05, 0x03, 0x06, 0x03,
        0x08, 0x07, 0x08, 0x08, 0x08, 0x09, 0x08, 0x0a,
        0x08, 0x0b, 0x08, 0x04, 0x08, 0x05, 0x08, 0x06,
        0x04, 0x01, 0x05, 0x01, 0x06, 0x01,
        0x00, 0x2b, 0x00, 0x03,       // supported_versions(43),
        0x02, 0x03, 0x04,             // length, TLS 1.3 (0x03, 0x04)
        0x00, 0x2d, 0x00, 0x02,       // psk_key_exchange_modes(45)
        0x01, 0x01,                   // length 1, mode PSK_DHE_KE = 1
        0x00, 0x33, 0x00, 0x26,       // key_share(51)
        0x00, 0x24, 0x00, 0x1d, 0x00, 0x20,
        0xcd, 0xbe, 0xc4, 0xf3, 0x5a, 0x48, 0x28, 0x6e,
        0x59, 0xb0, 0xe7, 0xeb, 0x2e, 0xe5, 0xa0, 0x51,
        0x05, 0x21, 0x45, 0x7e, 0xdf, 0xa1, 0x12, 0x69,
        0x23, 0x42, 0x2e, 0x92, 0x38, 0xcd, 0xd5, 0x0e
};
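// Every TLS record begins with the fixed 5-byte header annotated on the first
// line of the array above: a content type, the legacy protocol version, and a
// 16-bit big-endian payload length (0x00 0xea = 234 bytes here). Below is a
// minimal sketch of reading that header from such a buffer; RecordHeader and
// parse_record_header are illustrative names, not libcornet APIs.

#include <cstdint>
#include <cstdio>

struct RecordHeader {
    uint8_t  content_type;   // 0x16 = handshake
    uint16_t legacy_version; // 0x0301 in the record layer, even for TLS 1.3
    uint16_t length;         // payload bytes following the 5-byte header
};

static RecordHeader parse_record_header(const uint8_t* p)
{
    RecordHeader h;
    h.content_type   = p[0];
    h.legacy_version = static_cast<uint16_t>((p[1] << 8) | p[2]); // big-endian
    h.length         = static_cast<uint16_t>((p[3] << 8) | p[4]); // big-endian
    return h;
}

int main()
{
    const uint8_t rec[5] = {0x16, 0x03, 0x01, 0x00, 0xea};
    RecordHeader h = parse_record_header(rec);
    std::printf("type=0x%02x version=0x%04x length=%u\n",
                h.content_type, h.legacy_version, h.length); // 0x16 0x0301 234
}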
48.33871
91
0.54688
pioneer19
4fb215be17b7a517f5c703eef67e7a23eb9a1c2c
73,408
cpp
C++
src/mongo/s/commands_admin.cpp
nleite/mongo
1a1b6b0aaeefbae06942867e4dcf55d00d42afe0
[ "Apache-2.0" ]
null
null
null
src/mongo/s/commands_admin.cpp
nleite/mongo
1a1b6b0aaeefbae06942867e4dcf55d00d42afe0
[ "Apache-2.0" ]
null
null
null
src/mongo/s/commands_admin.cpp
nleite/mongo
1a1b6b0aaeefbae06942867e4dcf55d00d42afe0
[ "Apache-2.0" ]
null
null
null
/** * Copyright (C) 2008 10gen Inc. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License, version 3, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "pch.h" #include "mongo/db/commands.h" #include "mongo/client/connpool.h" #include "mongo/client/dbclientcursor.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/dbmessage.h" #include "mongo/db/field_parser.h" #include "mongo/db/hasher.h" #include "mongo/db/index_names.h" #include "mongo/db/stats/counters.h" #include "mongo/s/chunk.h" #include "mongo/s/client_info.h" #include "mongo/s/config.h" #include "mongo/s/grid.h" #include "mongo/s/strategy.h" #include "mongo/s/type_chunk.h" #include "mongo/s/type_database.h" #include "mongo/s/type_shard.h" #include "mongo/s/writeback_listener.h" #include "mongo/util/net/listen.h" #include "mongo/util/net/message.h" #include "mongo/util/processinfo.h" #include "mongo/util/ramlog.h" #include "mongo/util/stringutils.h" #include "mongo/util/timer.h" #include "mongo/util/version.h" namespace mongo { namespace dbgrid_cmds { class GridAdminCmd : public Command { public: GridAdminCmd( const char * n ) : Command( n , false, tolowerString(n).c_str() ) { } virtual bool slaveOk() const { return true; } virtual bool adminOnly() const { return true; } // all grid commands are designed not to lock virtual LockType locktype() const { return NONE; } bool okForConfigChanges( string& errmsg ) { string e; if ( ! 
configServer.allUp(e) ) { errmsg = str::stream() << "not all config servers are up: " << e; return false; } return true; } }; // --------------- misc commands ---------------------- class NetStatCmd : public GridAdminCmd { public: NetStatCmd() : GridAdminCmd("netstat") { } virtual void help( stringstream& help ) const { help << " shows status/reachability of servers in the cluster"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::netstat); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { result.append("configserver", configServer.getPrimary().getConnString() ); result.append("isdbgrid", 1); return true; } } netstat; class FlushRouterConfigCmd : public GridAdminCmd { public: FlushRouterConfigCmd() : GridAdminCmd("flushRouterConfig") { } virtual void help( stringstream& help ) const { help << "flush all router config"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::flushRouterConfig); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { grid.flushConfig(); result.appendBool( "flushed" , true ); return true; } } flushRouterConfigCmd; class FsyncCommand : public GridAdminCmd { public: FsyncCommand() : GridAdminCmd( "fsync" ) {} virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::fsync); out->push_back(Privilege(AuthorizationManager::SERVER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { if ( cmdObj["lock"].trueValue() ) { errmsg = "can't do lock through mongos"; return false; } BSONObjBuilder sub; bool ok = true; int numFiles = 0; vector<Shard> shards; Shard::getAllShards( shards ); for ( vector<Shard>::iterator i=shards.begin(); i!=shards.end(); i++ ) { Shard s = *i; BSONObj x = s.runCommand( "admin" , "fsync" ); sub.append( s.getName() , x ); if ( ! x["ok"].trueValue() ) { ok = false; errmsg = x["errmsg"].String(); } numFiles += x["numFiles"].numberInt(); } result.append( "numFiles" , numFiles ); result.append( "all" , sub.obj() ); return ok; } } fsyncCmd; // ------------ database level commands ------------- class MoveDatabasePrimaryCommand : public GridAdminCmd { public: MoveDatabasePrimaryCommand() : GridAdminCmd("movePrimary") { } virtual void help( stringstream& help ) const { help << " example: { moveprimary : 'foo' , to : 'localhost:9999' }"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::movePrimary); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string dbname = cmdObj.firstElement().valuestrsafe(); if ( dbname.size() == 0 ) { errmsg = "no db"; return false; } if ( dbname == "config" ) { errmsg = "can't move config db"; return false; } // Flush the configuration // This can't be perfect, but it's better than nothing. 
grid.flushConfig(); DBConfigPtr config = grid.getDBConfig( dbname , false ); if ( ! config ) { errmsg = "can't find db!"; return false; } string to = cmdObj["to"].valuestrsafe(); if ( ! to.size() ) { errmsg = "you have to specify where you want to move it"; return false; } Shard s = Shard::make( to ); if ( config->getPrimary() == s.getConnString() ) { errmsg = "it is already the primary"; return false; } if ( ! grid.knowAboutShard( s.getConnString() ) ) { errmsg = "that server isn't known to me"; return false; } log() << "Moving " << dbname << " primary from: " << config->getPrimary().toString() << " to: " << s.toString() << endl; // Locking enabled now... DistributedLock lockSetup( configServer.getConnectionString(), dbname + "-movePrimary" ); dist_lock_try dlk; // Distributed locking added. try{ dlk = dist_lock_try( &lockSetup , string("Moving primary shard of ") + dbname ); } catch( LockException& e ){ errmsg = str::stream() << "error locking distributed lock to move primary shard of " << dbname << causedBy( e ); warning() << errmsg << endl; return false; } if ( ! dlk.got() ) { errmsg = (string)"metadata lock is already taken for moving " + dbname; return false; } set<string> shardedColls; config->getAllShardedCollections( shardedColls ); BSONArrayBuilder barr; barr.append( shardedColls ); ScopedDbConnection toconn(s.getConnString()); // TODO ERH - we need a clone command which replays operations from clone start to now // can just use local.oplog.$main BSONObj cloneRes; bool worked = toconn->runCommand( dbname.c_str(), BSON( "clone" << config->getPrimary().getConnString() << "collsToIgnore" << barr.arr() ), cloneRes ); toconn.done(); if ( ! worked ) { log() << "clone failed" << cloneRes << endl; errmsg = "clone failed"; return false; } string oldPrimary = config->getPrimary().getConnString(); ScopedDbConnection fromconn(config->getPrimary().getConnString()); config->setPrimary( s.getConnString() ); if( shardedColls.empty() ){ // TODO: Collections can be created in the meantime, and we should handle in the future. log() << "movePrimary dropping database on " << oldPrimary << ", no sharded collections in " << dbname << endl; try { fromconn->dropDatabase( dbname.c_str() ); } catch( DBException& e ){ e.addContext( str::stream() << "movePrimary could not drop the database " << dbname << " on " << oldPrimary ); throw; } } else if( cloneRes["clonedColls"].type() != Array ){ // Legacy behavior from old mongod with sharded collections, *do not* delete database, // but inform user they can drop manually (or ignore). warning() << "movePrimary legacy mongod behavior detected, user must manually remove unsharded collections in " << "database " << dbname << " on " << oldPrimary << endl; } else { // We moved some unsharded collections, but not all BSONObjIterator it( cloneRes["clonedColls"].Obj() ); while( it.more() ){ BSONElement el = it.next(); if( el.type() == String ){ try { log() << "movePrimary dropping cloned collection " << el.String() << " on " << oldPrimary << endl; fromconn->dropCollection( el.String() ); } catch( DBException& e ){ e.addContext( str::stream() << "movePrimary could not drop the cloned collection " << el.String() << " on " << oldPrimary ); throw; } } } } fromconn.done(); result << "primary " << s.toString(); return true; } } movePrimary; class EnableShardingCmd : public GridAdminCmd { public: EnableShardingCmd() : GridAdminCmd( "enableSharding" ) {} virtual void help( stringstream& help ) const { help << "Enable sharding for a db. 
(Use 'shardcollection' command afterwards.)\n" << " { enablesharding : \"<dbname>\" }\n"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::enableSharding); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string dbname = cmdObj.firstElement().valuestrsafe(); if ( dbname.size() == 0 ) { errmsg = "no db"; return false; } if ( dbname == "admin" ) { errmsg = "can't shard the admin db"; return false; } if ( dbname == "local" ) { errmsg = "can't shard the local db"; return false; } DBConfigPtr config = grid.getDBConfig( dbname ); if ( config->isShardingEnabled() ) { errmsg = "already enabled"; return false; } if ( ! okForConfigChanges( errmsg ) ) return false; log() << "enabling sharding on: " << dbname << endl; config->enableSharding(); return true; } } enableShardingCmd; // ------------ collection level commands ------------- class ShardCollectionCmd : public GridAdminCmd { public: ShardCollectionCmd() : GridAdminCmd( "shardCollection" ) {} virtual void help( stringstream& help ) const { help << "Shard a collection. Requires key. Optional unique. Sharding must already be enabled for the database.\n" << " { enablesharding : \"<dbname>\" }\n"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::shardCollection); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { const string ns = cmdObj.firstElement().valuestrsafe(); if ( ns.size() == 0 ) { errmsg = "no ns"; return false; } const NamespaceString nsStr( ns ); if ( !nsStr.isValid() ){ errmsg = str::stream() << "bad ns[" << ns << "]"; return false; } DBConfigPtr config = grid.getDBConfig( ns ); if ( ! config->isShardingEnabled() ) { errmsg = "sharding not enabled for db"; return false; } if ( config->isSharded( ns ) ) { errmsg = "already sharded"; return false; } BSONObj proposedKey = cmdObj.getObjectField( "key" ); if ( proposedKey.isEmpty() ) { errmsg = "no shard key"; return false; } bool isHashedShardKey = // br ( IndexNames::findPluginName( proposedKey ) == IndexNames::HASHED ); // Currently the allowable shard keys are either // i) a hashed single field, e.g. { a : "hashed" }, or // ii) a compound list of ascending fields, e.g. { a : 1 , b : 1 } if ( isHashedShardKey ) { // case i) if ( proposedKey.nFields() > 1 ) { errmsg = "hashed shard keys currently only support single field keys"; return false; } if ( cmdObj["unique"].trueValue() ) { // it's possible to ensure uniqueness on the hashed field by // declaring an additional (non-hashed) unique index on the field, // but the hashed shard key itself should not be declared unique errmsg = "hashed shard keys cannot be declared unique."; return false; } } else { // case ii) BSONForEach(e, proposedKey) { if (!e.isNumber() || e.number() != 1.0) { errmsg = str::stream() << "Unsupported shard key pattern. Pattern must" << " either be a single hashed field, or a list" << " of ascending fields."; return false; } } } if ( ns.find( ".system." ) != string::npos ) { errmsg = "can't shard system namespaces"; return false; } if ( ! 
okForConfigChanges( errmsg ) ) return false; //the rest of the checks require a connection to the primary db ScopedDbConnection conn(config->getPrimary().getConnString()); //check that collection is not capped BSONObj res = conn->findOne( config->getName() + ".system.namespaces", BSON( "name" << ns ) ); if ( res["options"].type() == Object && res["options"].embeddedObject()["capped"].trueValue() ) { errmsg = "can't shard capped collection"; conn.done(); return false; } // The proposed shard key must be validated against the set of existing indexes. // In particular, we must ensure the following constraints // // 1. All existing unique indexes, except those which start with the _id index, // must contain the proposed key as a prefix (uniqueness of the _id index is // ensured by the _id generation process or guaranteed by the user). // // 2. If the collection is not empty, there must exist at least one index that // is "useful" for the proposed key. A "useful" index is defined as follows // Useful Index: // i. contains proposedKey as a prefix // ii. is not sparse // iii. contains no null values // iv. is not multikey (maybe lift this restriction later) // v. if a hashed index, has default seed (lift this restriction later) // // 3. If the proposed shard key is specified as unique, there must exist a useful, // unique index exactly equal to the proposedKey (not just a prefix). // // After validating these constraint: // // 4. If there is no useful index, and the collection is non-empty, we // must fail. // // 5. If the collection is empty, and it's still possible to create an index // on the proposed key, we go ahead and do so. string indexNS = config->getName() + ".system.indexes"; // 1. Verify consistency with existing unique indexes BSONObj uniqueQuery = BSON( "ns" << ns << "unique" << true ); auto_ptr<DBClientCursor> uniqueQueryResult = conn->query( indexNS , uniqueQuery ); ShardKeyPattern proposedShardKey( proposedKey ); while ( uniqueQueryResult->more() ) { BSONObj idx = uniqueQueryResult->next(); BSONObj currentKey = idx["key"].embeddedObject(); if( ! proposedShardKey.isUniqueIndexCompatible( currentKey ) ) { errmsg = str::stream() << "can't shard collection '" << ns << "' " << "with unique index on " << currentKey << " " << "and proposed shard key " << proposedKey << ". " << "Uniqueness can't be maintained unless " << "shard key is a prefix"; conn.done(); return false; } } // 2. Check for a useful index bool hasUsefulIndexForKey = false; BSONObj allQuery = BSON( "ns" << ns ); auto_ptr<DBClientCursor> allQueryResult = conn->query( indexNS , allQuery ); BSONArrayBuilder allIndexes; while ( allQueryResult->more() ) { BSONObj idx = allQueryResult->next(); allIndexes.append( idx ); BSONObj currentKey = idx["key"].embeddedObject(); // Check 2.i. and 2.ii. if ( ! idx["sparse"].trueValue() && proposedKey.isPrefixOf( currentKey ) ) { // We can't currently use hashed indexes with a non-default hash seed // Check v. // Note that this means that, for sharding, we only support one hashed index // per field per collection. if ( isHashedShardKey && !idx["seed"].eoo() && idx["seed"].numberInt() != BSONElementHasher::DEFAULT_HASH_SEED ) { errmsg = str::stream() << "can't shard collection " << ns << " with hashed shard key " << proposedKey << " because the hashed index uses a non-default seed of " << idx["seed"].numberInt(); conn.done(); return false; } hasUsefulIndexForKey = true; } } // 3. If proposed key is required to be unique, additionally check for exact match. 
bool careAboutUnique = cmdObj["unique"].trueValue(); if ( hasUsefulIndexForKey && careAboutUnique ) { BSONObj eqQuery = BSON( "ns" << ns << "key" << proposedKey ); BSONObj eqQueryResult = conn->findOne( indexNS, eqQuery ); if ( eqQueryResult.isEmpty() ) { hasUsefulIndexForKey = false; // if no exact match, index not useful, // but still possible to create one later } else { bool isExplicitlyUnique = eqQueryResult["unique"].trueValue(); BSONObj currKey = eqQueryResult["key"].embeddedObject(); bool isCurrentID = str::equals( currKey.firstElementFieldName() , "_id" ); if ( ! isExplicitlyUnique && ! isCurrentID ) { errmsg = str::stream() << "can't shard collection " << ns << ", " << proposedKey << " index not unique, " << "and unique index explicitly specified"; conn.done(); return false; } } } if ( hasUsefulIndexForKey ) { // Check 2.iii and 2.iv. Make sure no null entries in the sharding index // and that there is a useful, non-multikey index available BSONObjBuilder cmd; cmd.append( "checkShardingIndex" , ns ); cmd.append( "keyPattern" , proposedKey ); BSONObj cmdObj = cmd.obj(); if ( ! conn.get()->runCommand( "admin" , cmdObj , res ) ) { errmsg = res["errmsg"].str(); conn.done(); return false; } } // 4. if no useful index, and collection is non-empty, fail else if ( conn->count( ns ) != 0 ) { errmsg = str::stream() << "please create an index that starts with the " << "shard key before sharding."; result.append( "proposedKey" , proposedKey ); result.appendArray( "curIndexes" , allIndexes.done() ); conn.done(); return false; } // 5. If no useful index exists, and collection empty, create one on proposedKey. // Only need to call ensureIndex on primary shard, since indexes get copied to // receiving shard whenever a migrate occurs. else { // call ensureIndex with cache=false, see SERVER-1691 bool ensureSuccess = conn->ensureIndex( ns , proposedKey , careAboutUnique , "" , false ); if ( ! ensureSuccess ) { errmsg = "ensureIndex failed to create index on primary shard"; conn.done(); return false; } } bool isEmpty = ( conn->count( ns ) == 0 ); conn.done(); // Pre-splitting: // For new collections which use hashed shard keys, we can can pre-split the // range of possible hashes into a large number of chunks, and distribute them // evenly at creation time. Until we design a better initialization scheme, the // safest way to pre-split is to // 1. make one big chunk for each shard // 2. move them one at a time // 3. split the big chunks to achieve the desired total number of initial chunks vector<Shard> shards; Shard primary = config->getPrimary(); primary.getAllShards( shards ); int numShards = shards.size(); vector<BSONObj> initSplits; // there will be at most numShards-1 of these vector<BSONObj> allSplits; // all of the initial desired split points // only pre-split when using a hashed shard key and collection is still empty if ( isHashedShardKey && isEmpty ){ int numChunks = cmdObj["numInitialChunks"].numberInt(); if ( numChunks <= 0 ) numChunks = 2*numShards; // default number of initial chunks // hashes are signed, 64-bit ints. So we divide the range (-MIN long, +MAX long) // into intervals of size (2^64/numChunks) and create split points at the // boundaries. The logic below ensures that initial chunks are all // symmetric around 0. 
long long intervalSize = ( std::numeric_limits<long long>::max()/ numChunks )*2; long long current = 0; if( numChunks % 2 == 0 ){ allSplits.push_back( BSON(proposedKey.firstElementFieldName() << current) ); current += intervalSize; } else { current += intervalSize/2; } for( int i=0; i < (numChunks-1)/2; i++ ){ allSplits.push_back( BSON(proposedKey.firstElementFieldName() << current) ); allSplits.push_back( BSON(proposedKey.firstElementFieldName() << -current)); current += intervalSize; } sort( allSplits.begin() , allSplits.end() ); // 1. the initial splits define the "big chunks" that we will subdivide later int lastIndex = -1; for ( int i = 1; i < numShards; i++ ){ if ( lastIndex < (i*numChunks)/numShards - 1 ){ lastIndex = (i*numChunks)/numShards - 1; initSplits.push_back( allSplits[ lastIndex ] ); } } } tlog() << "CMD: shardcollection: " << cmdObj << endl; config->shardCollection( ns , proposedKey , careAboutUnique , &initSplits ); result << "collectionsharded" << ns; // only initially move chunks when using a hashed shard key if (isHashedShardKey) { // Reload the new config info. If we created more than one initial chunk, then // we need to move them around to balance. ChunkManagerPtr chunkManager = config->getChunkManager( ns , true ); ChunkMap chunkMap = chunkManager->getChunkMap(); // 2. Move and commit each "big chunk" to a different shard. int i = 0; for ( ChunkMap::const_iterator c = chunkMap.begin(); c != chunkMap.end(); ++c,++i ){ Shard to = shards[ i % numShards ]; ChunkPtr chunk = c->second; // can't move chunk to shard it's already on if ( to == chunk->getShard() ) continue; BSONObj moveResult; if (!chunk->moveAndCommit(to, Chunk::MaxChunkSize, false, true, moveResult)) { warning() << "Couldn't move chunk " << chunk << " to shard " << to << " while sharding collection " << ns << ". Reason: " << moveResult << endl; } } if (allSplits.empty()) { return true; } // Reload the config info, after all the migrations chunkManager = config->getChunkManager( ns , true ); // 3. Subdivide the big chunks by splitting at each of the points in "allSplits" // that we haven't already split by. ChunkPtr currentChunk = chunkManager->findIntersectingChunk( allSplits[0] ); vector<BSONObj> subSplits; for ( unsigned i = 0 ; i <= allSplits.size(); i++){ if ( i == allSplits.size() || ! currentChunk->containsPoint( allSplits[i] ) ) { if ( ! subSplits.empty() ){ BSONObj splitResult; if ( ! currentChunk->multiSplit( subSplits , splitResult ) ){ warning() << "Couldn't split chunk " << currentChunk << " while sharding collection " << ns << ". Reason: " << splitResult << endl; } subSplits.clear(); } if ( i < allSplits.size() ) currentChunk = chunkManager->findIntersectingChunk( allSplits[i] ); } else { subSplits.push_back( allSplits[i] ); } } // Proactively refresh the chunk manager. Not really necessary, but this way it's // immediately up-to-date the next time it's used. 
config->getChunkManager( ns , true ); } return true; } } shardCollectionCmd; class GetShardVersion : public GridAdminCmd { public: GetShardVersion() : GridAdminCmd( "getShardVersion" ) {} virtual void help( stringstream& help ) const { help << " example: { getShardVersion : 'alleyinsider.foo' } "; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::getShardVersion); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string ns = cmdObj.firstElement().valuestrsafe(); if ( ns.size() == 0 ) { errmsg = "need to specify the full namespace"; return false; } DBConfigPtr config = grid.getDBConfig( ns ); if ( ! config->isSharded( ns ) ) { errmsg = "ns not sharded."; return false; } ChunkManagerPtr cm = config->getChunkManagerIfExists( ns ); if ( ! cm ) { errmsg = "no chunk manager?"; return false; } cm->_printChunks(); cm->getVersion().addToBSON( result ); return 1; } } getShardVersionCmd; class SplitCollectionCmd : public GridAdminCmd { public: SplitCollectionCmd() : GridAdminCmd( "split" ) {} virtual void help( stringstream& help ) const { help << " example: - split the shard that contains the given key \n" << " { split : 'alleyinsider.blog.posts' , find : { ts : 1 } }\n" << " example: - split the shard that contains the key with this as the middle \n" << " { split : 'alleyinsider.blog.posts' , middle : { ts : 1 } }\n" << " NOTE: this does not move the chunks, it merely creates a logical separation \n" ; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::split); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { if ( ! okForConfigChanges( errmsg ) ) return false; ShardConnection::sync(); string ns = cmdObj.firstElement().valuestrsafe(); if ( ns.size() == 0 ) { errmsg = "no ns"; return false; } DBConfigPtr config = grid.getDBConfig( ns ); if ( ! config->isSharded( ns ) ) { config->reload(); if ( ! config->isSharded( ns ) ) { errmsg = "ns not sharded.
have to shard before can split"; return false; } } const BSONField<BSONObj> findField("find", BSONObj()); const BSONField<BSONArray> boundsField("bounds", BSONArray()); const BSONField<BSONObj> middleField("middle", BSONObj()); BSONObj find; if (FieldParser::extract(cmdObj, findField, &find, &errmsg) == FieldParser::FIELD_INVALID) { return false; } BSONArray bounds; if (FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg) == FieldParser::FIELD_INVALID) { return false; } if (!bounds.isEmpty()) { if (!bounds.hasField("0")) { errmsg = "lower bound not specified"; return false; } if (!bounds.hasField("1")) { errmsg = "upper bound not specified"; return false; } } if (!find.isEmpty() && !bounds.isEmpty()) { errmsg = "cannot specify bounds and find at the same time"; return false; } BSONObj middle; if (FieldParser::extract(cmdObj, middleField, &middle, &errmsg) == FieldParser::FIELD_INVALID) { return false; } if (find.isEmpty() && bounds.isEmpty() && middle.isEmpty()) { errmsg = "need to specify find/bounds or middle"; return false; } if (!find.isEmpty() && !middle.isEmpty()) { errmsg = "cannot specify find and middle together"; return false; } if (!bounds.isEmpty() && !middle.isEmpty()) { errmsg = "cannot specify bounds and middle together"; return false; } ChunkManagerPtr info = config->getChunkManager( ns ); ChunkPtr chunk; if (!find.isEmpty()) { chunk = info->findChunkForDoc(find); } else if (!bounds.isEmpty()) { chunk = info->findIntersectingChunk(bounds[0].Obj()); verify(chunk.get()); if (chunk->getMin() != bounds[0].Obj() || chunk->getMax() != bounds[1].Obj()) { errmsg = "no chunk found from the given upper and lower bounds"; return false; } } else { // middle chunk = info->findIntersectingChunk(middle); } verify(chunk.get()); log() << "splitting: " << ns << " shard: " << chunk << endl; BSONObj res; bool worked; if ( middle.isEmpty() ) { BSONObj ret = chunk->singleSplit( true /* force a split even if not enough data */ , res ); worked = !ret.isEmpty(); } else { // sanity check if the key provided is a valid split point if ( ( middle == chunk->getMin() ) || ( middle == chunk->getMax() ) ) { errmsg = "cannot split on initial or final chunk's key"; return false; } if (!fieldsMatch(middle, info->getShardKey().key())){ errmsg = "middle has different fields (or different order) than shard key"; return false; } vector<BSONObj> splitPoints; splitPoints.push_back( middle ); worked = chunk->multiSplit( splitPoints , res ); } if ( !worked ) { errmsg = "split failed"; result.append( "cause" , res ); return false; } config->getChunkManager( ns , true ); return true; } } splitCollectionCmd; class MoveChunkCmd : public GridAdminCmd { public: MoveChunkCmd() : GridAdminCmd( "moveChunk" ) {} virtual void help( stringstream& help ) const { help << "Example: move chunk that contains the doc {num : 7} to shard001\n" << " { movechunk : 'test.foo' , find : { num : 7 } , to : 'shard0001' }\n" << "Example: move chunk with lower bound 0 and upper bound 10 to shard001\n" << " { movechunk : 'test.foo' , bounds : [ { num : 0 } , { num : 10 } ] " << " , to : 'shard001' }\n"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::moveChunk); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { if ( ! 
okForConfigChanges( errmsg ) ) return false; ShardConnection::sync(); Timer t; string ns = cmdObj.firstElement().valuestrsafe(); if ( ns.size() == 0 ) { errmsg = "no ns"; return false; } DBConfigPtr config = grid.getDBConfig( ns ); if ( ! config->isSharded( ns ) ) { config->reload(); if ( ! config->isSharded( ns ) ) { errmsg = "ns not sharded. have to shard before we can move a chunk"; return false; } } string toString = cmdObj["to"].valuestrsafe(); if ( ! toString.size() ) { errmsg = "you have to specify where you want to move the chunk"; return false; } Shard to = Shard::make( toString ); // so far, chunk size serves test purposes; it may or may not become a supported parameter long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong(); if ( maxChunkSizeBytes == 0 ) { maxChunkSizeBytes = Chunk::MaxChunkSize; } BSONObj find = cmdObj.getObjectField( "find" ); BSONObj bounds = cmdObj.getObjectField( "bounds" ); // check that only one of the two chunk specification methods is used if ( find.isEmpty() == bounds.isEmpty() ) { errmsg = "need to specify either a find query, or both lower and upper bounds."; return false; } ChunkManagerPtr info = config->getChunkManager( ns ); ChunkPtr c = find.isEmpty() ? info->findIntersectingChunk( bounds[0].Obj() ) : info->findChunkForDoc( find ); if ( ! bounds.isEmpty() && ( c->getMin() != bounds[0].Obj() || c->getMax() != bounds[1].Obj() ) ) { errmsg = "no chunk found with those upper and lower bounds"; return false; } const Shard& from = c->getShard(); if ( from == to ) { errmsg = "that chunk is already on that shard"; return false; } tlog() << "CMD: movechunk: " << cmdObj << endl; BSONObj res; if (!c->moveAndCommit(to, maxChunkSizeBytes, cmdObj["_secondaryThrottle"].trueValue(), cmdObj["_waitForDelete"].trueValue(), res)) { errmsg = "move failed"; result.append( "cause" , res ); return false; } // preemptively reload the config to get new version info config->getChunkManager( ns , true ); result.append( "millis" , t.millis() ); return true; } } moveChunkCmd; // ------------ server level commands ------------- class ListShardsCmd : public GridAdminCmd { public: ListShardsCmd() : GridAdminCmd("listShards") { } virtual void help( stringstream& help ) const { help << "list all shards of the system"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::listShards); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { ScopedDbConnection conn(configServer.getPrimary().getConnString(), 30); vector<BSONObj> all; auto_ptr<DBClientCursor> cursor = conn->query( ShardType::ConfigNS , BSONObj() ); while ( cursor->more() ) { BSONObj o = cursor->next(); all.push_back( o ); } result.append("shards" , all ); conn.done(); return true; } } listShardsCmd; /* a shard is a single mongod server or a replica pair. add it (them) to the cluster as a storage partition. 
*/ class AddShard : public GridAdminCmd { public: AddShard() : GridAdminCmd("addShard") { } virtual void help( stringstream& help ) const { help << "add a new shard to the system"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::addShard); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { errmsg.clear(); // get replica set component hosts ConnectionString servers = ConnectionString::parse( cmdObj.firstElement().valuestrsafe() , errmsg ); if ( ! errmsg.empty() ) { log() << "addshard request " << cmdObj << " failed: " << errmsg << endl; return false; } // using localhost in server names implies every other process must use localhost addresses too vector<HostAndPort> serverAddrs = servers.getServers(); for ( size_t i = 0 ; i < serverAddrs.size() ; i++ ) { if ( serverAddrs[i].isLocalHost() != grid.allowLocalHost() ) { errmsg = str::stream() << "can't use localhost as a shard since all shards need to communicate. " << "either use all shards and configdbs in localhost or all in actual IPs " << " host: " << serverAddrs[i].toString() << " isLocalHost:" << serverAddrs[i].isLocalHost(); log() << "addshard request " << cmdObj << " failed: attempt to mix localhosts and IPs" << endl; return false; } // it's fine if mongods of a set all use default port if ( ! serverAddrs[i].hasPort() ) { serverAddrs[i].setPort( CmdLine::ShardServerPort ); } } // name is optional; addShard will provide one if needed string name = ""; if ( cmdObj["name"].type() == String ) { name = cmdObj["name"].valuestrsafe(); } // maxSize is the space usage cap in a shard in MBs long long maxSize = 0; if ( cmdObj[ ShardType::maxSize() ].isNumber() ) { maxSize = cmdObj[ ShardType::maxSize() ].numberLong(); } if ( ! grid.addShard( &name , servers , maxSize , errmsg ) ) { log() << "addshard request " << cmdObj << " failed: " << errmsg << endl; return false; } result << "shardAdded" << name; return true; } } addServer; /* See usage docs at: * http://dochub.mongodb.org/core/configuringsharding#ConfiguringSharding-Removingashard */ class RemoveShardCmd : public GridAdminCmd { public: RemoveShardCmd() : GridAdminCmd("removeShard") { } virtual void help( stringstream& help ) const { help << "remove a shard from the system."; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::removeShard); out->push_back(Privilege(AuthorizationManager::CLUSTER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string target = cmdObj.firstElement().valuestrsafe(); Shard s = Shard::make( target ); if ( !
grid.knowAboutShard( s.getConnString() ) ) { errmsg = "unknown shard"; return false; } ScopedDbConnection conn(configServer.getPrimary().getConnString(), 30); if (conn->count(ShardType::ConfigNS, BSON(ShardType::name() << NE << s.getName() << ShardType::draining(true)))){ conn.done(); errmsg = "Can't have more than one draining shard at a time"; return false; } if (conn->count(ShardType::ConfigNS, BSON(ShardType::name() << NE << s.getName())) == 0){ conn.done(); errmsg = "Can't remove last shard"; return false; } BSONObj primaryDoc = BSON(DatabaseType::name.ne("local") << DatabaseType::primary(s.getName())); BSONObj dbInfo; // appended at end of result on success { boost::scoped_ptr<DBClientCursor> cursor (conn->query(DatabaseType::ConfigNS, primaryDoc)); if (cursor->more()) { // skip block and allocations if empty BSONObjBuilder dbInfoBuilder; dbInfoBuilder.append("note", "you need to drop or movePrimary these databases"); BSONArrayBuilder dbs(dbInfoBuilder.subarrayStart("dbsToMove")); while (cursor->more()){ BSONObj db = cursor->nextSafe(); dbs.append(db[DatabaseType::name()]); } dbs.doneFast(); dbInfo = dbInfoBuilder.obj(); } } // If the server is not yet draining chunks, put it in draining mode. BSONObj searchDoc = BSON(ShardType::name() << s.getName()); BSONObj drainingDoc = BSON(ShardType::name() << s.getName() << ShardType::draining(true)); BSONObj shardDoc = conn->findOne(ShardType::ConfigNS, drainingDoc); if ( shardDoc.isEmpty() ) { // TODO prevent move chunks to this shard. log() << "going to start draining shard: " << s.getName() << endl; BSONObj newStatus = BSON( "$set" << BSON( ShardType::draining(true) ) ); conn->update( ShardType::ConfigNS , searchDoc , newStatus, false /* do no upsert */); errmsg = conn->getLastError(); if ( errmsg.size() ) { log() << "error starting remove shard: " << s.getName() << " err: " << errmsg << endl; return false; } BSONObj primaryLocalDoc = BSON(DatabaseType::name("local") << DatabaseType::primary(s.getName())); PRINT(primaryLocalDoc); if (conn->count(DatabaseType::ConfigNS, primaryLocalDoc)) { log() << "This shard is listed as primary of local db. Removing entry." << endl; conn->remove(DatabaseType::ConfigNS, BSON(DatabaseType::name("local"))); errmsg = conn->getLastError(); if ( errmsg.size() ) { log() << "error removing local db: " << errmsg << endl; return false; } } Shard::reloadShardInfo(); result.append( "msg" , "draining started successfully" ); result.append( "state" , "started" ); result.append( "shard" , s.getName() ); result.appendElements(dbInfo); conn.done(); return true; } // If the server has been completely drained, remove it from the ConfigDB. // Check not only for chunks but also databases. 
BSONObj shardIDDoc = BSON(ChunkType::shard(shardDoc[ShardType::name()].str())); long long chunkCount = conn->count(ChunkType::ConfigNS, shardIDDoc); long long dbCount = conn->count( DatabaseType::ConfigNS , primaryDoc ); if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ) { log() << "going to remove shard: " << s.getName() << endl; conn->remove( ShardType::ConfigNS , searchDoc ); errmsg = conn->getLastError(); if ( errmsg.size() ) { log() << "error concluding remove shard: " << s.getName() << " err: " << errmsg << endl; return false; } string shardName = shardDoc[ ShardType::name() ].str(); Shard::removeShard( shardName ); shardConnectionPool.removeHost( shardName ); ReplicaSetMonitor::remove( shardName, true ); Shard::reloadShardInfo(); result.append( "msg" , "removeshard completed successfully" ); result.append( "state" , "completed" ); result.append( "shard" , s.getName() ); conn.done(); return true; } // If the server is already in draining mode, just report on its progress. // Report on databases (not just chunks) that are left too. result.append( "msg" , "draining ongoing" ); result.append( "state" , "ongoing" ); BSONObjBuilder inner; inner.append( "chunks" , chunkCount ); inner.append( "dbs" , dbCount ); result.append( "remaining" , inner.obj() ); result.appendElements(dbInfo); conn.done(); return true; } } removeShardCmd; // --------------- public commands ---------------- class IsDbGridCmd : public Command { public: virtual LockType locktype() const { return NONE; } virtual bool slaveOk() const { return true; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) {} // No auth required IsDbGridCmd() : Command("isdbgrid") { } bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { result.append("isdbgrid", 1); result.append("hostname", getHostNameCached()); return true; } } isdbgrid; class CmdIsMaster : public Command { public: virtual LockType locktype() const { return NONE; } virtual bool slaveOk() const { return true; } virtual void help( stringstream& help ) const { help << "test if this is master half of a replica pair"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) {} // No auth required CmdIsMaster() : Command("isMaster" , false , "ismaster") { } virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { result.appendBool("ismaster", true ); result.append("msg", "isdbgrid"); result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize); result.appendNumber("maxMessageSizeBytes", MaxMessageSizeBytes); result.appendDate("localTime", jsTime()); return true; } } ismaster; class CmdWhatsMyUri : public Command { public: CmdWhatsMyUri() : Command("whatsmyuri") { } virtual bool logTheOp() { return false; // the modification will be logged directly } virtual bool slaveOk() const { return true; } virtual LockType locktype() const { return NONE; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) {} // No auth required virtual void help( stringstream &help ) const { help << "{whatsmyuri:1}"; } virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { result << "you" << ClientInfo::get()->getRemote(); return true; } } cmdWhatsMyUri; class CmdShardingGetPrevError : public Command { public: virtual LockType locktype() const { return NONE; } virtual bool slaveOk() const { return 
true; } virtual void help( stringstream& help ) const { help << "get previous error (since last reseterror command)"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) {} // No auth required CmdShardingGetPrevError() : Command( "getPrevError" , false , "getpreverror") { } virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { errmsg += "getpreverror not supported for sharded environments"; return false; } } cmdGetPrevError; class CmdShardingGetLastError : public Command { public: virtual LockType locktype() const { return NONE; } virtual bool slaveOk() const { return true; } virtual void help( stringstream& help ) const { help << "check for an error on the last command executed"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) {} // No auth required CmdShardingGetLastError() : Command("getLastError" , false , "getlasterror") { } virtual bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { LastError *le = lastError.disableForCommand(); verify( le ); { if ( le->msg.size() && le->nPrev == 1 ) { le->appendSelf( result ); return true; } } ClientInfo * client = ClientInfo::get(); bool res = client->getLastError( dbName, cmdObj , result, errmsg ); client->disableForCommand(); return res; } } cmdGetLastError; } class CmdShardingResetError : public Command { public: CmdShardingResetError() : Command( "resetError" , false , "reseterror" ) {} virtual LockType locktype() const { return NONE; } virtual bool slaveOk() const { return true; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) {} // No auth required bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { LastError *le = lastError.get(); if ( le ) le->reset(); ClientInfo * client = ClientInfo::get(); set<string> * shards = client->getPrev(); for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ) { string theShard = *i; ShardConnection conn( theShard , "" ); BSONObj res; conn->runCommand( dbName , cmdObj , res ); conn.done(); } return true; } } cmdShardingResetError; class CmdListDatabases : public Command { public: CmdListDatabases() : Command("listDatabases", true , "listdatabases" ) {} virtual bool logTheOp() { return false; } virtual bool slaveOk() const { return true; } virtual bool slaveOverrideOk() const { return true; } virtual bool adminOnly() const { return true; } virtual LockType locktype() const { return NONE; } virtual void help( stringstream& help ) const { help << "list databases on cluster"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::listDatabases); out->push_back(Privilege(AuthorizationManager::SERVER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { vector<Shard> shards; Shard::getAllShards( shards ); map<string,long long> sizes; map< string,shared_ptr<BSONObjBuilder> > dbShardInfo; for ( vector<Shard>::iterator i=shards.begin(); i!=shards.end(); i++ ) { Shard s = *i; BSONObj x = s.runCommand( "admin" , "listDatabases" ); BSONObjIterator j( x["databases"].Obj() ); while ( j.more() ) { BSONObj theDB = j.next().Obj(); string name = theDB["name"].String(); 
long long size = theDB["sizeOnDisk"].numberLong(); long long& totalSize = sizes[name]; if ( size == 1 ) { if ( totalSize <= 1 ) totalSize = 1; } else totalSize += size; shared_ptr<BSONObjBuilder>& bb = dbShardInfo[name]; if ( ! bb.get() ) bb.reset( new BSONObjBuilder() ); bb->appendNumber( s.getName() , size ); } } long long totalSize = 0; BSONArrayBuilder bb( result.subarrayStart( "databases" ) ); for ( map<string,long long>::iterator i=sizes.begin(); i!=sizes.end(); ++i ) { string name = i->first; if ( name == "local" ) { // we don't return local // since all shards have their own independent local continue; } if ( name == "config" || name == "admin" ) { //always get this from the config servers continue; } long long size = i->second; totalSize += size; BSONObjBuilder temp; temp.append( "name" , name ); temp.appendNumber( "sizeOnDisk" , size ); temp.appendBool( "empty" , size == 1 ); temp.append( "shards" , dbShardInfo[name]->obj() ); bb.append( temp.obj() ); } { // get config db from the config servers (first one) ScopedDbConnection conn(configServer.getPrimary().getConnString(), 30); BSONObj x; if ( conn->simpleCommand( "config" , &x , "dbstats" ) ){ BSONObjBuilder b; b.append( "name" , "config" ); b.appendBool( "empty" , false ); if ( x["fileSize"].type() ) b.appendAs( x["fileSize"] , "sizeOnDisk" ); else b.append( "sizeOnDisk" , 1 ); bb.append( b.obj() ); } else { bb.append( BSON( "name" << "config" ) ); } conn.done(); } { // get admin db from the config servers (first one) ScopedDbConnection conn(configServer.getPrimary().getConnString(), 30); BSONObj x; if ( conn->simpleCommand( "admin" , &x , "dbstats" ) ){ BSONObjBuilder b; b.append( "name" , "admin" ); b.appendBool( "empty" , false ); if ( x["fileSize"].type() ) b.appendAs( x["fileSize"] , "sizeOnDisk" ); else b.append( "sizeOnDisk" , 1 ); bb.append( b.obj() ); } else { bb.append( BSON( "name" << "admin" ) ); } conn.done(); } bb.done(); result.appendNumber( "totalSize" , totalSize ); result.appendNumber( "totalSizeMb" , totalSize / ( 1024 * 1024 ) ); return 1; } } cmdListDatabases; class CmdCloseAllDatabases : public Command { public: CmdCloseAllDatabases() : Command("closeAllDatabases", false , "closeAllDatabases" ) {} virtual bool logTheOp() { return false; } virtual bool slaveOk() const { return true; } virtual bool slaveOverrideOk() const { return true; } virtual bool adminOnly() const { return true; } virtual LockType locktype() const { return NONE; } virtual void help( stringstream& help ) const { help << "Not supported sharded"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { ActionSet actions; actions.addAction(ActionType::closeAllDatabases); out->push_back(Privilege(AuthorizationManager::SERVER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& /*result*/, bool /*fromRepl*/) { errmsg = "closeAllDatabases isn't supported through mongos"; return false; } } cmdCloseAllDatabases; class CmdReplSetGetStatus : public Command { public: CmdReplSetGetStatus() : Command("replSetGetStatus"){} virtual bool logTheOp() { return false; } virtual bool slaveOk() const { return true; } virtual bool adminOnly() const { return true; } virtual LockType locktype() const { return NONE; } virtual void help( stringstream& help ) const { help << "Not supported through mongos"; } virtual void addRequiredPrivileges(const std::string& dbname, const BSONObj& cmdObj, std::vector<Privilege>* out) { // TODO: Should this require no 
auth since it's not supported in mongos anyway? ActionSet actions; actions.addAction(ActionType::replSetGetStatus); out->push_back(Privilege(AuthorizationManager::SERVER_RESOURCE_NAME, actions)); } bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { if ( jsobj["forShell"].trueValue() ) { lastError.disableForCommand(); ClientInfo::get()->disableForCommand(); } errmsg = "replSetGetStatus is not supported through mongos"; result.append("info", "mongos"); // see sayReplSetMemberState return false; } } cmdReplSetGetStatus; CmdShutdown cmdShutdown; void CmdShutdown::help( stringstream& help ) const { help << "shutdown the database. must be run against the admin db and " << "either (1) run from localhost or (2) authenticated."; } bool CmdShutdown::run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { return shutdownHelper(); } } // namespace mongo
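Buried at the top of this file, the hashed-shard-key pre-split arithmetic is the densest few lines here. The following standalone sketch (the function name presplitPoints and the freestanding framing are mine, not mongos code) isolates just the cut-point computation: numChunks - 1 evenly spaced split points laid out symmetrically around zero across the signed 64-bit hash range.

#include <algorithm>
#include <limits>
#include <vector>

// Sketch of the pre-split computation above: hashed key values cover the
// full signed 64-bit range, so step by (max/numChunks)*2 and mirror each
// positive cut point with its negative twin.
std::vector<long long> presplitPoints(int numChunks) {
    std::vector<long long> splits;
    long long intervalSize = (std::numeric_limits<long long>::max() / numChunks) * 2;
    long long current = 0;
    if (numChunks % 2 == 0) {       // even count: 0 itself is a cut point
        splits.push_back(current);
        current += intervalSize;
    } else {                        // odd count: the middle chunk straddles 0
        current += intervalSize / 2;
    }
    for (int i = 0; i < (numChunks - 1) / 2; i++) {
        splits.push_back(current);
        splits.push_back(-current);
        current += intervalSize;
    }
    std::sort(splits.begin(), splits.end());
    return splits;                  // numChunks - 1 points => numChunks chunks
}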
45.538462
156
0.468314
nleite
4fb26419819029cbca5a4561cdcaaa756c6d76f2
895
cpp
C++
src/playground/oop.cpp
yyqian/cpp-arsenal
1f3ce5c044d388a7ddc81f326f304c3acfa420fc
[ "MIT" ]
null
null
null
src/playground/oop.cpp
yyqian/cpp-arsenal
1f3ce5c044d388a7ddc81f326f304c3acfa420fc
[ "MIT" ]
null
null
null
src/playground/oop.cpp
yyqian/cpp-arsenal
1f3ce5c044d388a7ddc81f326f304c3acfa420fc
[ "MIT" ]
null
null
null
#include <cstddef>  // std::size_t
#include <string>

class Quote {
public:
    Quote() = default;
    Quote(const std::string &book, double sales_price)
        : bookNo(book), price(sales_price) {}
    std::string isbn() const { return bookNo; }
    virtual double net_price(std::size_t n) const { return n * price; }
    virtual ~Quote() = default;

private:
    std::string bookNo;

protected:
    double price = 0.0;
};

class Bulk_quote : public Quote {
public:
    Bulk_quote() = default;
    Bulk_quote(const std::string &book, double p, std::size_t qty, double disc)
        : Quote(book, p), min_qty(qty), discount(disc) {}
    double net_price(std::size_t n) const override;

private:
    std::size_t min_qty = 0;
    double discount = 0.0;
};

double Bulk_quote::net_price(std::size_t cnt) const {
    if (cnt >= min_qty) {
        return cnt * (1 - discount) * price;
    } else {
        return cnt * price;
    }
}
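A minimal driver for the hierarchy above, assuming nothing beyond the two classes just defined (print_total and the ISBN literal are illustrative, not part of the original file); it shows the virtual dispatch that motivates the override:

#include <iostream>

// net_price() is virtual, so the call resolves on the dynamic type bound
// to the Quote reference.
double print_total(std::ostream &os, const Quote &item, std::size_t n) {
    double ret = item.net_price(n);
    os << "ISBN: " << item.isbn() << " total due: " << ret << '\n';
    return ret;
}

int main() {
    Quote base("0-201-82470-1", 50.0);
    Bulk_quote bulk("0-201-82470-1", 50.0, 10, 0.25); // 25% off at 10+ copies
    print_total(std::cout, base, 20); // 20 * 50        = 1000
    print_total(std::cout, bulk, 20); // 20 * 0.75 * 50 = 750
}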
23.552632
78
0.625698
yyqian
4fb59c89f9637b5a55fea3244994655ee8f3d196
2,981
cc
C++
code/render/coregraphics/base/multiplerendertargetbase.cc
gscept/nebula-trifid
e7c0a0acb05eedad9ed37a72c1bdf2d658511b42
[ "BSD-2-Clause" ]
67
2015-03-30T19:56:16.000Z
2022-03-11T13:52:17.000Z
code/render/coregraphics/base/multiplerendertargetbase.cc
gscept/nebula-trifid
e7c0a0acb05eedad9ed37a72c1bdf2d658511b42
[ "BSD-2-Clause" ]
5
2015-04-15T17:17:33.000Z
2016-02-11T00:40:17.000Z
code/render/coregraphics/base/multiplerendertargetbase.cc
gscept/nebula-trifid
e7c0a0acb05eedad9ed37a72c1bdf2d658511b42
[ "BSD-2-Clause" ]
34
2015-03-30T15:08:00.000Z
2021-09-23T05:55:10.000Z
//------------------------------------------------------------------------------ // multiplerendertargetbase.cc // (C) 2007 Radon Labs GmbH // (C) 2013-2016 Individual contributors, see AUTHORS file //------------------------------------------------------------------------------ #include "stdneb.h" #include "coregraphics/base/multiplerendertargetbase.h" namespace Base { __ImplementClass(Base::MultipleRenderTargetBase, 'MRTB', Core::RefCounted); using namespace CoreGraphics; using namespace Resources; //------------------------------------------------------------------------------ /** */ MultipleRenderTargetBase::MultipleRenderTargetBase() : clearDepthStencil(false), depthStencilTarget(0), numRenderTargets(0) { IndexT i; for (i = 0; i < MaxNumRenderTargets; i++) { this->clearColor[i].set(0.0f, 0.0f, 0.0f, 0.0f); this->clearDepth = 1.0f; this->clearStencil = 0; } } //------------------------------------------------------------------------------ /** */ MultipleRenderTargetBase::~MultipleRenderTargetBase() { // empty } //------------------------------------------------------------------------------ /** */ void MultipleRenderTargetBase::AddRenderTarget(const Ptr<RenderTarget>& rt) { n_assert(rt.isvalid()); n_assert(this->numRenderTargets < MaxNumRenderTargets); this->renderTarget[this->numRenderTargets] = rt; this->renderTarget[this->numRenderTargets]->SetMRTIndex(this->numRenderTargets); this->numRenderTargets++; } //------------------------------------------------------------------------------ /** */ void MultipleRenderTargetBase::BeginPass() { IndexT i; for (i = 0; i < this->numRenderTargets; i++) { uint clearFlags = this->renderTarget[i]->GetClearFlags(); this->renderTarget[i]->SetClearFlags(this->clearFlags[i]); this->renderTarget[i]->SetClearColor(this->clearColor[i]); this->renderTarget[i]->BeginPass(); } } //------------------------------------------------------------------------------ /** */ void MultipleRenderTargetBase::BeginBatch(CoreGraphics::FrameBatchType::Code batchType) { IndexT i; for (i = 0; i < this->numRenderTargets; i++) { this->renderTarget[i]->BeginBatch(batchType); } } //------------------------------------------------------------------------------ /** */ void MultipleRenderTargetBase::EndBatch() { IndexT i; for (i = 0; i < this->numRenderTargets; i++) { this->renderTarget[i]->EndBatch(); } } //------------------------------------------------------------------------------ /** */ void MultipleRenderTargetBase::EndPass() { IndexT i; for (i = 0; i < this->numRenderTargets; i++) { this->renderTarget[i]->EndPass(); } } //------------------------------------------------------------------------------ /** */ void MultipleRenderTargetBase::OnDisplayResized(SizeT width, SizeT height) { // override me } } // namespace Base
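A sketch of how a client would drive this bundle. Everything here is an assumption for illustration: the concrete MultipleRenderTarget class, the Create() factory idiom, and the target/batch names are not shown in this file; only the call sequence mirrors the methods above.

// Hypothetical usage: attach targets once, then Begin/End in matched pairs.
Ptr<CoreGraphics::MultipleRenderTarget> mrt = CoreGraphics::MultipleRenderTarget::Create();
mrt->AddRenderTarget(albedoTarget);    // hypothetical Ptr<RenderTarget>
mrt->AddRenderTarget(normalsTarget);   // hypothetical Ptr<RenderTarget>
mrt->BeginPass();                      // pushes per-target clear flags/colors
mrt->BeginBatch(batchType);            // batchType: a CoreGraphics::FrameBatchType::Code
// ... issue draw calls into all attached targets ...
mrt->EndBatch();
mrt->EndPass();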
25.05042
84
0.482724
gscept
4fb7559032ee7beb162d0ef4e006d5493076a3d1
19,198
cxx
C++
qrenderdoc/3rdparty/scintilla/src/CaseConvert.cxx
PLohrmannAMD/renderdoc
ea16d31aa340581f5e505e0c734a8468e5d3d47f
[ "MIT" ]
6,181
2015-01-07T11:49:11.000Z
2022-03-31T21:46:55.000Z
qrenderdoc/3rdparty/scintilla/src/CaseConvert.cxx
PLohrmannAMD/renderdoc
ea16d31aa340581f5e505e0c734a8468e5d3d47f
[ "MIT" ]
2,015
2015-01-16T01:45:25.000Z
2022-03-25T12:01:06.000Z
qrenderdoc/3rdparty/scintilla/src/CaseConvert.cxx
PLohrmannAMD/renderdoc
ea16d31aa340581f5e505e0c734a8468e5d3d47f
[ "MIT" ]
1,088
2015-01-06T08:36:25.000Z
2022-03-30T03:31:21.000Z
// Scintilla source code edit control // Encoding: UTF-8 /** @file CaseConvert.cxx ** Case fold characters and convert them to upper or lower case. ** Tables automatically regenerated by scripts/GenerateCaseConvert.py ** Should only be rarely regenerated for new versions of Unicode. **/ // Copyright 2013 by Neil Hodgson <neilh@scintilla.org> // The License.txt file describes the conditions under which this software may be distributed. #include <cstring> #include <stdexcept> #include <string> #include <vector> #include <algorithm> #include "StringCopy.h" #include "CaseConvert.h" #include "UniConversion.h" #include "UnicodeFromUTF8.h" #ifdef SCI_NAMESPACE using namespace Scintilla; #endif namespace { // Use an unnamed namespace to protect the declarations from name conflicts // Unicode code points are ordered by groups and follow patterns. // Most characters (pitch==1) are in ranges for a particular alphabet and their // upper case forms are a fixed distance away. // Another pattern (pitch==2) is where each lower case letter is preceded by // the upper case form. These are also grouped into ranges. int symmetricCaseConversionRanges[] = { //lower, upper, range length, range pitch //++Autogenerated -- start of section automatically generated //**\(\*\n\) 97,65,26,1, 224,192,23,1, 248,216,7,1, 257,256,24,2, 314,313,8,2, 331,330,23,2, 462,461,8,2, 479,478,9,2, 505,504,20,2, 547,546,9,2, 583,582,5,2, 945,913,17,1, 963,931,9,1, 985,984,12,2, 1072,1040,32,1, 1104,1024,16,1, 1121,1120,17,2, 1163,1162,27,2, 1218,1217,7,2, 1233,1232,44,2, 1377,1329,38,1, 7681,7680,75,2, 7841,7840,48,2, 7936,7944,8,1, 7952,7960,6,1, 7968,7976,8,1, 7984,7992,8,1, 8000,8008,6,1, 8032,8040,8,1, 8560,8544,16,1, 9424,9398,26,1, 11312,11264,47,1, 11393,11392,50,2, 11520,4256,38,1, 42561,42560,23,2, 42625,42624,12,2, 42787,42786,7,2, 42803,42802,31,2, 42879,42878,5,2, 42913,42912,5,2, 65345,65313,26,1, 66600,66560,40,1, //--Autogenerated -- end of section automatically generated }; // Code points that are symmetric but don't fit into a range of similar characters // are listed here. 
int symmetricCaseConversions[] = { //lower, upper //++Autogenerated -- start of section automatically generated //**1 \(\*\n\) 255,376, 307,306, 309,308, 311,310, 378,377, 380,379, 382,381, 384,579, 387,386, 389,388, 392,391, 396,395, 402,401, 405,502, 409,408, 410,573, 414,544, 417,416, 419,418, 421,420, 424,423, 429,428, 432,431, 436,435, 438,437, 441,440, 445,444, 447,503, 454,452, 457,455, 460,458, 477,398, 499,497, 501,500, 572,571, 575,11390, 576,11391, 578,577, 592,11375, 593,11373, 594,11376, 595,385, 596,390, 598,393, 599,394, 601,399, 603,400, 608,403, 611,404, 613,42893, 614,42922, 616,407, 617,406, 619,11362, 623,412, 625,11374, 626,413, 629,415, 637,11364, 640,422, 643,425, 648,430, 649,580, 650,433, 651,434, 652,581, 658,439, 881,880, 883,882, 887,886, 891,1021, 892,1022, 893,1023, 940,902, 941,904, 942,905, 943,906, 972,908, 973,910, 974,911, 983,975, 1010,1017, 1016,1015, 1019,1018, 1231,1216, 7545,42877, 7549,11363, 8017,8025, 8019,8027, 8021,8029, 8023,8031, 8048,8122, 8049,8123, 8050,8136, 8051,8137, 8052,8138, 8053,8139, 8054,8154, 8055,8155, 8056,8184, 8057,8185, 8058,8170, 8059,8171, 8060,8186, 8061,8187, 8112,8120, 8113,8121, 8144,8152, 8145,8153, 8160,8168, 8161,8169, 8165,8172, 8526,8498, 8580,8579, 11361,11360, 11365,570, 11366,574, 11368,11367, 11370,11369, 11372,11371, 11379,11378, 11382,11381, 11500,11499, 11502,11501, 11507,11506, 11559,4295, 11565,4301, 42874,42873, 42876,42875, 42892,42891, 42897,42896, 42899,42898, //--Autogenerated -- end of section automatically generated }; // Characters that have complex case conversions are listed here. // This includes cases where more than one character is needed for a conversion, // folding is different to lowering, or (as appropriate) upper(lower(x)) != x or // lower(upper(x)) != x. 
const char *complexCaseConversions = // Original | Folded | Upper | Lower | //++Autogenerated -- start of section automatically generated //**2 \(\*\n\) "\xc2\xb5|\xce\xbc|\xce\x9c||" "\xc3\x9f|ss|SS||" "\xc4\xb0|i\xcc\x87||i\xcc\x87|" "\xc4\xb1||I||" "\xc5\x89|\xca\xbcn|\xca\xbcN||" "\xc5\xbf|s|S||" "\xc7\x85|\xc7\x86|\xc7\x84|\xc7\x86|" "\xc7\x88|\xc7\x89|\xc7\x87|\xc7\x89|" "\xc7\x8b|\xc7\x8c|\xc7\x8a|\xc7\x8c|" "\xc7\xb0|j\xcc\x8c|J\xcc\x8c||" "\xc7\xb2|\xc7\xb3|\xc7\xb1|\xc7\xb3|" "\xcd\x85|\xce\xb9|\xce\x99||" "\xce\x90|\xce\xb9\xcc\x88\xcc\x81|\xce\x99\xcc\x88\xcc\x81||" "\xce\xb0|\xcf\x85\xcc\x88\xcc\x81|\xce\xa5\xcc\x88\xcc\x81||" "\xcf\x82|\xcf\x83|\xce\xa3||" "\xcf\x90|\xce\xb2|\xce\x92||" "\xcf\x91|\xce\xb8|\xce\x98||" "\xcf\x95|\xcf\x86|\xce\xa6||" "\xcf\x96|\xcf\x80|\xce\xa0||" "\xcf\xb0|\xce\xba|\xce\x9a||" "\xcf\xb1|\xcf\x81|\xce\xa1||" "\xcf\xb4|\xce\xb8||\xce\xb8|" "\xcf\xb5|\xce\xb5|\xce\x95||" "\xd6\x87|\xd5\xa5\xd6\x82|\xd4\xb5\xd5\x92||" "\xe1\xba\x96|h\xcc\xb1|H\xcc\xb1||" "\xe1\xba\x97|t\xcc\x88|T\xcc\x88||" "\xe1\xba\x98|w\xcc\x8a|W\xcc\x8a||" "\xe1\xba\x99|y\xcc\x8a|Y\xcc\x8a||" "\xe1\xba\x9a|a\xca\xbe|A\xca\xbe||" "\xe1\xba\x9b|\xe1\xb9\xa1|\xe1\xb9\xa0||" "\xe1\xba\x9e|ss||\xc3\x9f|" "\xe1\xbd\x90|\xcf\x85\xcc\x93|\xce\xa5\xcc\x93||" "\xe1\xbd\x92|\xcf\x85\xcc\x93\xcc\x80|\xce\xa5\xcc\x93\xcc\x80||" "\xe1\xbd\x94|\xcf\x85\xcc\x93\xcc\x81|\xce\xa5\xcc\x93\xcc\x81||" "\xe1\xbd\x96|\xcf\x85\xcc\x93\xcd\x82|\xce\xa5\xcc\x93\xcd\x82||" "\xe1\xbe\x80|\xe1\xbc\x80\xce\xb9|\xe1\xbc\x88\xce\x99||" "\xe1\xbe\x81|\xe1\xbc\x81\xce\xb9|\xe1\xbc\x89\xce\x99||" "\xe1\xbe\x82|\xe1\xbc\x82\xce\xb9|\xe1\xbc\x8a\xce\x99||" "\xe1\xbe\x83|\xe1\xbc\x83\xce\xb9|\xe1\xbc\x8b\xce\x99||" "\xe1\xbe\x84|\xe1\xbc\x84\xce\xb9|\xe1\xbc\x8c\xce\x99||" "\xe1\xbe\x85|\xe1\xbc\x85\xce\xb9|\xe1\xbc\x8d\xce\x99||" "\xe1\xbe\x86|\xe1\xbc\x86\xce\xb9|\xe1\xbc\x8e\xce\x99||" "\xe1\xbe\x87|\xe1\xbc\x87\xce\xb9|\xe1\xbc\x8f\xce\x99||" "\xe1\xbe\x88|\xe1\xbc\x80\xce\xb9|\xe1\xbc\x88\xce\x99|\xe1\xbe\x80|" "\xe1\xbe\x89|\xe1\xbc\x81\xce\xb9|\xe1\xbc\x89\xce\x99|\xe1\xbe\x81|" "\xe1\xbe\x8a|\xe1\xbc\x82\xce\xb9|\xe1\xbc\x8a\xce\x99|\xe1\xbe\x82|" "\xe1\xbe\x8b|\xe1\xbc\x83\xce\xb9|\xe1\xbc\x8b\xce\x99|\xe1\xbe\x83|" "\xe1\xbe\x8c|\xe1\xbc\x84\xce\xb9|\xe1\xbc\x8c\xce\x99|\xe1\xbe\x84|" "\xe1\xbe\x8d|\xe1\xbc\x85\xce\xb9|\xe1\xbc\x8d\xce\x99|\xe1\xbe\x85|" "\xe1\xbe\x8e|\xe1\xbc\x86\xce\xb9|\xe1\xbc\x8e\xce\x99|\xe1\xbe\x86|" "\xe1\xbe\x8f|\xe1\xbc\x87\xce\xb9|\xe1\xbc\x8f\xce\x99|\xe1\xbe\x87|" "\xe1\xbe\x90|\xe1\xbc\xa0\xce\xb9|\xe1\xbc\xa8\xce\x99||" "\xe1\xbe\x91|\xe1\xbc\xa1\xce\xb9|\xe1\xbc\xa9\xce\x99||" "\xe1\xbe\x92|\xe1\xbc\xa2\xce\xb9|\xe1\xbc\xaa\xce\x99||" "\xe1\xbe\x93|\xe1\xbc\xa3\xce\xb9|\xe1\xbc\xab\xce\x99||" "\xe1\xbe\x94|\xe1\xbc\xa4\xce\xb9|\xe1\xbc\xac\xce\x99||" "\xe1\xbe\x95|\xe1\xbc\xa5\xce\xb9|\xe1\xbc\xad\xce\x99||" "\xe1\xbe\x96|\xe1\xbc\xa6\xce\xb9|\xe1\xbc\xae\xce\x99||" "\xe1\xbe\x97|\xe1\xbc\xa7\xce\xb9|\xe1\xbc\xaf\xce\x99||" "\xe1\xbe\x98|\xe1\xbc\xa0\xce\xb9|\xe1\xbc\xa8\xce\x99|\xe1\xbe\x90|" "\xe1\xbe\x99|\xe1\xbc\xa1\xce\xb9|\xe1\xbc\xa9\xce\x99|\xe1\xbe\x91|" "\xe1\xbe\x9a|\xe1\xbc\xa2\xce\xb9|\xe1\xbc\xaa\xce\x99|\xe1\xbe\x92|" "\xe1\xbe\x9b|\xe1\xbc\xa3\xce\xb9|\xe1\xbc\xab\xce\x99|\xe1\xbe\x93|" "\xe1\xbe\x9c|\xe1\xbc\xa4\xce\xb9|\xe1\xbc\xac\xce\x99|\xe1\xbe\x94|" "\xe1\xbe\x9d|\xe1\xbc\xa5\xce\xb9|\xe1\xbc\xad\xce\x99|\xe1\xbe\x95|" "\xe1\xbe\x9e|\xe1\xbc\xa6\xce\xb9|\xe1\xbc\xae\xce\x99|\xe1\xbe\x96|" 
"\xe1\xbe\x9f|\xe1\xbc\xa7\xce\xb9|\xe1\xbc\xaf\xce\x99|\xe1\xbe\x97|" "\xe1\xbe\xa0|\xe1\xbd\xa0\xce\xb9|\xe1\xbd\xa8\xce\x99||" "\xe1\xbe\xa1|\xe1\xbd\xa1\xce\xb9|\xe1\xbd\xa9\xce\x99||" "\xe1\xbe\xa2|\xe1\xbd\xa2\xce\xb9|\xe1\xbd\xaa\xce\x99||" "\xe1\xbe\xa3|\xe1\xbd\xa3\xce\xb9|\xe1\xbd\xab\xce\x99||" "\xe1\xbe\xa4|\xe1\xbd\xa4\xce\xb9|\xe1\xbd\xac\xce\x99||" "\xe1\xbe\xa5|\xe1\xbd\xa5\xce\xb9|\xe1\xbd\xad\xce\x99||" "\xe1\xbe\xa6|\xe1\xbd\xa6\xce\xb9|\xe1\xbd\xae\xce\x99||" "\xe1\xbe\xa7|\xe1\xbd\xa7\xce\xb9|\xe1\xbd\xaf\xce\x99||" "\xe1\xbe\xa8|\xe1\xbd\xa0\xce\xb9|\xe1\xbd\xa8\xce\x99|\xe1\xbe\xa0|" "\xe1\xbe\xa9|\xe1\xbd\xa1\xce\xb9|\xe1\xbd\xa9\xce\x99|\xe1\xbe\xa1|" "\xe1\xbe\xaa|\xe1\xbd\xa2\xce\xb9|\xe1\xbd\xaa\xce\x99|\xe1\xbe\xa2|" "\xe1\xbe\xab|\xe1\xbd\xa3\xce\xb9|\xe1\xbd\xab\xce\x99|\xe1\xbe\xa3|" "\xe1\xbe\xac|\xe1\xbd\xa4\xce\xb9|\xe1\xbd\xac\xce\x99|\xe1\xbe\xa4|" "\xe1\xbe\xad|\xe1\xbd\xa5\xce\xb9|\xe1\xbd\xad\xce\x99|\xe1\xbe\xa5|" "\xe1\xbe\xae|\xe1\xbd\xa6\xce\xb9|\xe1\xbd\xae\xce\x99|\xe1\xbe\xa6|" "\xe1\xbe\xaf|\xe1\xbd\xa7\xce\xb9|\xe1\xbd\xaf\xce\x99|\xe1\xbe\xa7|" "\xe1\xbe\xb2|\xe1\xbd\xb0\xce\xb9|\xe1\xbe\xba\xce\x99||" "\xe1\xbe\xb3|\xce\xb1\xce\xb9|\xce\x91\xce\x99||" "\xe1\xbe\xb4|\xce\xac\xce\xb9|\xce\x86\xce\x99||" "\xe1\xbe\xb6|\xce\xb1\xcd\x82|\xce\x91\xcd\x82||" "\xe1\xbe\xb7|\xce\xb1\xcd\x82\xce\xb9|\xce\x91\xcd\x82\xce\x99||" "\xe1\xbe\xbc|\xce\xb1\xce\xb9|\xce\x91\xce\x99|\xe1\xbe\xb3|" "\xe1\xbe\xbe|\xce\xb9|\xce\x99||" "\xe1\xbf\x82|\xe1\xbd\xb4\xce\xb9|\xe1\xbf\x8a\xce\x99||" "\xe1\xbf\x83|\xce\xb7\xce\xb9|\xce\x97\xce\x99||" "\xe1\xbf\x84|\xce\xae\xce\xb9|\xce\x89\xce\x99||" "\xe1\xbf\x86|\xce\xb7\xcd\x82|\xce\x97\xcd\x82||" "\xe1\xbf\x87|\xce\xb7\xcd\x82\xce\xb9|\xce\x97\xcd\x82\xce\x99||" "\xe1\xbf\x8c|\xce\xb7\xce\xb9|\xce\x97\xce\x99|\xe1\xbf\x83|" "\xe1\xbf\x92|\xce\xb9\xcc\x88\xcc\x80|\xce\x99\xcc\x88\xcc\x80||" "\xe1\xbf\x93|\xce\xb9\xcc\x88\xcc\x81|\xce\x99\xcc\x88\xcc\x81||" "\xe1\xbf\x96|\xce\xb9\xcd\x82|\xce\x99\xcd\x82||" "\xe1\xbf\x97|\xce\xb9\xcc\x88\xcd\x82|\xce\x99\xcc\x88\xcd\x82||" "\xe1\xbf\xa2|\xcf\x85\xcc\x88\xcc\x80|\xce\xa5\xcc\x88\xcc\x80||" "\xe1\xbf\xa3|\xcf\x85\xcc\x88\xcc\x81|\xce\xa5\xcc\x88\xcc\x81||" "\xe1\xbf\xa4|\xcf\x81\xcc\x93|\xce\xa1\xcc\x93||" "\xe1\xbf\xa6|\xcf\x85\xcd\x82|\xce\xa5\xcd\x82||" "\xe1\xbf\xa7|\xcf\x85\xcc\x88\xcd\x82|\xce\xa5\xcc\x88\xcd\x82||" "\xe1\xbf\xb2|\xe1\xbd\xbc\xce\xb9|\xe1\xbf\xba\xce\x99||" "\xe1\xbf\xb3|\xcf\x89\xce\xb9|\xce\xa9\xce\x99||" "\xe1\xbf\xb4|\xcf\x8e\xce\xb9|\xce\x8f\xce\x99||" "\xe1\xbf\xb6|\xcf\x89\xcd\x82|\xce\xa9\xcd\x82||" "\xe1\xbf\xb7|\xcf\x89\xcd\x82\xce\xb9|\xce\xa9\xcd\x82\xce\x99||" "\xe1\xbf\xbc|\xcf\x89\xce\xb9|\xce\xa9\xce\x99|\xe1\xbf\xb3|" "\xe2\x84\xa6|\xcf\x89||\xcf\x89|" "\xe2\x84\xaa|k||k|" "\xe2\x84\xab|\xc3\xa5||\xc3\xa5|" "\xef\xac\x80|ff|FF||" "\xef\xac\x81|fi|FI||" "\xef\xac\x82|fl|FL||" "\xef\xac\x83|ffi|FFI||" "\xef\xac\x84|ffl|FFL||" "\xef\xac\x85|st|ST||" "\xef\xac\x86|st|ST||" "\xef\xac\x93|\xd5\xb4\xd5\xb6|\xd5\x84\xd5\x86||" "\xef\xac\x94|\xd5\xb4\xd5\xa5|\xd5\x84\xd4\xb5||" "\xef\xac\x95|\xd5\xb4\xd5\xab|\xd5\x84\xd4\xbb||" "\xef\xac\x96|\xd5\xbe\xd5\xb6|\xd5\x8e\xd5\x86||" "\xef\xac\x97|\xd5\xb4\xd5\xad|\xd5\x84\xd4\xbd||" //--Autogenerated -- end of section automatically generated ; class CaseConverter : public ICaseConverter { // Maximum length of a case conversion result is 6 bytes in UTF-8 enum { maxConversionLength=6 }; struct ConversionString { char conversion[maxConversionLength+1]; ConversionString() { conversion[0] = '\0'; } 
}; // Conversions are initially store in a vector of structs but then decomposed into // parallel arrays as that is about 10% faster to search. struct CharacterConversion { int character; ConversionString conversion; CharacterConversion(int character_=0, const char *conversion_="") : character(character_) { StringCopy(conversion.conversion, conversion_); } bool operator<(const CharacterConversion &other) const { return character < other.character; } }; typedef std::vector<CharacterConversion> CharacterToConversion; CharacterToConversion characterToConversion; // The parallel arrays std::vector<int> characters; std::vector<ConversionString> conversions; public: CaseConverter() { } bool Initialised() const { return characters.size() > 0; } void Add(int character, const char *conversion) { characterToConversion.push_back(CharacterConversion(character, conversion)); } const char *Find(int character) { const std::vector<int>::iterator it = std::lower_bound(characters.begin(), characters.end(), character); if (it == characters.end()) return 0; else if (*it == character) return conversions[it - characters.begin()].conversion; else return 0; } size_t CaseConvertString(char *converted, size_t sizeConverted, const char *mixed, size_t lenMixed) { size_t lenConverted = 0; size_t mixedPos = 0; unsigned char bytes[UTF8MaxBytes + 1]; while (mixedPos < lenMixed) { const unsigned char leadByte = static_cast<unsigned char>(mixed[mixedPos]); const char *caseConverted = 0; size_t lenMixedChar = 1; if (UTF8IsAscii(leadByte)) { caseConverted = Find(leadByte); } else { bytes[0] = leadByte; const int widthCharBytes = UTF8BytesOfLead[leadByte]; for (int b=1; b<widthCharBytes; b++) { bytes[b] = (mixedPos+b < lenMixed) ? mixed[mixedPos+b] : 0; } int classified = UTF8Classify(bytes, widthCharBytes); if (!(classified & UTF8MaskInvalid)) { // valid UTF-8 lenMixedChar = classified & UTF8MaskWidth; int character = UnicodeFromUTF8(bytes); caseConverted = Find(character); } } if (caseConverted) { // Character has a conversion so copy that conversion in while (*caseConverted) { converted[lenConverted++] = *caseConverted++; if (lenConverted >= sizeConverted) return 0; } } else { // Character has no conversion so copy the input to output for (size_t i=0; i<lenMixedChar; i++) { converted[lenConverted++] = mixed[mixedPos+i]; if (lenConverted >= sizeConverted) return 0; } } mixedPos += lenMixedChar; } return lenConverted; } void FinishedAdding() { std::sort(characterToConversion.begin(), characterToConversion.end()); characters.reserve(characterToConversion.size()); conversions.reserve(characterToConversion.size()); for (CharacterToConversion::iterator it = characterToConversion.begin(); it != characterToConversion.end(); ++it) { characters.push_back(it->character); conversions.push_back(it->conversion); } // Empty the original calculated data completely CharacterToConversion().swap(characterToConversion); } }; CaseConverter caseConvFold; CaseConverter caseConvUp; CaseConverter caseConvLow; void UTF8FromUTF32Character(int uch, char *putf) { size_t k = 0; if (uch < 0x80) { putf[k++] = static_cast<char>(uch); } else if (uch < 0x800) { putf[k++] = static_cast<char>(0xC0 | (uch >> 6)); putf[k++] = static_cast<char>(0x80 | (uch & 0x3f)); } else if (uch < 0x10000) { putf[k++] = static_cast<char>(0xE0 | (uch >> 12)); putf[k++] = static_cast<char>(0x80 | ((uch >> 6) & 0x3f)); putf[k++] = static_cast<char>(0x80 | (uch & 0x3f)); } else { putf[k++] = static_cast<char>(0xF0 | (uch >> 18)); putf[k++] = static_cast<char>(0x80 | ((uch >> 12) & 
0x3f)); putf[k++] = static_cast<char>(0x80 | ((uch >> 6) & 0x3f)); putf[k++] = static_cast<char>(0x80 | (uch & 0x3f)); } putf[k] = 0; } void AddSymmetric(enum CaseConversion conversion, int lower,int upper) { char lowerUTF8[UTF8MaxBytes+1]; UTF8FromUTF32Character(lower, lowerUTF8); char upperUTF8[UTF8MaxBytes+1]; UTF8FromUTF32Character(upper, upperUTF8); switch (conversion) { case CaseConversionFold: caseConvFold.Add(upper, lowerUTF8); break; case CaseConversionUpper: caseConvUp.Add(lower, upperUTF8); break; case CaseConversionLower: caseConvLow.Add(upper, lowerUTF8); break; } } void SetupConversions(enum CaseConversion conversion) { // First initialize for the symmetric ranges for (size_t i=0; i<ELEMENTS(symmetricCaseConversionRanges);) { int lower = symmetricCaseConversionRanges[i++]; int upper = symmetricCaseConversionRanges[i++]; int length = symmetricCaseConversionRanges[i++]; int pitch = symmetricCaseConversionRanges[i++]; for (int j=0; j<length*pitch; j+=pitch) { AddSymmetric(conversion, lower+j, upper+j); } } // Add the symmetric singletons for (size_t i=0; i<ELEMENTS(symmetricCaseConversions);) { int lower = symmetricCaseConversions[i++]; int upper = symmetricCaseConversions[i++]; AddSymmetric(conversion, lower, upper); } // Add the complex cases const char *sComplex = complexCaseConversions; while (*sComplex) { // Longest ligature is 3 character so 5 for safety const size_t lenUTF8 = 5*UTF8MaxBytes+1; char originUTF8[lenUTF8]; char foldedUTF8[lenUTF8]; char lowerUTF8[lenUTF8]; char upperUTF8[lenUTF8]; size_t i = 0; while (*sComplex && *sComplex != '|') { originUTF8[i++] = *sComplex; sComplex++; } sComplex++; originUTF8[i] = 0; i = 0; while (*sComplex && *sComplex != '|') { foldedUTF8[i++] = *sComplex; sComplex++; } sComplex++; foldedUTF8[i] = 0; i = 0; while (*sComplex && *sComplex != '|') { upperUTF8[i++] = *sComplex; sComplex++; } sComplex++; upperUTF8[i] = 0; i = 0; while (*sComplex && *sComplex != '|') { lowerUTF8[i++] = *sComplex; sComplex++; } sComplex++; lowerUTF8[i] = 0; int character = UnicodeFromUTF8(reinterpret_cast<unsigned char *>(originUTF8)); if (conversion == CaseConversionFold && foldedUTF8[0]) { caseConvFold.Add(character, foldedUTF8); } if (conversion == CaseConversionUpper && upperUTF8[0]) { caseConvUp.Add(character, upperUTF8); } if (conversion == CaseConversionLower && lowerUTF8[0]) { caseConvLow.Add(character, lowerUTF8); } } switch (conversion) { case CaseConversionFold: caseConvFold.FinishedAdding(); break; case CaseConversionUpper: caseConvUp.FinishedAdding(); break; case CaseConversionLower: caseConvLow.FinishedAdding(); break; } } CaseConverter *ConverterForConversion(enum CaseConversion conversion) { switch (conversion) { case CaseConversionFold: return &caseConvFold; case CaseConversionUpper: return &caseConvUp; case CaseConversionLower: return &caseConvLow; } return 0; } } #ifdef SCI_NAMESPACE namespace Scintilla { #endif ICaseConverter *ConverterFor(enum CaseConversion conversion) { CaseConverter *pCaseConv = ConverterForConversion(conversion); if (!pCaseConv->Initialised()) SetupConversions(conversion); return pCaseConv; } const char *CaseConvert(int character, enum CaseConversion conversion) { CaseConverter *pCaseConv = ConverterForConversion(conversion); if (!pCaseConv->Initialised()) SetupConversions(conversion); return pCaseConv->Find(character); } size_t CaseConvertString(char *converted, size_t sizeConverted, const char *mixed, size_t lenMixed, enum CaseConversion conversion) { CaseConverter *pCaseConv = ConverterForConversion(conversion); 
if (!pCaseConv->Initialised()) SetupConversions(conversion); return pCaseConv->CaseConvertString(converted, sizeConverted, mixed, lenMixed); } std::string CaseConvertString(const std::string &s, enum CaseConversion conversion) { std::string retMapped(s.length() * maxExpansionCaseConversion, 0); size_t lenMapped = CaseConvertString(&retMapped[0], retMapped.length(), s.c_str(), s.length(), conversion); retMapped.resize(lenMapped); return retMapped; } #ifdef SCI_NAMESPACE } #endif
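A short usage sketch for the std::string overload defined just above (the literal is illustrative, and under SCI_NAMESPACE the call would be qualified with Scintilla::):

// Case-insensitive comparison via folding: U+00DF (ß, "\xc3\x9f" in UTF-8)
// expands to "ss" through the complexCaseConversions table.
std::string folded = CaseConvertString(std::string("Stra\xc3\x9f" "e"), CaseConversionFold);
// folded == "strasse", which equals CaseConvertString("STRASSE", CaseConversionFold)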
29.764341
133
0.68455
PLohrmannAMD
4fb7a2f953cc7c8e7943c2a7b5d9c4caf1c4d5a3
1,144
cpp
C++
cpp/subprojects/common/src/common/data/vector_dok.cpp
Waguy02/Boomer-Scripted
b06bb9213d64dca0c05d41701dea12666931618c
[ "MIT" ]
8
2020-06-30T01:06:43.000Z
2022-03-14T01:58:29.000Z
cpp/subprojects/common/src/common/data/vector_dok.cpp
Waguy02/Boomer-Scripted
b06bb9213d64dca0c05d41701dea12666931618c
[ "MIT" ]
3
2020-12-14T11:30:18.000Z
2022-02-07T06:31:51.000Z
cpp/subprojects/common/src/common/data/vector_dok.cpp
Waguy02/Boomer-Scripted
b06bb9213d64dca0c05d41701dea12666931618c
[ "MIT" ]
4
2020-06-24T08:45:00.000Z
2021-12-23T21:44:51.000Z
#include "common/data/vector_dok.hpp" template<typename T> DokVector<T>::DokVector(T sparseValue) : sparseValue_(sparseValue) { } template<typename T> typename DokVector<T>::iterator DokVector<T>::begin() { return data_.begin(); } template<typename T> typename DokVector<T>::iterator DokVector<T>::end() { return data_.end(); } template<typename T> typename DokVector<T>::const_iterator DokVector<T>::cbegin() const { return data_.cbegin(); } template<typename T> typename DokVector<T>::const_iterator DokVector<T>::cend() const { return data_.cend(); } template<typename T> const T& DokVector<T>::operator[](uint32 pos) const { auto it = data_.find(pos); return it != data_.cend() ? it->second : sparseValue_; } template<typename T> void DokVector<T>::set(uint32 pos, T value) { auto result = data_.emplace(pos, value); if (!result.second) { result.first->second = value; } } template<typename T> void DokVector<T>::clear() { data_.clear(); } template class DokVector<uint8>; template class DokVector<uint32>; template class DokVector<float32>; template class DokVector<float64>;
21.584906
68
0.698427
Waguy02
4fb7cb08b00f7a87f694463e9c2e14d8f0ad616a
1,635
cpp
C++
source/adios2/operator/compress/CompressNull.cpp
taniabanerjee/ADIOS2
b32205071a22ea6319c34ed85fb1c47265c76a9d
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
source/adios2/operator/compress/CompressNull.cpp
taniabanerjee/ADIOS2
b32205071a22ea6319c34ed85fb1c47265c76a9d
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
source/adios2/operator/compress/CompressNull.cpp
taniabanerjee/ADIOS2
b32205071a22ea6319c34ed85fb1c47265c76a9d
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
/*
 * Distributed under the OSI-approved Apache License, Version 2.0. See
 * accompanying file Copyright.txt for details.
 *
 * CompressNull.cpp
 *
 * Created on: Dec 1, 2021
 *     Author: Jason Wang jason.ruonan.wang@gmail.com
 */

#include <cstring> // std::memcpy

#include "CompressNull.h"
#include "adios2/helper/adiosFunctions.h"

namespace adios2
{
namespace core
{
namespace compress
{

CompressNull::CompressNull(const Params &parameters)
: Operator("null", COMPRESS_NULL, "compress", parameters)
{
}

size_t CompressNull::Operate(const char *dataIn, const Dims &blockStart,
                             const Dims &blockCount, const DataType varType,
                             char *bufferOut)
{
    const uint8_t bufferVersion = 1;
    size_t bufferOutOffset = 0;

    MakeCommonHeader(bufferOut, bufferOutOffset, bufferVersion);

    size_t totalInputBytes =
        helper::GetTotalSize(blockCount, helper::GetDataTypeSize(varType));
    PutParameter(bufferOut, bufferOutOffset, totalInputBytes);
    std::memcpy(bufferOut + bufferOutOffset, dataIn, totalInputBytes);
    bufferOutOffset += totalInputBytes;
    return bufferOutOffset;
}

size_t CompressNull::InverseOperate(const char *bufferIn, const size_t sizeIn,
                                    char *dataOut)
{
    size_t bufferInOffset = 4; // skip common header
    const size_t totalBytes = GetParameter<size_t>(bufferIn, bufferInOffset);
    std::memcpy(dataOut, bufferIn + bufferInOffset, totalBytes);
    return totalBytes;
}

bool CompressNull::IsDataTypeValid(const DataType type) const { return true; }

} // end namespace compress
} // end namespace core
} // end namespace adios2
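The framing Operate writes is worth spelling out, since InverseOperate has to recover it blind. A sketch of the byte layout, where the 4-byte header size is inferred from the bufferInOffset = 4 skip above (MakeCommonHeader's body is not shown in this file):

// [0, 4)                     common header (operator id, buffer version, ...)
// [4, 4 + sizeof(size_t))    totalInputBytes, written by PutParameter
// [4 + sizeof(size_t), ...)  raw payload, memcpy'd verbatim from dataIn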
29.196429
78
0.703976
taniabanerjee
4fb7e82857d84cd5366a009336fff5bb0ea3b537
1,814
cpp
C++
tests/src/test_rates_root.cpp
JhaPrajjwal/k40gen
89cd669d8e7327569705033ea299f32849a0a896
[ "Apache-2.0" ]
3
2019-03-22T15:03:29.000Z
2019-07-03T12:08:48.000Z
tests/src/test_rates_root.cpp
JhaPrajjwal/k40gen
89cd669d8e7327569705033ea299f32849a0a896
[ "Apache-2.0" ]
1
2019-04-01T08:21:04.000Z
2019-04-01T08:22:45.000Z
tests/src/test_rates_root.cpp
JhaPrajjwal/k40gen
89cd669d8e7327569705033ea299f32849a0a896
[ "Apache-2.0" ]
5
2019-03-22T15:03:33.000Z
2019-07-16T19:19:40.000Z
#include <numeric>
#include <iostream>
#include <array>
#include <iomanip>
#include <cmath>   // std::lround, std::fabs (previously pulled in transitively)
#include <map>
#include <memory>

#include <generate.h>
#include <storage.h>
#include <test_functions.h>

#include <TApplication.h>
#include <TH1.h>
#include <TF1.h>
#include <TFitResult.h>
#include <TCanvas.h>
#include <TROOT.h>

#define CATCH_CONFIG_RUNNER
#include "catch2/catch.hpp"

using namespace std;

int main(int argc, char* argv[]) {
   gROOT->SetBatch();
   return Catch::Session().run( argc, argv );
}

TEST_CASE( "Rates makes sense [ROOT]", "[rates_ROOT]" ) {

   map<bool, unique_ptr<TH1I>> histos;
   array<float, 4> rates{7000., 0., 0., 0.};

   for (bool use_avx2 : {false, true}) {
      string name = string{"diffs_"} + (use_avx2 ? "avx2" : "scalar");
      auto r = histos.emplace(use_avx2, make_unique<TH1I>(name.c_str(), name.c_str(), 100, 0, 1000000));
      auto& time_histo = r.first->second;

      Generators gens{1052, 9523, rates};

      long dt = std::lround(1e7);
      long time_start = 0, time_end = time_start + dt;
      auto [times, values] = generate(time_start, time_end, gens, "reference", use_avx2);

      const size_t n_times = times.size();
      for (size_t i = 0; i < n_times - 1; ++i) {
         // only histogram gaps between consecutive hits with the same id (value >> 8)
         if (((values[i + 1]) >> 8) == (values[i] >> 8)) {
            time_histo->Fill(times[i + 1] - times[i]);
         }
      }

      TF1 expo{"exp", "expo", time_histo->GetBinCenter(1),
               time_histo->GetBinCenter(1 + time_histo->GetNbinsX())};
      auto fit = time_histo->Fit(&expo, "RS");

      // parameter is negative
      REQUIRE(std::fabs(rates[0] + (fit->Parameter(1) * 1e9)) / rates[0] < 1e-3);
   }

   TCanvas canvas{"canvas", "canvas", 600, 800};
   canvas.Divide(1, 2);
   for (auto& [arch, histo] : histos) {
      canvas.cd(arch + 1);
      histo->Draw();
   }
   canvas.Print("distributions.png");
}
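The final REQUIRE leans on a standard fact the test never states: for a Poisson process with rate lambda (here rates[0] = 7000 Hz), gaps between consecutive hits on one channel are exponentially distributed, p(dt) ~ exp(-lambda * dt). ROOT's built-in "expo" model fits exp(p0 + p1*x) with x in ns here, so the fitted slope should come out near p1 = -lambda * 1e-9. A two-line sanity check under those assumptions:

// Expected slope for rates[0] = 7000 Hz with the time axis in ns:
double expectedSlope = -7000.0 * 1e-9;   // about -7e-6 per ns
// REQUIRE(fabs(rates[0] + p1 * 1e9) / rates[0] < 1e-3) asserts the fitted
// p1 matches expectedSlope to within 0.1% of the configured rate.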
26.289855
104
0.601985
JhaPrajjwal
4fb84a3b3aa2b769bcbd0205f640df54c7e9d1fc
13,607
cc
C++
src/atlas/parallel/GatherScatter.cc
wdeconinck/atlas
8949d2b362b9b5431023a967bcf4ca84f6b8ce05
[ "Apache-2.0" ]
3
2021-08-17T03:08:45.000Z
2021-09-09T09:22:54.000Z
src/atlas/parallel/GatherScatter.cc
pmarguinaud/atlas
7e0a1251685e07a5dcccc84f4d9251d5a066e2ee
[ "Apache-2.0" ]
62
2020-10-21T15:27:38.000Z
2022-03-28T12:42:43.000Z
src/atlas/parallel/GatherScatter.cc
pmarguinaud/atlas
7e0a1251685e07a5dcccc84f4d9251d5a066e2ee
[ "Apache-2.0" ]
1
2021-03-10T19:19:08.000Z
2021-03-10T19:19:08.000Z
/* * (C) Copyright 2013 ECMWF. * * This software is licensed under the terms of the Apache Licence Version 2.0 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. * In applying this licence, ECMWF does not waive the privileges and immunities * granted to it by virtue of its status as an intergovernmental organisation * nor does it submit to any jurisdiction. */ #include <algorithm> #include <iostream> #include <numeric> #include <sstream> #include <stdexcept> #include "atlas/array.h" #include "atlas/array/ArrayView.h" #include "atlas/parallel/GatherScatter.h" #include "atlas/parallel/mpi/Statistics.h" #include "atlas/runtime/Log.h" #include "atlas/runtime/Trace.h" namespace atlas { namespace parallel { namespace { struct IsGhostPoint { IsGhostPoint( const int part[], const idx_t ridx[], const idx_t base, const int N ) { part_ = part; ridx_ = ridx; base_ = base; mypart_ = mpi::rank(); } bool operator()( idx_t idx ) { if ( part_[idx] != mypart_ ) { return true; } if ( ridx_[idx] != base_ + idx ) { return true; } return false; } int mypart_; const int* part_; const idx_t* ridx_; idx_t base_; }; struct Node { int p; idx_t i; gidx_t g; Node() = default; Node( gidx_t gid, int part, idx_t idx ) { g = gid; p = part; i = idx; } bool operator<( const Node& other ) const { return ( g < other.g ); } bool operator==( const Node& other ) const { return ( g == other.g ); } }; } // namespace GatherScatter::GatherScatter() : name_(), is_setup_( false ) { myproc = mpi::rank(); nproc = mpi::size(); } GatherScatter::GatherScatter( const std::string& name ) : name_( name ), is_setup_( false ) { myproc = mpi::rank(); nproc = mpi::size(); } void GatherScatter::setup( const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const int mask[], const idx_t parsize ) { ATLAS_TRACE( "GatherScatter::setup" ); parsize_ = parsize; glbcounts_.resize( nproc ); glbcounts_.assign( nproc, 0 ); glbdispls_.resize( nproc ); glbdispls_.assign( nproc, 0 ); const idx_t nvar = 3; std::vector<gidx_t> sendnodes( parsize_ * nvar ); loccnt_ = 0; for ( idx_t n = 0; n < parsize_; ++n ) { if ( !mask[n] ) { sendnodes[loccnt_++] = glb_idx[n]; sendnodes[loccnt_++] = part[n]; sendnodes[loccnt_++] = remote_idx[n] - base; } } ATLAS_TRACE_MPI( ALLGATHER ) { mpi::comm().allGather( loccnt_, glbcounts_.begin(), glbcounts_.end() ); } glbcnt_ = std::accumulate( glbcounts_.begin(), glbcounts_.end(), 0 ); glbdispls_[0] = 0; for ( idx_t jproc = 1; jproc < nproc; ++jproc ) // start at 1 { glbdispls_[jproc] = glbcounts_[jproc - 1] + glbdispls_[jproc - 1]; } std::vector<gidx_t> recvnodes( glbcnt_ ); ATLAS_TRACE_MPI( ALLGATHER ) { mpi::comm().allGatherv( sendnodes.begin(), sendnodes.begin() + loccnt_, recvnodes.data(), glbcounts_.data(), glbdispls_.data() ); } // Load recvnodes in sorting structure idx_t nb_recv_nodes = glbcnt_ / nvar; std::vector<Node> node_sort( nb_recv_nodes ); for ( idx_t n = 0; n < nb_recv_nodes; ++n ) { node_sort[n].g = recvnodes[n * nvar + 0]; node_sort[n].p = recvnodes[n * nvar + 1]; node_sort[n].i = recvnodes[n * nvar + 2]; } recvnodes.clear(); // Sort on "g" member, and remove duplicates ATLAS_TRACE_SCOPE( "sorting" ) { std::sort( node_sort.begin(), node_sort.end() ); node_sort.erase( std::unique( node_sort.begin(), node_sort.end() ), node_sort.end() ); } glbcounts_.assign( nproc, 0 ); glbdispls_.assign( nproc, 0 ); for ( size_t n = 0; n < node_sort.size(); ++n ) { ++glbcounts_[node_sort[n].p]; } glbdispls_[0] = 0; for ( idx_t jproc = 1; jproc < nproc; ++jproc ) // start at 1 { 
glbdispls_[jproc] = glbcounts_[jproc - 1] + glbdispls_[jproc - 1]; } glbcnt_ = std::accumulate( glbcounts_.begin(), glbcounts_.end(), 0 ); loccnt_ = glbcounts_[myproc]; glbmap_.clear(); glbmap_.resize( glbcnt_ ); locmap_.clear(); locmap_.resize( loccnt_ ); std::vector<int> idx( nproc, 0 ); int n{0}; for ( const auto& node : node_sort ) { idx_t jproc = node.p; glbmap_[glbdispls_[jproc] + idx[jproc]] = n++; if ( jproc == myproc ) { locmap_[idx[jproc]] = node.i; } ++idx[jproc]; } is_setup_ = true; } void GatherScatter::setup( const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const idx_t parsize ) { std::vector<int> mask( parsize ); IsGhostPoint is_ghost( part, remote_idx, base, parsize ); for ( idx_t jj = 0; jj < parsize; ++jj ) { mask[jj] = is_ghost( jj ) ? 1 : 0; } setup( part, remote_idx, base, glb_idx, mask.data(), parsize ); } ///////////////////// GatherScatter* atlas__GatherScatter__new() { return new GatherScatter(); } void atlas__GatherScatter__delete( GatherScatter* This ) { delete This; } void atlas__GatherScatter__setup32( GatherScatter* This, int part[], idx_t remote_idx[], int base, int glb_idx[], int parsize ) { #if ATLAS_BITS_GLOBAL == 32 This->setup( part, remote_idx, base, glb_idx, parsize ); #else std::vector<gidx_t> glb_idx_convert( parsize ); for ( int j = 0; j < parsize; ++j ) { glb_idx_convert[j] = glb_idx[j]; } This->setup( part, remote_idx, base, glb_idx_convert.data(), parsize ); #endif } void atlas__GatherScatter__setup64( GatherScatter* This, int part[], idx_t remote_idx[], int base, long glb_idx[], int parsize ) { #if ATLAS_BITS_GLOBAL == 64 This->setup( part, remote_idx, base, glb_idx, parsize ); #else std::vector<gidx_t> glb_idx_convert( parsize ); for ( idx_t j = 0; j < parsize; ++j ) { glb_idx_convert[j] = glb_idx[j]; } This->setup( part, remote_idx, base, glb_idx_convert.data(), parsize ); #endif } int atlas__GatherScatter__glb_dof( GatherScatter* This ) { return This->glb_dof(); } void atlas__GatherScatter__gather_int( GatherScatter* This, int lfield[], int lvar_strides[], int lvar_extents[], int lvar_rank, int gfield[], int gvar_strides[], int gvar_extents[], int gvar_rank ) { std::vector<idx_t> lvstrides( lvar_rank ); std::vector<idx_t> lvextents( lvar_rank ); std::vector<idx_t> gvstrides( gvar_rank ); std::vector<idx_t> gvextents( gvar_rank ); for ( int n = 0; n < lvar_rank; ++n ) { lvstrides[n] = lvar_strides[n]; lvextents[n] = lvar_extents[n]; } for ( int n = 0; n < gvar_rank; ++n ) { gvstrides[n] = gvar_strides[n]; gvextents[n] = gvar_extents[n]; } This->gather( lfield, lvstrides.data(), lvextents.data(), lvar_rank, gfield, gvstrides.data(), gvextents.data(), gvar_rank ); } void atlas__GatherScatter__gather_long( GatherScatter* This, long lfield[], int lvar_strides[], int lvar_extents[], int lvar_rank, long gfield[], int gvar_strides[], int gvar_extents[], int gvar_rank ) { std::vector<idx_t> lvstrides( lvar_rank ); std::vector<idx_t> lvextents( lvar_rank ); std::vector<idx_t> gvstrides( gvar_rank ); std::vector<idx_t> gvextents( gvar_rank ); for ( int n = 0; n < lvar_rank; ++n ) { lvstrides[n] = lvar_strides[n]; lvextents[n] = lvar_extents[n]; } for ( int n = 0; n < gvar_rank; ++n ) { gvstrides[n] = gvar_strides[n]; gvextents[n] = gvar_extents[n]; } This->gather( lfield, lvstrides.data(), lvextents.data(), lvar_rank, gfield, gvstrides.data(), gvextents.data(), gvar_rank ); } void atlas__GatherScatter__gather_float( GatherScatter* This, float lfield[], int lvar_strides[], int lvar_extents[], int lvar_rank, float 
gfield[], int gvar_strides[], int gvar_extents[], int gvar_rank ) { std::vector<idx_t> lvstrides( lvar_rank ); std::vector<idx_t> lvextents( lvar_rank ); std::vector<idx_t> gvstrides( gvar_rank ); std::vector<idx_t> gvextents( gvar_rank ); for ( int n = 0; n < lvar_rank; ++n ) { lvstrides[n] = lvar_strides[n]; lvextents[n] = lvar_extents[n]; } for ( int n = 0; n < gvar_rank; ++n ) { gvstrides[n] = gvar_strides[n]; gvextents[n] = gvar_extents[n]; } This->gather( lfield, lvstrides.data(), lvextents.data(), lvar_rank, gfield, gvstrides.data(), gvextents.data(), gvar_rank ); } void atlas__GatherScatter__gather_double( GatherScatter* This, double lfield[], int lvar_strides[], int lvar_extents[], int lvar_rank, double gfield[], int gvar_strides[], int gvar_extents[], int gvar_rank ) { std::vector<idx_t> lvstrides( lvar_rank ); std::vector<idx_t> lvextents( lvar_rank ); std::vector<idx_t> gvstrides( gvar_rank ); std::vector<idx_t> gvextents( gvar_rank ); for ( int n = 0; n < lvar_rank; ++n ) { lvstrides[n] = lvar_strides[n]; lvextents[n] = lvar_extents[n]; } for ( int n = 0; n < gvar_rank; ++n ) { gvstrides[n] = gvar_strides[n]; gvextents[n] = gvar_extents[n]; } This->gather( lfield, lvstrides.data(), lvextents.data(), lvar_rank, gfield, gvstrides.data(), gvextents.data(), gvar_rank ); } void atlas__GatherScatter__scatter_int( GatherScatter* This, int gfield[], int gvar_strides[], int gvar_extents[], int gvar_rank, int lfield[], int lvar_strides[], int lvar_extents[], int lvar_rank ) { std::vector<idx_t> lvstrides( lvar_rank ); std::vector<idx_t> lvextents( lvar_rank ); std::vector<idx_t> gvstrides( gvar_rank ); std::vector<idx_t> gvextents( gvar_rank ); for ( int n = 0; n < lvar_rank; ++n ) { lvstrides[n] = lvar_strides[n]; lvextents[n] = lvar_extents[n]; } for ( int n = 0; n < gvar_rank; ++n ) { gvstrides[n] = gvar_strides[n]; gvextents[n] = gvar_extents[n]; } This->scatter( gfield, gvstrides.data(), gvextents.data(), gvar_rank, lfield, lvstrides.data(), lvextents.data(), lvar_rank ); } void atlas__GatherScatter__scatter_long( GatherScatter* This, long gfield[], int gvar_strides[], int gvar_extents[], int gvar_rank, long lfield[], int lvar_strides[], int lvar_extents[], int lvar_rank ) { std::vector<idx_t> lvstrides( lvar_rank ); std::vector<idx_t> lvextents( lvar_rank ); std::vector<idx_t> gvstrides( gvar_rank ); std::vector<idx_t> gvextents( gvar_rank ); for ( int n = 0; n < lvar_rank; ++n ) { lvstrides[n] = lvar_strides[n]; lvextents[n] = lvar_extents[n]; } for ( int n = 0; n < gvar_rank; ++n ) { gvstrides[n] = gvar_strides[n]; gvextents[n] = gvar_extents[n]; } This->scatter( gfield, gvstrides.data(), gvextents.data(), gvar_rank, lfield, lvstrides.data(), lvextents.data(), lvar_rank ); } void atlas__GatherScatter__scatter_float( GatherScatter* This, float gfield[], int gvar_strides[], int gvar_extents[], int gvar_rank, float lfield[], int lvar_strides[], int lvar_extents[], int lvar_rank ) { std::vector<idx_t> lvstrides( lvar_rank ); std::vector<idx_t> lvextents( lvar_rank ); std::vector<idx_t> gvstrides( gvar_rank ); std::vector<idx_t> gvextents( gvar_rank ); for ( int n = 0; n < lvar_rank; ++n ) { lvstrides[n] = lvar_strides[n]; lvextents[n] = lvar_extents[n]; } for ( int n = 0; n < gvar_rank; ++n ) { gvstrides[n] = gvar_strides[n]; gvextents[n] = gvar_extents[n]; } This->scatter( gfield, gvstrides.data(), gvextents.data(), gvar_rank, lfield, lvstrides.data(), lvextents.data(), lvar_rank ); } void atlas__GatherScatter__scatter_double( GatherScatter* This, double gfield[], int 
gvar_strides[], int gvar_extents[], int gvar_rank, double lfield[], int lvar_strides[], int lvar_extents[], int lvar_rank ) { std::vector<idx_t> lvstrides( lvar_rank ); std::vector<idx_t> lvextents( lvar_rank ); std::vector<idx_t> gvstrides( gvar_rank ); std::vector<idx_t> gvextents( gvar_rank ); for ( int n = 0; n < lvar_rank; ++n ) { lvstrides[n] = lvar_strides[n]; lvextents[n] = lvar_extents[n]; } for ( int n = 0; n < gvar_rank; ++n ) { gvstrides[n] = gvar_strides[n]; gvextents[n] = gvar_extents[n]; } This->scatter( gfield, gvstrides.data(), gvextents.data(), gvar_rank, lfield, lvstrides.data(), lvextents.data(), lvar_rank ); } ///////////////////// } // namespace parallel } // namespace atlas
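A short usage sketch may make the C API above easier to follow. This is an illustration only: it assumes a single-rank run in which every point is locally owned, and that the templated GatherScatter::gather overload mirrors the C wrappers above; the instance name, field values, and sizes are made up.

#include <vector>
#include "atlas/parallel/GatherScatter.h"

void example_gather() {  // hypothetical helper, not part of the file above
    using namespace atlas;
    const idx_t parsize = 4;
    std::vector<int>    part( parsize, 0 );            // owning partition of each point
    std::vector<idx_t>  remote_idx = {0, 1, 2, 3};     // local index on the owner
    std::vector<gidx_t> glb_idx    = {1, 2, 3, 4};     // global ids

    parallel::GatherScatter gs( "example" );
    gs.setup( part.data(), remote_idx.data(), /*base=*/0, glb_idx.data(), parsize );

    std::vector<double> lfield = {10., 20., 30., 40.};
    std::vector<double> gfield( gs.glb_dof() );        // gathered global field
    idx_t strides[] = {1};
    idx_t extents[] = {1};
    gs.gather( lfield.data(), strides, extents, 1,     // local -> global
               gfield.data(), strides, extents, 1 );
}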
36.18883
120
0.582715
wdeconinck
4fbc3d4675912824cbf04e4249193d83d9aa2617
4,826
cc
C++
mindspore/lite/src/delegate/tensorrt/op/allgather_tensorrt.cc
httpsgithu/mindspore
c29d6bb764e233b427319cb89ba79e420f1e2c64
[ "Apache-2.0" ]
1
2022-02-23T09:13:43.000Z
2022-02-23T09:13:43.000Z
mindspore/lite/src/delegate/tensorrt/op/allgather_tensorrt.cc
949144093/mindspore
c29d6bb764e233b427319cb89ba79e420f1e2c64
[ "Apache-2.0" ]
null
null
null
mindspore/lite/src/delegate/tensorrt/op/allgather_tensorrt.cc
949144093/mindspore
c29d6bb764e233b427319cb89ba79e420f1e2c64
[ "Apache-2.0" ]
null
null
null
/** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "src/delegate/tensorrt/op/allgather_tensorrt.h" #include <numeric> #include "NvInferRuntimeCommon.h" namespace mindspore::lite { REGISTER_TENSORRT_PLUGIN(AllGatherPluginCreater); template class TensorRTPluginCreater<AllGatherPlugin>; template <class T> nvinfer1::PluginFieldCollection TensorRTPluginCreater<T>::field_collection_{}; template <class T> std::vector<nvinfer1::PluginField> TensorRTPluginCreater<T>::fields_; int AllGatherTensorRT::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors, const std::vector<mindspore::MSTensor> &out_tensors) { #ifndef LITE_CUDA_DISTRIBUTION MS_LOG(ERROR) << "Unsupported package for gpu distribution feature, please recompile with MS_ENABLE_CUDA_DISTRIBUTION set to on."; return RET_ERROR; #else if (!IsShapeKnown()) { MS_LOG(ERROR) << "Unsupported input tensor unknown shape: " << op_name_; return RET_ERROR; } if (in_tensors.size() != 1) { MS_LOG(ERROR) << "invalid input tensor size: " << in_tensors.size(); return RET_ERROR; } if (out_tensors.size() != 1) { MS_LOG(ERROR) << "invalid output tensor size: " << out_tensors.size(); return RET_ERROR; } dynamic_shape_params_.support_hw_dynamic_ = false; return RET_OK; #endif } int AllGatherTensorRT::AddInnerOp(nvinfer1::INetworkDefinition *network) { nvinfer1::ITensor *inputTensors[] = {tensorrt_in_tensors_[0].trt_tensor_}; auto allgather_op = op_primitive_->value_as_AllGather(); if (allgather_op == nullptr) { MS_LOG(ERROR) << "convert failed for " << op_name_; return RET_ERROR; } int rank = GetGPUGroupSize(); auto plugin = std::make_shared<AllGatherPlugin>(op_name_, rank); MS_LOG(INFO) << op_name_ << " group size: " << rank << ", rank id: " << GetRankID(); nvinfer1::IPluginV2Layer *allgather_layer = network->addPluginV2(inputTensors, 1, *plugin); if (allgather_layer == nullptr) { MS_LOG(ERROR) << "create AllGather layer failed for: " << op_name_; return RET_ERROR; } nvinfer1::ITensor *allgather_out = allgather_layer->getOutput(0); allgather_layer->setName(op_name_.c_str()); allgather_out->setName((op_name_ + "_output").c_str()); this->AddInnerOutTensors( ITensorHelper{allgather_out, tensorrt_in_tensors_[0].format_, tensorrt_in_tensors_[0].same_format_}); this->layer_ = allgather_layer; return RET_OK; } // AllGatherPlugin int AllGatherPlugin::enqueue(const nvinfer1::PluginTensorDesc *inputDesc, const nvinfer1::PluginTensorDesc *outputDesc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) noexcept { MS_LOG(INFO) << "all gather run at rank id: " << GetRankID() << " stream: " << stream; nvinfer1::Dims input_dims = inputDesc[0].dims; int send_element_cnt = std::accumulate(input_dims.d, input_dims.d + input_dims.nbDims, 1, std::multiplies<int64_t>()); const void *input = inputs[0]; void *output = outputs[0]; auto ret = DistributionCollective::instance().AllGatherWrapper(input, output, send_element_cnt, inputDesc->type, stream, NCCL_WORLD_GROUP); 
if (ret != RET_OK) { MS_LOG(ERROR) << "AllGather nccl run failed for " << layer_name_; return ret; } return RET_OK; } nvinfer1::IPluginV2DynamicExt *AllGatherPlugin::clone() const noexcept { auto *plugin = new AllGatherPlugin(*this); plugin->setPluginNamespace(name_space_.c_str()); return plugin; } nvinfer1::DimsExprs AllGatherPlugin::getOutputDimensions(int outputIndex, const nvinfer1::DimsExprs *inputs, int nbInputs, nvinfer1::IExprBuilder &exprBuilder) noexcept { nvinfer1::DimsExprs out_dims{}; out_dims.nbDims = inputs->nbDims; auto rank_dim = exprBuilder.constant(rank_); out_dims.d[0] = exprBuilder.operation(nvinfer1::DimensionOperation::kPROD, *inputs->d[0], *rank_dim); for (int i = 1; i < inputs->nbDims; i++) { out_dims.d[i] = inputs->d[i]; } return out_dims; } REGISTER_TENSORRT_CREATOR(schema::PrimitiveType_AllGather, AllGatherTensorRT) } // namespace mindspore::lite
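The send-count computation in enqueue() above is just a product over the input tensor dimensions. A standalone restatement with made-up dimensions, keeping the accumulator in int64_t throughout (the original seeds std::accumulate with an int literal, which narrows the running product):

#include <cstdint>
#include <functional>
#include <numeric>

// Product of all tensor dimensions; the per-rank element count handed to NCCL.
int64_t ElementCount(const int64_t *dims, int nb_dims) {
  return std::accumulate(dims, dims + nb_dims, int64_t{1}, std::multiplies<int64_t>());
}
// Example: ElementCount of {2, 3, 4} is 24. After AllGather across N ranks the
// output holds 24 * N elements, matching getOutputDimensions(), which scales
// d[0] by rank_.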
42.333333
120
0.70576
httpsgithu
4fbd4fd29e2e81fd5b8e65f374e2602f9d35217e
9,645
cpp
C++
AVSCommon/Utils/src/LibcurlUtils/LibCurlHttpContentFetcher.cpp
merdahl/avs-device-sdk
2cc16d8cc472afc9b7a736a8c1169f12b71dd229
[ "Apache-2.0" ]
1
2022-01-09T21:26:04.000Z
2022-01-09T21:26:04.000Z
AVSCommon/Utils/src/LibcurlUtils/LibCurlHttpContentFetcher.cpp
justdoGIT/avs-device-sdk
2cc16d8cc472afc9b7a736a8c1169f12b71dd229
[ "Apache-2.0" ]
null
null
null
AVSCommon/Utils/src/LibcurlUtils/LibCurlHttpContentFetcher.cpp
justdoGIT/avs-device-sdk
2cc16d8cc472afc9b7a736a8c1169f12b71dd229
[ "Apache-2.0" ]
1
2018-10-12T07:58:44.000Z
2018-10-12T07:58:44.000Z
/* * LibCurlHttpContentFetcher.cpp * * Copyright 2016-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0/ * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include <AVSCommon/Utils/LibcurlUtils/CurlEasyHandleWrapper.h> #include <AVSCommon/Utils/LibcurlUtils/LibCurlHttpContentFetcher.h> #include <AVSCommon/Utils/Memory/Memory.h> #include <AVSCommon/Utils/SDS/InProcessSDS.h> namespace alexaClientSDK { namespace avsCommon { namespace utils { namespace libcurlUtils { /// String to identify log entries originating from this file. static const std::string TAG("LibCurlHttpContentFetcher"); /** * Create a LogEntry using this file's TAG and the specified event string. * * @param The event string for this @c LogEntry. */ #define LX(event) alexaClientSDK::avsCommon::utils::logger::LogEntry(TAG, event) size_t LibCurlHttpContentFetcher::headerCallback(char* data, size_t size, size_t nmemb, void* userData) { if (!userData) { ACSDK_ERROR(LX("headerCallback").d("reason", "nullUserDataPointer")); return 0; } std::string line(static_cast<const char*>(data), size * nmemb); if (line.find("HTTP") == 0) { // To find lines like: "HTTP/1.1 200 OK" std::istringstream iss(line); std::string httpVersion; long statusCode; iss >> httpVersion >> statusCode; LibCurlHttpContentFetcher* thisObject = static_cast<LibCurlHttpContentFetcher*>(userData); thisObject->m_lastStatusCode = statusCode; } else if (line.find("Content-Type") == 0) { // To find lines like: "Content-Type: audio/x-mpegurl; charset=utf-8" std::istringstream iss(line); std::string contentTypeBeginning; std::string contentType; iss >> contentTypeBeginning >> contentType; contentType.pop_back(); LibCurlHttpContentFetcher* thisObject = static_cast<LibCurlHttpContentFetcher*>(userData); thisObject->m_lastContentType = contentType; } return size * nmemb; } size_t LibCurlHttpContentFetcher::bodyCallback(char* data, size_t size, size_t nmemb, void* userData) { if (!userData) { ACSDK_ERROR(LX("bodyCallback").d("reason", "nullUserDataPointer")); return 0; } LibCurlHttpContentFetcher* thisObject = static_cast<LibCurlHttpContentFetcher*>(userData); if (!thisObject->m_bodyCallbackBegan) { thisObject->m_bodyCallbackBegan = true; thisObject->m_statusCodePromise.set_value(thisObject->m_lastStatusCode); thisObject->m_contentTypePromise.set_value(thisObject->m_lastContentType); } auto streamWriter = thisObject->m_streamWriter; if (streamWriter) { avsCommon::avs::attachment::AttachmentWriter::WriteStatus writeStatus = avsCommon::avs::attachment::AttachmentWriter::WriteStatus::OK; auto numBytesWritten = streamWriter->write(data, size * nmemb, &writeStatus); return numBytesWritten; } else { return 0; } } size_t LibCurlHttpContentFetcher::noopCallback(char* data, size_t size, size_t nmemb, void* userData) { return 0; } LibCurlHttpContentFetcher::LibCurlHttpContentFetcher(const std::string& url) : m_url{url}, m_bodyCallbackBegan{false}, m_lastStatusCode{0} { m_hasObjectBeenUsed.clear(); } std::unique_ptr<avsCommon::utils::HTTPContent> LibCurlHttpContentFetcher::getContent(FetchOptions fetchOption) { if 
(m_hasObjectBeenUsed.test_and_set()) { return nullptr; } if (!m_curlWrapper.setURL(m_url)) { ACSDK_ERROR(LX("getContentFailed").d("reason", "failedToSetUrl")); return nullptr; } auto curlReturnValue = curl_easy_setopt(m_curlWrapper.getCurlHandle(), CURLOPT_FOLLOWLOCATION, 1L); if (curlReturnValue != CURLE_OK) { ACSDK_ERROR(LX("getContentFailed").d("reason", "enableFollowRedirectsFailed")); return nullptr; } curlReturnValue = curl_easy_setopt(m_curlWrapper.getCurlHandle(), CURLOPT_AUTOREFERER, 1L); if (curlReturnValue != CURLE_OK) { ACSDK_ERROR(LX("getContentFailed").d("reason", "enableAutoReferralSettingToRedirectsFailed")); return nullptr; } // This enables the libcurl cookie engine, allowing it to send cookies curlReturnValue = curl_easy_setopt(m_curlWrapper.getCurlHandle(), CURLOPT_COOKIEFILE, ""); if (curlReturnValue != CURLE_OK) { ACSDK_ERROR(LX("getContentFailed").d("reason", "enableLibCurlCookieEngineFailed")); return nullptr; } auto httpStatusCodeFuture = m_statusCodePromise.get_future(); auto contentTypeFuture = m_contentTypePromise.get_future(); std::shared_ptr<avsCommon::avs::attachment::InProcessAttachment> stream = nullptr; switch (fetchOption) { case FetchOptions::CONTENT_TYPE: /* * Since this option only wants the content-type, I set a noop callback for parsing the body of the HTTP * response. For some webpages, it is required to set a body callback in order for the full webpage data * to render. */ curlReturnValue = curl_easy_setopt(m_curlWrapper.getCurlHandle(), CURLOPT_WRITEFUNCTION, noopCallback); if (curlReturnValue != CURLE_OK) { ACSDK_ERROR(LX("getContentFailed").d("reason", "failedToSetCurlCallback")); return nullptr; } m_thread = std::thread([this]() { long finalResponseCode = 0; char* contentType = nullptr; auto curlReturnValue = curl_easy_perform(m_curlWrapper.getCurlHandle()); if (curlReturnValue != CURLE_OK && curlReturnValue != CURLE_WRITE_ERROR) { ACSDK_ERROR(LX("curlEasyPerformFailed").d("error", curl_easy_strerror(curlReturnValue))); } curlReturnValue = curl_easy_getinfo(m_curlWrapper.getCurlHandle(), CURLINFO_RESPONSE_CODE, &finalResponseCode); if (curlReturnValue != CURLE_OK) { ACSDK_ERROR(LX("curlEasyGetInfoFailed").d("error", curl_easy_strerror(curlReturnValue))); } ACSDK_DEBUG9(LX("getContent").d("responseCode", finalResponseCode).sensitive("url", m_url)); m_statusCodePromise.set_value(finalResponseCode); curlReturnValue = curl_easy_getinfo(m_curlWrapper.getCurlHandle(), CURLINFO_CONTENT_TYPE, &contentType); if (curlReturnValue == CURLE_OK && contentType) { ACSDK_DEBUG9(LX("getContent").d("contentType", contentType).sensitive("url", m_url)); m_contentTypePromise.set_value(std::string(contentType)); } else { ACSDK_ERROR(LX("curlEasyGetInfoFailed").d("error", curl_easy_strerror(curlReturnValue))); ACSDK_ERROR(LX("getContent").d("contentType", "failedToGetContentType").sensitive("url", m_url)); m_contentTypePromise.set_value(""); } }); break; case FetchOptions::ENTIRE_BODY: // Using the url as the identifier for the attachment stream = std::make_shared<avsCommon::avs::attachment::InProcessAttachment>(m_url); m_streamWriter = stream->createWriter(); if (!m_streamWriter) { ACSDK_ERROR(LX("getContentFailed").d("reason", "failedToCreateWriter")); return nullptr; } if (!m_curlWrapper.setWriteCallback(bodyCallback, this)) { ACSDK_ERROR(LX("getContentFailed").d("reason", "failedToSetCurlBodyCallback")); return nullptr; } if (!m_curlWrapper.setHeaderCallback(headerCallback, this)) { ACSDK_ERROR(LX("getContentFailed").d("reason", "failedToSetCurlHeaderCallback")); 
return nullptr; } m_thread = std::thread([this]() { auto curlReturnValue = curl_easy_perform(m_curlWrapper.getCurlHandle()); if (curlReturnValue != CURLE_OK) { ACSDK_ERROR(LX("curlEasyPerformFailed").d("error", curl_easy_strerror(curlReturnValue))); } if (!m_bodyCallbackBegan) { m_statusCodePromise.set_value(m_lastStatusCode); m_contentTypePromise.set_value(m_lastContentType); } /* * Curl easy perform has finished and all data has been written. Closing writer so that readers know * when they have caught up and read everything. */ m_streamWriter->close(); }); break; default: return nullptr; } return avsCommon::utils::memory::make_unique<avsCommon::utils::HTTPContent>( avsCommon::utils::HTTPContent{std::move(httpStatusCodeFuture), std::move(contentTypeFuture), stream}); } LibCurlHttpContentFetcher::~LibCurlHttpContentFetcher() { if (m_thread.joinable()) { m_thread.join(); } } } // namespace libcurlUtils } // namespace utils } // namespace avsCommon } // namespace alexaClientSDK
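For readers unfamiliar with libcurl, the fetcher above is built on the standard write-callback pattern: register a function plus a user-data pointer, and libcurl invokes the function repeatedly as body bytes arrive. A minimal self-contained sketch of that pattern (this is not the SDK's API; the function names are placeholders):

#include <curl/curl.h>
#include <string>

// Append each chunk libcurl delivers to a std::string supplied via CURLOPT_WRITEDATA.
static size_t appendToString(char* data, size_t size, size_t nmemb, void* userData) {
    auto* out = static_cast<std::string*>(userData);
    out->append(data, size * nmemb);
    return size * nmemb;  // returning less than this signals an error to libcurl
}

bool fetchBody(const std::string& url, std::string& body) {
    CURL* handle = curl_easy_init();
    if (!handle) return false;
    curl_easy_setopt(handle, CURLOPT_URL, url.c_str());
    curl_easy_setopt(handle, CURLOPT_FOLLOWLOCATION, 1L);
    curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, appendToString);
    curl_easy_setopt(handle, CURLOPT_WRITEDATA, &body);
    CURLcode rc = curl_easy_perform(handle);
    curl_easy_cleanup(handle);
    return rc == CURLE_OK;
}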
45.928571
120
0.655677
merdahl
4fbd68a1b78c449f2e424f71ef756e62ead38c8b
4,317
cpp
C++
source/PyMaterialX/PyMaterialXCore/PyDefinition.cpp
nzanepro/MaterialX
9100ac81231d87f7fbf4dc32f7030867e466bc41
[ "BSD-3-Clause" ]
null
null
null
source/PyMaterialX/PyMaterialXCore/PyDefinition.cpp
nzanepro/MaterialX
9100ac81231d87f7fbf4dc32f7030867e466bc41
[ "BSD-3-Clause" ]
null
null
null
source/PyMaterialX/PyMaterialXCore/PyDefinition.cpp
nzanepro/MaterialX
9100ac81231d87f7fbf4dc32f7030867e466bc41
[ "BSD-3-Clause" ]
null
null
null
// // TM & (c) 2017 Lucasfilm Entertainment Company Ltd. and Lucasfilm Ltd. // All rights reserved. See LICENSE.txt for license. // #include <PyMaterialX/PyMaterialX.h> #include <MaterialXCore/Definition.h> #include <MaterialXCore/Material.h> namespace py = pybind11; namespace mx = MaterialX; void bindPyDefinition(py::module& mod) { py::class_<mx::NodeDef, mx::NodeDefPtr, mx::InterfaceElement>(mod, "NodeDef") .def("setNodeString", &mx::NodeDef::setNodeString) .def("hasNodeString", &mx::NodeDef::hasNodeString) .def("getNodeString", &mx::NodeDef::getNodeString) .def("setNodeGroup", &mx::NodeDef::setNodeGroup) .def("hasNodeGroup", &mx::NodeDef::hasNodeGroup) .def("getNodeGroup", &mx::NodeDef::getNodeGroup) .def("getImplementation", &mx::NodeDef::getImplementation) .def("getImplementation", &mx::NodeDef::getImplementation, py::arg("target") = mx::EMPTY_STRING, py::arg("language") = mx::EMPTY_STRING) .def("getInstantiatingShaderRefs", &mx::NodeDef::getInstantiatingShaderRefs) .def("isVersionCompatible", &mx::NodeDef::isVersionCompatible) .def_readonly_static("CATEGORY", &mx::NodeDef::CATEGORY) .def_readonly_static("NODE_ATTRIBUTE", &mx::NodeDef::NODE_ATTRIBUTE); py::class_<mx::Implementation, mx::ImplementationPtr, mx::InterfaceElement>(mod, "Implementation") .def("setFile", &mx::Implementation::setFile) .def("hasFile", &mx::Implementation::hasFile) .def("getFile", &mx::Implementation::getFile) .def("setFunction", &mx::Implementation::setFunction) .def("hasFunction", &mx::Implementation::hasFunction) .def("getFunction", &mx::Implementation::getFunction) .def("setLanguage", &mx::Implementation::setLanguage) .def("hasLanguage", &mx::Implementation::hasLanguage) .def("getLanguage", &mx::Implementation::getLanguage) .def("setNodeDef", &mx::Implementation::setNodeDef) .def("getNodeDef", &mx::Implementation::getNodeDef) .def_readonly_static("CATEGORY", &mx::Implementation::CATEGORY) .def_readonly_static("FILE_ATTRIBUTE", &mx::Implementation::FILE_ATTRIBUTE) .def_readonly_static("FUNCTION_ATTRIBUTE", &mx::Implementation::FUNCTION_ATTRIBUTE) .def_readonly_static("LANGUAGE_ATTRIBUTE", &mx::Implementation::LANGUAGE_ATTRIBUTE); py::class_<mx::TypeDef, mx::TypeDefPtr, mx::Element>(mod, "TypeDef") .def("setSemantic", &mx::TypeDef::setSemantic) .def("hasSemantic", &mx::TypeDef::hasSemantic) .def("getSemantic", &mx::TypeDef::getSemantic) .def("setContext", &mx::TypeDef::setContext) .def("hasContext", &mx::TypeDef::hasContext) .def("getContext", &mx::TypeDef::getContext) .def("addMember", &mx::TypeDef::addMember, py::arg("name") = mx::EMPTY_STRING) .def("getMember", &mx::TypeDef::getMember) .def("getMembers", &mx::TypeDef::getMembers) .def("removeMember", &mx::TypeDef::removeMember) .def_readonly_static("CATEGORY", &mx::TypeDef::CATEGORY) .def_readonly_static("SEMANTIC_ATTRIBUTE", &mx::TypeDef::SEMANTIC_ATTRIBUTE) .def_readonly_static("CONTEXT_ATTRIBUTE", &mx::TypeDef::CONTEXT_ATTRIBUTE); py::class_<mx::Member, mx::MemberPtr, mx::TypedElement>(mod, "Member") .def_readonly_static("CATEGORY", &mx::TypeDef::CATEGORY); py::class_<mx::Unit, mx::UnitPtr, mx::Element>(mod, "Unit") .def_readonly_static("CATEGORY", &mx::Unit::CATEGORY); py::class_<mx::UnitDef, mx::UnitDefPtr, mx::Element>(mod, "UnitDef") .def("setUnitType", &mx::UnitDef::hasUnitType) .def("hasUnitType", &mx::UnitDef::hasUnitType) .def("getUnitType", &mx::UnitDef::getUnitType) .def("addUnit", &mx::UnitDef::addUnit) .def("getUnit", &mx::UnitDef::getUnit) .def("getUnits", &mx::UnitDef::getUnits) .def_readonly_static("CATEGORY", &mx::UnitDef::CATEGORY) 
.def_readonly_static("UNITTYPE_ATTRIBUTE", &mx::UnitDef::UNITTYPE_ATTRIBUTE); py::class_<mx::UnitTypeDef, mx::UnitTypeDefPtr, mx::Element>(mod, "UnitTypeDef") .def("getUnitDefs", &mx::UnitTypeDef::getUnitDefs) .def_readonly_static("CATEGORY", &mx::UnitTypeDef::CATEGORY); }
50.197674
102
0.665508
nzanepro
4fbf226090e4518546a97c8a6309253c44196e1c
1,756
hpp
C++
lshkit/trunk/3rd-party/boost/boost/mpl/aux_/msvc_eti_base.hpp
wzj1695224/BinClone
3b6dedb9a1f08be6dbcdce8f3278351ef5530ed8
[ "Apache-2.0" ]
21
2015-05-22T09:22:16.000Z
2021-04-06T18:54:07.000Z
lshkit/trunk/3rd-party/boost/boost/mpl/aux_/msvc_eti_base.hpp
mrfarhadi/BinClone
035c20ab27ec00935c12ce54fe9c52bba4aaeff2
[ "Apache-2.0" ]
1
2020-05-21T08:43:19.000Z
2020-05-21T08:43:19.000Z
lshkit/trunk/3rd-party/boost/boost/mpl/aux_/msvc_eti_base.hpp
mrfarhadi/BinClone
035c20ab27ec00935c12ce54fe9c52bba4aaeff2
[ "Apache-2.0" ]
11
2015-09-08T20:56:14.000Z
2019-12-22T12:52:45.000Z
#ifndef BOOST_MPL_AUX_MSVC_ETI_BASE_HPP_INCLUDED
#define BOOST_MPL_AUX_MSVC_ETI_BASE_HPP_INCLUDED

// Copyright Aleksey Gurtovoy 2001-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.

// $Id: msvc_eti_base.hpp,v 1.2 2009/02/16 01:51:05 wdong-pku Exp $
// $Date: 2009/02/16 01:51:05 $
// $Revision: 1.2 $

#include <boost/mpl/aux_/is_msvc_eti_arg.hpp>
#include <boost/mpl/aux_/config/eti.hpp>
#include <boost/mpl/aux_/config/gcc.hpp>
#include <boost/mpl/aux_/config/workaround.hpp>

namespace boost { namespace mpl { namespace aux {

#if defined(BOOST_MPL_CFG_MSVC_70_ETI_BUG)

template< bool > struct msvc_eti_base_impl
{
    template< typename T > struct result_
        : T
    {
        typedef T type;
    };
};

template<> struct msvc_eti_base_impl<true>
{
    template< typename T > struct result_
    {
        typedef result_ type;
        typedef result_ first;
        typedef result_ second;
        typedef result_ tag;
        enum { value = 0 };
    };
};

template< typename T > struct msvc_eti_base
    : msvc_eti_base_impl< is_msvc_eti_arg<T>::value >
        ::template result_<T>
{
};

#else // !BOOST_MPL_CFG_MSVC_70_ETI_BUG

template< typename T > struct msvc_eti_base
    : T
{
#if BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, BOOST_TESTED_AT(0x0304))
    msvc_eti_base();
#endif
    typedef T type;
};

#endif

template<> struct msvc_eti_base<int>
{
    typedef msvc_eti_base type;
    typedef msvc_eti_base first;
    typedef msvc_eti_base second;
    typedef msvc_eti_base tag;
    enum { value = 0 };
};

}}}

#endif // BOOST_MPL_AUX_MSVC_ETI_BASE_HPP_INCLUDED
22.512821
67
0.702733
wzj1695224
4fc1fb870b57f09e99aab228be1843909616589a
5,030
cpp
C++
src/Widgets/ThemesSettingsPanel.cpp
huntermalm/Layers
1f2f6eabe3be8dfbc60d682ca47543f7807a0bbf
[ "MIT" ]
null
null
null
src/Widgets/ThemesSettingsPanel.cpp
huntermalm/Layers
1f2f6eabe3be8dfbc60d682ca47543f7807a0bbf
[ "MIT" ]
null
null
null
src/Widgets/ThemesSettingsPanel.cpp
huntermalm/Layers
1f2f6eabe3be8dfbc60d682ca47543f7807a0bbf
[ "MIT" ]
null
null
null
#include "../../include/AttributeWidgets.h" #include "../../include/Application.h" #include "../../include/Layouts.h" #include "../../include/SettingsPanels.h" using Layers::Button; using Layers::Combobox; using Layers::Theme; using Layers::ThemesSettingsPanel; ThemesSettingsPanel::ThemesSettingsPanel(QWidget* parent) : Widget(parent) { init_child_themeable_reference_list(); init_attributes(); set_icon(new Graphic(":/svgs/panel_icon.svg", QSize(20, 20))); set_name("themes_settings_panel"); set_proper_name("Themes Panel"); m_theme_label->set_name("theme_label"); m_theme_label->set_proper_name("\"Theme\" Label"); m_theme_label->set_font_size(15); m_theme_combobox->set_icon(new Graphic(":/svgs/combobox_icon.svg", QSize(21, 18))); m_theme_combobox->set_item_renaming_disabled(false); m_theme_combobox->set_name("theme_combobox"); m_theme_combobox->set_proper_name("Theme Combobox"); m_theme_combobox->set_font_size(15); connect(m_theme_combobox, SIGNAL(item_replaced(const QString&, const QString&)), layersApp, SLOT(rename_theme(const QString&, const QString&))); connect(m_theme_combobox, &Combobox::current_item_changed, [this] { layersApp->apply_theme(layersApp->themes()[m_theme_combobox->current_item()]); }); m_new_theme_button->set_name("new_theme_button"); m_new_theme_button->set_proper_name("New Theme Button"); m_customize_theme_button->set_name("customize_theme_button"); m_customize_theme_button->set_proper_name("Customize Theme Button"); m_delete_theme_button->set_name("delete_theme_button"); m_delete_theme_button->set_proper_name("Delete Theme Button"); m_theme_info_button->set_name("theme_info_button"); m_theme_info_button->set_proper_name("Theme Info Button"); m_theme_info_button->disable_graphic_hover_color(); m_separator_1->replace_all_attributes_with(m_control_separator); m_separator_1->setFixedSize(1, 30); m_separator_2->replace_all_attributes_with(m_control_separator); m_separator_2->setFixedSize(1, 30); m_spacer_1->setFixedWidth(12); m_spacer_2->setFixedWidth(12); m_control_separator->set_name("separator"); m_control_separator->set_proper_name("Separators"); m_control_separator->setFixedSize(1, 30); //m_control_separator->set_ACW_primary("border_awc", false); //m_control_separator->set_ACW_primary("hover_background_caw", false); //m_control_separator->set_ACW_primary("outline_caw", false); //m_control_separator->set_ACW_primary("corner_color_caw", false); //m_control_separator->set_ACW_primary("corner_radii_awc", false); setup_layout(); } void ThemesSettingsPanel::init_attributes() { a_fill.set_disabled(); m_spacer_1->a_fill.set_disabled(); m_spacer_2->a_fill.set_disabled(); m_theme_info_button->graphic()->svg()->a_use_common_hover_color.set_value(false); } void ThemesSettingsPanel::init_child_themeable_reference_list() { add_child_themeable_reference(m_theme_label); add_child_themeable_reference(m_theme_combobox); add_child_themeable_reference(m_new_theme_button); add_child_themeable_reference(m_customize_theme_button); add_child_themeable_reference(m_delete_theme_button); add_child_themeable_reference(m_theme_info_button); add_child_themeable_reference(m_control_separator); } void ThemesSettingsPanel::apply_theme(Theme& theme) { if (theme.is_custom()) show_custom_theme_buttons(); else show_custom_theme_buttons(false); Themeable::apply_theme(theme); } Button* ThemesSettingsPanel::customize_theme_button() const { return m_customize_theme_button; } Button* ThemesSettingsPanel::new_theme_button() const { return m_new_theme_button; } Combobox* ThemesSettingsPanel::theme_combobox() 
const { return m_theme_combobox; } void ThemesSettingsPanel::show_custom_theme_buttons(bool cond) { if (cond) { m_customize_theme_button->show(); m_delete_theme_button->show(); m_separator_2->show(); m_spacer_1->show(); m_spacer_2->show(); } else { m_customize_theme_button->hide(); m_delete_theme_button->hide(); m_separator_2->hide(); m_spacer_1->hide(); m_spacer_2->hide(); } } void ThemesSettingsPanel::setup_layout() { QHBoxLayout* theme_buttons_hbox = new QHBoxLayout; theme_buttons_hbox->setContentsMargins(0, 5, 0, 0); theme_buttons_hbox->setSpacing(0); theme_buttons_hbox->addWidget(m_new_theme_button); theme_buttons_hbox->addSpacing(12); theme_buttons_hbox->addWidget(m_separator_1); theme_buttons_hbox->addSpacing(12); theme_buttons_hbox->addWidget(m_customize_theme_button); theme_buttons_hbox->addWidget(m_delete_theme_button); theme_buttons_hbox->addWidget(m_spacer_1); theme_buttons_hbox->addWidget(m_separator_2); theme_buttons_hbox->addWidget(m_spacer_2); theme_buttons_hbox->addWidget(m_theme_info_button); theme_buttons_hbox->addStretch(); VerticalLayout* main_layout = new VerticalLayout; main_layout->setContentsMargins(32, 32, 0, 0); main_layout->addWidget(m_theme_label); main_layout->addWidget(m_theme_combobox); main_layout->addLayout(theme_buttons_hbox); main_layout->addStretch(); setLayout(main_layout); }
30.11976
84
0.79841
huntermalm
4fc32173d2cdfb2532a258a3d90c2c36ecacdf0f
1,809
inl
C++
dependencies/checkframebufferstatus/checkFramebufferStatus.inl
jaredhoberock/gotham
e3551cc355646530574d086d7cc2b82e41e8f798
[ "Apache-2.0" ]
6
2015-12-29T07:21:01.000Z
2020-05-29T10:47:38.000Z
dependencies/checkframebufferstatus/checkFramebufferStatus.inl
jaredhoberock/gotham
e3551cc355646530574d086d7cc2b82e41e8f798
[ "Apache-2.0" ]
null
null
null
dependencies/checkframebufferstatus/checkFramebufferStatus.inl
jaredhoberock/gotham
e3551cc355646530574d086d7cc2b82e41e8f798
[ "Apache-2.0" ]
null
null
null
/*! \file checkFramebufferStatus.inl
 *  \author Jared Hoberock
 *  \brief Inline file for checkFramebufferStatus.h.
 */

#include "checkFramebufferStatus.h"
#include <iostream>

void checkFramebufferStatus(const char *filename, const unsigned int lineNumber)
{
  GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
  if(status != GL_FRAMEBUFFER_COMPLETE_EXT)
  {
    std::cerr << filename << "(" << lineNumber << "): ";
    switch(status)
    {
      case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT:
      {
        std::cerr << "GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT" << std::endl;
        break;
      } // end case

      case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT:
      {
        std::cerr << "GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT" << std::endl;
        break;
      } // end case

      case GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT:
      {
        std::cerr << "GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT" << std::endl;
        break;
      } // end case

      case GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT:
      {
        std::cerr << "GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT" << std::endl;
        break;
      } // end case

      case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT:
      {
        std::cerr << "GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT" << std::endl;
        break;
      } // end case

      case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT:
      {
        std::cerr << "GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT" << std::endl;
        break;
      } // end case

      case GL_FRAMEBUFFER_UNSUPPORTED_EXT:
      {
        std::cerr << "GL_FRAMEBUFFER_UNSUPPORTED_EXT" << std::endl;
        break;
      } // end case
    } // end switch
  } // end if
} // end checkFramebufferStatus()
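A natural companion to the helper above (an assumption on my part, not taken from this file) is a macro that captures the call site automatically, so callers do not have to spell out __FILE__ and __LINE__ themselves:

// Hypothetical convenience macro around the helper above.
#define CHECK_FRAMEBUFFER_STATUS() checkFramebufferStatus(__FILE__, __LINE__)

// Usage after configuring an FBO:
//   glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
//   CHECK_FRAMEBUFFER_STATUS();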
28.714286
86
0.627418
jaredhoberock
4fc35df192304b723558bbf337928fba775972f7
7,310
cpp
C++
src/lib/operators/projection.cpp
dey4ss/hyrise
c304b9ced36044e303eb8a4d68a05fc7edc04819
[ "MIT" ]
null
null
null
src/lib/operators/projection.cpp
dey4ss/hyrise
c304b9ced36044e303eb8a4d68a05fc7edc04819
[ "MIT" ]
null
null
null
src/lib/operators/projection.cpp
dey4ss/hyrise
c304b9ced36044e303eb8a4d68a05fc7edc04819
[ "MIT" ]
null
null
null
#include "projection.hpp" #include <algorithm> #include <functional> #include <memory> #include <numeric> #include <string> #include <utility> #include <vector> #include "expression/evaluation/expression_evaluator.hpp" #include "expression/expression_utils.hpp" #include "expression/pqp_column_expression.hpp" #include "expression/value_expression.hpp" #include "storage/segment_iterate.hpp" #include "utils/assert.hpp" namespace opossum { Projection::Projection(const std::shared_ptr<const AbstractOperator>& in, const std::vector<std::shared_ptr<AbstractExpression>>& expressions) : AbstractReadOnlyOperator(OperatorType::Projection, in), expressions(expressions) {} const std::string& Projection::name() const { static const auto name = std::string{"Projection"}; return name; } std::shared_ptr<AbstractOperator> Projection::_on_deep_copy( const std::shared_ptr<AbstractOperator>& copied_input_left, const std::shared_ptr<AbstractOperator>& copied_input_right) const { return std::make_shared<Projection>(copied_input_left, expressions_deep_copy(expressions)); } void Projection::_on_set_parameters(const std::unordered_map<ParameterID, AllTypeVariant>& parameters) { expressions_set_parameters(expressions, parameters); } void Projection::_on_set_transaction_context(const std::weak_ptr<TransactionContext>& transaction_context) { expressions_set_transaction_context(expressions, transaction_context); } std::shared_ptr<const Table> Projection::_on_execute() { const auto& input_table = *input_table_left(); /** * If an expression is a PQPColumnExpression then it might be possible to forward the input column, if the * input TableType (References or Data) matches the output column type (ReferenceSegment or not). */ const auto only_projects_columns = std::all_of(expressions.begin(), expressions.end(), [&](const auto& expression) { return expression->type == ExpressionType::PQPColumn; }); const auto output_table_type = only_projects_columns ? input_table.type() : TableType::Data; const auto forward_columns = input_table.type() == output_table_type; const auto uncorrelated_subquery_results = ExpressionEvaluator::populate_uncorrelated_subquery_results_cache(expressions); auto column_is_nullable = std::vector<bool>(expressions.size(), false); /** * Perform the projection */ auto output_chunk_segments = std::vector<Segments>(input_table.chunk_count()); const auto chunk_count_input_table = input_table.chunk_count(); for (auto chunk_id = ChunkID{0}; chunk_id < chunk_count_input_table; ++chunk_id) { const auto input_chunk = input_table.get_chunk(chunk_id); Assert(input_chunk, "Physically deleted chunk should not reach this point, see get_chunk / #1686."); auto output_segments = Segments{expressions.size()}; ExpressionEvaluator evaluator(input_table_left(), chunk_id, uncorrelated_subquery_results); for (auto column_id = ColumnID{0}; column_id < expressions.size(); ++column_id) { const auto& expression = expressions[column_id]; // Forward input column if possible if (expression->type == ExpressionType::PQPColumn && forward_columns) { const auto pqp_column_expression = std::static_pointer_cast<PQPColumnExpression>(expression); output_segments[column_id] = input_chunk->get_segment(pqp_column_expression->column_id); column_is_nullable[column_id] = column_is_nullable[column_id] || input_table.column_is_nullable(pqp_column_expression->column_id); } else if (expression->type == ExpressionType::PQPColumn && !forward_columns) { // The current column will be returned without any logical modifications. 
As other columns do get modified (and // returned as a ValueSegment), all segments (including this one) need to become ValueSegments. This segment is // not yet a ValueSegment (otherwise forward_columns would be true); thus we need to materialize it. const auto pqp_column_expression = std::static_pointer_cast<PQPColumnExpression>(expression); const auto segment = input_chunk->get_segment(pqp_column_expression->column_id); resolve_data_type(expression->data_type(), [&](const auto data_type) { using ColumnDataType = typename decltype(data_type)::type; bool has_null = false; auto values = pmr_concurrent_vector<ColumnDataType>(segment->size()); auto null_values = pmr_concurrent_vector<bool>(segment->size()); auto chunk_offset = ChunkOffset{0}; segment_iterate<ColumnDataType>(*segment, [&](const auto& position) { if (position.is_null()) { has_null = true; null_values[chunk_offset] = true; } else { values[chunk_offset] = position.value(); } ++chunk_offset; }); auto value_segment = std::shared_ptr<ValueSegment<ColumnDataType>>{}; if (has_null) { value_segment = std::make_shared<ValueSegment<ColumnDataType>>(std::move(values), std::move(null_values)); } else { value_segment = std::make_shared<ValueSegment<ColumnDataType>>(std::move(values)); } output_segments[column_id] = std::move(value_segment); column_is_nullable[column_id] = has_null; }); } else { auto output_segment = evaluator.evaluate_expression_to_segment(*expression); column_is_nullable[column_id] = column_is_nullable[column_id] || output_segment->is_nullable(); output_segments[column_id] = std::move(output_segment); } } output_chunk_segments[chunk_id] = std::move(output_segments); } /** * Determine the TableColumnDefinitions and build the output table */ TableColumnDefinitions column_definitions; for (auto column_id = ColumnID{0}; column_id < expressions.size(); ++column_id) { column_definitions.emplace_back(expressions[column_id]->as_column_name(), expressions[column_id]->data_type(), column_is_nullable[column_id]); } auto output_chunks = std::vector<std::shared_ptr<Chunk>>{chunk_count_input_table}; for (auto chunk_id = ChunkID{0}; chunk_id < chunk_count_input_table; ++chunk_id) { const auto input_chunk = input_table.get_chunk(chunk_id); Assert(input_chunk, "Physically deleted chunk should not reach this point, see get_chunk / #1686."); // The output chunk contains all rows that are in the stored chunk, including invalid rows. We forward this // information so that following operators (currently, the Validate operator) can use it for optimizations. output_chunks[chunk_id] = std::make_shared<Chunk>(std::move(output_chunk_segments[chunk_id]), input_chunk->mvcc_data()); output_chunks[chunk_id]->increase_invalid_row_count(input_chunk->invalid_row_count()); } return std::make_shared<Table>(column_definitions, output_table_type, std::move(output_chunks), input_table.uses_mvcc()); } // returns the singleton dummy table used for literal projections std::shared_ptr<Table> Projection::dummy_table() { static auto shared_dummy = std::make_shared<DummyTable>(); return shared_dummy; } } // namespace opossum
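The forwarding decision near the top of _on_execute() can be restated in isolation. This is a sketch only, reusing the types and includes from the file above; note that when every expression is a plain column reference the computed output type equals the input type, so the check reduces to that single condition:

// Illustrative restatement of the forwarding rule (not a hyrise API).
bool can_forward_columns(const std::vector<std::shared_ptr<AbstractExpression>>& exprs,
                         const TableType input_type) {
  const auto only_columns = std::all_of(exprs.begin(), exprs.end(), [](const auto& e) {
    return e->type == ExpressionType::PQPColumn;
  });
  const auto output_type = only_columns ? input_type : TableType::Data;
  return input_type == output_type;  // true iff segments can be passed through
}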
45.123457
119
0.723119
dey4ss
4fc3d34ec7353af5e033a67beba373d497b54d53
790
cpp
C++
Part3/eleven.cpp
praseedpai/BasicCppCourse
fb6c2300dfb48961a5f647a51eb9c2032bfb45ea
[ "MIT" ]
null
null
null
Part3/eleven.cpp
praseedpai/BasicCppCourse
fb6c2300dfb48961a5f647a51eb9c2032bfb45ea
[ "MIT" ]
null
null
null
Part3/eleven.cpp
praseedpai/BasicCppCourse
fb6c2300dfb48961a5f647a51eb9c2032bfb45ea
[ "MIT" ]
1
2021-05-03T16:09:46.000Z
2021-05-03T16:09:46.000Z
///////////////////////////////////
// eleven.cpp
// A C/C++ program to demonstrate pointers
// g++ -oeleven.exe eleven.cpp
// cl /Feeleven.exe eleven.cpp

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main( int argc , char **argv , char **envp ){
    int arr[] = { 0,2,4,5,6};
    int arr_count = sizeof(arr)/sizeof(arr[0]);

    //--- allocate dynamic memory
    int *parr = (int *) malloc(sizeof(int) * arr_count);

    //----- if failed, print failure and exit
    if ( parr == 0 ) {
        printf("Memory Allocation Failure\n");
        return 0;
    }

    memcpy(parr,arr,arr_count*sizeof(int));

    int *temp = parr;
    for(int i=0; i<arr_count; ++i ) {
        // cast to void* so the %p conversion is well defined
        printf("%p\t%d\n", (void *)temp, *temp );
        temp++;
    }

    free(parr); // free memory from the heap
    return 0;
}
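For contrast with the manual malloc/memcpy/free sequence above, here is the same copy-and-print program in idiomatic C++ (an added illustration, not part of the course file): the container owns its storage, so there is no allocation failure branch and no explicit free.

#include <cstdio>
#include <vector>

int main() {
    std::vector<int> arr = {0, 2, 4, 5, 6};
    std::vector<int> copy(arr);  // deep copy; storage is released automatically
    for (const int& v : copy) {
        std::printf("%p\t%d\n", static_cast<const void*>(&v), v);
    }
    return 0;
}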
29.259259
71
0.556962
praseedpai
4fc608719fe9791c29165f119f800fc6dd70d9e5
934
hpp
C++
src/icebox/icebox/modules.hpp
Fimbure/icebox-1
0b81992a53e1b410955ca89bdb6f8169d6f2da86
[ "MIT" ]
521
2019-03-29T15:44:08.000Z
2022-03-22T09:46:19.000Z
src/icebox/icebox/modules.hpp
Fimbure/icebox-1
0b81992a53e1b410955ca89bdb6f8169d6f2da86
[ "MIT" ]
30
2019-06-04T17:00:49.000Z
2021-09-08T20:44:19.000Z
src/icebox/icebox/modules.hpp
Fimbure/icebox-1
0b81992a53e1b410955ca89bdb6f8169d6f2da86
[ "MIT" ]
99
2019-03-29T16:04:13.000Z
2022-03-28T16:59:34.000Z
#pragma once

#include "enums.hpp"
#include "types.hpp"

#include <functional>

namespace core { struct Core; }

namespace modules
{
    using on_mod_fn   = std::function<walk_e(mod_t)>;
    using on_event_fn = std::function<void(mod_t)>;

    bool             list          (core::Core&, proc_t proc, on_mod_fn on_mod);
    opt<std::string> name          (core::Core&, proc_t proc, mod_t mod);
    bool             is_equal      (core::Core&, proc_t proc, mod_t mod, flags_t flags, std::string_view name);
    opt<span_t>      span          (core::Core&, proc_t proc, mod_t mod);
    opt<mod_t>       find          (core::Core&, proc_t proc, uint64_t addr);
    opt<mod_t>       find_name     (core::Core& core, proc_t proc, std::string_view name, flags_t flags);
    opt<bpid_t>      listen_create (core::Core& core, proc_t proc, flags_t flags, const on_event_fn& on_load);
} // namespace modules
40.608696
116
0.603854
Fimbure
4fc61a4d4dc3ccabe1c900a55fa626daacaf4b48
1,805
cpp
C++
test/testNet/testHttp/testHttpAcceptLanguage/testAcceptLanguageParse.cpp
wangsun1983/Obotcha
2464e53599305703f5150df72bf73579a39d8ef4
[ "MIT" ]
27
2019-04-27T00:51:22.000Z
2022-03-30T04:05:44.000Z
test/testNet/testHttp/testHttpAcceptLanguage/testAcceptLanguageParse.cpp
wangsun1983/Obotcha
2464e53599305703f5150df72bf73579a39d8ef4
[ "MIT" ]
9
2020-05-03T12:17:50.000Z
2021-10-15T02:18:47.000Z
test/testNet/testHttp/testHttpAcceptLanguage/testAcceptLanguageParse.cpp
wangsun1983/Obotcha
2464e53599305703f5150df72bf73579a39d8ef4
[ "MIT" ]
1
2019-04-16T01:45:36.000Z
2019-04-16T01:45:36.000Z
#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>

#include "Thread.hpp"
#include "Object.hpp"
#include "HttpMime.hpp"
#include "HttpAcceptLanguage.hpp"
#include "Math.hpp"

using namespace obotcha;

void testLanguageParse() {
    while(1) {
        HttpAcceptLanguage encoding1 = createHttpAcceptLanguage();
        encoding1->import("fr-CH,fr;q=0.9,en;q=0.8,de;q=0.7,*;q=0.5");
        auto languages = encoding1->getLanguages();
        if(languages->size() != 5) {
            printf("---[HttpAcceptLanguage test Parse case1] [FAILED]--- \n");
            break;
        }

        if(!languages->get(0)->lang->equals("fr-CH")
            || !languages->get(1)->lang->equals("fr")
            || !languages->get(2)->lang->equals("en")
            || !languages->get(3)->lang->equals("de")
            || !languages->get(4)->lang->equals("*")) {
            printf("---[HttpAcceptLanguage test Parse case2] [FAILED]--- \n");
            break;
        }

        if(st(Math)::compareFloat(languages->get(1)->weight,0.9) != st(Math)::AlmostEqual) {
            printf("---[HttpAcceptLanguage test Parse case3] [FAILED]---,weight is %f \n",languages->get(1)->weight);
            break;
        }

        if(st(Math)::compareFloat(languages->get(2)->weight,0.8) != st(Math)::AlmostEqual) {
            printf("---[HttpAcceptLanguage test Parse case4] [FAILED]--- \n");
            break;
        }

        if(st(Math)::compareFloat(languages->get(3)->weight,0.7) != st(Math)::AlmostEqual) {
            printf("---[HttpAcceptLanguage test Parse case5] [FAILED]--- \n");
            break;
        }

        if(st(Math)::compareFloat(languages->get(4)->weight,0.5) != st(Math)::AlmostEqual) {
            printf("---[HttpAcceptLanguage test Parse case6] [FAILED]--- \n");
            break;
        }

        break;
    }

    printf("---[HttpAcceptLanguage test Parse case100] [OK]--- \n");
}
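The q-value syntax the test exercises ("lang;q=weight", with the weight defaulting to 1.0 when q= is absent) is straightforward to parse by hand. A standalone sketch using only the standard library (this is not the Obotcha API; the function name is a placeholder):

#include <cstdio>
#include <string>

// Parse one Accept-Language entry such as "fr;q=0.9" or "fr-CH".
void parse_entry(const std::string& entry) {
    const auto semi = entry.find(';');
    const std::string lang = entry.substr(0, semi);  // whole string when no ';'
    double weight = 1.0;                             // default when q= is absent
    if (semi != std::string::npos) {
        const auto eq = entry.find('=', semi);
        if (eq != std::string::npos) weight = std::stod(entry.substr(eq + 1));
    }
    std::printf("%s -> %.1f\n", lang.c_str(), weight);
}
// parse_entry("fr;q=0.9") prints "fr -> 0.9"; parse_entry("fr-CH") prints "fr-CH -> 1.0".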
31.12069
112
0.584488
wangsun1983
4fc70deb3e9d24c0023e94283629a9144144c227
15,279
cpp
C++
ChipsEninge/02_Script/DirectX/Effects/Effects.cpp
jerrypoiu/DX11_ChipsEngine2021
a558fb0013259a380d68b66142fc48b575208980
[ "MIT" ]
1
2021-01-25T11:38:21.000Z
2021-01-25T11:38:21.000Z
ChipsEninge/02_Script/DirectX/Effects/Effects.cpp
jerrypoiu/ChipsEngine
a558fb0013259a380d68b66142fc48b575208980
[ "MIT" ]
null
null
null
ChipsEninge/02_Script/DirectX/Effects/Effects.cpp
jerrypoiu/ChipsEngine
a558fb0013259a380d68b66142fc48b575208980
[ "MIT" ]
null
null
null
#include "DirectX/Effects/Effects.h" #pragma region Effect Effect::Effect(ID3D11Device* device, const std::wstring& filename) : mFX(0) { std::ifstream fin(filename, std::ios::binary); fin.seekg(0, std::ios_base::end); int size = (int)fin.tellg(); fin.seekg(0, std::ios_base::beg); std::vector<char> compiledShader(size); fin.read(&compiledShader[0], size); fin.close(); D3DX11CreateEffectFromMemory(&compiledShader[0], size,0, device, &mFX); } Effect::~Effect() { SAFE_RELEASE(mFX); } #pragma endregion #pragma region StandardShaderEffect StandardShaderEffect::StandardShaderEffect(ID3D11Device* device, const std::wstring& filename) : Effect(device, filename) { DebugTech = mFX->GetTechniqueByName("Debug"); StandardTech = mFX->GetTechniqueByName("Standard"); CartoonTech = mFX->GetTechniqueByName("Cartoon"); DepthTech = mFX->GetTechniqueByName("Depth"); UseDiffuse = mFX->GetVariableByName("gUseDiffuse")->AsScalar(); UseAlphaClip = mFX->GetVariableByName("gUseAlphaClip")->AsScalar(); UseNormal = mFX->GetVariableByName("gUseNormal")->AsScalar(); UseSpecular = mFX->GetVariableByName("gUseSpecular")->AsScalar(); UseReflect = mFX->GetVariableByName("gUseReflect")->AsScalar(); UseCartoon = mFX->GetVariableByName("gUseCartoon")->AsScalar(); UseFog = mFX->GetVariableByName("gUseFog")->AsScalar(); UseShadow = mFX->GetVariableByName("gUseShadow")->AsScalar(); UseRimLight = mFX->GetVariableByName("gRimLight")->AsScalar(); UseSkinning = mFX->GetVariableByName("gSkinning")->AsScalar(); WorldViewProj = mFX->GetVariableByName("gWorldViewProj")->AsMatrix(); World = mFX->GetVariableByName("gWorld")->AsMatrix(); WorldView = mFX->GetVariableByName("gWorldView")->AsMatrix(); WorldInvTranspose = mFX->GetVariableByName("gWorldInvTranspose")->AsMatrix(); ShadowTransform = mFX->GetVariableByName("gShadowTransform")->AsMatrix(); TexTransform = mFX->GetVariableByName("gTexTransform")->AsMatrix(); BoneTransforms = mFX->GetVariableByName("gBoneTransforms")->AsMatrix(); NearFar = mFX->GetVariableByName("gNearFar")->AsVector(); EyePosW = mFX->GetVariableByName("gEyePosW")->AsVector(); FogColor = mFX->GetVariableByName("gFogColor")->AsVector(); FogStart = mFX->GetVariableByName("gFogStart")->AsScalar(); FogRange = mFX->GetVariableByName("gFogRange")->AsScalar(); DirLights = mFX->GetVariableByName("gDirLights"); PointLights = mFX->GetVariableByName("gPointLights"); SpotLights = mFX->GetVariableByName("gSpotLights"); MatNum = mFX->GetVariableByName("gMatNum")->AsScalar(); PointLightCount = mFX->GetVariableByName("gPointLightCount")->AsScalar(); SpotLightCount = mFX->GetVariableByName("gSpotLightCount")->AsScalar(); Mat = mFX->GetVariableByName("gMaterial"); DiffuseMap = mFX->GetVariableByName("gDiffuseMap")->AsShaderResource(); SpecularMap = mFX->GetVariableByName("gSpecularMap")->AsShaderResource(); CubeMap = mFX->GetVariableByName("gCubeMap")->AsShaderResource(); NormalMap = mFX->GetVariableByName("gNormalMap")->AsShaderResource(); ShadowMap = mFX->GetVariableByName("gShadowMap")->AsShaderResource(); } StandardShaderEffect::~StandardShaderEffect() { } #pragma endregion #pragma region BuildShadowMapEffect BuildShadowMapEffect::BuildShadowMapEffect(ID3D11Device* device, const std::wstring& filename) : Effect(device, filename) { UseSkinning = mFX->GetVariableByName("gSkinning")->AsScalar(); BoneTransforms = mFX->GetVariableByName("gBoneTransforms")->AsMatrix(); BuildShadowMapTech = mFX->GetTechniqueByName("BuildShadowMapTech"); ViewProj = mFX->GetVariableByName("gViewProj")->AsMatrix(); WorldViewProj = 
mFX->GetVariableByName("gWorldViewProj")->AsMatrix(); World = mFX->GetVariableByName("gWorld")->AsMatrix(); WorldInvTranspose = mFX->GetVariableByName("gWorldInvTranspose")->AsMatrix(); TexTransform = mFX->GetVariableByName("gTexTransform")->AsMatrix(); EyePosW = mFX->GetVariableByName("gEyePosW")->AsVector(); HeightScale = mFX->GetVariableByName("gHeightScale")->AsScalar(); } BuildShadowMapEffect::~BuildShadowMapEffect() { } #pragma endregion #pragma region SkyEffect SkyEffect::SkyEffect(ID3D11Device* device, const std::wstring& filename) : Effect(device, filename) { SkyTech = mFX->GetTechniqueByName("SkyTech"); WorldViewProj = mFX->GetVariableByName("gWorldViewProj")->AsMatrix(); WorldView = mFX->GetVariableByName("gWorldView")->AsMatrix(); NearFar = mFX->GetVariableByName("gNearFar")->AsVector(); CubeMap = mFX->GetVariableByName("gCubeMap")->AsShaderResource(); } SkyEffect::~SkyEffect() { } #pragma endregion #pragma region ParticleEffect ParticleEffect::ParticleEffect(ID3D11Device* device, const std::wstring& filename) : Effect(device, filename) { StreamOutTech = mFX->GetTechniqueByName("StreamOutTech"); DrawTech = mFX->GetTechniqueByName("DrawTech"); NearFar = mFX->GetVariableByName("gNearFar")->AsVector(); View = mFX->GetVariableByName("gView")->AsMatrix(); ViewProj = mFX->GetVariableByName("gViewProj")->AsMatrix(); GameTime = mFX->GetVariableByName("gGameTime")->AsScalar(); TimeStep = mFX->GetVariableByName("gTimeStep")->AsScalar(); EmitSpread = mFX->GetVariableByName("gEmitSpread")->AsScalar(); CreateIntervalTime = mFX->GetVariableByName("gCreateIntervalTime")->AsScalar(); DeleteTime = mFX->GetVariableByName("gDeleteTime")->AsScalar(); FadeTime = mFX->GetVariableByName("gFadeTime")->AsScalar(); RandomizePosition = mFX->GetVariableByName("gRandomizePosition")->AsScalar(); EyePosW = mFX->GetVariableByName("gEyePosW")->AsVector(); EmitPosW = mFX->GetVariableByName("gEmitPosW")->AsVector(); EmitDirW = mFX->GetVariableByName("gEmitDirW")->AsVector(); EmitColor = mFX->GetVariableByName("gEmitColor")->AsVector(); EmitSizeW = mFX->GetVariableByName("gEmitSizeW")->AsVector(); EmitMove = mFX->GetVariableByName("gAccelW")->AsVector(); TexArray = mFX->GetVariableByName("gTexArray")->AsShaderResource(); RandomTex = mFX->GetVariableByName("gRandomTex")->AsShaderResource(); } ParticleEffect::~ParticleEffect() { } #pragma endregion #pragma region PostProcessingEffect PostProcessingEffect::PostProcessingEffect(ID3D11Device* device, const std::wstring& filename) : Effect(device, filename) { PostProcessingTech = mFX->GetTechniqueByName("PostProcessing"); DownSamplingTech = mFX->GetTechniqueByName("DownSampling"); SSAOTech = mFX->GetTechniqueByName("ScreenSpaceAmbientOcclusion"); RayMarchingTech = mFX->GetTechniqueByName("RayMarching"); CameraRotMat = mFX->GetVariableByName("gCameraRotMat")->AsMatrix(); Proj = mFX->GetVariableByName("gProj")->AsMatrix(); View = mFX->GetVariableByName("gView")->AsMatrix(); EyePosW = mFX->GetVariableByName("gEyePosW")->AsVector(); Resolution = mFX->GetVariableByName("gResolution")->AsVector(); NearFar = mFX->GetVariableByName("gNearFar")->AsVector(); DirLights = mFX->GetVariableByName("gDirLights"); PointLights = mFX->GetVariableByName("gPointLights"); SpotLights = mFX->GetVariableByName("gSpotLights"); PointLightCount = mFX->GetVariableByName("gPointLightCount")->AsScalar(); SpotLightCount = mFX->GetVariableByName("gSpotLightCount")->AsScalar(); LutSize = mFX->GetVariableByName("gLutSize")->AsScalar(); LutCoordinateInverse = 
mFX->GetVariableByName("gLutCoordinateInverse")->AsScalar(); TotalTime = mFX->GetVariableByName("gTotalTime")->AsScalar(); StartFadeInTime = mFX->GetVariableByName("gStartFadeInTime")->AsScalar(); StartFadeOutTime = mFX->GetVariableByName("gStartFadeOutTime")->AsScalar(); Mat = mFX->GetVariableByName("gMaterial"); DownsampledScreenTexture = mFX->GetVariableByName("gDownsampledScreenTexture")->AsShaderResource(); SSAOTexture = mFX->GetVariableByName("gSSAOTexture")->AsShaderResource(); RayMarchingTexture = mFX->GetVariableByName("gRayMarchingTexture")->AsShaderResource(); ScreenTexture = mFX->GetVariableByName("gScreenTexture")->AsShaderResource(); PreScreenTexture = mFX->GetVariableByName("gPreScreenTexture")->AsShaderResource(); DepthTexture = mFX->GetVariableByName("gDepthTexture")->AsShaderResource(); LutTexture = mFX->GetVariableByName("gLutTexture")->AsShaderResource(); GrayNoiseTexture = mFX->GetVariableByName("gGrayNoiseTexture")->AsShaderResource(); CubeMap = mFX->GetVariableByName("gSkyBox")->AsShaderResource(); //RayMarching RayMarching = mFX->GetVariableByName("gRaymarching")->AsScalar(); //Ambient Occlusion & Dark SSAO = mFX->GetVariableByName("gSSAO")->AsScalar(); SSAOradius = mFX->GetVariableByName("gSsaoRadius")->AsScalar(); SSAObias = mFX->GetVariableByName("gSsaoBias")->AsScalar(); SSAOscale = mFX->GetVariableByName("gSsaoScale")->AsScalar(); SSAOamount = mFX->GetVariableByName("gSsaoAmount")->AsScalar(); Dark = mFX->GetVariableByName("gDark")->AsScalar(); DarkAmount = mFX->GetVariableByName("gDarkAmount")->AsScalar(); //Depth of field DepthOfField = mFX->GetVariableByName("gDepthOfField")->AsScalar(); DepthOfFieldAmount = mFX->GetVariableByName("gDepthOfFieldAmount")->AsScalar(); DepthOfFieldFocalDepth = mFX->GetVariableByName("gDepthOfFieldFocalDepth")->AsScalar(); DepthOfFieldFallOffStart = mFX->GetVariableByName("gDepthOfFieldFallOffStart")->AsScalar(); DepthOfFieldFallOffEnd = mFX->GetVariableByName("gDepthOfFieldFallOffEnd")->AsScalar(); //Blur MotionBlur = mFX->GetVariableByName("gMotionBlur")->AsScalar(); MotionBlurReferenceDistance = mFX->GetVariableByName("gMdotionBlurReferenceDistance")->AsScalar(); MotionBlurAmount = mFX->GetVariableByName("gMdotionBlurAmount")->AsScalar(); GaussianBlur = mFX->GetVariableByName("gGaussianBlur")->AsScalar(); GaussianBlurAmount = mFX->GetVariableByName("gGaussianBlurAmount")->AsScalar(); BoxBlur = mFX->GetVariableByName("gBoxBlur")->AsScalar(); BoxBlurAmount = mFX->GetVariableByName("gBoxBlurAmount")->AsScalar(); VerticalBlur = mFX->GetVariableByName("gVerticalBlur")->AsScalar(); VerticalBlurAmount = mFX->GetVariableByName("gVerticalBlurAmount")->AsScalar(); HorizontalBlur = mFX->GetVariableByName("gHorizontalBlur")->AsScalar(); HorizontalBlurAmount = mFX->GetVariableByName("gHorizontalBlurAmount")->AsScalar(); //Lens Distortion Rain = mFX->GetVariableByName("gRain")->AsScalar(); RainSpeed = mFX->GetVariableByName("gRainSpeed")->AsScalar(); RainAmount = mFX->GetVariableByName("gRainAmount")->AsScalar(); Blood = mFX->GetVariableByName("gBlood")->AsScalar(); BloodSpeed = mFX->GetVariableByName("gBloodSpeed")->AsScalar(); BloodAmount = mFX->GetVariableByName("gBloodAmount")->AsScalar(); //Chromatic Averration ChromaticAberration = mFX->GetVariableByName("gChromaticAberration")->AsScalar(); ChromaticAberrationAmount = mFX->GetVariableByName("gChromaticAberrationAmount")->AsScalar(); //Bloom Bloom = mFX->GetVariableByName("gBloom")->AsScalar(); OverBloom = mFX->GetVariableByName("gOverBloom")->AsScalar(); BloomAmount = 
mFX->GetVariableByName("gBloomAmount")->AsScalar(); //Vignette Vignette = mFX->GetVariableByName("gVignetting")->AsScalar(); VignetteAmount = mFX->GetVariableByName("gVignettingAmount")->AsScalar(); //Color Grading Gamma = mFX->GetVariableByName("gGamma")->AsScalar(); GammaAmount = mFX->GetVariableByName("gGammaAmount")->AsScalar(); Contrast = mFX->GetVariableByName("gContrast")->AsScalar(); ContrastAmount = mFX->GetVariableByName("gContrastAmount")->AsScalar(); Bright = mFX->GetVariableByName("gBright")->AsScalar(); BrightAmount = mFX->GetVariableByName("gBrightAmount")->AsScalar(); Saturate = mFX->GetVariableByName("gSaturate")->AsScalar(); SaturateAmount = mFX->GetVariableByName("gSaturateAmount")->AsScalar(); SmoothStep = mFX->GetVariableByName("gSmoothStep")->AsScalar(); SmoothStepMin = mFX->GetVariableByName("gSmoothStepMin")->AsScalar(); SmoothStepMax = mFX->GetVariableByName("gSmoothStepMax")->AsScalar(); Tint = mFX->GetVariableByName("gTint")->AsScalar(); TintColor = mFX->GetVariableByName("gTintColor")->AsVector(); Sepia = mFX->GetVariableByName("gSepia")->AsScalar(); GrayScale = mFX->GetVariableByName("gGrayScale")->AsScalar(); Inverse = mFX->GetVariableByName("gInverse")->AsScalar(); Lut = mFX->GetVariableByName("gLUT")->AsScalar(); LutAmount = mFX->GetVariableByName("gLutAmount")->AsScalar(); TonemapACES = mFX->GetVariableByName("gTonemapACES")->AsScalar(); TonemapUnreal = mFX->GetVariableByName("gTonemapUnreal")->AsScalar(); TonemapUnrealExposure = mFX->GetVariableByName("gTonemapUnrealExposure")->AsScalar(); TonemapReinhard = mFX->GetVariableByName("gTonemapReinhard")->AsScalar(); //Film Effect OldGame = mFX->GetVariableByName("gOldGame")->AsScalar(); OldGameAmount = mFX->GetVariableByName("gOldGameMosaicAmount")->AsScalar(); OldGameLevel = mFX->GetVariableByName("gOldGameColorLevel")->AsScalar(); OldGameMaxColor = mFX->GetVariableByName("gOldGameMaxColor")->AsVector(); OldGameMinColor = mFX->GetVariableByName("gOldGameMinColor")->AsVector(); Edge = mFX->GetVariableByName("gEdge")->AsScalar(); EdgeIndex = mFX->GetVariableByName("gEdgeIndex")->AsScalar(); Embossed = mFX->GetVariableByName("gEmbossed")->AsScalar(); Flicker = mFX->GetVariableByName("gFlicker")->AsScalar(); FlickerAmount = mFX->GetVariableByName("gFlickerAmount")->AsScalar(); FlickerFrequence = mFX->GetVariableByName("gFlickerFrequence")->AsScalar(); Cartoon = mFX->GetVariableByName("gCartoon")->AsScalar(); Mosaic = mFX->GetVariableByName("gMosaic")->AsScalar(); MosaicAmount = mFX->GetVariableByName("gMosaicAmount")->AsScalar(); VerticalLines = mFX->GetVariableByName("gVerticalLines")->AsScalar(); VerticalLinesAmount = mFX->GetVariableByName("gVerticalLinesAmount")->AsScalar(); HorizontalLines = mFX->GetVariableByName("gHorizontalLines")->AsScalar(); HorizontalLinesAmount = mFX->GetVariableByName("gHorizontalLinesAmount")->AsScalar(); Noise = mFX->GetVariableByName("gNoise")->AsScalar(); NoiseFiness = mFX->GetVariableByName("gNoiseFiness")->AsScalar(); NoiseBlend = mFX->GetVariableByName("gNoiseBlend")->AsScalar(); CinematicLine = mFX->GetVariableByName("gCinematicLine")->AsScalar(); CinematicLineAmount = mFX->GetVariableByName("gCinematicLineAmount")->AsScalar(); //Fade In, Out FadeIn = mFX->GetVariableByName("gFadeIn")->AsScalar(); FadeInSpeed = mFX->GetVariableByName("gFadeInSpeed")->AsScalar(); FadeOut = mFX->GetVariableByName("gFadeOut")->AsScalar(); FadeOutSpeed = mFX->GetVariableByName("gFadeOutSpeed")->AsScalar(); } PostProcessingEffect::~PostProcessingEffect() { } #pragma endregion #pragma region 
Effects StandardShaderEffect* Effects::StandardShaderFX = 0; SkyEffect* Effects::SkyFX = 0; ParticleEffect* Effects::ParticleFX = 0; BuildShadowMapEffect* Effects::BuildShadowMapFX = 0; PostProcessingEffect* Effects::PostProcessingFX = 0; void Effects::InitAll(ID3D11Device* device) { StandardShaderFX = new StandardShaderEffect(device, TEXT("01_Asset/Fx/StandardShader.fxo")); SkyFX = new SkyEffect(device, TEXT("01_Asset/Fx/Sky.fxo")); ParticleFX = new ParticleEffect(device, TEXT("01_Asset/Fx/Particle.fxo")); BuildShadowMapFX = new BuildShadowMapEffect(device, TEXT("01_Asset/Fx/BuildShadowMap.fxo")); PostProcessingFX = new PostProcessingEffect(device, TEXT("01_Asset/Fx/PostProcessing.fxo")); } void Effects::DestroyAll() { SAFE_DELETE(StandardShaderFX); SAFE_DELETE(SkyFX); SAFE_DELETE(ParticleFX); SAFE_DELETE(BuildShadowMapFX); SAFE_DELETE(PostProcessingFX); } #pragma endregion
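// A minimal usage sketch, not part of the original file: it assumes a valid
// ID3D11Device* (here `device`, hypothetical) and shows the intended lifetime of
// the static effect wrappers above -- build them once at startup, free them before
// the device is released.
#if 0  // illustration only
Effects::InitAll(device);   // loads the five compiled *.fxo effects listed above
// ... render frames; per-frame code sets variables, e.g. via
//     Effects::PostProcessingFX->GaussianBlurAmount ...
Effects::DestroyAll();      // SAFE_DELETEs every static effect pointer
#endif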
48.504762
100
0.764579
jerrypoiu
4fc85e6f8b442318826ffe1ce34fbd11be8ab96c
14,962
cc
C++
src/connectivity/wlan/drivers/third_party/broadcom/brcmfmac/btcoex.cc
zhangpf/fuchsia-rs
903568f28ddf45f09157ead36d61b50322c9cf49
[ "BSD-3-Clause" ]
null
null
null
src/connectivity/wlan/drivers/third_party/broadcom/brcmfmac/btcoex.cc
zhangpf/fuchsia-rs
903568f28ddf45f09157ead36d61b50322c9cf49
[ "BSD-3-Clause" ]
5
2020-09-06T09:02:06.000Z
2022-03-02T04:44:22.000Z
src/connectivity/wlan/drivers/third_party/broadcom/brcmfmac/btcoex.cc
ZVNexus/fuchsia
c5610ad15208208c98693618a79c705af935270c
[ "BSD-3-Clause" ]
null
null
null
/*
 * Copyright (c) 2013 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "btcoex.h"

#include "brcmu_utils.h"
#include "brcmu_wifi.h"
#include "cfg80211.h"
#include "core.h"
#include "debug.h"
#include "defs.h"
#include "device.h"
#include "fwil.h"
#include "fwil_types.h"
#include "linuxisms.h"
#include "p2p.h"
#include "workqueue.h"

/* T1 start SCO/eSCO priority suppression */
#define BRCMF_BTCOEX_OPPR_WIN_TIME_MSEC (2000)

/* BT registers values during DHCP */
#define BRCMF_BT_DHCP_REG50 0x8022
#define BRCMF_BT_DHCP_REG51 0
#define BRCMF_BT_DHCP_REG64 0
#define BRCMF_BT_DHCP_REG65 0
#define BRCMF_BT_DHCP_REG71 0
#define BRCMF_BT_DHCP_REG66 0x2710
#define BRCMF_BT_DHCP_REG41 0x33
#define BRCMF_BT_DHCP_REG68 0x190

/* number of samples for SCO detection */
#define BRCMF_BT_SCO_SAMPLES 12

/**
 * enum brcmf_btcoex_state - BT coex DHCP state machine states
 * @BRCMF_BT_DHCP_IDLE: DHCP is idle
 * @BRCMF_BT_DHCP_START: DHCP started, wait before
 *	boosting wifi priority
 * @BRCMF_BT_DHCP_OPPR_WIN: graceful DHCP opportunity ended,
 *	boost wifi priority
 * @BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT: wifi priority boost end,
 *	restore defaults
 */
enum brcmf_btcoex_state {
  BRCMF_BT_DHCP_IDLE,
  BRCMF_BT_DHCP_START,
  BRCMF_BT_DHCP_OPPR_WIN,
  BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT
};

/**
 * struct brcmf_btcoex_info - BT coex related information
 * @vif: interface for which request was done.
 * @timer: timer for DHCP state machine
 * @timeout: configured timeout.
 * @timer_on: DHCP timer active
 * @dhcp_done: DHCP finished before T1/T2 timer expiration
 * @bt_state: DHCP state machine state
 * @work: DHCP state machine work
 * @cfg: driver private data for cfg80211 interface
 * @reg66: saved value of btc_params 66
 * @reg41: saved value of btc_params 41
 * @reg68: saved value of btc_params 68
 * @saved_regs_part1: flag indicating regs 66,41,68
 *	have been saved
 * @reg50: saved value of btc_params 50
 * @reg51: saved value of btc_params 51
 * @reg64: saved value of btc_params 64
 * @reg65: saved value of btc_params 65
 * @reg71: saved value of btc_params 71
 * @saved_regs_part2: flag indicating regs 50,51,64,65,71
 *	have been saved
 */
struct brcmf_btcoex_info {
  struct brcmf_cfg80211_vif* vif;
  brcmf_timer_info_t timer;
  uint16_t timeout;
  bool timer_on;
  bool dhcp_done;
  enum brcmf_btcoex_state bt_state;
  struct work_struct work;
  struct brcmf_cfg80211_info* cfg;
  uint32_t reg66;
  uint32_t reg41;
  uint32_t reg68;
  bool saved_regs_part1;
  uint32_t reg50;
  uint32_t reg51;
  uint32_t reg64;
  uint32_t reg65;
  uint32_t reg71;
  bool saved_regs_part2;
};

/**
 * brcmf_btcoex_params_write() - write btc_params firmware variable
 * @ifp: interface
 * @addr: btc_params register number
 * @data: data to write
 */
static zx_status_t brcmf_btcoex_params_write(struct brcmf_if* ifp, uint32_t addr, uint32_t data) {
  struct {
    uint32_t addr;
    uint32_t data;
  } reg_write;

  reg_write.addr = addr;
  reg_write.data = data;
  return brcmf_fil_iovar_data_set(ifp, "btc_params", &reg_write, sizeof(reg_write), nullptr);
}

/**
 * brcmf_btcoex_params_read() - read btc_params firmware variable
 * @ifp: interface
 * @addr: btc_params register number
 * @data: read data
 */
static zx_status_t brcmf_btcoex_params_read(struct brcmf_if* ifp, uint32_t addr, uint32_t* data) {
  *data = addr;
  return brcmf_fil_iovar_int_get(ifp, "btc_params", data, nullptr);
}

/**
 * brcmf_btcoex_boost_wifi() - control BT SCO/eSCO parameters
 * @btci: BT coex info
 * @trump_sco:
 *	true - set SCO/eSCO parameters for compatibility
 *	during DHCP window
 *	false - restore saved parameter values
 *
 * Enhanced BT COEX settings for eSCO compatibility during DHCP window
 */
static void brcmf_btcoex_boost_wifi(struct brcmf_btcoex_info* btci, bool trump_sco) {
  struct brcmf_if* ifp = brcmf_get_ifp(btci->cfg->pub, 0);

  if (trump_sco && !btci->saved_regs_part2) {
    /* this should reduce eSCO aggressive
     * retransmit w/o breaking it */

    /* save current */
    BRCMF_DBG(INFO, "new SCO/eSCO coex algo {save & override}\n");
    brcmf_btcoex_params_read(ifp, 50, &btci->reg50);
    brcmf_btcoex_params_read(ifp, 51, &btci->reg51);
    brcmf_btcoex_params_read(ifp, 64, &btci->reg64);
    brcmf_btcoex_params_read(ifp, 65, &btci->reg65);
    brcmf_btcoex_params_read(ifp, 71, &btci->reg71);
    btci->saved_regs_part2 = true;
    BRCMF_DBG(INFO, "saved bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n", btci->reg50,
              btci->reg51, btci->reg64, btci->reg65, btci->reg71);

    /* pacify the eSco */
    brcmf_btcoex_params_write(ifp, 50, BRCMF_BT_DHCP_REG50);
    brcmf_btcoex_params_write(ifp, 51, BRCMF_BT_DHCP_REG51);
    brcmf_btcoex_params_write(ifp, 64, BRCMF_BT_DHCP_REG64);
    brcmf_btcoex_params_write(ifp, 65, BRCMF_BT_DHCP_REG65);
    brcmf_btcoex_params_write(ifp, 71, BRCMF_BT_DHCP_REG71);
  } else if (btci->saved_regs_part2) {
    /* restore previously saved bt params */
    BRCMF_DBG(INFO, "Do new SCO/eSCO coex algo {restore}\n");
    brcmf_btcoex_params_write(ifp, 50, btci->reg50);
    brcmf_btcoex_params_write(ifp, 51, btci->reg51);
    brcmf_btcoex_params_write(ifp, 64, btci->reg64);
    brcmf_btcoex_params_write(ifp, 65, btci->reg65);
    brcmf_btcoex_params_write(ifp, 71, btci->reg71);
    BRCMF_DBG(INFO, "restored bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n", btci->reg50,
              btci->reg51, btci->reg64, btci->reg65, btci->reg71);
    btci->saved_regs_part2 = false;
  } else {
    BRCMF_DBG(INFO, "attempted to restore not saved BTCOEX params\n");
  }
}

/**
 * brcmf_btcoex_is_sco_active() - check if SCO/eSCO is active
 * @ifp: interface
 *
 * return: true if SCO/eSCO session is active
 */
static bool brcmf_btcoex_is_sco_active(struct brcmf_if* ifp) {
  int ioc_res = 0;
  bool res = false;
  int sco_id_cnt = 0;
  uint32_t param27;
  int i;

  for (i = 0; i < BRCMF_BT_SCO_SAMPLES; i++) {
    ioc_res = brcmf_btcoex_params_read(ifp, 27, &param27);
    if (ioc_res < 0) {
      BRCMF_ERR("ioc read btc params error\n");
      break;
    }

    BRCMF_DBG(INFO, "sample[%d], btc_params 27:%x\n", i, param27);

    if ((param27 & 0x6) == 2) { /* count both sco & esco */
      sco_id_cnt++;
    }

    if (sco_id_cnt > 2) {
      BRCMF_DBG(INFO, "sco/esco detected, pkt id_cnt:%d samples:%d\n", sco_id_cnt, i);
      res = true;
      break;
    }
  }
  BRCMF_DBG(TRACE, "exit: result=%d\n", res);
  return res;
}

/**
 * brcmf_btcoex_save_part1() - save first step parameters.
 */
static void brcmf_btcoex_save_part1(struct brcmf_btcoex_info* btci) {
  struct brcmf_if* ifp = btci->vif->ifp;

  if (!btci->saved_regs_part1) {
    /* Retrieve and save original reg value */
    brcmf_btcoex_params_read(ifp, 66, &btci->reg66);
    brcmf_btcoex_params_read(ifp, 41, &btci->reg41);
    brcmf_btcoex_params_read(ifp, 68, &btci->reg68);
    btci->saved_regs_part1 = true;
    BRCMF_DBG(INFO, "saved btc_params regs (66,41,68) 0x%x 0x%x 0x%x\n", btci->reg66, btci->reg41,
              btci->reg68);
  }
}

/**
 * brcmf_btcoex_restore_part1() - restore first step parameters.
 */
static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info* btci) {
  struct brcmf_if* ifp;

  if (btci->saved_regs_part1) {
    btci->saved_regs_part1 = false;
    ifp = btci->vif->ifp;
    brcmf_btcoex_params_write(ifp, 66, btci->reg66);
    brcmf_btcoex_params_write(ifp, 41, btci->reg41);
    brcmf_btcoex_params_write(ifp, 68, btci->reg68);
    BRCMF_DBG(INFO, "restored btc_params regs {66,41,68} 0x%x 0x%x 0x%x\n", btci->reg66,
              btci->reg41, btci->reg68);
  }
}

/**
 * brcmf_btcoex_timerfunc() - BT coex timer callback
 */
static void brcmf_btcoex_timerfunc(void* data) {
  pthread_mutex_lock(&irq_callback_lock);

  struct brcmf_btcoex_info* bt_local = static_cast<decltype(bt_local)>(data);
  BRCMF_DBG(TRACE, "enter\n");

  bt_local->timer_on = false;
  workqueue_schedule_default(&bt_local->work);

  pthread_mutex_unlock(&irq_callback_lock);
}

/**
 * brcmf_btcoex_handler() - BT coex state machine work handler
 * @work: work
 */
static void brcmf_btcoex_handler(struct work_struct* work) {
  struct brcmf_btcoex_info* btci;
  btci = containerof(work, struct brcmf_btcoex_info, work);
  if (btci->timer_on) {
    btci->timer_on = false;
    brcmf_timer_stop(&btci->timer);
  }

  switch (btci->bt_state) {
    case BRCMF_BT_DHCP_START:
      /* DHCP started provide OPPORTUNITY window
         to get DHCP address */
      BRCMF_DBG(INFO, "DHCP started\n");
      btci->bt_state = BRCMF_BT_DHCP_OPPR_WIN;
      if (btci->timeout < BRCMF_BTCOEX_OPPR_WIN_TIME_MSEC) {
        // TODO(cphoenix): Was btci->timer.expires which wasn't set anywhere
        brcmf_timer_set(&btci->timer, ZX_MSEC(btci->timeout));
      } else {
        btci->timeout -= BRCMF_BTCOEX_OPPR_WIN_TIME_MSEC;
        brcmf_timer_set(&btci->timer, ZX_MSEC(BRCMF_BTCOEX_OPPR_WIN_TIME_MSEC));
      }
      btci->timer_on = true;
      break;

    case BRCMF_BT_DHCP_OPPR_WIN:
      if (btci->dhcp_done) {
        BRCMF_DBG(INFO, "DHCP done before T1 expiration\n");
        goto idle;
      }

      /* DHCP is not over yet, start lowering BT priority */
      BRCMF_DBG(INFO, "DHCP T1:%d expired\n", BRCMF_BTCOEX_OPPR_WIN_TIME_MSEC);
      brcmf_btcoex_boost_wifi(btci, true);
      btci->bt_state = BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT;
      brcmf_timer_set(&btci->timer, ZX_MSEC(btci->timeout));
      btci->timer_on = true;
      break;

    case BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT:
      if (btci->dhcp_done) {
        BRCMF_DBG(INFO, "DHCP done before T2 expiration\n");
      } else {
        BRCMF_DBG(INFO, "DHCP T2:%d expired\n", btci->timeout);
      }
      goto idle;

    default:
      BRCMF_ERR("invalid state=%d !!!\n", btci->bt_state);
      goto idle;
  }

  return;

idle:
  btci->bt_state = BRCMF_BT_DHCP_IDLE;
  btci->timer_on = false;
  brcmf_btcoex_boost_wifi(btci, false);
  cfg80211_crit_proto_stopped(&btci->vif->wdev);
  brcmf_btcoex_restore_part1(btci);
  btci->vif = NULL;
}

/**
 * brcmf_btcoex_attach() - initialize BT coex data
 * @cfg: driver private cfg80211 data
 *
 * return: 0 on success
 */
zx_status_t brcmf_btcoex_attach(struct brcmf_cfg80211_info* cfg) {
  struct brcmf_btcoex_info* btci = NULL;
  BRCMF_DBG(TRACE, "enter\n");

  btci = static_cast<decltype(btci)>(malloc(sizeof(struct brcmf_btcoex_info)));
  if (!btci) {
    return ZX_ERR_NO_MEMORY;
  }

  btci->bt_state = BRCMF_BT_DHCP_IDLE;

  /* Set up timer for BT */
  btci->timer_on = false;
  btci->timeout = BRCMF_BTCOEX_OPPR_WIN_TIME_MSEC;
  brcmf_timer_init(&btci->timer, brcmf_btcoex_timerfunc, btci);
  btci->cfg = cfg;
  btci->saved_regs_part1 = false;
  btci->saved_regs_part2 = false;

  workqueue_init_work(&btci->work, brcmf_btcoex_handler);

  cfg->btcoex = btci;
  return ZX_OK;
}

/**
 * brcmf_btcoex_detach - clean BT coex data
 * @cfg: driver private cfg80211 data
 */
void brcmf_btcoex_detach(struct brcmf_cfg80211_info* cfg) {
  BRCMF_DBG(TRACE, "enter\n");

  if (!cfg->btcoex) {
    return;
  }

  if (cfg->btcoex->timer_on) {
    cfg->btcoex->timer_on = false;
    brcmf_timer_stop(&cfg->btcoex->timer);
  }

  workqueue_cancel_work(&cfg->btcoex->work);

  brcmf_btcoex_boost_wifi(cfg->btcoex, false);
  brcmf_btcoex_restore_part1(cfg->btcoex);

  free(cfg->btcoex);
  cfg->btcoex = NULL;
}

static void brcmf_btcoex_dhcp_start(struct brcmf_btcoex_info* btci) {
  struct brcmf_if* ifp = btci->vif->ifp;

  brcmf_btcoex_save_part1(btci);
  /* set new regs values */
  brcmf_btcoex_params_write(ifp, 66, BRCMF_BT_DHCP_REG66);
  brcmf_btcoex_params_write(ifp, 41, BRCMF_BT_DHCP_REG41);
  brcmf_btcoex_params_write(ifp, 68, BRCMF_BT_DHCP_REG68);
  btci->dhcp_done = false;
  btci->bt_state = BRCMF_BT_DHCP_START;
  workqueue_schedule_default(&btci->work);
  BRCMF_DBG(TRACE, "enable BT DHCP Timer\n");
}

static void brcmf_btcoex_dhcp_end(struct brcmf_btcoex_info* btci) {
  /* Stop any bt timer because DHCP session is done */
  btci->dhcp_done = true;
  if (btci->timer_on) {
    BRCMF_DBG(INFO, "disable BT DHCP Timer\n");
    btci->timer_on = false;
    brcmf_timer_stop(&btci->timer);

    /* schedule worker if transition to IDLE is needed */
    if (btci->bt_state != BRCMF_BT_DHCP_IDLE) {
      BRCMF_DBG(INFO, "bt_state:%d\n", btci->bt_state);
      workqueue_schedule_default(&btci->work);
    }
  } else {
    /* Restore original values */
    brcmf_btcoex_restore_part1(btci);
  }
}

/**
 * brcmf_btcoex_set_mode - set BT coex mode
 * @vif: interface for which the mode is set
 * @mode: Wifi-Bluetooth coexistence mode
 * @duration: DHCP window duration in milliseconds
 *
 * return: 0 on success
 */
zx_status_t brcmf_btcoex_set_mode(struct brcmf_cfg80211_vif* vif, enum brcmf_btcoex_mode mode,
                                  uint16_t duration) {
  struct brcmf_cfg80211_info* cfg = wiphy_to_cfg(vif->wdev.wiphy);
  struct brcmf_btcoex_info* btci = cfg->btcoex;
  struct brcmf_if* ifp = brcmf_get_ifp(cfg->pub, 0);

  switch (mode) {
    case BRCMF_BTCOEX_DISABLED:
      BRCMF_DBG(INFO, "DHCP session starts\n");
      if (btci->bt_state != BRCMF_BT_DHCP_IDLE) {
        return ZX_ERR_UNAVAILABLE;
      }
      /* Start BT timer only for SCO connection */
      if (brcmf_btcoex_is_sco_active(ifp)) {
        btci->timeout = duration;
        btci->vif = vif;
        brcmf_btcoex_dhcp_start(btci);
      }
      break;

    case BRCMF_BTCOEX_ENABLED:
      BRCMF_DBG(INFO, "DHCP session ends\n");
      if (btci->bt_state != BRCMF_BT_DHCP_IDLE && vif == btci->vif) {
        brcmf_btcoex_dhcp_end(btci);
      }
      break;
    default:
      BRCMF_DBG(INFO, "Unknown mode, ignored\n");
  }
  return ZX_OK;
}
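// A minimal usage sketch, not part of the original file: a caller (for instance
// the cfg80211 critical-protocol path) brackets a DHCP exchange with the two
// modes above. `vif` is a hypothetical, already-initialized brcmf_cfg80211_vif*.
#if 0  // illustration only
void example_dhcp_window(struct brcmf_cfg80211_vif* vif) {
  /* suppress BT SCO/eSCO priority for up to 5000 ms while DHCP runs */
  brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_DISABLED, 5000);
  /* ... run the DHCP transaction ... */
  brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_ENABLED, 0);  /* ends the window early */
}
#endif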
31.699153
99
0.670499
zhangpf
4fcca642dc4aeab95c4edaad254336a279d46d27
5,675
cpp
C++
src/render/EnvironmentMapPass.cpp
Kuranes/KickstartRT_demo
6de7453ca42e46db180f8bead7ba23f9e8936b69
[ "MIT" ]
83
2021-07-19T13:55:33.000Z
2022-03-29T16:00:57.000Z
src/render/EnvironmentMapPass.cpp
CompileException/donut
bc400a8c2c9db9c3c5ed16190dc108e75722b503
[ "MIT" ]
2
2021-11-04T06:41:28.000Z
2021-11-30T08:25:28.000Z
src/render/EnvironmentMapPass.cpp
CompileException/donut
bc400a8c2c9db9c3c5ed16190dc108e75722b503
[ "MIT" ]
10
2021-07-19T15:03:58.000Z
2022-01-10T07:15:35.000Z
/*
 * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <donut/render/EnvironmentMapPass.h>
#include <donut/engine/FramebufferFactory.h>
#include <donut/engine/ShaderFactory.h>
#include <donut/engine/CommonRenderPasses.h>
#include <donut/engine/View.h>
#include <donut/core/math/math.h>

using namespace donut::math;

#include <donut/shaders/sky_cb.h>

using namespace donut::engine;
using namespace donut::render;

EnvironmentMapPass::EnvironmentMapPass(
    nvrhi::IDevice* device,
    std::shared_ptr<ShaderFactory> shaderFactory,
    std::shared_ptr<CommonRenderPasses> commonPasses,
    std::shared_ptr<FramebufferFactory> framebufferFactory,
    const ICompositeView& compositeView,
    nvrhi::ITexture* environmentMap)
    : m_CommonPasses(commonPasses)
    , m_FramebufferFactory(framebufferFactory)
{
    nvrhi::TextureDimension envMapDimension = environmentMap->getDesc().dimension;
    bool isCubeMap = (envMapDimension == nvrhi::TextureDimension::TextureCube) ||
                     (envMapDimension == nvrhi::TextureDimension::TextureCubeArray);

    std::vector<engine::ShaderMacro> PSMacros;
    PSMacros.push_back(engine::ShaderMacro("LATLONG_TEXTURE", isCubeMap ? "0" : "1"));

    m_PixelShader = shaderFactory->CreateShader("donut/passes/environment_map_ps.hlsl", "main", &PSMacros, nvrhi::ShaderType::Pixel);

    nvrhi::BufferDesc constantBufferDesc;
    constantBufferDesc.byteSize = sizeof(SkyConstants);
    constantBufferDesc.debugName = "SkyConstants";
    constantBufferDesc.isConstantBuffer = true;
    constantBufferDesc.isVolatile = true;
    constantBufferDesc.maxVersions = engine::c_MaxRenderPassConstantBufferVersions;
    m_SkyCB = device->createBuffer(constantBufferDesc);

    const IView* sampleView = compositeView.GetChildView(ViewType::PLANAR, 0);
    nvrhi::IFramebuffer* sampleFramebuffer = m_FramebufferFactory->GetFramebuffer(*sampleView);

    {
        nvrhi::BindingLayoutDesc layoutDesc;
        layoutDesc.visibility = nvrhi::ShaderType::Pixel;
        layoutDesc.bindings = {
            nvrhi::BindingLayoutItem::VolatileConstantBuffer(0),
            nvrhi::BindingLayoutItem::Texture_SRV(0),
            nvrhi::BindingLayoutItem::Sampler(0)
        };
        m_RenderBindingLayout = device->createBindingLayout(layoutDesc);

        nvrhi::BindingSetDesc bindingSetDesc;
        bindingSetDesc.bindings = {
            nvrhi::BindingSetItem::ConstantBuffer(0, m_SkyCB),
            nvrhi::BindingSetItem::Texture_SRV(0, environmentMap),
            nvrhi::BindingSetItem::Sampler(0, commonPasses->m_LinearWrapSampler)
        };
        m_RenderBindingSet = device->createBindingSet(bindingSetDesc, m_RenderBindingLayout);

        nvrhi::GraphicsPipelineDesc pipelineDesc;
        pipelineDesc.primType = nvrhi::PrimitiveType::TriangleStrip;
        pipelineDesc.VS = sampleView->IsReverseDepth() ? m_CommonPasses->m_FullscreenVS : m_CommonPasses->m_FullscreenAtOneVS;
        pipelineDesc.PS = m_PixelShader;
        pipelineDesc.bindingLayouts = { m_RenderBindingLayout };
        pipelineDesc.renderState.rasterState.setCullNone();
        pipelineDesc.renderState.depthStencilState
            .enableDepthTest()
            .disableDepthWrite()
            .disableStencil()
            .setDepthFunc(sampleView->IsReverseDepth()
                ? nvrhi::ComparisonFunc::GreaterOrEqual
                : nvrhi::ComparisonFunc::LessOrEqual);

        m_RenderPso = device->createGraphicsPipeline(pipelineDesc, sampleFramebuffer);
    }
}

void EnvironmentMapPass::Render(
    nvrhi::ICommandList* commandList,
    const ICompositeView& compositeView)
{
    commandList->beginMarker("Environment Map");

    for (uint viewIndex = 0; viewIndex < compositeView.GetNumChildViews(ViewType::PLANAR); viewIndex++)
    {
        const IView* view = compositeView.GetChildView(ViewType::PLANAR, viewIndex);

        nvrhi::GraphicsState state;
        state.pipeline = m_RenderPso;
        state.framebuffer = m_FramebufferFactory->GetFramebuffer(*view);
        state.bindings = { m_RenderBindingSet };
        state.viewport = view->GetViewportState();

        SkyConstants skyConstants = {};
        skyConstants.matClipToTranslatedWorld = view->GetInverseViewProjectionMatrix() * affineToHomogeneous(translation(-view->GetViewOrigin()));
        commandList->writeBuffer(m_SkyCB, &skyConstants, sizeof(skyConstants));

        commandList->setGraphicsState(state);

        nvrhi::DrawArguments args;
        args.instanceCount = 1;
        args.vertexCount = 4;
        commandList->draw(args);
    }

    commandList->endMarker();
}
42.350746
146
0.728458
Kuranes
4fcddf10c2a59215adebec21f2dfd072d169b17d
2,256
cpp
C++
src/base/ShaderUniform.cpp
kostrykin/Carna
099783bb7f8a6f52fcc8ccd4666e491cf0aa864c
[ "BSD-3-Clause" ]
null
null
null
src/base/ShaderUniform.cpp
kostrykin/Carna
099783bb7f8a6f52fcc8ccd4666e491cf0aa864c
[ "BSD-3-Clause" ]
null
null
null
src/base/ShaderUniform.cpp
kostrykin/Carna
099783bb7f8a6f52fcc8ccd4666e491cf0aa864c
[ "BSD-3-Clause" ]
3
2015-07-23T12:10:14.000Z
2021-06-08T16:07:05.000Z
/*
 *  Copyright (C) 2010 - 2015 Leonid Kostrykin
 *
 *  Chair of Medical Engineering (mediTEC)
 *  RWTH Aachen University
 *  Pauwelsstr. 20
 *  52074 Aachen
 *  Germany
 *
 */

#include <Carna/base/glew.h>
#include <Carna/base/ShaderUniform.h>

namespace Carna
{

namespace base
{

// ----------------------------------------------------------------------------------
// ShaderUniformBase
// ----------------------------------------------------------------------------------

ShaderUniformBase::ShaderUniformBase( const std::string& name )
    : name( name )
{
}

ShaderUniformBase::~ShaderUniformBase()
{
}

int ShaderUniformBase::location( const ShaderProgram& shader ) const
{
    const GLint location = glGetUniformLocation( shader.id, name.c_str() );
    return location;
}

bool ShaderUniformBase::upload() const
{
    GLContext& glc = GLContext::current();
    const int loc = location( glc.shader() );
    if( loc != NULL_UNIFORM_LOCATION )
    {
        uploadTo( loc );
        return true;
    }
    else
    {
        return false;
    }
}

// ----------------------------------------------------------------------------------
// uploadUniform
// ----------------------------------------------------------------------------------

void uploadUniform( int location, const int value )
{
    glUniform1i( location, value );
}

void uploadUniform( int location, const unsigned int value )
{
    glUniform1ui( location, value );
}

void uploadUniform( int location, const float value )
{
    glUniform1f( location, value );
}

void uploadUniform( int location, const math::Vector2f& value )
{
    glUniform2f( location, value.x(), value.y() );
}

void uploadUniform( int location, const math::Vector3f& value )
{
    glUniform3f( location, value.x(), value.y(), value.z() );
}

void uploadUniform( int location, const math::Vector4f& value )
{
    glUniform4f( location, value.x(), value.y(), value.z(), value.w() );
}

void uploadUniform( int location, const math::Matrix3f& value )
{
    glUniformMatrix3fv( location, 1, false, value.data() );
}

void uploadUniform( int location, const math::Matrix4f& value )
{
    glUniformMatrix4fv( location, 1, false, value.data() );
}

}  // namespace Carna :: base

}  // namespace Carna
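// A minimal usage sketch, not part of the original file: the overloaded
// uploadUniform() helpers above dispatch Carna math types onto the matching
// glUniform* calls. `programId` is a hypothetical, already-linked GL program,
// and a current GL context is assumed.
#if 0  // illustration only
const int loc = glGetUniformLocation( programId, "color" );
if( loc != -1 )
{
    Carna::base::uploadUniform( loc, Carna::base::math::Vector4f( 1, 0, 0, 1 ) );  // -> glUniform4f
}
#endif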
19.118644
85
0.564716
kostrykin
4fd0363b306ceb4c7c6dc060c357e2cec1a701a8
6,434
hh
C++
GeometryService/inc/G4GeometryOptions.hh
lborrel/Offline
db9f647bad3c702171ab5ffa5ccc04c82b3f8984
[ "Apache-2.0" ]
9
2020-03-28T00:21:41.000Z
2021-12-09T20:53:26.000Z
GeometryService/inc/G4GeometryOptions.hh
lborrel/Offline
db9f647bad3c702171ab5ffa5ccc04c82b3f8984
[ "Apache-2.0" ]
684
2019-08-28T23:37:43.000Z
2022-03-31T22:47:45.000Z
GeometryService/inc/G4GeometryOptions.hh
lborrel/Offline
db9f647bad3c702171ab5ffa5ccc04c82b3f8984
[ "Apache-2.0" ]
61
2019-08-16T23:28:08.000Z
2021-12-20T08:29:48.000Z
#ifndef G4GEOMETRY_OPTIONS
#define G4GEOMETRY_OPTIONS

//
// G4 geometry options look-up facility, to be used in conjunction
// with SimpleConfig.
//
//
// Original author: Kyle Knoepfel
//
// This method is used for setting and overriding various flags that
// are specified when creating volumes in G4.  Ideally, it would go in
// the Mu2eG4Helper service, but it is tied to GeometryService because of
// SimpleConfig and linkage loops.
//
// The idiom of this helper is the following:
//
// (1) A SimpleConfig file can specify the following assignments:
//
//       bool <var_prefix>.isVisible           = [ true or false ];
//       bool <var_prefix>.isSolid             = [ true or false ];
//       bool <var_prefix>.forceAuxEdgeVisible = [ true or false ];
//       bool <var_prefix>.placePV             = [ true or false ];
//       bool <var_prefix>.doSurfaceCheck      = [ true or false ];
//
// (2) The various flags are loaded into the option maps by the
//     following syntax within a .cc file:
//
//       G4GeometryOptions* geomOptions = art::ServiceHandle<GeometryService>()->geomOptions();
//       geomOptions->loadEntry( configFile, "MATCHING_TOKEN", <var_prefix> );
//
//     where the "MATCHING_TOKEN" is specified by the User in terms
//     of what you want the querying functions to look for.  Normally
//     the value of "MATCHING_TOKEN" applies to several volumes, but
//     it could be chosen for each volume.  If "loadEntry" is
//     not included for a given volume, then the 5 flags above
//     default to global values.
//
// (3) To access the flags, the following can be done:
//
//       const auto geomOptions = art::ServiceHandle<GeometryService>()->geomOptions();
//       geomOptions->isVisible( "MATCHING_TOKEN" );
//       geomOptions->isSolid  ( "MATCHING_TOKEN" );
//       etc.
//
//     If one were to do the following (the following is pseudo-code):
//
//       vector<VolumeParams> volumes; // A vector with a lot of volume parameters
//
//       for ( const auto& volParams : volumes ) {
//         finishNesting( volParams,
//                        ...
//                        geomOptions->isVisible( volParams.volumeName );
//                        ... );
//       }
//
//     such a query could take a long time.  For that reason, the
//     "MATCHING_TOKEN" value does not need to match that of the
//     volume name to be created.  The following can be much faster:
//
//       vector<VolumeParams> volumes; // A vector with a lot of volume parameters
//       bool isVisible = geomOptions->isVisible( "Straw" ); // look-up once.
//
//       for ( const auto& volParams : volumes ) {
//         finishNesting( volParams,
//                        ...
//                        isVisible
//                        ... );
//       }
//
//     Note that an individual volume (e.g. straw) can be viewed by
//     specifying an override (see point 5).
//
// (4) The (e.g.) visible() facility will first search through the
//     corresponding map for a match.  If no match is found---i.e. an
//     entry corresponding to the requested "MATCHING_TOKEN" does not
//     exist---the default visible value is returned.
//
// (5) The value returned from step 4 can be overridden by specifying
//     override commands in Mu2eG4/geom/g4_userOptions.txt (e.g.):
//
//       bool g4.doSurfaceCheck = true;
//       vector<string> g4.doSurfaceCheck.drop  = {"*"};
//       vector<string> g4.doSurfaceCheck.keep  = {"PSShield*"};
//       vector<string> g4.doSurfaceCheck.order = { "g4.doSurfaceCheck.drop",
//                                                  "g4.doSurfaceCheck.keep" };
//
//     In this case, the default "doSurfaceCheck" value is true, but
//     the doSurfaceCheck's for all volumes are disabled by the drop
//     "*" command, since "*" matches to all volumes.  All volumes
//     that match "PSShield*" then have their surface checks enabled.
//     Note that the commands in "drop" and "keep" always override
//     the default g4.doSurfaceCheck value.
//
//     The actual drop/keep commands are not implemented unless they
//     are specified in the *.order vector in the order desired.
//
//     Additional drop/keep commands can be added.  The only
//     requirement is that their suffixes must be of the form *.keep*
//     or *.drop*.

#include <map>
#include <string>
#include <vector>
#include <regex>

namespace mu2e {

  class SimpleConfig;

  class G4GeometryOptData {
  public:

    typedef std::vector<std::string> VS;
    typedef std::pair<bool,std::regex> Ordering;
    typedef std::vector<Ordering> OrderingList;

    G4GeometryOptData( bool defaultValue, const std::string& name );

    void loadOrderingStrings( const SimpleConfig& config, const std::string& varString );
    void mapInserter        ( const std::string& volName, bool value );
    bool queryMap           ( const std::string& volName ) const;

    bool default_value() const { return default_; }

  private:

    std::pair<bool,bool> flagOverridden( const std::string& volName ) const;

    std::string name_;
    std::map<std::string, bool> map_;
    OrderingList ordering_;
    bool default_;
  };

  class G4GeometryOptions {
  public:

    G4GeometryOptions( const SimpleConfig& config );

    // Disable copy c'tor and copy assignment
    G4GeometryOptions           (const G4GeometryOptions&) = delete;
    G4GeometryOptions& operator=(const G4GeometryOptions&) = delete;

    void loadEntry( const SimpleConfig& config, const std::string& volName, const std::string& prefix );

    bool isSolid             ( const std::string& volName ) const;
    bool isVisible           ( const std::string& volName ) const;
    bool doSurfaceCheck      ( const std::string& volName ) const;
    bool forceAuxEdgeVisible ( const std::string& volName ) const;
    bool placePV             ( const std::string& volName ) const;

  private:

    G4GeometryOptData dataSurfaceCheck_;
    G4GeometryOptData dataIsVisible_;
    G4GeometryOptData dataIsSolid_;
    G4GeometryOptData dataForceAuxEdge_;
    G4GeometryOptData dataPlacePV_;
  };

}

#endif /*G4GEOMETRY_OPTIONS*/
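// A minimal usage sketch, not part of the original header: steps (2) and (3) of
// the documented idiom in one place. `config` is a hypothetical SimpleConfig
// instance, and "PSShield" is a MATCHING_TOKEN chosen for a family of volumes.
#if 0  // illustration only
G4GeometryOptions* geomOptions = art::ServiceHandle<GeometryService>()->geomOptions();
geomOptions->loadEntry( config, "PSShield", "psShield" );        // step (2): register flags
const bool visible      = geomOptions->isVisible( "PSShield" );  // step (3): query once,
const bool surfaceCheck = geomOptions->doSurfaceCheck( "PSShield" );  // reuse in the loop
#endif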
36.350282
106
0.613926
lborrel
4fd0fa30ee8922027abd6fd8dc0fa3da87685323
291
cpp
C++
mitsui/mitsuiA.cpp
KoukiNAGATA/c-
ae51bacb9facb936a151dd777beb6688383a2dcd
[ "MIT" ]
null
null
null
mitsui/mitsuiA.cpp
KoukiNAGATA/c-
ae51bacb9facb936a151dd777beb6688383a2dcd
[ "MIT" ]
3
2021-03-31T01:39:25.000Z
2021-05-04T10:02:35.000Z
mitsui/mitsuiA.cpp
KoukiNAGATA/c-
ae51bacb9facb936a151dd777beb6688383a2dcd
[ "MIT" ]
null
null
null
#include <iostream>
#include <vector>
#include <cstdio>
#include <string>
#include <algorithm>
using namespace std;

int main() {
    int A, B, C, D;
    cin >> A >> B >> C >> D;
    if (A != C) {
        cout << 1 << "\n";
        return 0;
    }
    cout << 0 << "\n";
    return 0;
}
14.55
28
0.474227
KoukiNAGATA
4fd515a9a5d03507aada46efca7aed38585a0101
1,009
hpp
C++
include/xtr/timespec.hpp
uilianries/xtr
b1dccc51b024369e6c1a2f6d3fcf5f405735289b
[ "MIT" ]
10
2021-09-25T10:40:55.000Z
2022-03-19T01:05:05.000Z
include/xtr/timespec.hpp
uilianries/xtr
b1dccc51b024369e6c1a2f6d3fcf5f405735289b
[ "MIT" ]
2
2021-09-24T12:59:08.000Z
2021-09-24T19:17:47.000Z
include/xtr/timespec.hpp
uilianries/xtr
b1dccc51b024369e6c1a2f6d3fcf5f405735289b
[ "MIT" ]
1
2021-09-24T13:45:29.000Z
2021-09-24T13:45:29.000Z
#ifndef XTR_TIMESPEC_HPP
#define XTR_TIMESPEC_HPP

#include <ctime>

#include <fmt/chrono.h>

namespace xtr
{
    // This class exists to avoid clashing with user code---if a formatter
    // was created for std::timespec then it may conflict with a user
    // defined formatter.
    struct timespec : std::timespec
    {
        timespec() = default;

        // lack of explicit is intentional
        timespec(std::timespec ts)
        :
            std::timespec(ts)
        {
        }
    };
}

template<>
struct fmt::formatter<xtr::timespec>
{
    template<typename ParseContext>
    constexpr auto parse(ParseContext &ctx)
    {
        return ctx.begin();
    }

    template<typename FormatContext>
    auto format(const xtr::timespec ts, FormatContext &ctx)
    {
        std::tm temp;
        return fmt::format_to(
            ctx.out(),
            "{:%Y-%m-%d %T}.{:06}",
            *::gmtime_r(&ts.tv_sec, &temp),
            ts.tv_nsec / 1000);
    }
};

#endif
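// A minimal usage sketch, not part of the original header: the formatter above
// lets an xtr::timespec be printed directly through fmt with microsecond
// precision (tv_nsec / 1000).
#if 0  // illustration only
xtr::timespec ts;
::timespec_get(&ts, TIME_UTC);   // fills the inherited std::timespec fields
fmt::print("{}\n", ts);          // e.g. "2021-09-24 12:59:08.000123"
#endif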
20.591837
74
0.562934
uilianries
4fd8ce5bb6a5438c2c7c90bf28f7aace7da37961
1,791
hpp
C++
include/StackImpl.hpp
Slava-100/lab-05-stack
06a365e8c27870e2b133cc9ee9102b9e41247a49
[ "MIT" ]
null
null
null
include/StackImpl.hpp
Slava-100/lab-05-stack
06a365e8c27870e2b133cc9ee9102b9e41247a49
[ "MIT" ]
null
null
null
include/StackImpl.hpp
Slava-100/lab-05-stack
06a365e8c27870e2b133cc9ee9102b9e41247a49
[ "MIT" ]
null
null
null
// Copyright 2021 Slava-100 <svat.strel.2001@gmail.com>

#ifndef INCLUDE_STACKIMPL_HPP_
#define INCLUDE_STACKIMPL_HPP_

#include <stdexcept>  // std::runtime_error, thrown by pop() on an empty stack
#include <utility>

template <typename T>
class StackImpl {
 public:
  StackImpl() : _tail(nullptr), _size(0) {}
  StackImpl(const StackImpl &) = delete;
  StackImpl(StackImpl &&) = delete;
  StackImpl &operator=(const StackImpl &) = delete;
  StackImpl &operator=(StackImpl &&) = delete;

  ~StackImpl() {
    while (_tail != nullptr) {
      auto tmp = _tail;
      _tail = _tail->prev;
      delete tmp;
    }
  }

  void push(const T &value) {
    auto new_node = new _list_node(value);
    _add_new_node(new_node);
  }

  void push(T &&value) {
    auto new_node = new _list_node(std::move(value));
    _add_new_node(new_node);
  }

  template <typename... args_t>
  void emplace(args_t &&...args) {
    auto new_node = new _list_node(std::forward<args_t>(args)...);
    _add_new_node(new_node);
  }

  T pop() {
    if (!_tail) throw std::runtime_error("pop from empty stack");
    auto tmp = _tail;
    auto ret_value = tmp->value;
    _tail = _tail->prev;
    delete tmp;
    --_size;
    return ret_value;
  }

  std::size_t size() const { return _size; }

  bool empty() const { return _tail == nullptr; }

 private:
  struct _list_node {
    explicit _list_node(const T &val) : value(val), prev(nullptr) {}
    explicit _list_node(T &&val) : value(std::move(val)), prev(nullptr) {}
    template <typename... args_t>
    explicit _list_node(args_t &&...args)
        : value(std::forward<args_t>(args)...), prev(nullptr) {}

    T value;
    _list_node *prev;
  };

  void _add_new_node(_list_node *new_node) {
    new_node->prev = _tail;
    _tail = new_node;
    ++_size;
  }

  _list_node *_tail;
  std::size_t _size;
};

#endif  // INCLUDE_STACKIMPL_HPP_
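// A minimal usage sketch, not part of the original header, exercising the
// singly-linked stack above.
#if 0  // illustration only
StackImpl<std::pair<int, std::string>> stack;
stack.push({1, "one"});
stack.emplace(2, "two");       // constructs the pair in place at the tail
auto top = stack.pop();        // -> {2, "two"}; throws std::runtime_error if empty
bool isEmpty = stack.empty();  // false: {1, "one"} is still stored
#endif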
22.3875
74
0.639308
Slava-100
4fd93003389cc1ccc0a29376623c280ab7874358
6,055
cpp
C++
libgpopt/src/xforms/CXformSimplifyGbAgg.cpp
davidli2010/gporca
4c946e5e41051c832736b2fce712c37ca651ddf5
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
libgpopt/src/xforms/CXformSimplifyGbAgg.cpp
davidli2010/gporca
4c946e5e41051c832736b2fce712c37ca651ddf5
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
libgpopt/src/xforms/CXformSimplifyGbAgg.cpp
davidli2010/gporca
4c946e5e41051c832736b2fce712c37ca651ddf5
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
//---------------------------------------------------------------------------
//	Greenplum Database
//	Copyright (C) 2012 EMC Corp.
//
//	@filename:
//		CXformSimplifyGbAgg.cpp
//
//	@doc:
//		Implementation of simplifying an aggregate expression by finding
//		the minimal grouping columns based on functional dependencies
//---------------------------------------------------------------------------

#include "gpos/base.h"

#include "gpopt/base/CUtils.h"
#include "gpopt/base/CKeyCollection.h"
#include "gpopt/operators/ops.h"
#include "gpopt/operators/COperator.h"
#include "gpopt/xforms/CXformSimplifyGbAgg.h"

using namespace gpmd;
using namespace gpopt;

//---------------------------------------------------------------------------
//	@function:
//		CXformSimplifyGbAgg::CXformSimplifyGbAgg
//
//	@doc:
//		Ctor
//
//---------------------------------------------------------------------------
CXformSimplifyGbAgg::CXformSimplifyGbAgg
	(
	CMemoryPool *mp
	)
	:
	CXformExploration
		(
		// pattern
		GPOS_NEW(mp) CExpression
					(
					mp,
					GPOS_NEW(mp) CLogicalGbAgg(mp),
					GPOS_NEW(mp) CExpression(mp, GPOS_NEW(mp) CPatternLeaf(mp)),  // relational child
					GPOS_NEW(mp) CExpression(mp, GPOS_NEW(mp) CPatternTree(mp))   // scalar project list
					)
		)
{}

//---------------------------------------------------------------------------
//	@function:
//		CXformSimplifyGbAgg::Exfp
//
//	@doc:
//		Compute xform promise for a given expression handle;
//		aggregate must have grouping columns
//
//---------------------------------------------------------------------------
CXform::EXformPromise
CXformSimplifyGbAgg::Exfp
	(
	CExpressionHandle &exprhdl
	)
	const
{
	CLogicalGbAgg *popAgg = CLogicalGbAgg::PopConvert(exprhdl.Pop());
	GPOS_ASSERT(COperator::EgbaggtypeGlobal == popAgg->Egbaggtype());

	if (0 == popAgg->Pdrgpcr()->Size() || NULL != popAgg->PdrgpcrMinimal())
	{
		return CXform::ExfpNone;
	}

	return CXform::ExfpHigh;
}

//---------------------------------------------------------------------------
//	@function:
//		CXformSimplifyGbAgg::FDropGbAgg
//
//	@doc:
//		Return true if GbAgg operator can be dropped because grouping
//		columns include a key
//
//---------------------------------------------------------------------------
BOOL
CXformSimplifyGbAgg::FDropGbAgg
	(
	CMemoryPool *mp,
	CExpression *pexpr,
	CXformResult *pxfres
	)
{
	CLogicalGbAgg *popAgg = CLogicalGbAgg::PopConvert(pexpr->Pop());
	CExpression *pexprRelational = (*pexpr)[0];
	CExpression *pexprProjectList = (*pexpr)[1];

	if (0 < pexprProjectList->Arity())
	{
		// GbAgg cannot be dropped if Agg functions are computed
		return false;
	}

	CKeyCollection *pkc = CDrvdPropRelational::GetRelationalProperties(pexprRelational->PdpDerive())->Pkc();
	if (NULL == pkc)
	{
		// relational child does not have key
		return false;
	}

	const ULONG ulKeys = pkc->Keys();
	BOOL fDrop = false;
	for (ULONG ul = 0; !fDrop && ul < ulKeys; ul++)
	{
		CColRefArray *pdrgpcrKey = pkc->PdrgpcrKey(mp, ul);
		CColRefSet *pcrs = GPOS_NEW(mp) CColRefSet(mp, pdrgpcrKey);
		pdrgpcrKey->Release();

		CColRefSet *pcrsGrpCols = GPOS_NEW(mp) CColRefSet(mp);
		pcrsGrpCols->Include(popAgg->Pdrgpcr());
		BOOL fGrpColsHasKey = pcrsGrpCols->ContainsAll(pcrs);
		pcrs->Release();
		pcrsGrpCols->Release();
		if (fGrpColsHasKey)
		{
			// Gb operator can be dropped
			pexprRelational->AddRef();
			CExpression *pexprResult =
				CUtils::PexprLogicalSelect(mp, pexprRelational, CPredicateUtils::PexprConjunction(mp, NULL));
			pxfres->Add(pexprResult);
			fDrop = true;
		}
	}

	return fDrop;
}

//---------------------------------------------------------------------------
//	@function:
//		CXformSimplifyGbAgg::Transform
//
//	@doc:
//		Actual transformation to simplify an aggregate expression
//
//---------------------------------------------------------------------------
void
CXformSimplifyGbAgg::Transform
	(
	CXformContext *pxfctxt,
	CXformResult *pxfres,
	CExpression *pexpr
	)
	const
{
	GPOS_ASSERT(NULL != pxfctxt);
	GPOS_ASSERT(NULL != pxfres);
	GPOS_ASSERT(FPromising(pxfctxt->Pmp(), this, pexpr));
	GPOS_ASSERT(FCheckPattern(pexpr));

	CMemoryPool *mp = pxfctxt->Pmp();

	if (FDropGbAgg(mp, pexpr, pxfres))
	{
		// grouping columns could be dropped, GbAgg is transformed to a Select
		return;
	}

	// extract components
	CLogicalGbAgg *popAgg = CLogicalGbAgg::PopConvert(pexpr->Pop());
	CExpression *pexprRelational = (*pexpr)[0];
	CExpression *pexprProjectList = (*pexpr)[1];

	CColRefArray *colref_array = popAgg->Pdrgpcr();
	CColRefSet *pcrsGrpCols = GPOS_NEW(mp) CColRefSet(mp);
	pcrsGrpCols->Include(colref_array);

	CColRefSet *pcrsCovered = GPOS_NEW(mp) CColRefSet(mp);  // set of grouping columns covered by FD's
	CColRefSet *pcrsMinimal = GPOS_NEW(mp) CColRefSet(mp);  // a set of minimal grouping columns based on FD's
	CFunctionalDependencyArray *pdrgpfd =
		CDrvdPropRelational::GetRelationalProperties(pexpr->PdpDerive())->Pdrgpfd();

	// collect grouping columns FD's
	const ULONG size = (pdrgpfd == NULL) ? 0 : pdrgpfd->Size();
	for (ULONG ul = 0; ul < size; ul++)
	{
		CFunctionalDependency *pfd = (*pdrgpfd)[ul];
		if (pfd->FIncluded(pcrsGrpCols))
		{
			pcrsCovered->Include(pfd->PcrsDetermined());
			pcrsCovered->Include(pfd->PcrsKey());
			pcrsMinimal->Include(pfd->PcrsKey());
		}
	}
	BOOL fCovered = pcrsCovered->Equals(pcrsGrpCols);
	pcrsGrpCols->Release();
	pcrsCovered->Release();

	if (!fCovered)
	{
		// the union of RHS of collected FD's does not cover all grouping columns
		pcrsMinimal->Release();
		return;
	}

	// create a new Agg with minimal grouping columns
	colref_array->AddRef();

	CLogicalGbAgg *popAggNew =
		GPOS_NEW(mp) CLogicalGbAgg(mp, colref_array, pcrsMinimal->Pdrgpcr(mp), popAgg->Egbaggtype());
	pcrsMinimal->Release();
	GPOS_ASSERT(!popAgg->Matches(popAggNew) && "Simplified aggregate matches original aggregate");

	pexprRelational->AddRef();
	pexprProjectList->AddRef();
	CExpression *pexprResult = GPOS_NEW(mp) CExpression(mp, popAggNew, pexprRelational, pexprProjectList);
	pxfres->Add(pexprResult);
}

// EOF
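// An illustrative example, not part of the original file, of what the xform
// above does in SQL terms. Given a functional dependency a -> b on table t,
// the grouping columns of
//
//     SELECT a, b FROM t GROUP BY a, b;
//
// are fully covered by the FD, so the rewritten GbAgg keeps the same output
// columns but groups on the minimal set {a}. Separately, if the grouping
// columns contain a key of t and no aggregate functions are computed,
// FDropGbAgg() replaces the GbAgg with a plain Select, since every group is
// a single row.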
26.911111
121
0.623452
davidli2010
4fda8c1ba1d5cff07e468a00c222575519007a6b
42,409
cpp
C++
frida/frida-cycript/src/Replace.cpp
bzxy/cydia
f8c838cdbd86e49dddf15792e7aa56e2af80548d
[ "MIT" ]
678
2017-11-17T08:33:19.000Z
2022-03-26T10:40:20.000Z
frida/frida-cycript/src/Replace.cpp
chenfanfang/Cydia
5efce785bfd5f1064b9c0f0e29a9cc05aa24cad0
[ "MIT" ]
22
2019-04-16T05:51:53.000Z
2021-11-08T06:18:45.000Z
frida/frida-cycript/src/Replace.cpp
chenfanfang/Cydia
5efce785bfd5f1064b9c0f0e29a9cc05aa24cad0
[ "MIT" ]
170
2018-06-10T07:59:20.000Z
2022-03-22T16:19:33.000Z
/* Cycript - The Truly Universal Scripting Language
 * Copyright (C) 2009-2016  Jay Freeman (saurik)
*/

/* GNU Affero General Public License, Version 3 {{{ */
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.

 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
**/
/* }}} */

#include <iomanip>
#include <map>

#include "Replace.hpp"
#include "Syntax.hpp"

CYFunctionExpression *CYNonLocalize(CYContext &context, CYFunctionExpression *function) {
    function->nonlocal_ = context.nextlocal_;
    return function;
}

CYFunctionExpression *CYSuperize(CYContext &context, CYFunctionExpression *function) {
    function->super_ = context.super_;
    return function;
}

CYStatement *CYDefineProperty(CYExpression *object, CYExpression *name, bool configurable, bool enumerable, CYProperty *descriptor) {
    return $E($C3($M($V("Object"), $S("defineProperty")), object, name, $ CYObject(CYList<CYProperty>()
        ->* (configurable ? $ CYPropertyValue($S("configurable"), $ CYTrue()) : NULL)
        ->* (enumerable ? $ CYPropertyValue($S("enumerable"), $ CYTrue()) : NULL)
        ->* descriptor)));
}

static void CYImplicitReturn(CYStatement *&code) {
    if (CYStatement *&last = CYGetLast(code))
        last = last->Return();
}

CYExpression *CYAdd::Replace(CYContext &context) {
    CYInfix::Replace(context);

    CYString *lhs(dynamic_cast<CYString *>(lhs_));
    CYString *rhs(dynamic_cast<CYString *>(rhs_));

    if (lhs != NULL || rhs != NULL) {
        if (lhs == NULL) {
            lhs = lhs_->String(context);
            if (lhs == NULL)
                return this;
        } else if (rhs == NULL) {
            rhs = rhs_->String(context);
            if (rhs == NULL)
                return this;
        }

        return lhs->Concat(context, rhs);
    }

    if (CYNumber *lhn = lhs_->Number(context))
        if (CYNumber *rhn = rhs_->Number(context))
            return $D(lhn->Value() + rhn->Value());

    return this;
}

CYExpression *CYAddressOf::Replace(CYContext &context) {
    return $C0($M(rhs_, $S("$cya")));
}

CYTarget *CYApply::AddArgument(CYContext &context, CYExpression *value) {
    CYArgument **argument(&arguments_);
    while (*argument != NULL)
        argument = &(*argument)->next_;
    *argument = $ CYArgument(value);
    return this;
}

CYArgument *CYArgument::Replace(CYContext &context) { $T(NULL)
    context.Replace(value_);
    next_ = next_->Replace(context);

    if (value_ == NULL) {
        if (next_ == NULL)
            return NULL;
        else
            value_ = $U;
    }

    return this;
}

CYTarget *CYArray::Replace(CYContext &context) {
    CYForEach (element, elements_)
        element->Replace(context);
    return this;
}

CYTarget *CYArrayComprehension::Replace(CYContext &context) {
    CYIdentifier *cyv(context.Unique());

    return $C0($F(NULL, $P1($B(cyv), comprehensions_->Parameters(context)), $$
        ->* $E($ CYAssign($V(cyv), $ CYArray()))
        ->* comprehensions_->Replace(context, $E($C1($M($V(cyv), $S("push")), expression_)))
        ->* $ CYReturn($V(cyv))
    ));
}

CYExpression *CYAssignment::Replace(CYContext &context) {
    // XXX: this is a horrible hack but I'm a month over schedule :(
    if (CYSubscriptMember *subscript = dynamic_cast<CYSubscriptMember *>(lhs_))
        return $C2($M(subscript->object_, $S("$cys")), subscript->property_, rhs_);

    context.Replace(lhs_);
    context.Replace(rhs_);
    return this;
}

CYTarget *CYAttemptMember::Replace(CYContext &context) {
    CYIdentifier *value(context.Unique());

    return $C1($F(NULL, $P1($B(value)), $$
        ->* $ CYReturn($ CYCondition($V(value), $M($V(value), property_), $V(value)))
    ), object_);
}

CYStatement *CYBlock::Return() {
    CYImplicitReturn(code_);
    return this;
}

CYStatement *CYBlock::Replace(CYContext &context) {
    CYScope scope(true, context);
    context.ReplaceAll(code_);
    scope.Close(context);

    if (code_ == NULL)
        return $ CYEmpty();
    return this;
}

CYStatement *CYBreak::Replace(CYContext &context) {
    return this;
}

CYTarget *CYCall::Replace(CYContext &context) {
    // XXX: this also is a horrible hack but I'm still a month over schedule :(
    if (CYAttemptMember *member = dynamic_cast<CYAttemptMember *>(function_)) {
        CYIdentifier *value(context.Unique());

        return $C1($F(NULL, $P1($B(value)), $$
            ->* $ CYReturn($ CYCondition($V(value), $C($M($V(value), member->property_), arguments_), $V(value)))
        ), member->object_);
    }

    context.Replace(function_);
    arguments_->Replace(context);
    return this;
}

namespace cy {
namespace Syntax {

void Catch::Replace(CYContext &context) { $T()
    CYScope scope(true, context);
    name_ = name_->Replace(context, CYIdentifierCatch);
    context.ReplaceAll(code_);
    scope.Close(context);
}

} }

CYTarget *CYClassExpression::Replace(CYContext &context) {
    CYBuilder builder;

    CYIdentifier *super(context.Unique());

    CYIdentifier *old(context.super_);
    context.super_ = super;

    CYIdentifier *constructor(context.Unique());
    CYForEach (member, tail_->static_)
        member->Replace(context, builder, $V(constructor), true);

    CYIdentifier *prototype(context.Unique());
    CYForEach (member, tail_->instance_)
        member->Replace(context, builder, $V(prototype), true);

    if (tail_->constructor_ == NULL)
        tail_->constructor_ = $ CYFunctionExpression(NULL, NULL, NULL);
    tail_->constructor_->name_ = name_;
    tail_->constructor_ = CYSuperize(context, tail_->constructor_);

    context.super_ = old;

    return $C1($ CYFunctionExpression(NULL, $P($B(super)), $$
        ->* $ CYVar($B1($B(constructor, tail_->constructor_)))
        ->* $ CYVar($B1($B(prototype, $ CYFunctionExpression(NULL, NULL, NULL))))
        ->* $E($ CYAssign($M($V(prototype), $S("prototype")), $M($V(super), $S("prototype"))))
        ->* $E($ CYAssign($V(prototype), $N($V(prototype))))
        ->* CYDefineProperty($V(prototype), $S("constructor"), false, false, $ CYPropertyValue($S("value"), $V(constructor)))
        ->* $ CYVar(builder.bindings_)
        ->* builder.statements_
        ->* CYDefineProperty($V(constructor), $S("prototype"), false, false, $ CYPropertyValue($S("value"), $V(prototype)))
        ->* $ CYReturn($V(constructor))
    ), tail_->extends_ ? tail_->extends_ : $V($I("Object")));
}

CYStatement *CYClassStatement::Replace(CYContext &context) {
    return $ CYVar($B1($B(name_, $ CYClassExpression(name_, tail_))));
}

void CYClause::Replace(CYContext &context) { $T()
    context.Replace(value_);
    context.ReplaceAll(code_);
    next_->Replace(context);
}

CYExpression *CYCompound::Replace(CYContext &context) {
    context.Replace(expression_);
    context.Replace(next_);

    if (CYCompound *compound = dynamic_cast<CYCompound *>(expression_)) {
        expression_ = compound->expression_;
        compound->expression_ = compound->next_;
        compound->next_ = next_;
        next_ = compound;
    }

    return this;
}

CYFunctionParameter *CYCompound::Parameter() const {
    CYFunctionParameter *next(next_->Parameter());
    if (next == NULL)
        return NULL;

    CYFunctionParameter *parameter(expression_->Parameter());
    if (parameter == NULL)
        return NULL;

    parameter->SetNext(next);
    return parameter;
}

CYFunctionParameter *CYComprehension::Parameters(CYContext &context) const { $T(NULL)
    CYFunctionParameter *next(next_->Parameters(context));
    if (CYFunctionParameter *parameter = Parameter(context)) {
        parameter->SetNext(next);
        return parameter;
    } else
        return next;
}

CYStatement *CYComprehension::Replace(CYContext &context, CYStatement *statement) const {
    return next_ == NULL ? statement : next_->Replace(context, statement);
}

CYExpression *CYComputed::PropertyName(CYContext &context) {
    return expression_;
}

CYExpression *CYCondition::Replace(CYContext &context) {
    context.Replace(test_);
    context.Replace(true_);
    context.Replace(false_);
    return this;
}

void CYContext::NonLocal(CYStatement *&statements) {
    CYContext &context(*this);

    if (nextlocal_ != NULL && nextlocal_->identifier_ != NULL) {
        CYIdentifier *cye($I("$cye")->Replace(context, CYIdentifierGlobal));
        CYIdentifier *unique(nextlocal_->identifier_->Replace(context, CYIdentifierGlobal));

        CYStatement *declare(
            $ CYVar($B1($B(unique, $ CYObject()))));

        cy::Syntax::Catch *rescue(
            $ cy::Syntax::Catch(cye, $$
                ->* $ CYIf($ CYIdentical($M($V(cye), $S("$cyk")), $V(unique)), $$
                    ->* $ CYReturn($M($V(cye), $S("$cyv"))))
                ->* $ cy::Syntax::Throw($V(cye))));

        context.Replace(declare);
        rescue->Replace(context);

        statements = $$
            ->* declare
            ->* $ cy::Syntax::Try(statements, rescue, NULL);
    }
}

CYIdentifier *CYContext::Unique() {
    return $ CYIdentifier($pool.strcat("$cy", $pool.itoa(unique_++), NULL));
}

CYStatement *CYContinue::Replace(CYContext &context) {
    return this;
}

CYStatement *CYDebugger::Replace(CYContext &context) {
    return this;
}

CYTarget *CYBinding::Target(CYContext &context) {
    return $V(identifier_);
}

CYAssignment *CYBinding::Replace(CYContext &context, CYIdentifierKind kind) {
    identifier_ = identifier_->Replace(context, kind);

    if (initializer_ == NULL)
        return NULL;

    CYAssignment *value($ CYAssign(Target(context), initializer_));
    initializer_ = NULL;
    return value;
}

CYExpression *CYBindings::Replace(CYContext &context, CYIdentifierKind kind) { $T(NULL)
    CYAssignment *assignment(binding_->Replace(context, kind));
    CYExpression *compound(next_->Replace(context, kind));

    if (assignment != NULL)
        if (compound == NULL)
            compound = assignment;
        else
            compound = $ CYCompound(assignment, compound);
    return compound;
}

CYFunctionParameter *CYBindings::Parameter(CYContext &context) { $T(NULL)
    return $ CYFunctionParameter($ CYBinding(binding_->identifier_), next_->Parameter(context));
}

CYArgument *CYBindings::Argument(CYContext &context) { $T(NULL)
    return $ CYArgument(binding_->initializer_, next_->Argument(context));
}

CYTarget *CYDirectMember::Replace(CYContext &context) {
    context.Replace(object_);
    context.Replace(property_);
    return this;
}

CYStatement *CYDoWhile::Replace(CYContext &context) {
    context.Replace(test_);
    context.ReplaceAll(code_);
    return this;
}

void CYElementSpread::Replace(CYContext &context) {
    context.Replace(value_);
}

void CYElementValue::Replace(CYContext &context) {
    context.Replace(value_);
}

CYForInitializer *CYEmpty::Replace(CYContext &context) {
    return NULL;
}

CYTarget *CYEncodedType::Replace(CYContext &context) {
    return typed_->Replace(context);
}

CYTarget *CYEval::Replace(CYContext &context) {
    context.scope_->Damage();

    if (arguments_ != NULL)
        arguments_->value_ = $C1($M($V("Cycript"), $S("compile")), arguments_->value_);
    return $C($V("eval"), arguments_);
}

CYStatement *CYExpress::Return() {
    return $ CYReturn(expression_);
}

CYForInitializer *CYExpress::Replace(CYContext &context) {
    context.Replace(expression_);
    return this;
}

CYTarget *CYExpression::AddArgument(CYContext &context, CYExpression *value) {
    return $C1(this, value);
}

CYFunctionParameter *CYExpression::Parameter() const {
    return NULL;
}

CYTarget *CYExtend::Replace(CYContext &context) {
    return object_.Replace(context, lhs_);
}

CYStatement *CYExternalDefinition::Replace(CYContext &context) {
    return $E($ CYAssign($V(name_), $ CYExternalExpression(abi_, type_, name_)));
}

CYTarget *CYExternalExpression::Replace(CYContext &context) {
    CYExpression *expression(name_->Number(context));
    if (expression == NULL)
        expression = $C2($V("dlsym"), $V("RTLD_DEFAULT"), name_->PropertyName(context));
    return $C1(type_->Replace(context), expression);
}

CYNumber *CYFalse::Number(CYContext &context) {
    return $D(0);
}

CYString *CYFalse::String(CYContext &context) {
    return $S("false");
}

CYExpression *CYFatArrow::Replace(CYContext &context) {
    CYFunctionExpression *function($ CYFunctionExpression(NULL, parameters_, code_));
    function->this_.SetNext(context.this_);
    return function;
}

void CYFinally::Replace(CYContext &context) { $T()
    CYScope scope(true, context);
    context.ReplaceAll(code_);
    scope.Close(context);
}

CYStatement *CYFor::Replace(CYContext &context) {
    CYScope outer(true, context);
    context.Replace(initializer_);

    context.Replace(test_);

    {
        CYScope inner(true, context);
        context.ReplaceAll(code_);
        inner.Close(context);
    }

    context.Replace(increment_);

    outer.Close(context);
    return this;
}

CYStatement *CYForLexical::Initialize(CYContext &context, CYExpression *value) {
    if (value == NULL) {
        if (binding_->initializer_ == NULL)
            return NULL;
        value = binding_->initializer_;
    }

    return $ CYLexical(constant_, $B1($ CYBinding(binding_->identifier_, value)));
}

CYTarget *CYForLexical::Replace(CYContext &context) {
    _assert(binding_->Replace(context, CYIdentifierLexical) == NULL);
    return binding_->Target(context);
}

CYStatement *CYForIn::Replace(CYContext &context) {
    CYScope scope(true, context);
    context.Replace(initializer_);
    context.Replace(iterable_);
    context.ReplaceAll(code_);
    scope.Close(context);
    return this;
}

CYStatement *CYForInitialized::Replace(CYContext &context) {
    CYAssignment *assignment(binding_->Replace(context, CYIdentifierVariable));
    return $ CYBlock($$
        ->* (assignment == NULL ? NULL : $ CYExpress(assignment))
        ->* $ CYForIn(binding_->Target(context), iterable_, code_));
}

CYFunctionParameter *CYForInComprehension::Parameter(CYContext &context) const {
    return $ CYFunctionParameter(binding_);
}

CYStatement *CYForInComprehension::Replace(CYContext &context, CYStatement *statement) const {
    return $ CYForIn(binding_->Target(context), iterable_, CYComprehension::Replace(context, statement));
}

CYStatement *CYForOf::Replace(CYContext &context) {
    CYIdentifier *item(context.Unique()), *list(context.Unique());

    return $ CYBlock($$
        ->* initializer_->Initialize(context, NULL)
        ->* $ CYLexical(false, $B2($B(list, iterable_), $B(item)))
        ->* $ CYForIn($V(item), $V(list), $ CYBlock($$
            ->* initializer_->Initialize(context, $M($V(list), $V(item)))
            ->* code_
    )));
}

CYFunctionParameter *CYForOfComprehension::Parameter(CYContext &context) const {
    return $ CYFunctionParameter(binding_);
}

CYStatement *CYForOfComprehension::Replace(CYContext &context, CYStatement *statement) const {
    CYIdentifier *cys(context.Unique());

    return $ CYBlock($$
        ->* $ CYLexical(false, $B1($B(cys, iterable_)))
        ->* $ CYForIn(binding_->Target(context), $V(cys), $ CYBlock($$
            ->* $E($ CYAssign(binding_->Target(context), $M($V(cys), binding_->Target(context))))
            ->* CYComprehension::Replace(context, statement)
    )));
}

CYStatement *CYForVariable::Initialize(CYContext &context, CYExpression *value) {
    if (value == NULL) {
        if (binding_->initializer_ == NULL)
            return NULL;
        value = binding_->initializer_;
    }

    return $ CYVar($B1($ CYBinding(binding_->identifier_, value)));
}

CYTarget *CYForVariable::Replace(CYContext &context) {
    _assert(binding_->Replace(context, CYIdentifierVariable) == NULL);
    return binding_->Target(context);
}

// XXX: this is evil evil black magic. don't ask, don't tell... don't believe!
#define MappingSet "0etnirsoalfucdphmgyvbxTwSNECAFjDLkMOIBPqzRH$_WXUVGYKQJZ"
//#define MappingSet "0abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ$_"

void CYFunction::Replace(CYContext &context) {
    CYThisScope *_this(context.this_);
    context.this_ = &this_;
    context.this_ = CYGetLast(context.this_);

    CYIdentifier *super(context.super_);
    context.super_ = super_;

    CYNonLocal *nonlocal(context.nonlocal_);
    CYNonLocal *nextlocal(context.nextlocal_);

    bool localize;
    if (nonlocal_ != NULL) {
        localize = false;
        context.nonlocal_ = nonlocal_;
    } else {
        localize = true;
        nonlocal_ = $ CYNonLocal();
        context.nextlocal_ = nonlocal_;
    }

    CYScope scope(!localize, context);

    $I("arguments")->Replace(context, CYIdentifierMagic);

    parameters_->Replace(context, code_);

    context.ReplaceAll(code_);

    if (implicit_)
        CYImplicitReturn(code_);

    if (CYIdentifier *identifier = this_.identifier_) {
        context.scope_->Declare(context, identifier, CYIdentifierVariable);
        code_ = $$
            ->* $E($ CYAssign($V(identifier), $ CYThis()))
            ->* code_;
    }

    if (localize)
        context.NonLocal(code_);

    context.nextlocal_ = nextlocal;
    context.nonlocal_ = nonlocal;

    context.super_ = super;
    context.this_ = _this;

    scope.Close(context, code_);
}

CYTarget *CYFunctionExpression::Replace(CYContext &context) {
    CYScope scope(false, context);
    if (name_ != NULL)
        name_ = name_->Replace(context, CYIdentifierOther);

    CYFunction::Replace(context);
    scope.Close(context);
    return this;
}

void CYFunctionParameter::Replace(CYContext &context, CYStatement *&statements) { $T()
    CYAssignment *assignment(binding_->Replace(context, CYIdentifierArgument));

    next_->Replace(context, statements);

    if (assignment != NULL)
        statements = $$
            ->* $ CYIf($ CYIdentical($ CYTypeOf(binding_->Target(context)), $S("undefined")), $$
                ->* $E(assignment))
            ->* statements;
}

CYStatement *CYFunctionStatement::Replace(CYContext &context) {
    name_ = name_->Replace(context, CYIdentifierOther);
    CYFunction::Replace(context);
    return this;
}

CYIdentifier *CYIdentifier::Replace(CYContext &context, CYIdentifierKind kind) {
    if (next_ == this)
        return this;
    if (next_ != NULL)
        return next_->Replace(context, kind);
    next_ = context.scope_->Declare(context, this, kind)->identifier_;
    return next_;
}

CYStatement *CYIf::Return() {
    CYImplicitReturn(true_);
    CYImplicitReturn(false_);
    return this;
}

CYStatement *CYIf::Replace(CYContext &context) {
    context.Replace(test_);
    context.ReplaceAll(true_);
    context.ReplaceAll(false_);
    return this;
}

CYFunctionParameter *CYIfComprehension::Parameter(CYContext &context) const {
    return NULL;
}

CYStatement *CYIfComprehension::Replace(CYContext &context, CYStatement *statement) const {
    return $ CYIf(test_, CYComprehension::Replace(context, statement));
}

CYStatement *CYImport::Replace(CYContext &context) {
    return $ CYVar($B1($B($I(module_->part_->Word()), $C1($V("require"), module_->Replace(context, "/")))));
}

CYStatement *CYImportDeclaration::Replace(CYContext &context) {
    CYIdentifier *module(context.Unique());

    CYList<CYStatement> statements;
    CYForEach (specifier, specifiers_)
        statements->*specifier->Replace(context, module);

    return $ CYBlock($$
        ->* $ CYLexical(false, $B1($B(module, $C1($V("require"), module_))))
        ->* statements);
}

CYStatement *CYImportSpecifier::Replace(CYContext &context, CYIdentifier *module) {
    binding_ = binding_->Replace(context, CYIdentifierLexical);

    CYExpression *import($V(module));
    if (name_ != NULL)
        import = $M(import, $S(name_));
    return $E($ CYAssign($V(binding_), import));
}

CYTarget *CYIndirect::Replace(CYContext &context) {
    return $M(rhs_, $S("$cyi"));
}

CYTarget *CYIndirectMember::Replace(CYContext &context) {
    return $M($ CYIndirect(object_), property_);
}

CYExpression *CYInfix::Replace(CYContext &context) {
    context.Replace(lhs_);
    context.Replace(rhs_);
    return this;
}

CYStatement *CYLabel::Replace(CYContext &context) {
    context.Replace(statement_);
    return this;
}

CYTarget *CYLambda::Replace(CYContext &context) {
    return $N2($V("Functor"), $ CYFunctionExpression(NULL, parameters_->Parameters(context), code_), parameters_->TypeSignature(context, typed_->Replace(context)));
}

CYForInitializer *CYLexical::Replace(CYContext &context) {
    if (CYExpression *expression = bindings_->Replace(context, CYIdentifierLexical))
        return $E(expression);
    return $ CYEmpty();
}

CYFunctionExpression *CYMethod::Constructor() {
    return NULL;
}

void CYMethod::Replace(CYContext &context) {
    CYFunction::Replace(context);
}

CYString *CYModule::Replace(CYContext &context, const char *separator) const {
    if (next_ == NULL)
        return $ CYString(part_);
    return $ CYString($pool.strcat(next_->Replace(context, separator)->Value(), separator, part_->Word(), NULL));
}

CYExpression *CYMultiply::Replace(CYContext &context) {
    CYInfix::Replace(context);

    if (CYNumber *lhn = lhs_->Number(context))
        if (CYNumber *rhn = rhs_->Number(context))
            return $D(lhn->Value() * rhn->Value());

    return this;
}

namespace cy {
namespace Syntax {

CYTarget *New::AddArgument(CYContext &context, CYExpression *value) {
    CYSetLast(arguments_) = $ CYArgument(value);
    return this;
}

CYTarget *New::Replace(CYContext &context) {
    context.Replace(constructor_);
    arguments_->Replace(context);
    return this;
}

} }

CYNumber *CYNull::Number(CYContext &context) {
    return $D(0);
}

CYString *CYNull::String(CYContext &context) {
    return $S("null");
}

CYNumber *CYNumber::Number(CYContext &context) {
    return this;
}

CYString *CYNumber::String(CYContext &context) {
    // XXX: there is a precise algorithm for this
    return $S($pool.sprintf(24, "%.17g", Value()));
}

CYExpression *CYNumber::PropertyName(CYContext &context) {
    return String(context);
}

CYTarget *CYObject::Replace(CYContext &context, CYTarget *seed) {
    CYBuilder builder;
    if (properties_ != NULL)
        properties_ = properties_->ReplaceAll(context, builder, $ CYThis(), seed != this);

    if (builder) {
        return $C1($M($ CYFunctionExpression(NULL, builder.bindings_->Parameter(context),
            builder.statements_
                ->* $ CYReturn($ CYThis())
        ), $S("call")), seed, builder.bindings_->Argument(context));
    }

    CYForEach (property, properties_)
        property->Replace(context);
    return seed;
}

CYTarget *CYObject::Replace(CYContext &context) {
    return Replace(context, this);
}

CYTarget *CYParenthetical::Replace(CYContext &context) {
    // XXX: return expression_;
    context.Replace(expression_);
    return this;
}

CYExpression *CYPostfix::Replace(CYContext &context) {
    context.Replace(lhs_);
    return this;
}

CYExpression *CYPrefix::Replace(CYContext &context) {
    context.Replace(rhs_);
    return this;
}

CYProperty *CYProperty::ReplaceAll(CYContext &context, CYBuilder &builder, CYExpression *self, bool update) {
    update |= Update();
    if (update)
        Replace(context, builder, self, false);
    if (next_ != NULL)
        next_ = next_->ReplaceAll(context, builder, self, update);
    return update ?
next_ : this; } void CYProperty::Replace(CYContext &context, CYBuilder &builder, CYExpression *self, bool protect) { CYExpression *name(name_->PropertyName(context)); if (name_->Computed()) { CYIdentifier *unique(context.Unique()); builder.bindings_ ->* $B1($B(unique, name)); name = $V(unique); } Replace(context, builder, self, name, protect); } bool CYProperty::Update() const { return name_->Computed(); } void CYPropertyGetter::Replace(CYContext &context, CYBuilder &builder, CYExpression *self, CYExpression *name, bool protect) { CYIdentifier *unique(context.Unique()); builder.bindings_ ->* $B1($B(unique, CYSuperize(context, $ CYFunctionExpression(NULL, parameters_, code_)))); builder.statements_ ->* CYDefineProperty(self, name, true, !protect, $ CYPropertyValue($S("get"), $V(unique))); } CYFunctionExpression *CYPropertyMethod::Constructor() { return name_->Constructor() ? $ CYFunctionExpression(NULL, parameters_, code_) : NULL; } void CYPropertyMethod::Replace(CYContext &context, CYBuilder &builder, CYExpression *self, CYExpression *name, bool protect) { CYIdentifier *unique(context.Unique()); builder.bindings_ ->* $B1($B(unique, CYSuperize(context, $ CYFunctionExpression(NULL, parameters_, code_)))); builder.statements_ ->* (!protect ? $E($ CYAssign($M(self, name), $V(unique))) : CYDefineProperty(self, name, true, !protect, $ CYPropertyValue($S("value"), $V(unique), $ CYPropertyValue($S("writable"), $ CYTrue())))); } bool CYPropertyMethod::Update() const { return true; } void CYPropertySetter::Replace(CYContext &context, CYBuilder &builder, CYExpression *self, CYExpression *name, bool protect) { CYIdentifier *unique(context.Unique()); builder.bindings_ ->* $B1($B(unique, CYSuperize(context, $ CYFunctionExpression(NULL, parameters_, code_)))); builder.statements_ ->* CYDefineProperty(self, name, true, !protect, $ CYPropertyValue($S("set"), $V(unique))); } void CYPropertyValue::Replace(CYContext &context, CYBuilder &builder, CYExpression *self, CYExpression *name, bool protect) { _assert(!protect); CYIdentifier *unique(context.Unique()); builder.bindings_ ->* $B1($B(unique, value_)); builder.statements_ ->* $E($ CYAssign($M(self, name), $V(unique))); } void CYPropertyValue::Replace(CYContext &context) { context.Replace(value_); } void CYScript::Replace(CYContext &context) { CYScope scope(false, context); context.scope_->Damage(); context.nextlocal_ = $ CYNonLocal(); context.ReplaceAll(code_); context.NonLocal(code_); scope.Close(context, code_); unsigned offset(0); for (std::vector<CYIdentifier *>::const_iterator i(context.replace_.begin()); i != context.replace_.end(); ++i) { const char *name; if (context.options_.verbose_) name = $pool.strcat("$", $pool.itoa(offset++), NULL); else { char id[8]; id[7] = '\0'; id: unsigned position(7), local(offset++ + 1); do { unsigned index(local % (sizeof(MappingSet) - 1)); local /= sizeof(MappingSet) - 1; id[--position] = MappingSet[index]; } while (local != 0); if (scope.Lookup(context, id + position) != NULL) goto id; // XXX: at some point, this could become a keyword name = $pool.strmemdup(id + position, 7 - position); } CYIdentifier *identifier(*i); _assert(identifier->next_ == identifier); identifier->next_ = $I(name); } } CYTarget *CYResolveMember::Replace(CYContext &context) { return $M($M(object_, $S("$cyr")), property_); } CYStatement *CYReturn::Replace(CYContext &context) { if (context.nonlocal_ != NULL) { CYProperty *value(value_ == NULL ? 
NULL : $ CYPropertyValue($S("$cyv"), value_)); return $ cy::Syntax::Throw($ CYObject( $ CYPropertyValue($S("$cyk"), $V(context.nonlocal_->Target(context)), value) )); } context.Replace(value_); return this; } CYTarget *CYRubyBlock::Replace(CYContext &context) { return lhs_->AddArgument(context, proc_->Replace(context)); } CYTarget *CYRubyBlock::AddArgument(CYContext &context, CYExpression *value) { return Replace(context)->AddArgument(context, value); } CYTarget *CYRubyProc::Replace(CYContext &context) { CYFunctionExpression *function($ CYFunctionExpression(NULL, parameters_, code_)); function = CYNonLocalize(context, function); function->implicit_ = true; return function; } CYScope::CYScope(bool transparent, CYContext &context) : transparent_(transparent), parent_(context.scope_), damaged_(false), shadow_(NULL), internal_(NULL) { _assert(!transparent_ || parent_ != NULL); context.scope_ = this; } void CYScope::Damage() { damaged_ = true; if (parent_ != NULL) parent_->Damage(); } CYIdentifierFlags *CYScope::Lookup(CYContext &context, const char *word) { CYForEach (i, internal_) if (strcmp(i->identifier_->Word(), word) == 0) return i; return NULL; } CYIdentifierFlags *CYScope::Lookup(CYContext &context, CYIdentifier *identifier) { return Lookup(context, identifier->Word()); } CYIdentifierFlags *CYScope::Declare(CYContext &context, CYIdentifier *identifier, CYIdentifierKind kind) { _assert(identifier->next_ == NULL || identifier->next_ == identifier); CYIdentifierFlags *existing(Lookup(context, identifier)); if (existing == NULL) internal_ = $ CYIdentifierFlags(identifier, kind, internal_); ++internal_->count_; if (existing == NULL) return internal_; if (kind == CYIdentifierGlobal); else if (existing->kind_ == CYIdentifierGlobal || existing->kind_ == CYIdentifierMagic) existing->kind_ = kind; else if (existing->kind_ == CYIdentifierLexical || kind == CYIdentifierLexical) _assert(false); else if (transparent_ && existing->kind_ == CYIdentifierArgument && kind == CYIdentifierVariable) _assert(false); // XXX: throw new SyntaxError() instead of these asserts return existing; } void CYScope::Merge(CYContext &context, const CYIdentifierFlags *flags) { _assert(flags->identifier_->next_ == flags->identifier_); CYIdentifierFlags *existing(Declare(context, flags->identifier_, flags->kind_)); flags->identifier_->next_ = existing->identifier_; existing->count_ += flags->count_; if (existing->offset_ < flags->offset_) existing->offset_ = flags->offset_; } void CYScope::Close(CYContext &context, CYStatement *&statements) { Close(context); CYList<CYBindings> bindings; CYForEach (i, internal_) if (i->kind_ == CYIdentifierVariable) bindings ->* $ CYBindings($ CYBinding(i->identifier_)); if (bindings) { CYVar *var($ CYVar(bindings)); var->SetNext(statements); statements = var; } } void CYScope::Close(CYContext &context) { context.scope_ = parent_; CYForEach (i, internal_) { _assert(i->identifier_->next_ == i->identifier_); switch (i->kind_) { case CYIdentifierLexical: { if (!damaged_) { CYIdentifier *replace(context.Unique()); replace->next_ = replace; i->identifier_->next_ = replace; i->identifier_ = replace; } if (!transparent_) i->kind_ = CYIdentifierVariable; else parent_->Declare(context, i->identifier_, CYIdentifierVariable); } break; case CYIdentifierVariable: { if (transparent_) { parent_->Declare(context, i->identifier_, i->kind_); i->kind_ = CYIdentifierGlobal; } } break; default:; } } if (damaged_) return; typedef std::multimap<unsigned, CYIdentifier *> CYIdentifierOffsetMap; CYIdentifierOffsetMap 
offsets; CYForEach (i, internal_) { _assert(i->identifier_->next_ == i->identifier_); switch (i->kind_) { case CYIdentifierArgument: case CYIdentifierVariable: offsets.insert(CYIdentifierOffsetMap::value_type(i->offset_, i->identifier_)); break; default:; } } unsigned offset(0); for (CYIdentifierOffsetMap::const_iterator i(offsets.begin()); i != offsets.end(); ++i) { if (offset < i->first) offset = i->first; CYIdentifier *identifier(i->second); if (offset >= context.replace_.size()) context.replace_.resize(offset + 1, NULL); CYIdentifier *&replace(context.replace_[offset++]); if (replace == NULL) replace = identifier; else { _assert(replace->next_ == replace); identifier->next_ = replace; } } if (parent_ == NULL) return; CYForEach (i, internal_) { switch (i->kind_) { case CYIdentifierGlobal: { if (i->offset_ < offset) i->offset_ = offset; parent_->Merge(context, i); } break; default:; } } } CYTarget *CYSubscriptMember::Replace(CYContext &context) { return $C1($M(object_, $S("$cyg")), property_); } CYElementValue *CYSpan::Replace(CYContext &context) { $T(NULL) return $ CYElementValue(expression_, $ CYElementValue(string_, next_->Replace(context))); } CYStatement *CYStatement::Return() { return this; } CYString *CYString::Concat(CYContext &context, CYString *rhs) const { size_t size(size_ + rhs->size_); char *value($ char[size + 1]); memcpy(value, value_, size_); memcpy(value + size_, rhs->value_, rhs->size_); value[size] = '\0'; return $S(value, size); } CYIdentifier *CYString::Identifier() const { if (const char *word = Word()) return $ CYIdentifier(word); return NULL; } CYNumber *CYString::Number(CYContext &context) { // XXX: there is a precise algorithm for this return NULL; } CYExpression *CYString::PropertyName(CYContext &context) { return this; } CYString *CYString::String(CYContext &context) { return this; } CYStatement *CYStructDefinition::Replace(CYContext &context) { CYTarget *target(tail_->Replace(context)); if (name_ != NULL) target = $C1($M(target, $S("withName")), $S(name_->Word())); return $ CYLexical(false, $B1($B($I($pool.strcat(name_->Word(), "$cy", NULL)), target))); } CYTarget *CYStructTail::Replace(CYContext &context) { CYList<CYElementValue> types; CYList<CYElementValue> names; CYForEach (field, fields_) { types->*$ CYElementValue(field->type_->Replace(context)); CYExpression *name; if (field->name_ == NULL) name = NULL; else name = field->name_->PropertyName(context); names->*$ CYElementValue(name); } return $N2($V("Type"), $ CYArray(types), $ CYArray(names)); } CYTarget *CYSuperAccess::Replace(CYContext &context) { return $C1($M($M($M($V(context.super_), $S("prototype")), property_), $S("bind")), $ CYThis()); } CYTarget *CYSuperCall::Replace(CYContext &context) { return $C($C1($M($V(context.super_), $S("bind")), $ CYThis()), arguments_); } CYTarget *CYSymbol::Replace(CYContext &context) { return $C1($M($V("Symbol"), $S("for")), $S(name_)); } CYStatement *CYSwitch::Replace(CYContext &context) { context.Replace(value_); clauses_->Replace(context); return this; } CYStatement *CYTarget::Initialize(CYContext &context, CYExpression *value) { if (value == NULL) return NULL; return $E($ CYAssign(this, value)); } CYTarget *CYTemplate::Replace(CYContext &context) { return $C2($M($M($M($V("String"), $S("prototype")), $S("concat")), $S("apply")), $S(""), $ CYArray($ CYElementValue(string_, spans_->Replace(context)))); } CYString *CYTemplate::String(CYContext &context) { // XXX: implement this over local concat if (spans_ != NULL) return NULL; return string_; } CYTarget 
*CYThis::Replace(CYContext &context) { if (context.this_ != NULL) return $V(context.this_->Identifier(context)); return this; } namespace cy { namespace Syntax { CYStatement *Throw::Replace(CYContext &context) { context.Replace(value_); return this; } } } CYTarget *CYTrivial::Replace(CYContext &context) { return this; } CYNumber *CYTrue::Number(CYContext &context) { return $D(1); } CYString *CYTrue::String(CYContext &context) { return $S("true"); } namespace cy { namespace Syntax { CYStatement *Try::Replace(CYContext &context) { CYScope scope(true, context); context.ReplaceAll(code_); scope.Close(context); catch_->Replace(context); finally_->Replace(context); return this; } } } CYTarget *CYTypeArrayOf::Replace_(CYContext &context, CYTarget *type) { return next_->Replace(context, $ CYCall($ CYDirectMember(type, $ CYString("arrayOf")), $ CYArgument(size_))); } CYTarget *CYTypeBlockWith::Replace_(CYContext &context, CYTarget *type) { return next_->Replace(context, $ CYCall($ CYDirectMember(type, $ CYString("blockWith")), parameters_->Argument(context))); } CYTarget *CYTypeCharacter::Replace(CYContext &context) { switch (signing_) { case CYTypeNeutral: return $V("char"); case CYTypeSigned: return $V("schar"); case CYTypeUnsigned: return $V("uchar"); default: _assert(false); } } CYTarget *CYTypeConstant::Replace_(CYContext &context, CYTarget *type) { return next_->Replace(context, $ CYCall($ CYDirectMember(type, $ CYString("constant")))); } CYStatement *CYTypeDefinition::Replace(CYContext &context) { return $ CYLexical(false, $B1($B(name_, $ CYTypeExpression(type_)))); } CYTarget *CYTypeEnum::Replace(CYContext &context) { CYList<CYProperty> properties; CYForEach (constant, constants_) properties->*$ CYPropertyValue($S(constant->name_->Word()), constant->value_); CYObject *constants($ CYObject(properties)); if (specifier_ == NULL) return $N1($V("Type"), constants); else return $C1($M(specifier_->Replace(context), $S("enumFor")), constants); } CYTarget *CYTypeError::Replace(CYContext &context) { _assert(false); return NULL; } CYTarget *CYTypeExpression::Replace(CYContext &context) { return typed_->Replace(context); } CYTarget *CYTypeFloating::Replace(CYContext &context) { switch (length_) { case 0: return $V("float"); case 1: return $V("double"); case 2: return $V("longdouble"); default: _assert(false); } } CYTarget *CYTypeInt128::Replace(CYContext &context) { return $V(signing_ == CYTypeUnsigned ? "uint128" : "int128"); } CYTarget *CYTypeIntegral::Replace(CYContext &context) { bool u(signing_ == CYTypeUnsigned); switch (length_) { case 0: return $V(u ? "ushort" : "short"); case 1: return $V(u ? "uint" : "int"); case 2: return $V(u ? "ulong" : "long"); case 3: return $V(u ? 
"ulonglong" : "longlong"); default: _assert(false); } } CYTarget *CYTypeModifier::Replace(CYContext &context, CYTarget *type) { $T(type) return Replace_(context, type); } CYTarget *CYTypeFunctionWith::Replace_(CYContext &context, CYTarget *type) { CYList<CYArgument> arguments(parameters_->Argument(context)); if (variadic_) arguments->*$C_($ CYNull()); return next_->Replace(context, $ CYCall($ CYDirectMember(type, $ CYString("functionWith")), arguments)); } CYTarget *CYTypePointerTo::Replace_(CYContext &context, CYTarget *type) { return next_->Replace(context, $ CYCall($ CYDirectMember(type, $ CYString("pointerTo")))); } CYTarget *CYTypeReference::Replace(CYContext &context) { const char *prefix; switch (kind_) { case CYTypeReferenceStruct: prefix = "$cys"; break; case CYTypeReferenceEnum: prefix = "$cye"; break; default: _assert(false); } return $V($pool.strcat(prefix, name_->Word(), NULL)); } CYTarget *CYTypeStruct::Replace(CYContext &context) { CYTarget *target(tail_->Replace(context)); if (name_ != NULL) target = $C1($M(target, $S("withName")), $S(name_->Word())); return target; } CYTarget *CYTypeVariable::Replace(CYContext &context) { return $V(name_); } CYTarget *CYTypeVoid::Replace(CYContext &context) { return $N1($V("Type"), $ CYString("v")); } CYTarget *CYTypeVolatile::Replace_(CYContext &context, CYTarget *type) { return next_->Replace(context, $ CYCall($ CYDirectMember(type, $ CYString("volatile")))); } CYTarget *CYType::Replace(CYContext &context) { return modifier_->Replace(context, specifier_->Replace(context)); } CYTypeFunctionWith *CYType::Function() { CYTypeModifier *&modifier(CYGetLast(modifier_)); if (modifier == NULL) return NULL; CYTypeFunctionWith *function(modifier->Function()); if (function == NULL) return NULL; modifier = NULL; return function; } CYArgument *CYTypedParameter::Argument(CYContext &context) { $T(NULL) return $ CYArgument(type_->Replace(context), next_->Argument(context)); } CYFunctionParameter *CYTypedParameter::Parameters(CYContext &context) { $T(NULL) return $ CYFunctionParameter($ CYBinding(name_ ? name_ : context.Unique()), next_->Parameters(context)); } CYExpression *CYTypedParameter::TypeSignature(CYContext &context, CYExpression *prefix) { $T(prefix) return next_->TypeSignature(context, $ CYAdd(prefix, type_->Replace(context))); } CYForInitializer *CYVar::Replace(CYContext &context) { if (CYExpression *expression = bindings_->Replace(context, CYIdentifierVariable)) return $E(expression); return $ CYEmpty(); } CYTarget *CYVariable::Replace(CYContext &context) { name_ = name_->Replace(context, CYIdentifierGlobal); return this; } CYFunctionParameter *CYVariable::Parameter() const { return $ CYFunctionParameter($ CYBinding(name_)); } CYStatement *CYWhile::Replace(CYContext &context) { context.Replace(test_); context.ReplaceAll(code_); return this; } CYStatement *CYWith::Replace(CYContext &context) { context.Replace(scope_); CYScope scope(true, context); scope.Damage(); context.ReplaceAll(code_); scope.Close(context); return this; } CYExpression *CYWord::PropertyName(CYContext &context) { return $S(this); }
29.865493
164
0.655026
bzxy
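The identifier-compression loop at the end of CYScript::Replace above is compact enough to be easy to misread. Below is a minimal standalone sketch of the same base-55 encoding, assuming only the MappingSet table shown in the file; MangleIdentifier is a hypothetical wrapper name, and the scope-collision retry (the goto id loop) is deliberately elided.

```cpp
// Sketch of Cycript's minified-identifier encoding: digits are drawn from
// MappingSet and written least-significant last, exactly as in the original.
#include <cstdio>
#include <string>

static const char MappingSet[] =
    "0etnirsoalfucdphmgyvbxTwSNECAFjDLkMOIBPqzRH$_WXUVGYKQJZ";

// Encode a slot offset as a short name. The +1 mirrors the original's
// `offset++ + 1`, so offset 0 maps to "e" rather than "0".
std::string MangleIdentifier(unsigned offset) {
    const unsigned base = sizeof(MappingSet) - 1;  // 55 usable characters
    char id[8];
    id[7] = '\0';
    unsigned position = 7;
    unsigned local = offset + 1;
    do {
        id[--position] = MappingSet[local % base];  // emit lowest digit
        local /= base;
    } while (local != 0);
    return std::string(id + position, 7 - position);
}

int main() {
    for (unsigned offset = 0; offset < 60; ++offset)
        std::printf("%u -> %s\n", offset, MangleIdentifier(offset).c_str());
}
```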
4fdab5cd94358d08eac7f8b041bf16d09042f0bd
5,993
cc
C++
paddle/fluid/framework/details/broadcast_op_handle.cc
SnailTowardThesun/Paddle
7a5f3f750bcbe084796f7840ae2937925432b413
[ "Apache-2.0" ]
null
null
null
paddle/fluid/framework/details/broadcast_op_handle.cc
SnailTowardThesun/Paddle
7a5f3f750bcbe084796f7840ae2937925432b413
[ "Apache-2.0" ]
null
null
null
paddle/fluid/framework/details/broadcast_op_handle.cc
SnailTowardThesun/Paddle
7a5f3f750bcbe084796f7840ae2937925432b413
[ "Apache-2.0" ]
null
null
null
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/details/broadcast_op_handle.h" #include "paddle/fluid/framework/details/container_cast.h" #include "paddle/fluid/framework/details/variable_visitor.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { namespace framework { namespace details { void BroadcastOpHandle::RunImpl() { platform::RecordEvent record_event(Name(), dev_ctxes_.begin()->second); if (places_.size() == 1) return; // The input and output may have dummy vars. VarHandle *in_var_handle; { auto in_var_handles = DynamicCast<VarHandle>(inputs_); PADDLE_ENFORCE_EQ(in_var_handles.size(), 1, "The number of input should be one."); in_var_handle = in_var_handles[0]; } auto out_var_handles = DynamicCast<VarHandle>(outputs_); PADDLE_ENFORCE_EQ( out_var_handles.size(), places_.size(), "The number of output should equal to the number of places."); WaitInputVarGenerated(); std::vector<const Scope *> var_scopes; for (auto *s : local_scopes_) { var_scopes.emplace_back(s->FindVar(kLocalExecScopeName)->Get<Scope *>()); } auto *in_var = var_scopes.at(in_var_handle->scope_idx_)->FindVar(in_var_handle->name_); PADDLE_ENFORCE_NOT_NULL(in_var); Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var); InitOutputValue(*in_var_handle, out_var_handles); if (platform::is_cpu_place(in_tensor.place())) { for (auto *out_var_handle : out_var_handles) { if (out_var_handle->IsTheSameVar(*in_var_handle)) { continue; } auto &out_p = out_var_handle->place_; auto *out_var = var_scopes.at(out_var_handle->scope_idx_) ->FindVar(out_var_handle->name_); RunAndRecordEvent(out_p, [in_tensor, out_var] { paddle::framework::TensorCopy( in_tensor, platform::CPUPlace(), &VariableVisitor::GetMutableTensor(out_var)); }); } } else { #ifdef PADDLE_WITH_CUDA VarHandle *out_handle = nullptr; int root_id = boost::get<platform::CUDAPlace>(in_tensor.place()).device; std::vector<std::function<void()>> broadcast_calls; int type = platform::ToNCCLDataType(in_tensor.type()); size_t numel = static_cast<size_t>(in_tensor.numel()); for (auto out_var_handle : out_var_handles) { Variable *out_var = var_scopes.at(out_var_handle->scope_idx_) ->FindVar(out_var_handle->name_); int dst_id = boost::get<platform::CUDAPlace>(out_var_handle->place_).device; auto &nccl_ctx = nccl_ctxs_->at(dst_id); void *send_recv_buffer = nullptr; if (root_id == dst_id) { send_recv_buffer = const_cast<void *>(in_tensor.data<void>()); out_handle = out_var_handle; } else { send_recv_buffer = VariableVisitor::GetMutableTensor(out_var) .Resize(in_tensor.dims()) .mutable_data(out_var_handle->place_); } broadcast_calls.emplace_back( [send_recv_buffer, numel, type, root_id, &nccl_ctx] { PADDLE_ENFORCE(platform::dynload::ncclBcast( send_recv_buffer, numel, static_cast<ncclDataType_t>(type), root_id, nccl_ctx.comm_, nccl_ctx.stream())); }); } this->RunAndRecordEvent([&] { { platform::NCCLGroupGuard guard; for (auto &call : broadcast_calls) { call(); } } if 
(!out_handle->IsTheSameVar(*in_var_handle)) { auto out_var = var_scopes.at(in_var_handle->scope_idx_) ->FindVar(out_var_handles[0]->name_); paddle::framework::TensorCopy( in_tensor, in_var_handle->place_, *(dev_ctxes_.at(in_var_handle->place_)), &VariableVisitor::GetMutableTensor(out_var)); } }); #else PADDLE_THROW("CUDA is not enabled."); #endif } } void BroadcastOpHandle::InitOutputValue( const VarHandle &in_var_handle, const std::vector<VarHandle *> &out_var_handles) const { std::vector<const Scope *> var_scopes; for (auto *s : local_scopes_) { var_scopes.emplace_back(s->FindVar(kLocalExecScopeName)->Get<Scope *>()); } auto *in_var = var_scopes.at(in_var_handle.scope_idx_)->FindVar(in_var_handle.name_); Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var); // NOTE: The tensors' Place of input and output must be all on GPU or all on // CPU. for (auto *out_var_handle : out_var_handles) { if (out_var_handle->IsTheSameVar(in_var_handle)) { continue; } auto t_out_p = out_var_handle->place_; auto *out_var = var_scopes.at(out_var_handle->scope_idx_) ->FindVar(out_var_handle->name_); PADDLE_ENFORCE_NOT_NULL(out_var); if (is_gpu_place(in_tensor.place())) { PADDLE_ENFORCE(platform::is_gpu_place(t_out_p), "Places of input and output must be all on GPU."); } else { t_out_p = platform::CPUPlace(); } VariableVisitor::ShareDimsAndLoD(*in_var, out_var); VariableVisitor::GetMutableTensor(out_var).mutable_data(t_out_p, in_tensor.type()); } } std::string BroadcastOpHandle::Name() const { return "broadcast"; } } // namespace details } // namespace framework } // namespace paddle
35.252941
78
0.660604
SnailTowardThesun
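BroadcastOpHandle::RunImpl above takes two paths: a per-output TensorCopy on CPU and a grouped ncclBcast on GPU. Below is a minimal CPU-only sketch of the fan-out pattern, with std::vector standing in for framework tensors; all names here are illustrative, not Paddle API.

```cpp
// One input buffer is copied into every output slot, skipping the slot that
// aliases the input (the role of the handle's IsTheSameVar check).
#include <algorithm>
#include <cstdio>
#include <vector>

using Tensor = std::vector<float>;

void BroadcastCpu(const Tensor &in, std::vector<Tensor *> &outs) {
    for (Tensor *out : outs) {
        if (out->data() == in.data())   // same variable: nothing to copy
            continue;
        out->resize(in.size());         // loosely mirrors ShareDimsAndLoD
        std::copy(in.begin(), in.end(), out->begin());
    }
}

int main() {
    Tensor src = {1.f, 2.f, 3.f};
    Tensor a, b;
    std::vector<Tensor *> outs = {&src, &a, &b};  // first slot aliases input
    BroadcastCpu(src, outs);
    std::printf("a[1]=%g b[2]=%g\n", a[1], b[2]);  // a[1]=2 b[2]=3
}
```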
4fdc6c1719a2d84e03001c2f4eb4ce38d1fc074b
4,189
cc
C++
src/mem/spm/governor/explicit_local_spm.cc
danned/gem5spm-riscv
4790fd1ec5972dae40c1871283121041296984e5
[ "BSD-3-Clause" ]
3
2019-03-26T14:51:39.000Z
2021-12-23T04:47:09.000Z
src/mem/spm/governor/explicit_local_spm.cc
danned/gem5spm-riscv
4790fd1ec5972dae40c1871283121041296984e5
[ "BSD-3-Clause" ]
null
null
null
src/mem/spm/governor/explicit_local_spm.cc
danned/gem5spm-riscv
4790fd1ec5972dae40c1871283121041296984e5
[ "BSD-3-Clause" ]
1
2019-04-01T03:22:57.000Z
2019-04-01T03:22:57.000Z
#include "mem/spm/governor/explicit_local_spm.hh" #include <iostream> ExplicitLocalSPM * ExplicitLocalSPMParams::create() { return new ExplicitLocalSPM(this); } ExplicitLocalSPM::ExplicitLocalSPM(const Params *p) : BaseGovernor(p) { gov_type = "ExplicitLocal"; } ExplicitLocalSPM::~ExplicitLocalSPM() { } void ExplicitLocalSPM::init() { } int ExplicitLocalSPM::allocate(GOVRequest *gov_request) { printRequestStatus(gov_request); const int total_num_pages = gov_request->getNumberOfPages(Unserved_Aligned); if (total_num_pages <= 0) { return 0; } int remaining_pages = total_num_pages; // just do this if we are not called by a child policy if (!gov_type.compare("ExplicitLocal") && hybrid_mem) { cache_invalidator_helper(gov_request); } // Allocate on local SPM PMMU *host_pmmu = gov_request->getPMMUPtr(); HostInfo host_info (gov_request->getThreadContext(), gov_request->getPMMUPtr(), host_pmmu, (Addr)gov_request->annotations->spm_addr, total_num_pages); host_info.setAllocMode(gov_request->getAnnotations()->alloc_mode); remaining_pages -= allocation_helper_on_free_pages(gov_request, &host_info); // just do this if we are not called by a child policy if (!gov_type.compare("ExplicitLocal") && uncacheable_spm) { add_mapping_unallocated_pages(gov_request); } assert (total_num_pages == remaining_pages); return total_num_pages - remaining_pages; } int ExplicitLocalSPM::deAllocate(GOVRequest *gov_request) { printRequestStatus(gov_request); int total_num_pages = gov_request->getNumberOfPages(Unserved_Aligned); if (total_num_pages <= 0) { return 0; } HostInfo host_info (gov_request->getThreadContext(), gov_request->getPMMUPtr(), nullptr, Addr(0), total_num_pages); host_info.setDeallocMode(gov_request->getAnnotations()->dealloc_mode); int num_removed_pages = dallocation_helper_virtual_address(gov_request, &host_info); return num_removed_pages; } int ExplicitLocalSPM::allocation_helper_on_free_pages(GOVRequest *gov_request, HostInfo *host_info) { PMMU *requester_pmmu = gov_request->getPMMUPtr(); int total_num_pages = gov_request->getNumberOfPages(Unserved_Aligned); int remaining_pages = total_num_pages; // since we are allocating explicitly, we must ensure that end_spm_addr // is not greater than max_spm_addr if ((host_info->getSPMaddress()/host_info->getHostPMMU()->getPageSizeBytes() + total_num_pages) <= host_info->getHostPMMU()->getSPMSizePages()) { int num_added_pages = requester_pmmu->addATTMappingsVAddress(gov_request, host_info); host_info->getHostPMMU()->setUsedPages(host_info->getSPMaddress(), num_added_pages, gov_request->getRequesterNodeID()); DPRINTF(GOV, "%s: Allocating %d/%d/%d free SPM slot(s) for node (%d,%d) on node (%d,%d) " "starting from slot address = %u\n", gov_type, num_added_pages, host_info->getNumPages(), total_num_pages, host_info->getUserPMMU()->getNodeID() / num_column, host_info->getUserPMMU()->getNodeID() % num_column, host_info->getHostPMMU()->getNodeID() / num_column, host_info->getHostPMMU()->getNodeID() % num_column, host_info->getSPMaddress()); gov_request->incPagesServed(host_info->getNumPages()); remaining_pages -= host_info->getNumPages(); } else { // not enough space on this SPM, allocation too large DPRINTF(GOV, "%s: Couldn't allocate %d SPM slot(s) for node (%d,%d) on node (%d,%d)\n", gov_type, remaining_pages, host_info->getUserPMMU()->getNodeID() / num_column, host_info->getUserPMMU()->getNodeID() % num_column, host_info->getHostPMMU()->getNodeID() / num_column, host_info->getHostPMMU()->getNodeID() % num_column); } return total_num_pages - remaining_pages; }
34.908333
97
0.664359
danned
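The key guard in allocation_helper_on_free_pages above is the bounds check that the explicitly requested page range still fits inside the scratchpad. A minimal sketch of just that check, under hypothetical SPM parameters:

```cpp
// An explicit allocation succeeds only if the requested range, starting at
// the page containing spm_addr, ends at or before the last SPM page.
#include <cstdio>

struct SpmConfig {
    unsigned page_size_bytes;  // bytes per SPM page
    unsigned size_pages;       // total pages in the scratchpad
};

bool FitsInSpm(const SpmConfig &spm, unsigned long spm_addr,
               unsigned num_pages) {
    unsigned long start_page = spm_addr / spm.page_size_bytes;
    return start_page + num_pages <= spm.size_pages;
}

int main() {
    SpmConfig spm{4096, 16};                        // hypothetical 64 KiB SPM
    std::printf("%d\n", FitsInSpm(spm, 0, 16));     // 1: exactly fills it
    std::printf("%d\n", FitsInSpm(spm, 4096, 16));  // 0: one page past the end
}
```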
4fdc781745bebc8001b93cc758ce45c155e059a4
693
hpp
C++
boost/boost/icl/type_traits/size.hpp
randolphwong/mcsema
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
[ "BSD-3-Clause" ]
12,278
2015-01-29T17:11:33.000Z
2022-03-31T21:12:00.000Z
boost/boost/icl/type_traits/size.hpp
randolphwong/mcsema
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
[ "BSD-3-Clause" ]
9,469
2015-01-30T05:33:07.000Z
2022-03-31T16:17:21.000Z
boost/boost/icl/type_traits/size.hpp
randolphwong/mcsema
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
[ "BSD-3-Clause" ]
892
2015-01-29T16:26:19.000Z
2022-03-20T07:44:30.000Z
/*-----------------------------------------------------------------------------+ Copyright (c) 2008-2009: Joachim Faulhaber +------------------------------------------------------------------------------+ Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENCE.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +-----------------------------------------------------------------------------*/ #ifndef BOOST_ICL_TYPE_TRAITS_SIZE_HPP_JOFA_080911 #define BOOST_ICL_TYPE_TRAITS_SIZE_HPP_JOFA_080911 #include <cstddef> // std::size_t (otherwise only available transitively) namespace boost{ namespace icl { template <class Type> struct size{ typedef std::size_t type; }; }} // namespace boost icl #endif
36.473684
84
0.489177
randolphwong
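size.hpp above is a one-trait header: the primary template associates std::size_t with every type, and a container with a different size concept can specialize it. A minimal self-contained sketch of the pattern, including a hypothetical specialization (boost itself is not required for the illustration):

```cpp
// Metafunction pattern from size.hpp: the nested typedef `type` is the
// "associated size type" of Type, overridable by explicit specialization.
#include <cstddef>
#include <type_traits>

template <class Type>
struct size { typedef std::size_t type; };

struct SmallBuffer {};  // hypothetical type whose size fits in unsigned char

template <>
struct size<SmallBuffer> { typedef unsigned char type; };

static_assert(std::is_same<size<int>::type, std::size_t>::value,
              "primary template defaults to std::size_t");
static_assert(std::is_same<size<SmallBuffer>::type, unsigned char>::value,
              "specialization overrides the associated size type");

int main() {}
```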
4fdc7935166d58b7a66106112a6a2a1f4ae9af45
8,768
cc
C++
lite/kernels/host/generate_proposals_compute.cc
wanglei91/Paddle-Lite
8b2479f4cdd6970be507203d791bede5a453c09d
[ "Apache-2.0" ]
1,799
2019-08-19T03:29:38.000Z
2022-03-31T14:30:50.000Z
lite/kernels/host/generate_proposals_compute.cc
wanglei91/Paddle-Lite
8b2479f4cdd6970be507203d791bede5a453c09d
[ "Apache-2.0" ]
3,767
2019-08-19T03:36:04.000Z
2022-03-31T14:37:26.000Z
lite/kernels/host/generate_proposals_compute.cc
wanglei91/Paddle-Lite
8b2479f4cdd6970be507203d791bede5a453c09d
[ "Apache-2.0" ]
798
2019-08-19T02:28:23.000Z
2022-03-31T08:31:54.000Z
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "lite/kernels/host/generate_proposals_compute.h" #include <string> #include <utility> #include <vector> #include "lite/backends/host/math/bbox_util.h" #include "lite/backends/host/math/gather.h" #include "lite/backends/host/math/nms_util.h" #include "lite/backends/host/math/transpose.h" #include "lite/core/op_registry.h" namespace paddle { namespace lite { namespace kernels { namespace host { std::pair<Tensor, Tensor> ProposalForOneImage( const Tensor &im_info_slice, const Tensor &anchors, const Tensor &variances, // H * W * A * 4 const Tensor &bbox_deltas_slice, // [A, 4] const Tensor &scores_slice, // [A, 1] int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta) { // sort scores_slice Tensor index_t; index_t.Resize(std::vector<int64_t>({scores_slice.numel()})); auto *index = index_t.mutable_data<int>(); for (int i = 0; i < index_t.numel(); i++) { index[i] = i; } auto *scores_data = scores_slice.data<float>(); auto compare_func = [scores_data](const int64_t &i, const int64_t &j) { return scores_data[i] > scores_data[j]; }; if (pre_nms_top_n <= 0 || pre_nms_top_n >= scores_slice.numel()) { std::stable_sort(index, index + scores_slice.numel(), compare_func); } else { std::nth_element(index, index + pre_nms_top_n, index + scores_slice.numel(), compare_func); index_t.Resize({pre_nms_top_n}); } Tensor scores_sel, bbox_sel, anchor_sel, var_sel; scores_sel.Resize(std::vector<int64_t>({index_t.numel(), 1})); bbox_sel.Resize(std::vector<int64_t>({index_t.numel(), 4})); anchor_sel.Resize(std::vector<int64_t>({index_t.numel(), 4})); var_sel.Resize(std::vector<int64_t>({index_t.numel(), 4})); lite::host::math::Gather<float>(scores_slice, index_t, &scores_sel); lite::host::math::Gather<float>(bbox_deltas_slice, index_t, &bbox_sel); lite::host::math::Gather<float>(anchors, index_t, &anchor_sel); lite::host::math::Gather<float>(variances, index_t, &var_sel); Tensor proposals; proposals.Resize(std::vector<int64_t>({index_t.numel(), 4})); lite::host::math::BoxCoder<float>( &anchor_sel, &bbox_sel, &var_sel, &proposals); lite::host::math::ClipTiledBoxes<float>( im_info_slice, proposals, &proposals, false); Tensor keep; lite::host::math::FilterBoxes<float>( &proposals, min_size, im_info_slice, true, &keep); Tensor scores_filter; scores_filter.Resize(std::vector<int64_t>({keep.numel(), 1})); bbox_sel.Resize(std::vector<int64_t>({keep.numel(), 4})); lite::host::math::Gather<float>(scores_sel, keep, &scores_filter); lite::host::math::Gather<float>(proposals, keep, &bbox_sel); if (nms_thresh <= 0) { return std::make_pair(bbox_sel, scores_filter); } Tensor keep_nms = lite::host::math::NMS<float>(&bbox_sel, &scores_filter, nms_thresh, eta); if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) { keep_nms.Resize(std::vector<int64_t>({post_nms_top_n})); } proposals.Resize(std::vector<int64_t>({keep_nms.numel(), 4})); 
scores_sel.Resize(std::vector<int64_t>({keep_nms.numel(), 1})); lite::host::math::Gather<float>(bbox_sel, keep_nms, &proposals); lite::host::math::Gather<float>(scores_filter, keep_nms, &scores_sel); return std::make_pair(proposals, scores_sel); } void GenerateProposalsCompute::Run() { auto &param = Param<param_t>(); auto *scores = param.Scores; // N * A * H * W auto *bbox_deltas = param.BboxDeltas; // N * 4A * H * W auto *im_info = param.ImInfo; // N * 3 auto *anchors = param.Anchors; // H * W * A * 4 auto *variances = param.Variances; // H * W * A * 4 auto *rpn_rois = param.RpnRois; // A * 4 auto *rpn_roi_probs = param.RpnRoiProbs; // A * 1 int pre_nms_top_n = param.pre_nms_topN; int post_nms_top_n = param.post_nms_topN; float nms_thresh = param.nms_thresh; float min_size = param.min_size; float eta = param.eta; auto &scores_dim = scores->dims(); int64_t num = scores_dim[0]; int64_t c_score = scores_dim[1]; int64_t h_score = scores_dim[2]; int64_t w_score = scores_dim[3]; auto &bbox_dim = bbox_deltas->dims(); int64_t c_bbox = bbox_dim[1]; int64_t h_bbox = bbox_dim[2]; int64_t w_bbox = bbox_dim[3]; rpn_rois->Resize({bbox_deltas->numel(), 4}); rpn_roi_probs->Resize(std::vector<int64_t>({scores->numel(), 1})); Tensor bbox_deltas_swap, scores_swap; scores_swap.Resize(std::vector<int64_t>({num, h_score, w_score, c_score})); bbox_deltas_swap.Resize(std::vector<int64_t>({num, h_bbox, w_bbox, c_bbox})); std::vector<int> orders({0, 2, 3, 1}); lite::host::math::Transpose<float>(*scores, &scores_swap, orders); lite::host::math::Transpose<float>(*bbox_deltas, &bbox_deltas_swap, orders); LoD lod; lod.resize(1); auto &lod0 = lod[0]; lod0.push_back(0); anchors->Resize(std::vector<int64_t>({anchors->numel() / 4, 4})); variances->Resize(std::vector<int64_t>({variances->numel() / 4, 4})); std::vector<int64_t> tmp_lod; std::vector<int64_t> tmp_num; int64_t num_proposals = 0; for (int64_t i = 0; i < num; ++i) { Tensor im_info_slice = im_info->Slice<float>(i, i + 1); Tensor bbox_deltas_slice = bbox_deltas_swap.Slice<float>(i, i + 1); Tensor scores_slice = scores_swap.Slice<float>(i, i + 1); bbox_deltas_slice.Resize( std::vector<int64_t>({c_bbox * h_bbox * w_bbox / 4, 4})); scores_slice.Resize(std::vector<int64_t>({c_score * h_score * w_score, 1})); std::pair<Tensor, Tensor> tensor_pair = ProposalForOneImage(im_info_slice, *anchors, *variances, bbox_deltas_slice, scores_slice, pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, eta); Tensor &proposals = tensor_pair.first; Tensor &scores = tensor_pair.second; lite::host::math::AppendTensor<float>( rpn_rois, 4 * num_proposals, proposals); lite::host::math::AppendTensor<float>(rpn_roi_probs, num_proposals, scores); num_proposals += proposals.dims()[0]; lod0.push_back(num_proposals); tmp_lod.push_back(num_proposals); tmp_num.push_back(proposals.dims()[0]); } if (param.RpnRoisLod != nullptr) { param.RpnRoisLod->Resize(DDim(std::vector<DDim::value_type>({num}))); int64_t *lod_data = param.RpnRoisLod->mutable_data<int64_t>(); for (int i = 0; i < num; i++) { lod_data[i] = tmp_lod[i]; } } if (param.RpnRoisNum != nullptr) { param.RpnRoisNum->Resize(DDim(std::vector<DDim::value_type>({num}))); int64_t *num_data = param.RpnRoisNum->mutable_data<int64_t>(); for (int i = 0; i < num; i++) { num_data[i] = tmp_num[i]; } } rpn_rois->set_lod(lod); rpn_roi_probs->set_lod(lod); rpn_rois->Resize({num_proposals, 4}); rpn_roi_probs->Resize({num_proposals, 1}); } } // namespace host } // namespace kernels } // namespace lite } // namespace paddle 
REGISTER_LITE_KERNEL(generate_proposals, kHost, kFloat, kNCHW, paddle::lite::kernels::host::GenerateProposalsCompute, def) .BindInput("Scores", {LiteType::GetTensorTy(TARGET(kHost))}) .BindInput("BboxDeltas", {LiteType::GetTensorTy(TARGET(kHost))}) .BindInput("ImInfo", {LiteType::GetTensorTy(TARGET(kHost))}) .BindInput("Anchors", {LiteType::GetTensorTy(TARGET(kHost))}) .BindInput("Variances", {LiteType::GetTensorTy(TARGET(kHost))}) .BindOutput("RpnRois", {LiteType::GetTensorTy(TARGET(kHost))}) .BindOutput("RpnRoiProbs", {LiteType::GetTensorTy(TARGET(kHost))}) .BindOutput("RpnRoisLod", {LiteType::GetTensorTy(TARGET(kHost), PRECISION(kInt64))}) .BindOutput("RpnRoisNum", {LiteType::GetTensorTy(TARGET(kHost), PRECISION(kInt64))}) .Finalize();
38.625551
80
0.649293
wanglei91
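ProposalForOneImage above orders candidate boxes through an index array: a full std::stable_sort when every box survives pre-NMS, or a cheaper std::nth_element partial selection when only the top pre_nms_top_n boxes are kept. A minimal sketch of just that selection step, with plain std::vector in place of lite::Tensor:

```cpp
// Returns the indices of the top_k highest-scoring entries. When top_k is
// non-positive or covers everything, all indices are returned fully sorted,
// matching the branch structure in ProposalForOneImage.
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

std::vector<int> TopKIndices(const std::vector<float> &scores, int top_k) {
    std::vector<int> index(scores.size());
    std::iota(index.begin(), index.end(), 0);
    auto by_score = [&scores](int i, int j) { return scores[i] > scores[j]; };
    if (top_k <= 0 || top_k >= static_cast<int>(scores.size())) {
        std::stable_sort(index.begin(), index.end(), by_score);
    } else {
        // Partition so the first top_k indices hold the highest scores;
        // order within the top-k group is unspecified.
        std::nth_element(index.begin(), index.begin() + top_k, index.end(),
                         by_score);
        index.resize(top_k);
    }
    return index;
}

int main() {
    std::vector<float> scores = {0.1f, 0.9f, 0.4f, 0.7f};
    for (int i : TopKIndices(scores, 2))
        std::printf("%d ", i);  // indices 1 and 3, in unspecified order
    std::printf("\n");
}
```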
4fdcc9f7ff1c8b0928366d3a70940ed2cf2e80a4
30,056
cpp
C++
Source/core/svg/SVGUseElement.cpp
scheib/blink
d18c0cc5b4e96ea87c556bfa57955538de498a9f
[ "BSD-3-Clause" ]
1
2017-08-25T05:15:52.000Z
2017-08-25T05:15:52.000Z
Source/core/svg/SVGUseElement.cpp
scheib/blink
d18c0cc5b4e96ea87c556bfa57955538de498a9f
[ "BSD-3-Clause" ]
null
null
null
Source/core/svg/SVGUseElement.cpp
scheib/blink
d18c0cc5b4e96ea87c556bfa57955538de498a9f
[ "BSD-3-Clause" ]
null
null
null
/* * Copyright (C) 2004, 2005, 2006, 2007, 2008 Nikolas Zimmermann <zimmermann@kde.org> * Copyright (C) 2004, 2005, 2006, 2007 Rob Buis <buis@kde.org> * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved. * Copyright (C) 2011 Torch Mobile (Beijing) Co. Ltd. All rights reserved. * Copyright (C) 2012 University of Szeged * Copyright (C) 2012 Renata Hodovan <reni@webkit.org> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. */ #include "config.h" #include "core/svg/SVGUseElement.h" #include "bindings/core/v8/ExceptionStatePlaceholder.h" #include "core/XLinkNames.h" #include "core/dom/Document.h" #include "core/dom/ElementTraversal.h" #include "core/events/Event.h" #include "core/dom/shadow/ElementShadow.h" #include "core/dom/shadow/ShadowRoot.h" #include "core/fetch/FetchRequest.h" #include "core/fetch/ResourceFetcher.h" #include "core/rendering/svg/RenderSVGTransformableContainer.h" #include "core/svg/SVGGElement.h" #include "core/svg/SVGLengthContext.h" #include "core/svg/SVGSVGElement.h" #include "core/xml/parser/XMLDocumentParser.h" namespace blink { inline SVGUseElement::SVGUseElement(Document& document) : SVGGraphicsElement(SVGNames::useTag, document) , m_x(SVGAnimatedLength::create(this, SVGNames::xAttr, SVGLength::create(LengthModeWidth), AllowNegativeLengths)) , m_y(SVGAnimatedLength::create(this, SVGNames::yAttr, SVGLength::create(LengthModeHeight), AllowNegativeLengths)) , m_width(SVGAnimatedLength::create(this, SVGNames::widthAttr, SVGLength::create(LengthModeWidth), ForbidNegativeLengths)) , m_height(SVGAnimatedLength::create(this, SVGNames::heightAttr, SVGLength::create(LengthModeHeight), ForbidNegativeLengths)) , m_haveFiredLoadEvent(false) , m_needsShadowTreeRecreation(false) , m_svgLoadEventTimer(this, &SVGElement::svgLoadEventTimerFired) { SVGURIReference::initialize(this); ASSERT(hasCustomStyleCallbacks()); addToPropertyMap(m_x); addToPropertyMap(m_y); addToPropertyMap(m_width); addToPropertyMap(m_height); } PassRefPtrWillBeRawPtr<SVGUseElement> SVGUseElement::create(Document& document) { // Always build a user agent #shadow-root for SVGUseElement. 
RefPtrWillBeRawPtr<SVGUseElement> use = adoptRefWillBeNoop(new SVGUseElement(document)); use->ensureUserAgentShadowRoot(); return use.release(); } SVGUseElement::~SVGUseElement() { setDocumentResource(0); #if !ENABLE(OILPAN) clearResourceReferences(); #endif } void SVGUseElement::trace(Visitor* visitor) { visitor->trace(m_x); visitor->trace(m_y); visitor->trace(m_width); visitor->trace(m_height); visitor->trace(m_targetElementInstance); SVGGraphicsElement::trace(visitor); SVGURIReference::trace(visitor); } bool SVGUseElement::isSupportedAttribute(const QualifiedName& attrName) { DEFINE_STATIC_LOCAL(HashSet<QualifiedName>, supportedAttributes, ()); if (supportedAttributes.isEmpty()) { SVGURIReference::addSupportedAttributes(supportedAttributes); supportedAttributes.add(SVGNames::xAttr); supportedAttributes.add(SVGNames::yAttr); supportedAttributes.add(SVGNames::widthAttr); supportedAttributes.add(SVGNames::heightAttr); } return supportedAttributes.contains<SVGAttributeHashTranslator>(attrName); } void SVGUseElement::parseAttribute(const QualifiedName& name, const AtomicString& value) { parseAttributeNew(name, value); } #if ENABLE(ASSERT) static inline bool isWellFormedDocument(Document* document) { if (document->isXMLDocument()) return static_cast<XMLDocumentParser*>(document->parser())->wellFormed(); return true; } #endif Node::InsertionNotificationRequest SVGUseElement::insertedInto(ContainerNode* rootParent) { // This functions exists to assure assumptions made in the code regarding SVGElementInstance creation/destruction are satisfied. SVGGraphicsElement::insertedInto(rootParent); if (!rootParent->inDocument()) return InsertionDone; ASSERT(!m_targetElementInstance || !isWellFormedDocument(&document())); ASSERT(!hasPendingResources() || !isWellFormedDocument(&document())); invalidateShadowTree(); if (!isStructurallyExternal()) sendSVGLoadEventIfPossibleAsynchronously(); return InsertionDone; } void SVGUseElement::removedFrom(ContainerNode* rootParent) { SVGGraphicsElement::removedFrom(rootParent); if (rootParent->inDocument()) clearResourceReferences(); } TreeScope* SVGUseElement::referencedScope() const { if (!isExternalURIReference(hrefString(), document())) return &treeScope(); return externalDocument(); } Document* SVGUseElement::externalDocument() const { if (m_resource && m_resource->isLoaded()) { // Gracefully handle error condition. if (m_resource->errorOccurred()) return 0; ASSERT(m_resource->document()); return m_resource->document(); } return 0; } void transferUseWidthAndHeightIfNeeded(const SVGUseElement& use, SVGElement* shadowElement, const SVGElement& originalElement) { DEFINE_STATIC_LOCAL(const AtomicString, hundredPercentString, ("100%", AtomicString::ConstructFromLiteral)); ASSERT(shadowElement); if (isSVGSymbolElement(*shadowElement)) { // Spec (<use> on <symbol>): This generated 'svg' will always have explicit values for attributes width and height. // If attributes width and/or height are provided on the 'use' element, then these attributes // will be transferred to the generated 'svg'. If attributes width and/or height are not specified, // the generated 'svg' element will use values of 100% for these attributes. shadowElement->setAttribute(SVGNames::widthAttr, use.width()->isSpecified() ? AtomicString(use.width()->currentValue()->valueAsString()) : hundredPercentString); shadowElement->setAttribute(SVGNames::heightAttr, use.height()->isSpecified() ? 
AtomicString(use.height()->currentValue()->valueAsString()) : hundredPercentString); } else if (isSVGSVGElement(*shadowElement)) { // Spec (<use> on <svg>): If attributes width and/or height are provided on the 'use' element, then these // values will override the corresponding attributes on the 'svg' in the generated tree. if (use.width()->isSpecified()) shadowElement->setAttribute(SVGNames::widthAttr, AtomicString(use.width()->currentValue()->valueAsString())); else shadowElement->setAttribute(SVGNames::widthAttr, originalElement.getAttribute(SVGNames::widthAttr)); if (use.height()->isSpecified()) shadowElement->setAttribute(SVGNames::heightAttr, AtomicString(use.height()->currentValue()->valueAsString())); else shadowElement->setAttribute(SVGNames::heightAttr, originalElement.getAttribute(SVGNames::heightAttr)); } } void SVGUseElement::svgAttributeChanged(const QualifiedName& attrName) { if (!isSupportedAttribute(attrName)) { SVGGraphicsElement::svgAttributeChanged(attrName); return; } SVGElement::InvalidationGuard invalidationGuard(this); RenderObject* renderer = this->renderer(); if (attrName == SVGNames::xAttr || attrName == SVGNames::yAttr || attrName == SVGNames::widthAttr || attrName == SVGNames::heightAttr) { updateRelativeLengthsInformation(); if (m_targetElementInstance) { ASSERT(m_targetElementInstance->correspondingElement()); transferUseWidthAndHeightIfNeeded(*this, m_targetElementInstance.get(), *m_targetElementInstance->correspondingElement()); } if (renderer) markForLayoutAndParentResourceInvalidation(renderer); return; } if (SVGURIReference::isKnownAttribute(attrName)) { bool isExternalReference = isExternalURIReference(hrefString(), document()); if (isExternalReference) { KURL url = document().completeURL(hrefString()); if (url.hasFragmentIdentifier()) { FetchRequest request(ResourceRequest(url), localName()); setDocumentResource(document().fetcher()->fetchSVGDocument(request)); } } else { setDocumentResource(0); } invalidateShadowTree(); return; } if (!renderer) return; ASSERT_NOT_REACHED(); } static bool isDisallowedElement(Node* node) { // Spec: "Any 'svg', 'symbol', 'g', graphics element or other 'use' is potentially a template object that can be re-used // (i.e., "instanced") in the SVG document via a 'use' element." // "Graphics Element" is defined as 'circle', 'ellipse', 'image', 'line', 'path', 'polygon', 'polyline', 'rect', 'text' // Excluded are anything that is used by reference or that only make sense to appear once in a document. // We must also allow the shadow roots of other use elements. 
if (node->isShadowRoot() || node->isTextNode())
    return false;

if (!node->isSVGElement())
    return true;

Element* element = toElement(node);

DEFINE_STATIC_LOCAL(HashSet<QualifiedName>, allowedElementTags, ());
if (allowedElementTags.isEmpty()) {
    allowedElementTags.add(SVGNames::aTag);
    allowedElementTags.add(SVGNames::circleTag);
    allowedElementTags.add(SVGNames::descTag);
    allowedElementTags.add(SVGNames::ellipseTag);
    allowedElementTags.add(SVGNames::gTag);
    allowedElementTags.add(SVGNames::imageTag);
    allowedElementTags.add(SVGNames::lineTag);
    allowedElementTags.add(SVGNames::metadataTag);
    allowedElementTags.add(SVGNames::pathTag);
    allowedElementTags.add(SVGNames::polygonTag);
    allowedElementTags.add(SVGNames::polylineTag);
    allowedElementTags.add(SVGNames::rectTag);
    allowedElementTags.add(SVGNames::svgTag);
    allowedElementTags.add(SVGNames::switchTag);
    allowedElementTags.add(SVGNames::symbolTag);
    allowedElementTags.add(SVGNames::textTag);
    allowedElementTags.add(SVGNames::textPathTag);
    allowedElementTags.add(SVGNames::titleTag);
    allowedElementTags.add(SVGNames::tspanTag);
    allowedElementTags.add(SVGNames::useTag);
}
return !allowedElementTags.contains<SVGAttributeHashTranslator>(element->tagQName());
}

static bool subtreeContainsDisallowedElement(Node* start)
{
    if (isDisallowedElement(start))
        return true;

    for (Node* cur = start->firstChild(); cur; cur = cur->nextSibling()) {
        if (subtreeContainsDisallowedElement(cur))
            return true;
    }

    return false;
}

void SVGUseElement::scheduleShadowTreeRecreation()
{
    if (!referencedScope() || inUseShadowTree())
        return;
    m_needsShadowTreeRecreation = true;
    document().scheduleUseShadowTreeUpdate(*this);
}

void SVGUseElement::clearResourceReferences()
{
    m_targetElementInstance = nullptr;

    // FIXME: We should try to optimize this, to at least allow partial reclones.
    if (ShadowRoot* shadowTreeRootElement = userAgentShadowRoot())
        shadowTreeRootElement->removeChildren(OmitSubtreeModifiedEvent);

    m_needsShadowTreeRecreation = false;
    document().unscheduleUseShadowTreeUpdate(*this);

    removeAllOutgoingReferences();
}

void SVGUseElement::buildPendingResource()
{
    if (!referencedScope() || inUseShadowTree())
        return;
    clearResourceReferences();
    if (!inDocument())
        return;

    AtomicString id;
    Element* target = SVGURIReference::targetElementFromIRIString(hrefString(), treeScope(), &id, externalDocument());
    if (!target || !target->inDocument()) {
        // If we can't find the target of an external element, just give up.
        // We can't observe if the target ever enters the external document, nor should we try to.
        if (externalDocument())
            return;
        if (id.isEmpty())
            return;

        referencedScope()->document().accessSVGExtensions().addPendingResource(id, this);
        ASSERT(hasPendingResources());
        return;
    }

    if (target->isSVGElement()) {
        buildShadowAndInstanceTree(toSVGElement(target));
        invalidateDependentShadowTrees();
    }

    ASSERT(!m_needsShadowTreeRecreation);
}

static PassRefPtrWillBeRawPtr<Node> cloneNodeAndAssociate(Node& toClone)
{
    RefPtrWillBeRawPtr<Node> clone = toClone.cloneNode(false);
    if (!clone->isSVGElement())
        return clone.release();

    SVGElement& svgElement = toSVGElement(toClone);
    ASSERT(!svgElement.correspondingElement());
    toSVGElement(clone.get())->setCorrespondingElement(&svgElement);

    if (EventTargetData* data = toClone.eventTargetData())
        data->eventListenerMap.copyEventListenersNotCreatedFromMarkupToTarget(clone.get());

    TrackExceptionState exceptionState;
    for (Node* node = toClone.firstChild(); node && !exceptionState.hadException(); node = node->nextSibling())
        clone->appendChild(cloneNodeAndAssociate(*node), exceptionState);

    return clone.release();
}

void SVGUseElement::buildShadowAndInstanceTree(SVGElement* target)
{
    ASSERT(!m_targetElementInstance);

    // <use> creates a "user agent" shadow root. Do not build the shadow/instance tree for <use>
    // elements living in a user agent shadow tree because they will get expanded in a second
    // pass -- see expandUseElementsInShadowTree().
    if (inUseShadowTree())
        return;

    // Do not allow self-referencing.
    // 'target' may be null, if it's a non-SVG namespaced element.
    if (!target || target == this)
        return;

    // Set up root SVG element in shadow tree.
    RefPtrWillBeRawPtr<Element> newChild = target->cloneElementWithoutChildren();
    m_targetElementInstance = toSVGElement(newChild.get());
    ShadowRoot* shadowTreeRootElement = userAgentShadowRoot();
    shadowTreeRootElement->appendChild(newChild.release());

    // Clone the target subtree into the shadow tree, not handling <use> and <symbol> yet.

    // SVG specification does not say a word about <use> & cycles. My view on this is: just ignore it!
    // Non-appearing <use> content is easier to debug than half-appearing content.
    if (!buildShadowTree(target, m_targetElementInstance.get(), false)) {
        clearResourceReferences();
        return;
    }

    if (instanceTreeIsLoading(m_targetElementInstance.get()))
        return;

    // Ensure shadow tree building was successful.
    ASSERT(m_targetElementInstance);
    ASSERT(m_targetElementInstance->correspondingUseElement() == this);
    ASSERT(m_targetElementInstance->correspondingElement() == target);

    // Expand all <use> elements in the shadow tree.
    // Expand means: replace the actual <use> element by what it references.
    if (!expandUseElementsInShadowTree(m_targetElementInstance.get())) {
        clearResourceReferences();
        return;
    }

    // Expand all <symbol> elements in the shadow tree.
    // Expand means: replace the actual <symbol> element by the <svg> element.
    expandSymbolElementsInShadowTree(toSVGElement(shadowTreeRootElement->firstChild()));

    m_targetElementInstance = toSVGElement(shadowTreeRootElement->firstChild());
    transferUseWidthAndHeightIfNeeded(*this, m_targetElementInstance.get(), *m_targetElementInstance->correspondingElement());

    ASSERT(m_targetElementInstance->parentNode() == shadowTreeRootElement);

    // Update relative length information.
    updateRelativeLengthsInformation();
}

RenderObject* SVGUseElement::createRenderer(RenderStyle*)
{
    return new RenderSVGTransformableContainer(this);
}

static bool isDirectReference(const SVGElement& element)
{
    return isSVGPathElement(element)
        || isSVGRectElement(element)
        || isSVGCircleElement(element)
        || isSVGEllipseElement(element)
        || isSVGPolygonElement(element)
        || isSVGPolylineElement(element)
        || isSVGTextElement(element);
}

void SVGUseElement::toClipPath(Path& path)
{
    ASSERT(path.isEmpty());

    Node* n = userAgentShadowRoot()->firstChild();
    if (!n || !n->isSVGElement())
        return;
    SVGElement& element = toSVGElement(*n);

    if (element.isSVGGraphicsElement()) {
        if (!isDirectReference(element)) {
            // Spec: Indirect references are an error (14.3.5)
            document().accessSVGExtensions().reportError("Not allowed to use indirect reference in <clip-path>");
        } else {
            toSVGGraphicsElement(element).toClipPath(path);
            // FIXME: Avoid manual resolution of x/y here. It's potentially harmful.
            SVGLengthContext lengthContext(this);
            path.translate(FloatSize(m_x->currentValue()->value(lengthContext), m_y->currentValue()->value(lengthContext)));
            path.transform(calculateAnimatedLocalTransform());
        }
    }
}

RenderObject* SVGUseElement::rendererClipChild() const
{
    if (Node* n = userAgentShadowRoot()->firstChild()) {
        if (n->isSVGElement() && isDirectReference(toSVGElement(*n)))
            return n->renderer();
    }

    return 0;
}

bool SVGUseElement::buildShadowTree(SVGElement* target, SVGElement* targetInstance, bool foundUse)
{
    ASSERT(target);
    ASSERT(targetInstance);

    // Spec: If the referenced object is itself a 'use', or if there are 'use' subelements within the referenced
    // object, the instance tree will contain recursive expansion of the indirect references to form a complete tree.
    if (isSVGUseElement(*target)) {
        // We only need to track first degree <use> dependencies. Indirect references are handled
        // as the invalidation bubbles up the dependency chain.
        if (!foundUse && !isStructurallyExternal()) {
            addReferenceTo(target);
            foundUse = true;
        }
    } else if (isDisallowedElement(target)) {
        return false;
    }

    targetInstance->setCorrespondingElement(target);

    if (EventTargetData* data = target->eventTargetData())
        data->eventListenerMap.copyEventListenersNotCreatedFromMarkupToTarget(targetInstance);

    for (Node* child = target->firstChild(); child; child = child->nextSibling()) {
        // Skip any disallowed element.
        if (isDisallowedElement(child))
            continue;

        RefPtrWillBeRawPtr<Node> newChild = child->cloneNode(false);
        targetInstance->appendChild(newChild.get());
        if (newChild->isSVGElement()) {
            // Enter recursion, appending new instance tree nodes to the "instance" object.
            if (!buildShadowTree(toSVGElement(child), toSVGElement(newChild), foundUse))
                return false;
        }
    }

    return true;
}

bool SVGUseElement::hasCycleUseReferencing(SVGUseElement* use, ContainerNode* targetInstance, SVGElement*& newTarget)
{
    ASSERT(referencedScope());
    Element* targetElement = SVGURIReference::targetElementFromIRIString(use->hrefString(), *referencedScope());
    newTarget = 0;
    if (targetElement && targetElement->isSVGElement())
        newTarget = toSVGElement(targetElement);

    if (!newTarget)
        return false;

    // Shortcut for self-references
    if (newTarget == this)
        return true;

    AtomicString targetId = newTarget->getIdAttribute();
    ContainerNode* instance = targetInstance->parentNode();
    while (instance && instance->isSVGElement()) {
        SVGElement* element = toSVGElement(instance);
        if (element->hasID() && element->getIdAttribute() == targetId && element->document() == newTarget->document())
            return true;

        instance = instance->parentNode();
    }

    return false;
}

static inline void removeDisallowedElementsFromSubtree(Element& subtree)
{
    ASSERT(!subtree.inDocument());
    Element* element = ElementTraversal::firstWithin(subtree);
    while (element) {
        if (isDisallowedElement(element)) {
            Element* next = ElementTraversal::nextSkippingChildren(*element, &subtree);
            // The subtree is not in document so this won't generate events that could mutate the tree.
            element->parentNode()->removeChild(element);
            element = next;
        } else {
            element = ElementTraversal::next(*element, &subtree);
        }
    }
}

bool SVGUseElement::expandUseElementsInShadowTree(SVGElement* element)
{
    ASSERT(element);
    // Why expand the <use> elements in the shadow tree here, and not just
    // do this directly in buildShadowTree, if we encounter a <use> element?
    //
    // Short answer: Because we might otherwise fail to expand some elements. For example, if a <symbol>
    // contains <use> tags, we'd miss them. So once we're done with setting up the
    // actual shadow tree (after the special case modification for svg/symbol) we have
    // to walk it completely and expand all <use> elements.
    if (isSVGUseElement(*element)) {
        SVGUseElement* use = toSVGUseElement(element);
        ASSERT(!use->resourceIsStillLoading());

        SVGElement* target = 0;
        if (hasCycleUseReferencing(toSVGUseElement(use->correspondingElement()), use, target))
            return false;

        if (target && isDisallowedElement(target))
            return false;
        // Don't ASSERT(target) here, it may be "pending", too.
        // Set up sub-shadow tree root node
        RefPtrWillBeRawPtr<SVGGElement> cloneParent = SVGGElement::create(referencedScope()->document());
        cloneParent->setCorrespondingElement(use->correspondingElement());

        // Move already cloned elements to the new <g> element
        for (Node* child = use->firstChild(); child; ) {
            Node* nextChild = child->nextSibling();
            cloneParent->appendChild(child);
            child = nextChild;
        }

        // Spec: In the generated content, the 'use' will be replaced by 'g', where all attributes from the
        // 'use' element except for x, y, width, height and xlink:href are transferred to the generated 'g' element.
        transferUseAttributesToReplacedElement(use, cloneParent.get());

        if (target) {
            RefPtrWillBeRawPtr<Node> newChild = cloneNodeAndAssociate(*target);
            ASSERT(newChild->isSVGElement());
            transferUseWidthAndHeightIfNeeded(*use, toSVGElement(newChild.get()), *target);
            cloneParent->appendChild(newChild.release());
        }

        // We don't walk the target tree element-by-element, and clone each element,
        // but instead use cloneElementWithChildren(). This is an optimization for the common
        // case where <use> doesn't contain disallowed elements (i.e. <foreignObject>).
        // Though if there are disallowed elements in the subtree, we have to remove them.
        // For instance: <use> on <g> containing <foreignObject> (indirect case).
        if (subtreeContainsDisallowedElement(cloneParent.get()))
            removeDisallowedElementsFromSubtree(*cloneParent);

        RefPtrWillBeRawPtr<SVGElement> replacingElement(cloneParent.get());

        // Replace <use> with referenced content.
        ASSERT(use->parentNode());
        use->parentNode()->replaceChild(cloneParent.release(), use);

        // Expand the siblings because the *element* is replaced and we will
        // lose the sibling chain when we are back from recursion.
        element = replacingElement.get();
        for (RefPtrWillBeRawPtr<SVGElement> sibling = Traversal<SVGElement>::nextSibling(*element); sibling; sibling = Traversal<SVGElement>::nextSibling(*sibling)) {
            if (!expandUseElementsInShadowTree(sibling.get()))
                return false;
        }
    }

    for (RefPtrWillBeRawPtr<SVGElement> child = Traversal<SVGElement>::firstChild(*element); child; child = Traversal<SVGElement>::nextSibling(*child)) {
        if (!expandUseElementsInShadowTree(child.get()))
            return false;
    }

    return true;
}

void SVGUseElement::expandSymbolElementsInShadowTree(SVGElement* element)
{
    ASSERT(element);
    if (isSVGSymbolElement(*element)) {
        // Spec: The referenced 'symbol' and its contents are deep-cloned into the generated tree,
        // with the exception that the 'symbol' is replaced by an 'svg'. This generated 'svg' will
        // always have explicit values for attributes width and height. If attributes width and/or
        // height are provided on the 'use' element, then these attributes will be transferred to
        // the generated 'svg'. If attributes width and/or height are not specified, the generated
        // 'svg' element will use values of 100% for these attributes.
        ASSERT(referencedScope());
        RefPtrWillBeRawPtr<SVGSVGElement> svgElement = SVGSVGElement::create(referencedScope()->document());

        // Transfer all data (attributes, etc.) from <symbol> to the new <svg> element.
        svgElement->cloneDataFromElement(*element);
        svgElement->setCorrespondingElement(element->correspondingElement());

        // Move already cloned elements to the new <svg> element
        for (Node* child = element->firstChild(); child; ) {
            Node* nextChild = child->nextSibling();
            svgElement->appendChild(child);
            child = nextChild;
        }

        // We don't walk the target tree element-by-element, and clone each element,
        // but instead use cloneNode(deep=true). This is an optimization for the common
        // case where <use> doesn't contain disallowed elements (i.e. <foreignObject>).
        // Though if there are disallowed elements in the subtree, we have to remove them.
        // For instance: <use> on <g> containing <foreignObject> (indirect case).
        if (subtreeContainsDisallowedElement(svgElement.get()))
            removeDisallowedElementsFromSubtree(*svgElement);

        RefPtrWillBeRawPtr<SVGElement> replacingElement(svgElement.get());

        // Replace <symbol> with <svg>.
        ASSERT(element->parentNode());
        element->parentNode()->replaceChild(svgElement.release(), element);

        // Expand the siblings because the *element* is replaced and we will
        // lose the sibling chain when we are back from recursion.
        element = replacingElement.get();
    }

    for (RefPtrWillBeRawPtr<SVGElement> child = Traversal<SVGElement>::firstChild(*element); child; child = Traversal<SVGElement>::nextSibling(*child))
        expandSymbolElementsInShadowTree(child.get());
}

void SVGUseElement::invalidateShadowTree()
{
    if (!inActiveDocument() || m_needsShadowTreeRecreation)
        return;
    scheduleShadowTreeRecreation();
    invalidateDependentShadowTrees();
}

void SVGUseElement::invalidateDependentShadowTrees()
{
    // Recursively invalidate dependent <use> shadow trees.
    const WillBeHeapHashSet<RawPtrWillBeWeakMember<SVGElement> >& instances = instancesForElement();
    for (SVGElement* instance : instances) {
        if (SVGUseElement* element = instance->correspondingUseElement()) {
            ASSERT(element->inDocument());
            element->invalidateShadowTree();
        }
    }
}

void SVGUseElement::transferUseAttributesToReplacedElement(SVGElement* from, SVGElement* to) const
{
    ASSERT(from);
    ASSERT(to);

    to->cloneDataFromElement(*from);
    to->removeAttribute(SVGNames::xAttr);
    to->removeAttribute(SVGNames::yAttr);
    to->removeAttribute(SVGNames::widthAttr);
    to->removeAttribute(SVGNames::heightAttr);
    to->removeAttribute(XLinkNames::hrefAttr);
}

bool SVGUseElement::selfHasRelativeLengths() const
{
    if (m_x->currentValue()->isRelative()
        || m_y->currentValue()->isRelative()
        || m_width->currentValue()->isRelative()
        || m_height->currentValue()->isRelative())
        return true;

    if (!m_targetElementInstance)
        return false;

    return m_targetElementInstance->hasRelativeLengths();
}

void SVGUseElement::notifyFinished(Resource* resource)
{
    if (!inDocument())
        return;

    invalidateShadowTree();
    if (resource->errorOccurred())
        dispatchEvent(Event::create(EventTypeNames::error));
    else if (!resource->wasCanceled()) {
        if (m_haveFiredLoadEvent)
            return;
        if (!isStructurallyExternal())
            return;
        ASSERT(!m_haveFiredLoadEvent);
        m_haveFiredLoadEvent = true;
        sendSVGLoadEventIfPossibleAsynchronously();
    }
}

bool SVGUseElement::resourceIsStillLoading()
{
    return m_resource && m_resource->isLoading();
}

bool SVGUseElement::instanceTreeIsLoading(SVGElement* targetInstance)
{
    for (SVGElement* element = Traversal<SVGElement>::firstChild(*targetInstance); element; element = Traversal<SVGElement>::nextSibling(*element)) {
        if (SVGUseElement* use = element->correspondingUseElement()) {
            if (use->resourceIsStillLoading())
                return true;
        }
        if (element->hasChildren() && instanceTreeIsLoading(element))
            return true;
    }
    return false;
}

void SVGUseElement::setDocumentResource(ResourcePtr<DocumentResource> resource)
{
    if (m_resource == resource)
        return;

    if (m_resource)
        m_resource->removeClient(this);

    m_resource = resource;
    if (m_resource)
        m_resource->addClient(this);
}

}
39.288889
172
0.694504
scheib
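The SVGUseElement code above leans on one recurring idiom: recursive firstChild/nextSibling DOM walks (subtreeContainsDisallowedElement, buildShadowTree, instanceTreeIsLoading all share the same shape). A minimal, self-contained C++ sketch of that traversal pattern, using a hypothetical Node type rather than Blink's real one:

#include <memory>
#include <vector>

// Hypothetical stand-in for Blink's Node; only what the traversal needs.
struct Node {
    bool disallowed = false;
    std::vector<std::unique_ptr<Node>> children;
};

// Same shape as subtreeContainsDisallowedElement: true if this node or any
// descendant is disallowed, short-circuiting on the first hit.
bool subtreeContainsDisallowed(const Node& start) {
    if (start.disallowed)
        return true;
    for (const auto& child : start.children) {
        if (subtreeContainsDisallowed(*child))
            return true;
    }
    return false;
}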
4fdee9a5c071edd7361349f9787638d4247466b2
10,690
cpp
C++
source/common/widgets/StackedContainer.cpp
varunamachi/quartz
29b0cf7fb981ec95db894259e32af233f64fa616
[ "MIT" ]
6
2018-01-07T18:11:27.000Z
2022-03-25T03:32:45.000Z
source/common/widgets/StackedContainer.cpp
varunamachi/quartz
29b0cf7fb981ec95db894259e32af233f64fa616
[ "MIT" ]
8
2019-02-28T02:25:53.000Z
2019-02-28T15:47:18.000Z
source/common/widgets/StackedContainer.cpp
varunamachi/quartz
29b0cf7fb981ec95db894259e32af233f64fa616
[ "MIT" ]
4
2016-05-28T16:31:06.000Z
2019-09-25T07:13:45.000Z
#include <QVariant>
#include <QMouseEvent>
#include <QStackedWidget>
#include <QHBoxLayout>
#include <QVBoxLayout>
#include <QPushButton>

#include <common/iconstore/IconFontStore.h>

#include "QzScroller.h"
#include "IdButton.h"
#include "StackedContainer.h"

namespace Quartz {

struct Item {
    using Ptr = std::shared_ptr<Item>;

    IdButton* m_btn;
    QWidget* m_widget;
    int m_index;

    // Initializer order matches the declaration order above.
    inline Item(int index, IdButton* btn, QWidget* widget)
        : m_btn(btn)
        , m_widget(widget)
        , m_index(index) {
    }

    static inline Item::Ptr create(int index, IdButton* btn, QWidget* widget) {
        return std::make_shared<Item>(index, btn, widget);
    }
};

struct AbstractContainer::Data {
    Data(int selectorDimention,
         int buttonDimention,
         Position selectorPosition,
         Qt::Orientation orientation,
         QzScroller* scroller,
         QStackedWidget* stackedWidget)
        : m_btnHeight(selectorDimention)
        , m_btnWidth(buttonDimention)
        , m_selectorPosition(selectorPosition)
        , m_orientation(orientation)
        , m_selector(scroller)
        , m_stackWidget(stackedWidget)
        , m_autoSelPolicy(AutoSelectionPolicy::SelectFirstAdded)
        , m_selectedId("") {
    }

    int m_btnHeight;
    int m_btnWidth;
    Position m_selectorPosition;
    Qt::Orientation m_orientation;
    QzScroller* m_selector;
    QStackedWidget* m_stackWidget;
    AutoSelectionPolicy m_autoSelPolicy;
    QString m_selectedId;
    QHash<QString, Item::Ptr> m_items;
};

AbstractContainer::AbstractContainer(int selectorDimention,
                                     int buttonDimention,
                                     Position selectorPosition,
                                     Qt::Orientation orientation,
                                     QWidget* parent)
    : QWidget(parent)
    , m_data(new Data{selectorDimention,
                      buttonDimention,
                      selectorPosition,
                      orientation,
                      new QzScroller(orientation,
                                     selectorDimention,
                                     selectorDimention,
                                     this),
                      new QStackedWidget(this)}) {
    // m_btnWidth and m_btnHeight are already initialized by Data's
    // constructor, so no re-assignment is needed here.
    if (orientation == Qt::Horizontal) {
        m_data->m_selector->setMaximumHeight(selectorDimention);
    } else {
        m_data->m_selector->setMaximumWidth(selectorDimention);
    }
    m_data->m_selector->setContentsMargins(QMargins());
    m_data->m_stackWidget->setContentsMargins(QMargins());
    this->setContentsMargins(QMargins());
}

AbstractContainer::~AbstractContainer() {
}

QWidget* AbstractContainer::widget(const QString& id) const {
    QWidget* widget = nullptr;
    auto item = m_data->m_items.value(id);
    if (item) {
        widget = item->m_widget;
    }
    return widget;
}

QWidget* AbstractContainer::selectedWidget() const {
    QWidget* selected = nullptr;
    auto item = m_data->m_items.value(m_data->m_selectedId);
    if (item != nullptr) {
        selected = item->m_widget;
    }
    return selected;
}

QString AbstractContainer::currentId() const {
    return m_data->m_selectedId;
}

QList<QString> AbstractContainer::allIds() const {
    return m_data->m_items.keys();
}

void AbstractContainer::addWidget(const QString& id,
                                  const QString& displayName,
                                  QWidget* widget) {
    this->addWidget(id, displayName, QIcon{}, QIcon{}, widget);
}

void AbstractContainer::addWidget(const QString& id,
                                  const QString& displayName,
                                  const QIcon& icon,
                                  const QIcon& activeIcon,
                                  QWidget* widget) {
    if (widget != nullptr) {
        IdButton* btn = nullptr;
        if (icon.isNull()) {
            btn = new IdButton(id,
                               displayName,
                               m_data->m_btnHeight,
                               m_data->m_btnWidth,
                               this);
        } else {
            auto btmTxt = this->containerOrientation() == Qt::Vertical;
            btn = new IdButton(id,
                               displayName,
                               m_data->m_btnHeight,
                               m_data->m_btnWidth,
                               icon,
                               activeIcon,
                               btmTxt,
                               this);
        }
        btn->setContentsMargins({});
        widget->setContentsMargins({});
        auto index = m_data->m_stackWidget->addWidget(widget);
        auto item = Item::create(index, btn, widget);
        m_data->m_items.insert(id, item);
        m_data->m_selector->addWidget(btn);
        // Note: the widget was already added to m_stackWidget above; adding
        // it a second time would register a duplicate page and corrupt the
        // stored indices.
        connect(btn, SIGNAL(activated(QString)), this, SLOT(select(QString)));
        widget->setProperty("item_id", id);
        if (m_data->m_autoSelPolicy == AutoSelectionPolicy::SelectFirstAdded) {
            if (m_data->m_selectedId.isEmpty()) {
                this->select(id);
            }
        } else if (m_data->m_autoSelPolicy
                   == AutoSelectionPolicy::SelectLastAdded) {
            this->select(id);
        } else {
            m_data->m_stackWidget->setVisible(false);
        }
        emit sigAdded(id, widget);
    }
}

void AbstractContainer::removeWidget(const QString& id) {
    auto item = m_data->m_items.value(id);
    if (item) {
        auto theWidget = widget(id);
        m_data->m_selector->removeWidget(item->m_btn);
        m_data->m_stackWidget->removeWidget(item->m_widget);
        m_data->m_items.remove(id);
        if (m_data->m_selectedId == id) {
            m_data->m_selectedId = m_data->m_items.isEmpty()
                    ? ""
                    : m_data->m_items.begin().key();
            emit sigSelected(m_data->m_selectedId, selectedWidget());
        }
        updateIndeces();
        theWidget->setProperty("item_id", QVariant());
        emit sigRemoved(id);
    }
}

void AbstractContainer::removeWidget(QWidget* widget) {
    // Take a snapshot of the keys first: calling removeWidget(id) mutates
    // m_items, which would invalidate an iterator still walking the hash.
    // The same widget might have been added under multiple ids, so every
    // matching id is removed.
    const auto ids = m_data->m_items.keys();
    for (const auto& id : ids) {
        auto item = m_data->m_items.value(id);
        if (item && item->m_widget == widget) {
            removeWidget(id);
        }
    }
}

void AbstractContainer::select(const QString& id) {
    auto item = m_data->m_items.value(id);
    if (item) {
        if (m_data->m_selectedId != ""
                && item->m_index == m_data->m_stackWidget->currentIndex()) {
            m_data->m_stackWidget->setVisible(false);
            item->m_btn->setChecked(false);
            m_data->m_selectedId = "";
        } else {
            auto prev = m_data->m_items.value(m_data->m_selectedId);
            item->m_btn->setChecked(true);
            m_data->m_stackWidget->setCurrentIndex(item->m_index);
            m_data->m_selectedId = id;
            if (prev != nullptr) {
                prev->m_btn->setChecked(false);
            } else {
                m_data->m_stackWidget->setVisible(true);
            }
        }
        emit sigSelected(id, item->m_widget);
    }
}

void AbstractContainer::hideAll() {
    auto item = m_data->m_items.value(m_data->m_selectedId);
    if (item) {
        m_data->m_stackWidget->setVisible(false);
        item->m_btn->setChecked(false);
        m_data->m_selectedId = "";
    }
}

void AbstractContainer::setAutoSelectionPolicy(AutoSelectionPolicy policy) {
    m_data->m_autoSelPolicy = policy;
}

QStackedWidget* AbstractContainer::stackedWidget() const {
    return m_data->m_stackWidget;
}

QzScroller* AbstractContainer::selector() const {
    return m_data->m_selector;
}

AbstractContainer::Position AbstractContainer::selectorPosition() const {
    return m_data->m_selectorPosition;
}

Qt::Orientation AbstractContainer::containerOrientation() const {
    return m_data->m_orientation;
}

int AbstractContainer::buttonWidth() const {
    return m_data->m_btnWidth;
}

int AbstractContainer::buttonHeight() const {
    return m_data->m_btnHeight;
}

AutoSelectionPolicy AbstractContainer::autoSelectionPolicy() const {
    return m_data->m_autoSelPolicy;
}

void AbstractContainer::updateIndeces() {
    // Re-sync each item's cached stack index after pages have been removed.
    for (int i = 0;
         i < m_data->m_stackWidget->count() && i < m_data->m_items.size();
         ++i) {
        auto widget = m_data->m_stackWidget->widget(i);
        auto itemId = widget->property("item_id");
        if (itemId.isValid()) {
            auto id = itemId.toString();
            auto item = m_data->m_items.value(id);
            item->m_index = i;
        }
    }
}

int AbstractContainer::numWidgets() const {
    return m_data->m_items.size();
}

bool AbstractContainer::isEmpty() {
    return m_data->m_items.isEmpty();
}

// Stacked container
StackedContainer::StackedContainer(int selectorDimention,
                                   int buttonDimention,
AbstractContainer::Position selectorPosition, Qt::Orientation orientation, QWidget* parent) : AbstractContainer(selectorDimention, buttonDimention, selectorPosition, orientation, parent) { QBoxLayout* layout = nullptr; if (orientation == Qt::Vertical) { layout = new QHBoxLayout(); } else { layout = new QVBoxLayout(); } if (selectorPosition == Position::Before) { layout->addWidget(selector()); layout->addWidget(stackedWidget()); layout->setAlignment(selector(), orientation == Qt::Horizontal ? Qt::AlignTop : Qt::AlignLeft); } else { layout->addWidget(stackedWidget()); layout->addWidget(selector()); layout->setAlignment(selector(), orientation == Qt::Horizontal ? Qt::AlignBottom : Qt::AlignRight); } layout->setContentsMargins(QMargins{}); auto margins = this->contentsMargins(); margins.setLeft(0); this->setContentsMargins(margins); this->setLayout(layout); } StackedContainer::~StackedContainer() { } QString StackedContainer::containerType() const { return "StackedContainer"; } } // namespace Quartz
31.075581
80
0.575304
varunamachi
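For orientation, here is a hypothetical usage sketch of the container API above. It is not from the quartz repo; it assumes the declarations in StackedContainer.h match the definitions shown and that the constructor's parent parameter accepts nullptr. Note the toggle in select(): selecting the already-current id hides the stack.

#include <QApplication>
#include <QLabel>
#include "StackedContainer.h"

int main(int argc, char** argv) {
    QApplication app{argc, argv};
    // 32 px selector strip, 96 px buttons, selector placed before the stack.
    Quartz::StackedContainer container{
        32, 96,
        Quartz::AbstractContainer::Position::Before,
        Qt::Horizontal,
        nullptr};
    // First widget is auto-selected under the default SelectFirstAdded policy.
    container.addWidget("log", "Log", new QLabel{"log output"});
    container.addWidget("cfg", "Config", new QLabel{"settings"});
    container.select("cfg");  // checks the "cfg" button, raises its page
    container.select("cfg");  // selecting the current id again hides the stack
    container.show();
    return app.exec();
}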
4fdfaa3f8b1a6b061f86e6583fcca48546ec6515
2,730
cpp
C++
Plugins/SpawnPedestrian/Source/SpawnPedestrian/Private/SpawnPedestrian.cpp
ishaan95/EvoScenario
31712603a89d19b01cefffe1223363969505bbf3
[ "MIT" ]
5
2021-02-21T23:08:03.000Z
2022-02-24T22:24:31.000Z
Plugins/SpawnPedestrian/Source/SpawnPedestrian/Private/SpawnPedestrian.cpp
AugmentedDesignLab/CruzWay
c21d28ee4bd5a3cc4f7e1e2c829f88e0554886e3
[ "MIT" ]
7
2020-11-11T01:03:17.000Z
2021-04-19T18:13:00.000Z
Plugins/SpawnPedestrian/Source/SpawnPedestrian/Private/SpawnPedestrian.cpp
ishaan95/EvoScenario
31712603a89d19b01cefffe1223363969505bbf3
[ "MIT" ]
2
2021-07-18T02:30:13.000Z
2022-01-11T08:04:36.000Z
// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved. #include "SpawnPedestrian.h" #include "SpawnPedestrianStyle.h" #include "SpawnPedestrianCommands.h" #include "Misc/MessageDialog.h" #include "Framework/MultiBox/MultiBoxBuilder.h" #include "LevelEditor.h" #include "SpawnManager.h" static const FName SpawnPedestrianTabName("SpawnPedestrian"); #define LOCTEXT_NAMESPACE "FSpawnPedestrianModule" void FSpawnPedestrianModule::StartupModule() { // This code will execute after your module is loaded into memory; the exact timing is specified in the .uplugin file per-module FSpawnPedestrianStyle::Initialize(); FSpawnPedestrianStyle::ReloadTextures(); FSpawnPedestrianCommands::Register(); PluginCommands = MakeShareable(new FUICommandList); PluginCommands->MapAction( FSpawnPedestrianCommands::Get().PluginAction, FExecuteAction::CreateRaw(this, &FSpawnPedestrianModule::PluginButtonClicked), FCanExecuteAction()); FLevelEditorModule& LevelEditorModule = FModuleManager::LoadModuleChecked<FLevelEditorModule>("LevelEditor"); { TSharedPtr<FExtender> MenuExtender = MakeShareable(new FExtender()); MenuExtender->AddMenuExtension("WindowLayout", EExtensionHook::After, PluginCommands, FMenuExtensionDelegate::CreateRaw( this, &FSpawnPedestrianModule::AddMenuExtension)); LevelEditorModule.GetMenuExtensibilityManager()->AddExtender(MenuExtender); } { TSharedPtr<FExtender> ToolbarExtender = MakeShareable(new FExtender); ToolbarExtender->AddToolBarExtension("Settings", EExtensionHook::After, PluginCommands, FToolBarExtensionDelegate::CreateRaw( this, &FSpawnPedestrianModule::AddToolbarExtension)); LevelEditorModule.GetToolBarExtensibilityManager()->AddExtender(ToolbarExtender); } } void FSpawnPedestrianModule::ShutdownModule() { // This function may be called during shutdown to clean up your module. For modules that support dynamic reloading, // we call this function before unloading the module. FSpawnPedestrianStyle::Shutdown(); FSpawnPedestrianCommands::Unregister(); } void FSpawnPedestrianModule::PluginButtonClicked() { // Main entry point to my Plugin FSpawnManager::LoadBlueprintAssets(); FSpawnManager::InitializeNavMesh(); } void FSpawnPedestrianModule::AddMenuExtension(FMenuBuilder& Builder) { Builder.AddMenuEntry(FSpawnPedestrianCommands::Get().PluginAction); } void FSpawnPedestrianModule::AddToolbarExtension(FToolBarBuilder& Builder) { Builder.AddToolBarButton(FSpawnPedestrianCommands::Get().PluginAction); } #undef LOCTEXT_NAMESPACE IMPLEMENT_MODULE(FSpawnPedestrianModule, SpawnPedestrian)
33.292683
129
0.769963
ishaan95
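The module above maps FSpawnPedestrianCommands::Get().PluginAction, but the commands file itself is not part of this record. For context, plugins built from the stock UE4 template register that action with the UI_COMMAND macro roughly as below; this is the standard template pattern, not the actual SpawnPedestrianCommands.cpp, and the label and tooltip strings are guesses:

#define LOCTEXT_NAMESPACE "FSpawnPedestrianModule"

void FSpawnPedestrianCommands::RegisterCommands()
{
	// Binds PluginAction to a toolbar/menu button with no default keybinding.
	UI_COMMAND(PluginAction, "SpawnPedestrian", "Spawn pedestrians on the nav mesh", EUserInterfaceActionType::Button, FInputChord());
}

#undef LOCTEXT_NAMESPACE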
4fe1d403e823872161e865c37354ed666537d721
702
cpp
C++
test/unit-tests/compiler/compiler_test.cpp
twantonie/centurion
198b80f9e8a29da2ae7d3c15e48ffa1a046165c3
[ "MIT" ]
126
2020-12-05T00:05:56.000Z
2022-03-30T15:15:03.000Z
test/unit-tests/compiler/compiler_test.cpp
twantonie/centurion
198b80f9e8a29da2ae7d3c15e48ffa1a046165c3
[ "MIT" ]
46
2020-12-27T14:25:22.000Z
2022-01-26T13:58:11.000Z
test/unit-tests/compiler/compiler_test.cpp
twantonie/centurion
198b80f9e8a29da2ae7d3c15e48ffa1a046165c3
[ "MIT" ]
13
2021-01-20T20:50:18.000Z
2022-03-25T06:59:03.000Z
#include "compiler/compiler.hpp" #include <gtest/gtest.h> TEST(Compiler, IsDebugBuild) { #ifdef NDEBUG ASSERT_FALSE(cen::is_debug_build()); #else ASSERT_TRUE(cen::is_debug_build()); #endif } TEST(Compiler, IsReleaseBuild) { #ifdef NDEBUG ASSERT_TRUE(cen::is_release_build()); #else ASSERT_FALSE(cen::is_release_build()); #endif } TEST(Compiler, OnMSVC) { #ifdef _MSC_VER ASSERT_TRUE(cen::on_msvc()); #else ASSERT_FALSE(cen::on_msvc()); #endif } TEST(Compiler, OnClang) { #ifdef __clang__ ASSERT_TRUE(cen::on_clang()); #else ASSERT_FALSE(cen::on_clang()); #endif } TEST(Compiler, OnGCC) { #ifdef __GNUC__ ASSERT_TRUE(cen::on_gcc()); #else ASSERT_FALSE(cen::on_gcc()); #endif }
14.326531
40
0.7151
twantonie
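The tests above only make sense if the predicates are plain compile-time functions over the usual compiler macros (NDEBUG, _MSC_VER, __clang__, __GNUC__). A minimal sketch of how such predicates are commonly implemented, offered as an illustration and an assumption rather than centurion's actual source:

namespace cen {

// True in debug builds; NDEBUG is the standard "assertions off" macro.
[[nodiscard]] constexpr bool is_debug_build() noexcept
{
#ifdef NDEBUG
  return false;
#else
  return true;
#endif
}

[[nodiscard]] constexpr bool is_release_build() noexcept
{
  return !is_debug_build();
}

// Each compiler defines its own identification macro.
[[nodiscard]] constexpr bool on_msvc() noexcept
{
#ifdef _MSC_VER
  return true;
#else
  return false;
#endif
}

}  // namespace cen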
4fe2036cc2af9ec54924d35ce673146f20b1d84b
4,768
cpp
C++
src/main/cpp/Scripting/CommandGroupBuilder.cpp
FRC-Team-4143/2022-Test-DiffSwerve
e28620d4622e0ecc02b447f6f439791c0f7f5783
[ "BSD-3-Clause" ]
null
null
null
src/main/cpp/Scripting/CommandGroupBuilder.cpp
FRC-Team-4143/2022-Test-DiffSwerve
e28620d4622e0ecc02b447f6f439791c0f7f5783
[ "BSD-3-Clause" ]
null
null
null
src/main/cpp/Scripting/CommandGroupBuilder.cpp
FRC-Team-4143/2022-Test-DiffSwerve
e28620d4622e0ecc02b447f6f439791c0f7f5783
[ "BSD-3-Clause" ]
null
null
null
// ========================================================================== // CommandGroupBuilder class // // FRC 4143: MARS/WARS // ========================================================================== // 2022-02-27 JKSalmon - Initial release // ========================================================================== #include "Scripting/CommandGroupBuilder.h" #include <utility> #include "frc2/command/ParallelCommandGroup.h" #include "frc2/command/ParallelDeadlineGroup.h" #include "frc2/command/ParallelRaceGroup.h" #include "frc2/command/SequentialCommandGroup.h" #include "frc2/command/WaitCommand.h" using frc4143::CommandGroupBuilder; // ========================================================================== CommandGroupBuilder::CommandGroupBuilder() : _sequentialCommands{}, _parallelCommands{}, _parallelDeadlineCommand{}, _parallelDeadlineCommands{}, _parallelRaceCommands{} { } // ========================================================================== CommandGroupBuilder::~CommandGroupBuilder() { } // ========================================================================== void CommandGroupBuilder::AddSequential( std::unique_ptr<frc2::Command>&& command, units::time::second_t duration ) { _FlushParallel(); _FlushParallelDeadline(); _FlushParallelRace(); if (duration > 0_s) { command = _ApplyTimeout(std::move(command), duration); } _sequentialCommands.emplace_back(std::move(command)); } // ========================================================================== void CommandGroupBuilder::AddParallel( std::unique_ptr<frc2::Command>&& command, units::time::second_t duration ) { _FlushParallelDeadline(); _FlushParallelRace(); if (duration > 0_s) { command = _ApplyTimeout(std::move(command), duration); } _parallelCommands.emplace_back(std::move(command)); } // ========================================================================== void CommandGroupBuilder::AddParallelDeadline( std::unique_ptr<frc2::Command>&& command, units::time::second_t duration ) { _FlushParallel(); _FlushParallelRace(); if (duration > 0_s) { command = _ApplyTimeout(std::move(command), duration); } if (_parallelDeadlineCommand) { _parallelDeadlineCommands.emplace_back(std::move(command)); } else { _parallelDeadlineCommand = std::move(command); } } // ========================================================================== void CommandGroupBuilder::AddParallelRace( std::unique_ptr<frc2::Command>&& command, units::time::second_t duration ) { _FlushParallel(); _FlushParallelDeadline(); if (duration > 0_s) { command = _ApplyTimeout(std::move(command), duration); } _parallelRaceCommands.emplace_back(std::move(command)); } // ========================================================================== std::unique_ptr<frc2::Command> CommandGroupBuilder::Create() { _FlushParallel(); _FlushParallelDeadline(); _FlushParallelRace(); auto numCommands{_sequentialCommands.size()}; if (0 == numCommands) { return nullptr; } if (1 == numCommands) { return std::move(_sequentialCommands[0]); } return std::make_unique<frc2::SequentialCommandGroup>(std::move(_sequentialCommands)); } // ========================================================================== std::unique_ptr<frc2::ParallelRaceGroup> CommandGroupBuilder::_ApplyTimeout( std::unique_ptr<frc2::Command>&& command, units::time::second_t duration ) { std::vector<std::unique_ptr<frc2::Command>> temp; temp.emplace_back(std::make_unique<frc2::WaitCommand>(duration)); temp.emplace_back(std::move(command)); return 
std::make_unique<frc2::ParallelRaceGroup>(std::move(temp)); } // ========================================================================== void CommandGroupBuilder::_FlushParallel() { if (!_parallelCommands.empty()) { _sequentialCommands.emplace_back(std::make_unique<frc2::ParallelCommandGroup>(std::move(_parallelCommands))); _parallelCommands.clear(); } } // ========================================================================== void CommandGroupBuilder::_FlushParallelDeadline() { if (_parallelDeadlineCommand) { _sequentialCommands.emplace_back(std::make_unique<frc2::ParallelDeadlineGroup>(std::move(_parallelDeadlineCommand), std::move(_parallelDeadlineCommands))); _parallelDeadlineCommand.reset(); _parallelDeadlineCommands.clear(); } } // ========================================================================== void CommandGroupBuilder::_FlushParallelRace() { if (!_parallelRaceCommands.empty()) { _sequentialCommands.emplace_back(std::make_unique<frc2::ParallelRaceGroup>(std::move(_parallelRaceCommands))); _parallelRaceCommands.clear(); } } // ==========================================================================
27.883041
157
0.574035
FRC-Team-4143
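A hypothetical usage sketch (not from the repo) showing how the builder composes groups; it uses only frc2::WaitCommand, which the implementation above already includes. Consecutive AddParallel calls accumulate into one ParallelCommandGroup, which is flushed into the sequence as soon as a different kind of step is added or Create() is called:

#include <memory>
#include "Scripting/CommandGroupBuilder.h"
#include "frc2/command/WaitCommand.h"

std::unique_ptr<frc2::Command> BuildExampleAuto()
{
	frc4143::CommandGroupBuilder builder;
	// Step 1: a plain sequential wait (no extra timeout, hence 0_s).
	builder.AddSequential(std::make_unique<frc2::WaitCommand>(1.0_s), 0_s);
	// Step 2: two commands that run in parallel; the second is capped at
	// 2.5 s via the timeout race the builder wraps around it.
	builder.AddParallel(std::make_unique<frc2::WaitCommand>(2.0_s), 0_s);
	builder.AddParallel(std::make_unique<frc2::WaitCommand>(3.0_s), 2.5_s);
	// Flushes the pending parallels and returns a SequentialCommandGroup.
	return builder.Create();
}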
4fe208e83a2a28c3cc73a8d93d5c8da50bbda1f2
612
hpp
C++
iOS/G3MApp/G3MApp/G3MCanvas2DDemoScene.hpp
AeroGlass/g3m
a21a9e70a6205f1f37046ae85dafc6e3bfaeb917
[ "BSD-2-Clause" ]
null
null
null
iOS/G3MApp/G3MApp/G3MCanvas2DDemoScene.hpp
AeroGlass/g3m
a21a9e70a6205f1f37046ae85dafc6e3bfaeb917
[ "BSD-2-Clause" ]
null
null
null
iOS/G3MApp/G3MApp/G3MCanvas2DDemoScene.hpp
AeroGlass/g3m
a21a9e70a6205f1f37046ae85dafc6e3bfaeb917
[ "BSD-2-Clause" ]
null
null
null
// // G3MCanvas2DDemoScene.hpp // G3MApp // // Created by Diego Gomez Deck on 2/12/15. // Copyright (c) 2015 Igo Software SL. All rights reserved. // #ifndef __G3MApp__G3MCanvas2DDemoScene__ #define __G3MApp__G3MCanvas2DDemoScene__ #include "G3MDemoScene.hpp" class G3MCanvas2DDemoScene : public G3MDemoScene { protected: void rawActivate(const G3MContext* context); void rawSelectOption(const std::string& option, int optionIndex) { // do nothing } public: G3MCanvas2DDemoScene(G3MDemoModel* model) : G3MDemoScene(model, "Canvas 2D", "", -1) { } }; #endif
18.545455
60
0.696078
AeroGlass
4fe2f1130e63469bd6fd38299067649bc4e92155
25,121
cpp
C++
src/model/test/AirLoopHVACUnitarySystem_GTest.cpp
mehrdad-shokri/OpenStudio
1773b54ce1cb660818ac0114dd7eefae6639ca36
[ "blessing" ]
null
null
null
src/model/test/AirLoopHVACUnitarySystem_GTest.cpp
mehrdad-shokri/OpenStudio
1773b54ce1cb660818ac0114dd7eefae6639ca36
[ "blessing" ]
null
null
null
src/model/test/AirLoopHVACUnitarySystem_GTest.cpp
mehrdad-shokri/OpenStudio
1773b54ce1cb660818ac0114dd7eefae6639ca36
[ "blessing" ]
null
null
null
/*********************************************************************************************************************** * OpenStudio(R), Copyright (c) 2008-2020, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the * following conditions are met: * * (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following * disclaimer. * * (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided with the distribution. * * (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission from the respective party. * * (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works * may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior * written permission from Alliance for Sustainable Energy, LLC. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED * STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***********************************************************************************************************************/ #include <gtest/gtest.h> #include "ModelFixture.hpp" #include "../AirLoopHVACUnitarySystem.hpp" #include "../AirLoopHVACUnitarySystem_Impl.hpp" #include "../Schedule.hpp" #include "../Schedule_Impl.hpp" #include "../Node.hpp" #include "../Node_Impl.hpp" #include "../Splitter.hpp" #include "../Splitter_Impl.hpp" #include "../AirLoopHVACZoneSplitter.hpp" #include "../AirLoopHVACZoneSplitter_Impl.hpp" #include "../HVACComponent.hpp" #include "../HVACComponent_Impl.hpp" #include "../FanVariableVolume.hpp" #include "../FanVariableVolume_Impl.hpp" #include "../FanConstantVolume.hpp" #include "../FanConstantVolume_Impl.hpp" #include "../FanOnOff.hpp" #include "../FanOnOff_Impl.hpp" #include "../CoilHeatingWater.hpp" #include "../CoilHeatingGas.hpp" #include "../CoilHeatingElectric.hpp" #include "../CoilHeatingElectric_Impl.hpp" #include "../CoilCoolingWater.hpp" #include "../CoilCoolingWater_Impl.hpp" #include "../CoilCoolingDXSingleSpeed.hpp" #include "../CoilHeatingDXSingleSpeed.hpp" #include "../CoilCoolingWaterToAirHeatPumpEquationFit.hpp" #include "../CoilHeatingWaterToAirHeatPumpEquationFit.hpp" #include "../CoilHeatingDesuperheater.hpp" #include "../CoilCoolingDXTwoSpeed.hpp" #include "../Curve.hpp" #include "../CurveQuadratic.hpp" #include "../CurveCubic.hpp" #include "../CurveExponent.hpp" #include "../CurveBiquadratic.hpp" using namespace openstudio; using namespace openstudio::model; TEST_F(ModelFixture, AirLoopHVACUnitarySystem_DefaultConstructors) { ::testing::FLAGS_gtest_death_test_style = "threadsafe"; ASSERT_EXIT ( { Model m; AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); exit(0); } , ::testing::ExitedWithCode(0), "" ); } TEST_F(ModelFixture, AirLoopHVACUnitarySystem_Remove) { Model m; Schedule s = m.alwaysOnDiscreteSchedule(); AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); FanOnOff fan = FanOnOff(m,s); CoilHeatingElectric heatingCoil = CoilHeatingElectric(m,s); CoilHeatingElectric suppHeatingCoil = CoilHeatingElectric(m,s); CoilCoolingWater coolingCoil = CoilCoolingWater(m,s); testObject.setSupplyFan(fan); testObject.setCoolingCoil(coolingCoil); testObject.setHeatingCoil(heatingCoil); testObject.setSupplementalHeatingCoil(suppHeatingCoil); std::vector<AirLoopHVACUnitarySystem> unitarySystem = m.getConcreteModelObjects<AirLoopHVACUnitarySystem>(); EXPECT_EQ(1, unitarySystem.size()); std::vector<FanOnOff> fans = m.getConcreteModelObjects<FanOnOff>(); EXPECT_EQ(1, fans.size()); std::vector<CoilHeatingElectric> heatingCoils = m.getConcreteModelObjects<CoilHeatingElectric>(); EXPECT_EQ(2, heatingCoils.size()); std::vector<CoilCoolingWater> coolingCoils = m.getConcreteModelObjects<CoilCoolingWater>(); EXPECT_EQ(1, coolingCoils.size()); testObject.remove(); unitarySystem = m.getConcreteModelObjects<AirLoopHVACUnitarySystem>(); EXPECT_EQ(0, unitarySystem.size()); fans = m.getConcreteModelObjects<FanOnOff>(); EXPECT_EQ(0, fans.size()); heatingCoils = m.getConcreteModelObjects<CoilHeatingElectric>(); EXPECT_EQ(0, heatingCoils.size()); coolingCoils = m.getConcreteModelObjects<CoilCoolingWater>(); EXPECT_EQ(0, coolingCoils.size()); } TEST_F(ModelFixture, AirLoopHVACUnitarySystem_CloneOneModelWithDefaultData) { Model m; Schedule s = m.alwaysOnDiscreteSchedule(); AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); FanOnOff fan = FanOnOff(m,s); CoilHeatingElectric heatingCoil = CoilHeatingElectric(m,s); 
CoilHeatingElectric suppHeatingCoil = CoilHeatingElectric(m,s); CoilCoolingWater coolingCoil = CoilCoolingWater(m,s); testObject.setSupplyFan(fan); testObject.setCoolingCoil(coolingCoil); testObject.setHeatingCoil(heatingCoil); testObject.setSupplementalHeatingCoil(suppHeatingCoil); AirLoopHVACUnitarySystem testObjectClone = testObject.clone(m).cast<AirLoopHVACUnitarySystem>(); // EXPECT_EQ("Load", testObjectClone.controlType()); EXPECT_EQ("None", testObjectClone.dehumidificationControlType()); EXPECT_DOUBLE_EQ(1.0, testObjectClone.dXHeatingCoilSizingRatio()); EXPECT_FALSE(testObjectClone.useDOASDXCoolingCoil()); EXPECT_DOUBLE_EQ(2.0, testObjectClone.dOASDXCoolingCoilLeavingMinimumAirTemperature()); EXPECT_EQ("SensibleOnlyLoadControl", testObjectClone.latentLoadControl()); EXPECT_TRUE(testObjectClone.isSupplyAirFlowRateDuringCoolingOperationAutosized()); EXPECT_TRUE(testObjectClone.isSupplyAirFlowRateDuringHeatingOperationAutosized()); EXPECT_TRUE(testObjectClone.isSupplyAirFlowRateWhenNoCoolingorHeatingisRequiredAutosized()); EXPECT_DOUBLE_EQ(80.0, testObjectClone.maximumSupplyAirTemperature().get()); EXPECT_DOUBLE_EQ(21.0, testObjectClone.maximumOutdoorDryBulbTemperatureforSupplementalHeaterOperation()); EXPECT_DOUBLE_EQ(2.5, testObjectClone.maximumCyclingRate()); EXPECT_DOUBLE_EQ(60.0, testObjectClone.heatPumpTimeConstant()); EXPECT_DOUBLE_EQ(0.01, testObjectClone.fractionofOnCyclePowerUse()); EXPECT_DOUBLE_EQ(60, testObjectClone.heatPumpFanDelayTime()); EXPECT_DOUBLE_EQ(0.0, testObjectClone.ancilliaryOnCycleElectricPower()); EXPECT_DOUBLE_EQ(0.0, testObjectClone.ancilliaryOffCycleElectricPower()); // EXPECT_DOUBLE_EQ(80.0, testObjectClone.maximumTemperatureforHeatRecovery()); EXPECT_NE(testObject.supplyFan(), testObjectClone.supplyFan()); EXPECT_NE(testObject.coolingCoil(), testObjectClone.coolingCoil()); EXPECT_NE(testObject.heatingCoil(), testObjectClone.heatingCoil()); EXPECT_NE(testObject.supplementalHeatingCoil(), testObjectClone.supplementalHeatingCoil()); std::vector<AirLoopHVACUnitarySystem> unitarySystem = m.getConcreteModelObjects<AirLoopHVACUnitarySystem>(); EXPECT_EQ(2, unitarySystem.size()); std::vector<FanOnOff> fans = m.getConcreteModelObjects<FanOnOff>(); EXPECT_EQ(2, fans.size()); std::vector<CoilHeatingElectric> heatingCoils = m.getConcreteModelObjects<CoilHeatingElectric>(); EXPECT_EQ(4, heatingCoils.size()); std::vector<CoilCoolingWater> coolingCoils = m.getConcreteModelObjects<CoilCoolingWater>(); EXPECT_EQ(2, coolingCoils.size()); } TEST_F(ModelFixture, AirLoopHVACUnitarySystem_CloneOneModelWithCustomData) { Model m; Schedule s = m.alwaysOnDiscreteSchedule(); AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); FanOnOff fan = FanOnOff(m,s); CoilHeatingElectric heatingCoil = CoilHeatingElectric(m,s); CoilHeatingElectric suppHeatingCoil = CoilHeatingElectric(m,s); CoilCoolingWater coolingCoil = CoilCoolingWater(m,s); testObject.setSupplyFan(fan); testObject.setCoolingCoil(coolingCoil); testObject.setHeatingCoil(heatingCoil); testObject.setSupplementalHeatingCoil(suppHeatingCoil); // testObject.setControlType("SetPoint"); testObject.setDehumidificationControlType("CoolReheat"); testObject.setFanPlacement("BlowThrough"); testObject.setDXHeatingCoilSizingRatio(999.0); testObject.setUseDOASDXCoolingCoil(true); testObject.resetDOASDXCoolingCoilLeavingMinimumAirTemperature(); EXPECT_TRUE(testObject.isDOASDXCoolingCoilLeavingMinimumAirTemperatureDefaulted()); EXPECT_FALSE(testObject.isDOASDXCoolingCoilLeavingMinimumAirTemperatureAutosized()); 
testObject.autosizeDOASDXCoolingCoilLeavingMinimumAirTemperature(); EXPECT_FALSE(testObject.isDOASDXCoolingCoilLeavingMinimumAirTemperatureDefaulted()); EXPECT_TRUE(testObject.isDOASDXCoolingCoilLeavingMinimumAirTemperatureAutosized()); testObject.setDOASDXCoolingCoilLeavingMinimumAirTemperature(7.0); EXPECT_FALSE(testObject.isDOASDXCoolingCoilLeavingMinimumAirTemperatureDefaulted()); EXPECT_FALSE(testObject.isDOASDXCoolingCoilLeavingMinimumAirTemperatureAutosized()); testObject.setLatentLoadControl("LatentWithSensibleLoadControl"); testObject.autosizeSupplyAirFlowRateDuringCoolingOperation(); testObject.autosizeSupplyAirFlowRateDuringHeatingOperation(); testObject.autosizeSupplyAirFlowRateWhenNoCoolingorHeatingisRequired(); testObject.autosizeMaximumSupplyAirTemperature(); testObject.setMaximumOutdoorDryBulbTemperatureforSupplementalHeaterOperation(999.0); testObject.setMaximumCyclingRate(5.0); testObject.setHeatPumpTimeConstant(500.0); testObject.setFractionofOnCyclePowerUse(0.05); testObject.setHeatPumpFanDelayTime(999.0); testObject.setAncilliaryOnCycleElectricPower(999.0); testObject.setAncilliaryOffCycleElectricPower(999.0); // testObject.setMaximumTemperatureforHeatRecovery(100.0); AirLoopHVACUnitarySystem testObjectClone = testObject.clone(m).cast<AirLoopHVACUnitarySystem>(); // EXPECT_EQ("SetPoint", testObjectClone.controlType()); EXPECT_EQ("CoolReheat", testObjectClone.dehumidificationControlType()); EXPECT_EQ("BlowThrough", testObjectClone.fanPlacement().get()); EXPECT_DOUBLE_EQ(999.0, testObjectClone.dXHeatingCoilSizingRatio()); EXPECT_TRUE(testObjectClone.useDOASDXCoolingCoil()); EXPECT_DOUBLE_EQ(7.0, testObjectClone.dOASDXCoolingCoilLeavingMinimumAirTemperature()); EXPECT_EQ("LatentWithSensibleLoadControl", testObjectClone.latentLoadControl()); EXPECT_TRUE(testObjectClone.isSupplyAirFlowRateDuringCoolingOperationAutosized()); EXPECT_TRUE(testObjectClone.isSupplyAirFlowRateDuringHeatingOperationAutosized()); EXPECT_TRUE(testObjectClone.isSupplyAirFlowRateWhenNoCoolingorHeatingisRequiredAutosized()); EXPECT_TRUE(testObjectClone.isMaximumSupplyAirTemperatureAutosized()); EXPECT_DOUBLE_EQ(999.0, testObjectClone.maximumOutdoorDryBulbTemperatureforSupplementalHeaterOperation()); EXPECT_DOUBLE_EQ(5.0, testObjectClone.maximumCyclingRate()); EXPECT_DOUBLE_EQ(500.0, testObjectClone.heatPumpTimeConstant()); EXPECT_DOUBLE_EQ(0.05, testObjectClone.fractionofOnCyclePowerUse()); EXPECT_DOUBLE_EQ(999.0, testObjectClone.heatPumpFanDelayTime()); EXPECT_DOUBLE_EQ(999.0, testObjectClone.ancilliaryOnCycleElectricPower()); EXPECT_DOUBLE_EQ(999.0, testObjectClone.ancilliaryOffCycleElectricPower()); // EXPECT_DOUBLE_EQ(100.0, testObjectClone.maximumTemperatureforHeatRecovery()); EXPECT_NE(testObject.supplyFan(), testObjectClone.supplyFan()); EXPECT_NE(testObject.coolingCoil(), testObjectClone.coolingCoil()); EXPECT_NE(testObject.heatingCoil(), testObjectClone.heatingCoil()); EXPECT_NE(testObject.supplementalHeatingCoil(), testObjectClone.supplementalHeatingCoil()); std::vector<AirLoopHVACUnitarySystem> unitarySystem = m.getConcreteModelObjects<AirLoopHVACUnitarySystem>(); EXPECT_EQ(2, unitarySystem.size()); std::vector<FanOnOff> fans = m.getConcreteModelObjects<FanOnOff>(); EXPECT_EQ(2, fans.size()); std::vector<CoilHeatingElectric> heatingCoils = m.getConcreteModelObjects<CoilHeatingElectric>(); EXPECT_EQ(4, heatingCoils.size()); std::vector<CoilCoolingWater> coolingCoils = m.getConcreteModelObjects<CoilCoolingWater>(); EXPECT_EQ(2, coolingCoils.size()); } TEST_F(ModelFixture, 
AirLoopHVACUnitarySystem_CloneTwoModelsWithCustomData) { Model m; Schedule s = m.alwaysOnDiscreteSchedule(); AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); FanOnOff fan = FanOnOff(m,s); CoilHeatingElectric heatingCoil = CoilHeatingElectric(m,s); CoilHeatingElectric suppHeatingCoil = CoilHeatingElectric(m,s); CoilCoolingWater coolingCoil = CoilCoolingWater(m,s); testObject.setSupplyFan(fan); testObject.setCoolingCoil(coolingCoil); testObject.setHeatingCoil(heatingCoil); testObject.setSupplementalHeatingCoil(suppHeatingCoil); AirLoopHVACUnitarySystem testObjectClone = testObject.clone(m).cast<AirLoopHVACUnitarySystem>(); EXPECT_NE(testObject.supplyFan(), testObjectClone.supplyFan()); EXPECT_NE(testObject.coolingCoil(), testObjectClone.coolingCoil()); EXPECT_NE(testObject.heatingCoil(), testObjectClone.heatingCoil()); EXPECT_NE(testObject.supplementalHeatingCoil(), testObjectClone.supplementalHeatingCoil()); std::vector<AirLoopHVACUnitarySystem> unitarySystem = m.getConcreteModelObjects<AirLoopHVACUnitarySystem>(); EXPECT_EQ(2, unitarySystem.size()); std::vector<FanOnOff> fans = m.getConcreteModelObjects<FanOnOff>(); EXPECT_EQ(2, fans.size()); std::vector<CoilHeatingElectric> heatingCoils = m.getConcreteModelObjects<CoilHeatingElectric>(); EXPECT_EQ(4, heatingCoils.size()); std::vector<CoilCoolingWater> coolingCoils = m.getConcreteModelObjects<CoilCoolingWater>(); EXPECT_EQ(2, coolingCoils.size()); Model m2; AirLoopHVACUnitarySystem testObjectClone2 = testObject.clone(m2).cast<AirLoopHVACUnitarySystem>(); unitarySystem = m2.getConcreteModelObjects<AirLoopHVACUnitarySystem>(); EXPECT_EQ(1, unitarySystem.size()); fans = m2.getConcreteModelObjects<FanOnOff>(); EXPECT_EQ(1, fans.size()); heatingCoils = m2.getConcreteModelObjects<CoilHeatingElectric>(); EXPECT_EQ(2, heatingCoils.size()); coolingCoils = m2.getConcreteModelObjects<CoilCoolingWater>(); EXPECT_EQ(1, coolingCoils.size()); } TEST_F(ModelFixture,AirLoopHVACUnitarySystem_addToNode) { Model m; AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); AirLoopHVAC airLoop(m); Node supplyOutletNode = airLoop.supplyOutletNode(); EXPECT_TRUE(testObject.addToNode(supplyOutletNode)); EXPECT_EQ( (unsigned)3, airLoop.supplyComponents().size() ); EXPECT_TRUE(testObject.inletPort()); EXPECT_TRUE(testObject.outletPort()); } TEST_F(ModelFixture, AirLoopHVACUnitarySystem_AddToNodeTwoSameObjects) { Model m; AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); AirLoopHVAC airLoop(m); Node supplyOutletNode = airLoop.supplyOutletNode(); testObject.addToNode(supplyOutletNode); supplyOutletNode = airLoop.supplyOutletNode(); EXPECT_FALSE(testObject.addToNode(supplyOutletNode)); EXPECT_TRUE(testObject.inletPort()); EXPECT_TRUE(testObject.outletPort()); } TEST_F(ModelFixture, AirLoopHVACUnitarySystem_AddToNodeAirLoopDemandSide) { Model m; AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); AirLoopHVAC airLoop(m); Node inletNode = airLoop.zoneSplitter().lastOutletModelObject()->cast<Node>(); EXPECT_FALSE(testObject.addToNode(inletNode)); EXPECT_EQ((unsigned)5, airLoop.demandComponents().size()); } TEST_F(ModelFixture, AirLoopHVACUnitarySystem_AddToNodePlantLoop) { // As of right now you cannot add the unitary to a plant. // Some heat recovery configurations may enable it, but more likely // the inner components will be added to the plant. 
Model m; AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); PlantLoop plantLoop(m); EXPECT_EQ( (unsigned)5,plantLoop.demandComponents().size() ); Node demandInletNode = plantLoop.demandSplitter().lastOutletModelObject()->cast<Node>(); EXPECT_FALSE(testObject.addToNode(demandInletNode)); EXPECT_FALSE(plantLoop.addDemandBranchForComponent(testObject)); EXPECT_EQ((unsigned)5, plantLoop.demandComponents().size()); Node supplyInletNode = plantLoop.supplySplitter().lastOutletModelObject()->cast<Node>(); EXPECT_FALSE(testObject.addToNode(supplyInletNode)); EXPECT_EQ((unsigned)5, plantLoop.supplyComponents().size()); EXPECT_FALSE(plantLoop.addSupplyBranchForComponent(testObject)); EXPECT_EQ((unsigned)5, plantLoop.supplyComponents().size()); ASSERT_FALSE(testObject.plantLoop()); // EXPECT_EQ(plantLoop, testObject.plantLoop().get()); } TEST_F(ModelFixture, AirLoopHVACUnitarySystem_WaterHeatingCoilToPlant) { Model m; Schedule s = m.alwaysOnDiscreteSchedule(); AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); FanOnOff fan = FanOnOff(m,s); CoilHeatingWater heatingCoil = CoilHeatingWater(m,s); CoilHeatingWater suppHeatingCoil = CoilHeatingWater(m,s); CoilCoolingWater coolingCoil = CoilCoolingWater(m,s); testObject.setSupplyFan(fan); testObject.setCoolingCoil(coolingCoil); testObject.setHeatingCoil(heatingCoil); testObject.setSupplementalHeatingCoil(suppHeatingCoil); PlantLoop plantLoop(m); EXPECT_TRUE(plantLoop.addDemandBranchForComponent(heatingCoil)); EXPECT_TRUE(plantLoop.addDemandBranchForComponent(suppHeatingCoil)); EXPECT_EQ((unsigned)10, plantLoop.demandComponents().size()); EXPECT_NE((unsigned)7, plantLoop.demandComponents().size()); testObject.remove(); EXPECT_EQ((unsigned)5, plantLoop.demandComponents().size()); } TEST_F(ModelFixture, AirLoopHVACUnitarySystem_RemoveWaterHeatingCoilFromPlant) { Model m; Schedule s = m.alwaysOnDiscreteSchedule(); AirLoopHVACUnitarySystem testObject = AirLoopHVACUnitarySystem(m); FanOnOff fan = FanOnOff(m,s); CoilHeatingWater heatingCoil = CoilHeatingWater(m,s); CoilHeatingWater suppHeatingCoil = CoilHeatingWater(m,s); CoilCoolingWater coolingCoil = CoilCoolingWater(m,s); testObject.setSupplyFan(fan); testObject.setCoolingCoil(coolingCoil); testObject.setHeatingCoil(heatingCoil); testObject.setSupplementalHeatingCoil(suppHeatingCoil); PlantLoop plantLoop(m); EXPECT_TRUE(plantLoop.addDemandBranchForComponent(heatingCoil)); EXPECT_TRUE(plantLoop.addDemandBranchForComponent(suppHeatingCoil)); EXPECT_TRUE(plantLoop.removeDemandBranchWithComponent(heatingCoil)); EXPECT_TRUE(plantLoop.removeDemandBranchWithComponent(suppHeatingCoil)); EXPECT_EQ((unsigned)5, plantLoop.demandComponents().size()); EXPECT_NE((unsigned)7, plantLoop.demandComponents().size()); } TEST_F(ModelFixture,AirLoopHVACUnitarySystem_containingHVACComponent) { Model m; Schedule s = m.alwaysOnDiscreteSchedule(); CurveBiquadratic c1(m); CurveQuadratic c2(m); CurveBiquadratic c3(m); CurveQuadratic c4(m); CurveQuadratic c5(m); CurveBiquadratic c6(m); CurveBiquadratic c7(m); FanOnOff fanOnOff = FanOnOff(m, s); CoilHeatingWaterToAirHeatPumpEquationFit coilHeatingWaterToAirHeatPumpEquationFit(m); CoilCoolingWaterToAirHeatPumpEquationFit coilCoolingWaterToAirHeatPumpEquationFit(m); CoilHeatingElectric coilHeatingElectricSupp(m, s); AirLoopHVACUnitarySystem testObject(m); EXPECT_TRUE(testObject.setSupplyFan(fanOnOff)); EXPECT_TRUE(testObject.setCoolingCoil(coilCoolingWaterToAirHeatPumpEquationFit)); 
EXPECT_TRUE(testObject.setHeatingCoil(coilHeatingWaterToAirHeatPumpEquationFit)); EXPECT_TRUE(testObject.setSupplementalHeatingCoil(coilHeatingElectricSupp)); boost::optional<HVACComponent> component = fanOnOff.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilHeatingWaterToAirHeatPumpEquationFit.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilCoolingWaterToAirHeatPumpEquationFit.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilHeatingElectricSupp.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); FanConstantVolume fanConstantVolume = FanConstantVolume(m, s); CoilHeatingWater coilHeatingWater(m, s); CoilCoolingWater coilCoolingWater(m, s); CoilHeatingGas coilHeatingGasSupp(m, s); EXPECT_TRUE(testObject.setSupplyFan(fanConstantVolume)); EXPECT_TRUE(testObject.setCoolingCoil(coilCoolingWater)); EXPECT_TRUE(testObject.setHeatingCoil(coilHeatingWater)); EXPECT_TRUE(testObject.setSupplementalHeatingCoil(coilHeatingGasSupp)); component = fanConstantVolume.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilHeatingWater.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilCoolingWater.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilHeatingGasSupp.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); FanVariableVolume fanVariableVolume = FanVariableVolume(m, s); CoilHeatingDXSingleSpeed coilHeatingDXSingleSpeed(m, s, c1, c2, c3, c4, c5); CoilCoolingDXSingleSpeed coilCoolingDXSingleSpeed(m, s, c1, c2, c3, c4, c5); CoilHeatingWater coilHeatingWaterSupp(m, s); EXPECT_TRUE(testObject.setSupplyFan(fanVariableVolume)); EXPECT_TRUE(testObject.setCoolingCoil(coilCoolingDXSingleSpeed)); EXPECT_TRUE(testObject.setHeatingCoil(coilHeatingDXSingleSpeed)); EXPECT_TRUE(testObject.setSupplementalHeatingCoil(coilHeatingWaterSupp)); component = fanVariableVolume.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilHeatingDXSingleSpeed.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilCoolingDXSingleSpeed.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilHeatingWaterSupp.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); CoilHeatingDesuperheater coilHeatingDesupeheater(m); CoilCoolingDXTwoSpeed coilCoolingDXTwoSpeed(m, s, c1, c2, c3, c4, c5, c6, c7); CoilHeatingDesuperheater coilHeatingDesupeheaterSupp(m); EXPECT_TRUE(testObject.setSupplyFan(fanOnOff)); EXPECT_TRUE(testObject.setCoolingCoil(coilCoolingDXTwoSpeed)); EXPECT_TRUE(testObject.setHeatingCoil(coilHeatingDesupeheater)); EXPECT_TRUE(testObject.setSupplementalHeatingCoil(coilHeatingDesupeheaterSupp)); component = coilHeatingDesupeheater.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilCoolingDXTwoSpeed.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); component = coilHeatingDesupeheaterSupp.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); CoilHeatingElectric coilHeatingElectric(m, s); EXPECT_TRUE(testObject.setSupplyFan(fanOnOff)); 
EXPECT_TRUE(testObject.setCoolingCoil(coilCoolingWater)); EXPECT_TRUE(testObject.setHeatingCoil(coilHeatingElectric)); EXPECT_TRUE(testObject.setSupplementalHeatingCoil(coilHeatingWaterSupp)); component = coilHeatingElectric.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); CoilHeatingGas coilHeatingGas(m, s); EXPECT_TRUE(testObject.setSupplyFan(fanOnOff)); EXPECT_TRUE(testObject.setCoolingCoil(coilCoolingWater)); EXPECT_TRUE(testObject.setHeatingCoil(coilHeatingGas)); EXPECT_TRUE(testObject.setSupplementalHeatingCoil(coilHeatingWaterSupp)); component = coilHeatingGas.containingHVACComponent(); ASSERT_TRUE(component); EXPECT_EQ(*component, testObject); } TEST_F(ModelFixture, AirLoopHVACUnitarySystem_ControlType) { Model m; AirLoopHVACUnitarySystem a = AirLoopHVACUnitarySystem(m); // Tests constructor EXPECT_EQ("Load", a.controlType()); EXPECT_FALSE(a.isControlTypeDefaulted()); ASSERT_TRUE(a.setControlType("Setpoint")); ASSERT_FALSE(a.isControlTypeDefaulted()); a.resetControlType(); ASSERT_TRUE(a.isControlTypeDefaulted()); ASSERT_EQ("Load", a.controlType()); }
43.16323
125
0.798416
mehrdad-shokri
4fe35522dd811ea3a4133d69d70bd53eadb7ba1a
308
cpp
C++
src/2/2475.cpp
youngdaLee/Baekjoon
7d858d557dbbde6603fe4e8af2891c2b0e1940c0
[ "MIT" ]
11
2020-09-20T15:17:11.000Z
2022-03-17T12:43:33.000Z
src/2/2475.cpp
youngdaLee/Baekjoon
7d858d557dbbde6603fe4e8af2891c2b0e1940c0
[ "MIT" ]
3
2021-10-30T07:51:36.000Z
2022-03-09T05:19:23.000Z
src/2/2475.cpp
youngdaLee/Baekjoon
7d858d557dbbde6603fe4e8af2891c2b0e1940c0
[ "MIT" ]
13
2021-01-21T03:19:08.000Z
2022-03-28T10:44:58.000Z
/**
 * 2475. Verification Number
 *
 * Author: xCrypt0r
 * Language: C++14
 * Memory used: 1,984 KB
 * Running time: 0 ms
 * Solved: August 21, 2020
 */

#include <iostream>

using namespace std;

int main()
{
    int n1, n2, n3, n4, n5;

    cin >> n1 >> n2 >> n3 >> n4 >> n5;

    cout << (n1 * n1 + n2 * n2 + n3 * n3 + n4 * n4 + n5 * n5) % 10;
}
14
67
0.474026
youngdaLee
4fe6ad2f0632184bd2f29c82339cdfb448172276
576
cpp
C++
plugins/protein_calls/src/IntSelectionCall.cpp
azuki-monster/megamol
f5d75ae5630f9a71a7fbf81624bfd4f6b253c655
[ "BSD-3-Clause" ]
2
2020-10-16T10:15:37.000Z
2021-01-21T13:06:00.000Z
plugins/protein_calls/src/IntSelectionCall.cpp
azuki-monster/megamol
f5d75ae5630f9a71a7fbf81624bfd4f6b253c655
[ "BSD-3-Clause" ]
null
null
null
plugins/protein_calls/src/IntSelectionCall.cpp
azuki-monster/megamol
f5d75ae5630f9a71a7fbf81624bfd4f6b253c655
[ "BSD-3-Clause" ]
1
2021-01-28T01:19:54.000Z
2021-01-28T01:19:54.000Z
#include "stdafx.h" #include "protein_calls/IntSelectionCall.h" using namespace megamol; using namespace megamol::protein_calls; /* * IntSelectionCall::CallForGetSelection */ const unsigned int IntSelectionCall::CallForGetSelection = 0; /* * IntSelectionCall::CallForSetSelection */ const unsigned int IntSelectionCall::CallForSetSelection = 1; /* * IntSelectionCall:IntSelectionCall */ IntSelectionCall::IntSelectionCall(void) : selection(NULL) { } /* * IntSelectionCall::~IntSelectionCall */ IntSelectionCall::~IntSelectionCall(void) { selection = NULL; }
19.2
61
0.767361
azuki-monster
4fe9892661be507584dd0d847f22f40e1b6c9ff8
183,668
cpp
C++
pxr/usd/lib/pcp/primIndex.cpp
navefx/YuksUSD
56c2e1def36ee07121f4ecb349c1626472b3c338
[ "AML" ]
6
2018-08-26T13:27:22.000Z
2021-08-14T23:57:38.000Z
pxr/usd/lib/pcp/primIndex.cpp
navefx/YuksUSD
56c2e1def36ee07121f4ecb349c1626472b3c338
[ "AML" ]
1
2021-08-14T23:57:51.000Z
2021-08-14T23:57:51.000Z
pxr/usd/lib/pcp/primIndex.cpp
navefx/YuksUSD
56c2e1def36ee07121f4ecb349c1626472b3c338
[ "AML" ]
4
2018-06-14T18:14:59.000Z
2021-09-13T22:20:50.000Z
// // Copyright 2016 Pixar // // Licensed under the Apache License, Version 2.0 (the "Apache License") // with the following modification; you may not use this file except in // compliance with the Apache License and the following modification to it: // Section 6. Trademarks. is deleted and replaced with: // // 6. Trademarks. This License does not grant permission to use the trade // names, trademarks, service marks, or product names of the Licensor // and its affiliates, except as required to comply with Section 4(c) of // the License and to reproduce the content of the NOTICE file. // // You may obtain a copy of the Apache License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the Apache License with the above modification is // distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the Apache License for the specific // language governing permissions and limitations under the Apache License. // #include "pxr/pxr.h" #include "pxr/usd/pcp/primIndex.h" #include "pxr/usd/pcp/arc.h" #include "pxr/usd/pcp/cache.h" #include "pxr/usd/pcp/composeSite.h" #include "pxr/usd/pcp/debugCodes.h" #include "pxr/usd/pcp/diagnostic.h" #include "pxr/usd/pcp/instancing.h" #include "pxr/usd/pcp/layerStack.h" #include "pxr/usd/pcp/layerStackRegistry.h" #include "pxr/usd/pcp/node_Iterator.h" #include "pxr/usd/pcp/primIndex_Graph.h" #include "pxr/usd/pcp/primIndex_StackFrame.h" #include "pxr/usd/pcp/payloadContext.h" #include "pxr/usd/pcp/payloadDecorator.h" #include "pxr/usd/pcp/statistics.h" #include "pxr/usd/pcp/strengthOrdering.h" #include "pxr/usd/pcp/types.h" #include "pxr/usd/pcp/utils.h" #include "pxr/usd/ar/resolver.h" #include "pxr/usd/ar/resolverContextBinder.h" #include "pxr/usd/sdf/layer.h" #include "pxr/usd/sdf/layerUtils.h" #include "pxr/base/trace/trace.h" #include "pxr/base/tf/debug.h" #include "pxr/base/tf/enum.h" #include "pxr/base/tf/diagnostic.h" #include "pxr/base/tf/envSetting.h" #include "pxr/base/tf/mallocTag.h" #include <boost/functional/hash.hpp> #include <boost/optional.hpp> #include <algorithm> #include <functional> #include <vector> // Un-comment for extra runtime validation. // #define PCP_DIAGNOSTIC_VALIDATION 1 using std::string; using std::vector; PXR_NAMESPACE_OPEN_SCOPE TF_DEFINE_ENV_SETTING( MENV30_ENABLE_NEW_DEFAULT_STANDIN_BEHAVIOR, true, "If enabled then standin preference is weakest opinion."); static inline PcpPrimIndex const * _GetOriginatingIndex(PcpPrimIndex_StackFrame *previousFrame, PcpPrimIndexOutputs *outputs) { return ARCH_UNLIKELY(previousFrame) ? previousFrame->originatingIndex : &outputs->primIndex; } bool PcpIsNewDefaultStandinBehaviorEnabled() { return TfGetEnvSetting(MENV30_ENABLE_NEW_DEFAULT_STANDIN_BEHAVIOR); } //////////////////////////////////////////////////////////////////////// PcpPrimIndex::PcpPrimIndex() { } void PcpPrimIndex::SetGraph(const PcpPrimIndex_GraphRefPtr& graph) { _graph = graph; } PcpPrimIndex_GraphPtr PcpPrimIndex::GetGraph() const { return _graph; } PcpNodeRef PcpPrimIndex::GetRootNode() const { return _graph ? _graph->GetRootNode() : PcpNodeRef(); } const SdfPath& PcpPrimIndex::GetPath() const { return _graph ? 
_graph->GetRootNode().GetPath() : SdfPath::EmptyPath(); } bool PcpPrimIndex::HasSpecs() const { return !_primStack.empty(); } bool PcpPrimIndex::HasPayload() const { return _graph && _graph->HasPayload(); } bool PcpPrimIndex::IsUsd() const { return _graph && _graph->IsUsd(); } bool PcpPrimIndex::IsInstanceable() const { return _graph && _graph->IsInstanceable(); } PcpPrimIndex::PcpPrimIndex(const PcpPrimIndex &rhs) { _graph = rhs._graph; _primStack = rhs._primStack; if (rhs._localErrors) { _localErrors.reset(new PcpErrorVector(*rhs._localErrors.get())); } } void PcpPrimIndex::Swap(PcpPrimIndex& rhs) { _graph.swap(rhs._graph); _primStack.swap(rhs._primStack); _localErrors.swap(rhs._localErrors); } void PcpPrimIndex::PrintStatistics() const { Pcp_PrintPrimIndexStatistics(*this); } std::string PcpPrimIndex::DumpToString( bool includeInheritOriginInfo, bool includeMaps) const { return PcpDump( *this, includeInheritOriginInfo, includeMaps); } void PcpPrimIndex::DumpToDotGraph( const std::string& filename, bool includeInheritOriginInfo, bool includeMaps) const { PcpDumpDotGraph( *this, filename.c_str(), includeInheritOriginInfo, includeMaps); } PcpNodeRange PcpPrimIndex::GetNodeRange(PcpRangeType rangeType) const { if (!_graph) { return PcpNodeRange(); } const std::pair<size_t, size_t> range = _graph->GetNodeIndexesForRange(rangeType); return PcpNodeRange( PcpNodeIterator(boost::get_pointer(_graph), range.first), PcpNodeIterator(boost::get_pointer(_graph), range.second)); } PcpPrimRange PcpPrimIndex::GetPrimRange(PcpRangeType rangeType) const { if (!_graph) { return PcpPrimRange(); } // Early out for common case of retrieving entire prim range. if (rangeType == PcpRangeTypeAll) { return PcpPrimRange( PcpPrimIterator(this, 0), PcpPrimIterator(this, _primStack.size())); } const std::pair<size_t, size_t> range = _graph->GetNodeIndexesForRange(rangeType); const size_t startNodeIdx = range.first; const size_t endNodeIdx = range.second; for (size_t startPrimIdx = 0; startPrimIdx < _primStack.size(); ++startPrimIdx) { const Pcp_CompressedSdSite& startPrim = _primStack[startPrimIdx]; if (startPrim.nodeIndex >= startNodeIdx && startPrim.nodeIndex < endNodeIdx) { size_t endPrimIdx = startPrimIdx + 1; for (; endPrimIdx < _primStack.size(); ++endPrimIdx) { const Pcp_CompressedSdSite& endPrim = _primStack[endPrimIdx]; if (endPrim.nodeIndex >= endNodeIdx) { break; } } return PcpPrimRange( PcpPrimIterator(this, startPrimIdx), PcpPrimIterator(this, endPrimIdx)); } } return PcpPrimRange(PcpPrimIterator(this, _primStack.size()), PcpPrimIterator(this, _primStack.size())); } PcpPrimRange PcpPrimIndex::GetPrimRangeForNode(const PcpNodeRef& node) const { PcpPrimIterator firstIt(this, 0); PcpPrimIterator endIt(this, _primStack.size()); // XXX: optimization // This is slow, but the prim index doesn't provide us any faster // way to associate a node with prims in the prim stack. We may need // to store indices into the prim stack with each node, similar to // Csd_NamespaceExcerpt and Csd_PrimCache. 
while (firstIt != endIt && firstIt.GetNode() != node) { ++firstIt; } if (firstIt == endIt) { return PcpPrimRange(); } PcpPrimIterator lastIt = firstIt; while (++lastIt != endIt && lastIt.GetNode() == node) { // Do nothing } return PcpPrimRange(firstIt, lastIt); } PcpNodeRef PcpPrimIndex::GetNodeProvidingSpec(const SdfPrimSpecHandle& primSpec) const { return GetNodeProvidingSpec(primSpec->GetLayer(), primSpec->GetPath()); } PcpNodeRef PcpPrimIndex::GetNodeProvidingSpec( const SdfLayerHandle& layer, const SdfPath& path) const { for (const PcpNodeRef &node: GetNodeRange()) { // If the site has the given path and contributes specs then // search for the layer. if (node.CanContributeSpecs() && node.GetPath() == path && node.GetLayerStack()->HasLayer(layer)) { return node; } } return PcpNodeRef(); } SdfVariantSelectionMap PcpPrimIndex::ComposeAuthoredVariantSelections() const { TRACE_FUNCTION(); // Collect the selections according to the prim stack. SdfVariantSelectionMap result; const TfToken field = SdfFieldKeys->VariantSelection; TF_FOR_ALL(i, GetPrimRange()) { Pcp_SdSiteRef site = i.base()._GetSiteRef(); const VtValue& value = site.layer->GetField(site.path, field); if (value.IsHolding<SdfVariantSelectionMap>()) { const SdfVariantSelectionMap & vselMap = value.UncheckedGet<SdfVariantSelectionMap>(); result.insert(vselMap.begin(), vselMap.end()); } } return result; } std::string PcpPrimIndex::GetSelectionAppliedForVariantSet( const std::string &variantSet) const { for (const PcpNodeRef &node: GetNodeRange()) { if (node.GetPath().IsPrimVariantSelectionPath()) { std::pair<std::string, std::string> vsel = node.GetPath().GetVariantSelection(); if (vsel.first == variantSet) return vsel.second; } } return std::string(); } //////////////////////////////////////////////////////////////////////// template <class T> static bool _CheckIfEquivalent(const T* lhsPtr, const T* rhsPtr) { if (lhsPtr == rhsPtr) { return true; } static const T empty; const T& lhs = (lhsPtr ? *lhsPtr : empty); const T& rhs = (rhsPtr ? *rhsPtr : empty); return lhs == rhs; } bool PcpPrimIndexInputs::IsEquivalentTo(const PcpPrimIndexInputs& inputs) const { // Don't consider the PcpCache when determining equivalence, as // prim index computation is independent of the cache. return _CheckIfEquivalent(variantFallbacks, inputs.variantFallbacks) && _CheckIfEquivalent(includedPayloads, inputs.includedPayloads) && cull == inputs.cull; } //////////////////////////////////////////////////////////////////////// static void Pcp_BuildPrimIndex( const PcpLayerStackSite & site, const PcpLayerStackSite & rootSite, int ancestorRecursionDepth, bool evaluateImpliedSpecializes, bool evaluateVariants, bool directNodeShouldContributeSpecs, PcpPrimIndex_StackFrame *previousFrame, const PcpPrimIndexInputs& inputs, PcpPrimIndexOutputs* outputs); static inline bool _NodeCanBeCulled(const PcpNodeRef& node, const PcpLayerStackSite& rootLayerStack); static void _GatherNodesRecursively(const PcpNodeRef& node, std::vector<PcpNodeRef> *result); static bool _HasSpecializesChild(const PcpNodeRef & parent) { TF_FOR_ALL(child, Pcp_GetChildrenRange(parent)) { if (PcpIsSpecializesArc((*child).GetArcType())) return true; } return false; } // The implied specializes algorithm wants to start at the // most ancestral parent of the given node that is a specializes // arc, if such a node exists. 
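// For example (hypothetical chain): suppose the parent chain from the given node up to the root is N -> S2 -> R -> S1 -> root, where S2 and S1 are specializes nodes and R is a reference node. The walk below records S2 and then overwrites it with S1, so the most ancestral specializes node, S1, is returned; if no specializes node is seen on the walk, an empty PcpNodeRef is returned. 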
static PcpNodeRef _FindStartingNodeForImpliedSpecializes(const PcpNodeRef& node) { PcpNodeRef specializesNode; for (PcpNodeRef n = node, e = n.GetRootNode(); n != e; n = n.GetParentNode()) { if (PcpIsSpecializesArc(n.GetArcType())) { specializesNode = n; } } return specializesNode; } static bool _HasClassBasedChild(const PcpNodeRef & parent) { TF_FOR_ALL(child, Pcp_GetChildrenRange(parent)) { if (PcpIsClassBasedArc((*child).GetArcType())) return true; } return false; } // Find the starting node of the class hierarchy of which node n is a part. // This is the prim that starts the class chain, aka the 'instance' of the // class hierarchy. Also returns the node for the first class in the // chain that the instance inherits opinions from. // // For example, consider an inherits chain like this: I --> C1 --> C2 --> C3. // When given either C1, C2, or C3, this method will return (I, C1). // What will it do when given I? Keep reading. // // One tricky aspect is that we need to distinguish nested class // hierarchies at different levels of namespace, aka ancestral classes. // Returning to the example above, consider if I -> ... -> C3 were all // nested as sibling children under a global class, G, with instance M: // // inherits // M ------------------------> G (depth=1) // | | // +- I (depth=1) +- I (depth=1) // | : | : // | : inherits | : inherits // | v | v // +- C1 (depth=2) +- C1 (depth=2) // | : | : // | : inherits | : inherits // | v | v // +- C2 (depth=2) +- C2 (depth=2) // | : | : // | : inherits | : inherits // | v | v // +- C3 (depth=2) +- C3 (depth=2) // // Asking for the starting node of M/C1 .. M/C3 should all return (M/I, M/C1). // Asking for the starting node of G/C1 .. G/C3 should all return (G/I, G/C1). // // However, asking for the starting node of G/I should return (M/I, G/I), // because it is walking up the ancestral classes (M->G) instead. // // We distinguish ancestral class chains by considering, for the // nodes being examined, how far they are below the point in namespace // where they were introduced, using GetDepthBelowIntroduction(). // This lets us distinguish the hierarchy connecting the children // G/C1, G/C2, and G/C3 (all at depth=2) from the ancestral hierarchy // connecting G/I to M/I, which was introduced at depth=1 and thus up // one level of ancestry. // // Note that this approach also handles a chain of classes that // happen to live at different levels of namespace but which are not // ancestrally connected to one another. For example, consider if C2 // was tucked under a parent scope D: // // inherits // M ------------------------> G // | | // +- I (depth=1) +- I (depth=1) // | : | : // | : inherits | : inherits // | v | v // +- C1 (depth=2) +- C1 (depth=2) // | : | : // +- D : inherits +- D : inherits // | | v | | v // | +- C2 (depth=3) | +- C2 (depth=3) // | : | : // | : inherits | : inherits // | v | v // +- C3 (depth=2) +- C3 (depth=2) // // Here, G/C1, G/D/C2, and G/C3 are all still identified as part of // the same hierarchy. C1 and C3 are at depth=2 and have 2 path // components; C2 is at depth=3 and has 3 path components. Thus, // they all have the same GetDepthBelowIntroduction(). 
// static std::pair<PcpNodeRef, PcpNodeRef> _FindStartingNodeOfClassHierarchy(const PcpNodeRef& n) { TF_VERIFY(PcpIsClassBasedArc(n.GetArcType())); const int depth = n.GetDepthBelowIntroduction(); PcpNodeRef instanceNode = n; PcpNodeRef classNode; while (PcpIsClassBasedArc(instanceNode.GetArcType()) && instanceNode.GetDepthBelowIntroduction() == depth) { TF_VERIFY(instanceNode.GetParentNode()); classNode = instanceNode; instanceNode = instanceNode.GetParentNode(); } return std::make_pair(instanceNode, classNode); } // Given class-based node n, returns the 'starting' node where implied class // processing should begin in order to correctly propagate n through the // graph. // // The starting node will generally be the starting node of the class hierarchy // that n is a part of. For instance, in the simple case: // // inh inh inh // I ---> C1 ---> C2 ---> C3 ... // // Given any of { C1, C2, C3, ... }, the starting node would be I // (See _FindStartingNodeOfClassHierarchy). This causes the entire class // hierarchy to be propagated as a unit. If we were to propagate each class // individually, it would be as if I inherited directly from C1, C2, and C3, // which is incorrect. // // This gets more complicated when ancestral classes are involved. Basically, // when a class-based node is added, we have to take into account the location // of that node's site relative to the ancestral class to determine where to // start from. // // Consider the prim /M/I/A in the following example: // // reference // M --------------------------> R // | | // +- CA <----+ implied inh. +- CA <----+ inherit // | | | | // +- C1 <----|--+ implied inh. +- C1 <----|--+ inherit // | | | | | | | | // | +- A ---+ | | +- A ---+ | // | | | | // +- I ---------+ +- I ---------+ // | | // +- A +- A // // /M/I/A inherits opinions from /M/C1/A due to the ancestral inherit arc // between /M/I and /M/C1. Then, /M/C1/A inherits opinions from /M/CA. // However, /M/I/A does NOT explicitly inherit opinions from /M/CA. If it did, // opinions from /M/CA would show up twice. // // To ensure /M/I/A does not explicitly inherit from /M/CA, when /R/CA is added // the chain of inherit nodes: inh inh // /R/I/A ---> /R/C1/A ---> /R/CA // // Must be propagated as a single unit, even though it does not form a single // class hierarchy. So, the starting node would be /R/I/A. // // Contrast that with this case: // // reference // M --------------------------> R // | | // +- C1 <------------+ implied +- C1 <------------+ inherit // | | | inh. | | | // | +- CA <-+ impl. | | +- CA <-+ inh. | // | | | inh. | | | | | // | +- A ---+ | | +- A ---+ | // | | | | // +- I --------------+ +- I --------------+ // | | // +- CA <-+ +- CA <-+ // | | implied inh. | | implied inh. // +- A ---+ +- A ---+ // // In this case, we do expect /M/I/A to explicitly inherit from /M/I/CA. // When /R/C1/CA is added, the chain: inh inh // /R/I/A ---> /R/C1/A ---> /R/C1/CA // // Must be propagated as a single unit (Note that this *is* a class hierarchy). // So, the starting node would be /R/I/A. // // This (deceivingly simple) function accounts for all this. // These variations are captured in the TrickyNestedClasses museum cases. 
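// Illustrative walk-through (using the diagrams above): starting from C3, the first call to _FindStartingNodeOfClassHierarchy collapses the chain I -> C1 -> C2 -> C3 into the pair (I, C1), making I the candidate starting node. If I is itself class-based, an ancestral inherit is in play: when C1's site is a namespace child of the class that I was introduced from, the loop stops and I is the answer; otherwise the loop repeats from I to find the start of the ancestral class hierarchy. 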
static PcpNodeRef _FindStartingNodeForImpliedClasses(const PcpNodeRef& n) { TF_VERIFY(PcpIsClassBasedArc(n.GetArcType())); PcpNodeRef startNode = n; while (PcpIsClassBasedArc(startNode.GetArcType())) { const std::pair<PcpNodeRef, PcpNodeRef> instanceAndClass = _FindStartingNodeOfClassHierarchy(startNode); const PcpNodeRef& instanceNode = instanceAndClass.first; const PcpNodeRef& classNode = instanceAndClass.second; startNode = instanceNode; // If the instance that inherits the class hierarchy is itself // a class-based node, there must be an ancestral inherit arc which // we need to consider. If the class being inherited from is a // namespace child of the ancestral class (the second case shown // above), we're done. Otherwise, we'll iterate again to find the // start of the ancestral class hierarchy. if (PcpIsClassBasedArc(instanceNode.GetArcType())) { const SdfPath ancestralClassPath = instanceNode.GetPathAtIntroduction(); const bool classHierarchyIsChildOfAncestralHierarchy = classNode.GetPath().HasPrefix(ancestralClassPath); if (classHierarchyIsChildOfAncestralHierarchy) { break; } } } return startNode; } // This is a convenience function to create a map expression // that maps a given source path to a target node, composing in // relocations and layer offsets if any exist. static PcpMapExpression _CreateMapExpressionForArc(const SdfPath &sourcePath, const PcpNodeRef &targetNode, const PcpPrimIndexInputs &inputs, const SdfLayerOffset &offset = SdfLayerOffset()) { const SdfPath targetPath = targetNode.GetPath().StripAllVariantSelections(); PcpMapFunction::PathMap sourceToTargetMap; sourceToTargetMap[sourcePath] = targetPath; PcpMapExpression arcExpr = PcpMapExpression::Constant( PcpMapFunction::Create( sourceToTargetMap, offset ) ); // Apply relocations that affect namespace at and below this site. if (!inputs.usd) { arcExpr = targetNode.GetLayerStack() ->GetExpressionForRelocatesAtPath(targetPath) .Compose(arcExpr); } return arcExpr; } //////////////////////////////////////////////////////////////////////// namespace { /// A task to perform on a particular node. struct Task { /// This enum must be in evaluation priority order. enum Type { EvalNodeRelocations, EvalImpliedRelocations, EvalNodeReferences, EvalNodePayload, EvalNodeInherits, EvalImpliedClasses, EvalNodeSpecializes, EvalImpliedSpecializes, EvalNodeVariantSets, EvalNodeVariantAuthored, EvalNodeVariantFallback, EvalNodeVariantNoneFound, None }; // This sorts tasks in priority order from lowest priority to highest // priority, so highest priority tasks come last. struct PriorityOrder { inline bool operator()(const Task& a, const Task& b) const { if (a.type != b.type) { return a.type > b.type; } // Node strength order is costly to compute, so avoid it for // arcs with order-independent results. switch (a.type) { case EvalNodePayload: if (_hasPayloadDecorator) { // Payload decorators can depend on non-local information, // so we must process these in strength order. return PcpCompareNodeStrength(a.node, b.node) == 1; } else { // Arbitrary order return a.node > b.node; } case EvalNodeVariantAuthored: case EvalNodeVariantFallback: // Variant selections can depend on non-local information // so we must visit them in strength order. if (a.node != b.node) { return PcpCompareNodeStrength(a.node, b.node) == 1; } else { // Lower-number vsets have strength priority. 
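// (For two vsets on the same node, this means vset 0 sorts after vset 1 in the queue and, since tasks are popped from the back, is evaluated first.) 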
return a.vsetNum > b.vsetNum; } case EvalNodeVariantNoneFound: // In the none-found case, we only need to ensure a consistent // and distinct order for distinct tasks; the specific order can // be arbitrary. if (a.node != b.node) { return a.node > b.node; } else { return a.vsetNum > b.vsetNum; } default: // Arbitrary order return a.node > b.node; } } // We can use a slightly cheaper ordering for payload arcs // when there is no payload decorator. const bool _hasPayloadDecorator; PriorityOrder(bool hasPayloadDecorator) : _hasPayloadDecorator(hasPayloadDecorator) {} }; explicit Task(Type type, const PcpNodeRef& node = PcpNodeRef()) : type(type) , node(node) , vsetNum(0) { } Task(Type type, const PcpNodeRef& node, std::string &&vsetName, int vsetNum) : type(type) , node(node) , vsetName(std::move(vsetName)) , vsetNum(vsetNum) { } Task(Type type, const PcpNodeRef& node, std::string const &vsetName, int vsetNum) : type(type) , node(node) , vsetName(vsetName) , vsetNum(vsetNum) { } inline bool operator==(Task const &rhs) const { return type == rhs.type && node == rhs.node && vsetName == rhs.vsetName && vsetNum == rhs.vsetNum; } inline bool operator!=(Task const &rhs) const { return !(*this == rhs); } friend void swap(Task &lhs, Task &rhs) { std::swap(lhs.type, rhs.type); std::swap(lhs.node, rhs.node); lhs.vsetName.swap(rhs.vsetName); std::swap(lhs.vsetNum, rhs.vsetNum); } // Stream insertion operator for debugging. friend std::ostream &operator<<(std::ostream &os, Task const &task) { os << TfStringPrintf( "Task(type=%s, nodePath=<%s>, nodeSite=<%s>", TfEnum::GetName(task.type).c_str(), task.node.GetPath().GetText(), TfStringify(task.node.GetSite()).c_str()); if (task.vsetName) { os << TfStringPrintf(", vsetName=%s, vsetNum=%d", task.vsetName->c_str(), task.vsetNum); } return os << ")"; } Type type; PcpNodeRef node; // only for variant tasks: boost::optional<std::string> vsetName; int vsetNum; }; } TF_REGISTRY_FUNCTION(TfEnum) { TF_ADD_ENUM_NAME(Task::EvalNodeRelocations); TF_ADD_ENUM_NAME(Task::EvalImpliedRelocations); TF_ADD_ENUM_NAME(Task::EvalNodeReferences); TF_ADD_ENUM_NAME(Task::EvalNodePayload); TF_ADD_ENUM_NAME(Task::EvalNodeInherits); TF_ADD_ENUM_NAME(Task::EvalImpliedClasses); TF_ADD_ENUM_NAME(Task::EvalNodeSpecializes); TF_ADD_ENUM_NAME(Task::EvalImpliedSpecializes); TF_ADD_ENUM_NAME(Task::EvalNodeVariantSets); TF_ADD_ENUM_NAME(Task::EvalNodeVariantAuthored); TF_ADD_ENUM_NAME(Task::EvalNodeVariantFallback); TF_ADD_ENUM_NAME(Task::EvalNodeVariantNoneFound); TF_ADD_ENUM_NAME(Task::None); } // Pcp_PrimIndexer is used during prim cache population to track which // tasks remain to finish building the graph. As new nodes are added, // we add task entries to this structure, which ensures that we // process them in an appropriate order. // // This is the high-level control logic for the population algorithm. // At each step, it determines what will happen next. // // Notes on the algorithm: // // - We can process inherits and implied inherits in any order, // as long as we finish them before moving on to // deciding references and variants. This is because evaluating any // arcs of the former group does not affect how we evaluate other arcs // of that group -- but they do affect how we evaluate references, // variants and payloads. Specifically, they may introduce information // needed to evaluate references, opinions with variant selections, // or overrides to the payload target path. 
// // It is important to complete evaluation of the former group // before proceeding to references/variants/payloads so that we gather // as much information as available before deciding those arcs. // // - We only want to process a payload when there is nothing else // left to do. Again, this is to ensure that we have discovered // any opinions which may affect the payload arc, including // those inside variants. // // - At each step, we may introduce a new node that returns us // to an earlier stage of the algorithm. For example, a payload // may introduce nodes that contain references, inherits, etc. // We need to process them to completion before we return to // check variants, and so on. // struct Pcp_PrimIndexer { // The root site for the prim indexing process. const PcpLayerStackSite rootSite; // Total depth of ancestral recursion. const int ancestorRecursionDepth; // Context for the prim index we are building. const PcpPrimIndexInputs &inputs; PcpPrimIndexOutputs* const outputs; // The previousFrame tracks information across recursive invocations // of Pcp_BuildPrimIndex() so that recursive indexes can query // outer indexes. This is used for cycle detection as well as // composing the variant selection. PcpPrimIndex_StackFrame* const previousFrame; // Open tasks, in priority order using _TaskQueue = std::vector<Task>; _TaskQueue tasks; const bool evaluateImpliedSpecializes; const bool evaluateVariants; #ifdef PCP_DIAGNOSTIC_VALIDATION /// Diagnostic helper to make sure we don't revisit sites. PcpNodeRefHashSet seen; #endif // PCP_DIAGNOSTIC_VALIDATION Pcp_PrimIndexer(PcpPrimIndexInputs const &inputs_, PcpPrimIndexOutputs *outputs_, PcpLayerStackSite rootSite_, int ancestorRecursionDepth_, PcpPrimIndex_StackFrame *previousFrame_=nullptr, bool evaluateImpliedSpecializes_=true, bool evaluateVariants_=true) : rootSite(rootSite_) , ancestorRecursionDepth(ancestorRecursionDepth_) , inputs(inputs_) , outputs(outputs_) , previousFrame(previousFrame_) , evaluateImpliedSpecializes(evaluateImpliedSpecializes_) , evaluateVariants(evaluateVariants_) { } inline PcpPrimIndex const *GetOriginatingIndex() const { return _GetOriginatingIndex(previousFrame, outputs); } void AddTask(Task &&task) { Task::PriorityOrder comp(inputs.payloadDecorator); auto iter = std::lower_bound(tasks.begin(), tasks.end(), task, comp); if (iter == tasks.end() || *iter != task) { tasks.insert(iter, std::move(task)); } } // Select the next task to perform. Task PopTask() { Task task(Task::Type::None); if (!tasks.empty()) { task = std::move(tasks.back()); tasks.pop_back(); } return task; } // Add this node and its children to the task queues. void _AddTasksForNodeRecursively( const PcpNodeRef& n, bool skipCompletedNodesForAncestralOpinions, bool skipCompletedNodesForImpliedSpecializes, bool isUsd) { #ifdef PCP_DIAGNOSTIC_VALIDATION TF_VERIFY(seen.count(n) == 0, "Already processed <%s>", n.GetPath().GetText()); seen.insert(n); #endif // PCP_DIAGNOSTIC_VALIDATION TF_FOR_ALL(child, Pcp_GetChildrenRange(n)) { _AddTasksForNodeRecursively( *child, skipCompletedNodesForAncestralOpinions, skipCompletedNodesForImpliedSpecializes, isUsd); } // If the node does not have specs or cannot contribute specs, // we can avoid even enqueueing certain kinds of tasks that will // end up being no-ops. bool contributesSpecs = n.HasSpecs() && n.CanContributeSpecs(); // If the caller tells us the new node and its children were already // indexed, we do not need to re-scan them for certain arcs based on // what was already completed. 
if (skipCompletedNodesForImpliedSpecializes) { // In this case, we only need to add tasks that come after // implied specializes. if (contributesSpecs) { if (evaluateVariants) { AddTask(Task(Task::Type::EvalNodeVariantSets, n)); } } } else { if (!skipCompletedNodesForAncestralOpinions) { // In this case, we only need to add tasks that weren't // evaluated during the recursive prim indexing for // ancestral opinions. if (contributesSpecs) { AddTask(Task(Task::Type::EvalNodeInherits, n)); AddTask(Task(Task::Type::EvalNodeSpecializes, n)); AddTask(Task(Task::Type::EvalNodeReferences, n)); AddTask(Task(Task::Type::EvalNodePayload, n)); } if (!isUsd) { AddTask(Task(Task::Type::EvalNodeRelocations, n)); } } if (contributesSpecs) { if (evaluateVariants) { AddTask(Task(Task::Type::EvalNodeVariantSets, n)); } } if (!isUsd && n.GetArcType() == PcpArcTypeRelocate) { AddTask(Task(Task::Type::EvalImpliedRelocations, n)); } } } void AddTasksForNode( const PcpNodeRef& n, bool skipCompletedNodesForAncestralOpinions = false, bool skipCompletedNodesForImpliedSpecializes = false) { // Any time we add an edge to the graph, we may need to update // implied class edges. if (!skipCompletedNodesForImpliedSpecializes) { if (PcpIsClassBasedArc(n.GetArcType())) { // The new node is itself class-based. Find the starting // prim of the chain of classes the node is a part of, and // propagate the entire chain as a single unit. if (PcpNodeRef base = _FindStartingNodeForImpliedClasses(n)) { AddTask(Task(Task::Type::EvalImpliedClasses, base)); } } else if (_HasClassBasedChild(n)) { // The new node is not class-based -- but it has class-based // children. Such children represent inherits found during the // recursive computation of the node's subgraph. We need to // pick them up and continue propagating them now that we are // merging the subgraph into the parent graph. AddTask(Task(Task::Type::EvalImpliedClasses, n)); } if (evaluateImpliedSpecializes) { if (PcpNodeRef base = _FindStartingNodeForImpliedSpecializes(n)) { // We're adding a new specializes node or a node beneath // a specializes node. Add a task to propagate the subgraph // beneath this node to the appropriate location. AddTask(Task(Task::Type::EvalImpliedSpecializes, base)); } else if (_HasSpecializesChild(n)) { // The new node is not a specializes node or beneath a // specializes node, but has specializes children. // Such children represent arcs found during the recursive // computation of the node's subgraph. We need to pick them // up and continue propagating them now that we are // merging the subgraph into the parent graph. AddTask(Task(Task::Type::EvalImpliedSpecializes, n)); } } } // Recurse over all of the rest of the nodes. (We assume that any // embedded class hierarchies have already been propagated to // the top node n, letting us avoid redundant work.) _AddTasksForNodeRecursively( n, skipCompletedNodesForAncestralOpinions, skipCompletedNodesForImpliedSpecializes, inputs.usd); _DebugPrintTasks("After AddTasksForNode"); } inline void _DebugPrintTasks(char const *label) const { #if 0 printf("-- %s ----------------\n", label); for (auto iter = tasks.rbegin(); iter != tasks.rend(); ++iter) { printf("%s\n", TfStringify(*iter).c_str()); } printf("----------------\n"); #endif } // Retry any variant sets that previously failed to find an authored // selection to take into account newly-discovered opinions. 
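// (Hypothetical example: a payload brought in by one authored selection may itself carry an authored opinion for a variant set that previously fell back to its fallback selection; retrying lets that authored opinion be found.) 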
// EvalNodeVariantNoneFound is a placeholder representing variants // that were previously visited and yielded no variant; it exists // solely for this function to be able to find and retry them. void RetryVariantTasks() { // Optimization: We know variant tasks are the lowest priority, and // therefore sorted to the front of this container. We promote the // leading non-authored variant tasks to authored tasks, then merge them // with any existing authored tasks. auto nonAuthVariantsEnd = std::find_if_not( tasks.begin(), tasks.end(), [](Task const &t) { return t.type == Task::Type::EvalNodeVariantFallback || t.type == Task::Type::EvalNodeVariantNoneFound; }); if (nonAuthVariantsEnd == tasks.begin()) { // No variant tasks present. return; } auto authVariantsEnd = std::find_if_not( nonAuthVariantsEnd, tasks.end(), [](Task const &t) { return t.type == Task::Type::EvalNodeVariantAuthored; }); // Now we've split tasks into three ranges: // non-authored variant tasks : [begin, nonAuthVariantsEnd) // authored variant tasks : [nonAuthVariantsEnd, authVariantsEnd) // other tasks : [authVariantsEnd, end) // // We want to change the non-authored variant tasks' types to be // authored instead, and then sort them in with the other authored // tasks. // Change types. std::for_each(tasks.begin(), nonAuthVariantsEnd, [](Task &t) { t.type = Task::Type::EvalNodeVariantAuthored; }); // Sort and merge. Task::PriorityOrder comp(inputs.payloadDecorator); std::sort(tasks.begin(), nonAuthVariantsEnd, comp); std::inplace_merge( tasks.begin(), nonAuthVariantsEnd, authVariantsEnd, comp); // XXX Is it possible to have dupes here? blevin? tasks.erase( std::unique(tasks.begin(), authVariantsEnd), authVariantsEnd); #ifdef PCP_DIAGNOSTIC_VALIDATION TF_VERIFY(std::is_sorted(tasks.begin(), tasks.end(), comp)); #endif // PCP_DIAGNOSTIC_VALIDATION _DebugPrintTasks("After RetryVariantTasks"); } // Convenience function to record an error both in this primIndex's // local errors vector and the allErrors vector. void RecordError(const PcpErrorBasePtr &err) { RecordError(err, &outputs->primIndex, &outputs->allErrors); } // Convenience function to record an error both in this primIndex's // local errors vector and the allErrors vector. static void RecordError(const PcpErrorBasePtr &err, PcpPrimIndex *primIndex, PcpErrorVector *allErrors) { allErrors->push_back(err); if (!primIndex->_localErrors) { primIndex->_localErrors.reset(new PcpErrorVector); } primIndex->_localErrors->push_back(err); } }; // Returns true if there is a prim spec associated with the specified node // or any of its descendants. static bool _PrimSpecExistsUnderNode( const PcpNodeRef &node, Pcp_PrimIndexer *indexer) { // Check for prim specs at this node's site. if (node.HasSpecs()) return true; // Recursively check this node's children. TF_FOR_ALL(child, Pcp_GetChildrenRange(node)) { if (_PrimSpecExistsUnderNode(*child, indexer)) return true; } return false; } // Mark an entire subtree of nodes as inert. 
static void _InertSubtree( PcpNodeRef node) { node.SetInert(true); TF_FOR_ALL(child, Pcp_GetChildrenRange(node)) { _InertSubtree(*child); } } inline static bool _HasAncestorCycle( const PcpLayerStackSite& parentNodeSite, const PcpLayerStackSite& childNodeSite ) { if (parentNodeSite.layerStack != childNodeSite.layerStack) return false; if (parentNodeSite.path.HasPrefix(childNodeSite.path)) return true; if (childNodeSite.path.HasPrefix(parentNodeSite.path)) { if (childNodeSite.path.IsPrimVariantSelectionPath()) { // Variant selection arcs do not represent cycles, because // we do not look for ancestral opinions above variant // selection sites. See Pcp_BuildPrimIndex. return false; } return true; } return false; } static bool _IsImpliedClassBasedArc( PcpArcType arcType, const PcpNodeRef &parent, const PcpNodeRef &origin) { return PcpIsClassBasedArc(arcType) && parent != origin; } static bool _IsImpliedClassBasedArc(const PcpNodeRef& node) { return _IsImpliedClassBasedArc( node.GetArcType(), node.GetParentNode(), node.GetOriginNode()); } // Check that no cycles are being introduced by adding this arc. static PcpErrorArcCyclePtr _CheckForCycle( const PcpNodeRef &parent, const PcpNodeRef &origin, PcpArcType arcType, const PcpLayerStackSite &childSite, PcpPrimIndex_StackFrame *previousFrame ) { // XXX:RelocatesSourceNodes: Don't check for cycles in placeholder // implied class nodes under relocates. These children of Relocates // nodes can yield invalid sites, because the arc will include // the effect of relocations but the Relocates node is the source // path. In this case, we won't be adding opinions anyway, so we // don't need to check for cycles. if (_IsImpliedClassBasedArc(arcType, parent, origin)) { // Skip across parent class arcs. PcpPrimIndex_StackFrameIterator j(parent, previousFrame); while (j.node && _IsImpliedClassBasedArc(j.GetArcType(), parent, origin)) { j.Next(); } if (j.node && j.GetArcType() == PcpArcTypeRelocate) { // This is a class arc under a relocate. // Do not count this as a cycle. return PcpErrorArcCyclePtr(); } } // We compare the targeted site to each previously-visited site: bool foundCycle = false; for (PcpPrimIndex_StackFrameIterator i(parent, previousFrame); i.node; i.Next()) { if (_HasAncestorCycle(i.node.GetSite(), childSite)) { foundCycle = true; } } if (foundCycle) { PcpErrorArcCyclePtr err = PcpErrorArcCycle::New(); // Traverse the parent chain to build a list of participating arcs. PcpSiteTrackerSegment seg; for (PcpPrimIndex_StackFrameIterator i(parent, previousFrame); i.node; i.Next()) { seg.site = i.node.GetSite(); seg.arcType = i.GetArcType(); err->cycle.push_back(seg); } // Reverse the list to order arcs from root to leaf. std::reverse(err->cycle.begin(), err->cycle.end()); // Retain the root site. err->rootSite = PcpSite(err->cycle.front().site); // There is no node for the last site in the chain, so report it // directly. seg.site = childSite; seg.arcType = arcType; err->cycle.push_back(seg); return err; } return PcpErrorArcCyclePtr(); } // Add an arc of the given type from the parent node to the child site, // and track any new tasks that result. Return the new node. // // If includeAncestralOpinions is specified, recursively build and // include the ancestral opinions that would affect the new site. 
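// A representative call (see _EvalNodeReferences below): a reference arc is added with parent == origin == the referencing node, the site formed from the referenced layer stack and target prim path, requirePrimAtTarget = true, and includeAncestralOpinions = true only when the target is not a root prim. 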
// static PcpNodeRef _AddArc( const PcpArcType arcType, PcpNodeRef parent, PcpNodeRef origin, const PcpLayerStackSite & site, PcpMapExpression mapExpr, int arcSiblingNum, int namespaceDepth, bool directNodeShouldContributeSpecs, bool includeAncestralOpinions, bool requirePrimAtTarget, bool skipDuplicateNodes, bool skipImpliedSpecializesCompletedNodes, Pcp_PrimIndexer *indexer ) { PCP_INDEXING_PHASE( indexer, parent, "Adding new %s arc to %s to %s", TfEnum::GetDisplayName(arcType).c_str(), Pcp_FormatSite(site).c_str(), Pcp_FormatSite(parent.GetSite()).c_str()); PCP_INDEXING_MSG( indexer, parent, "origin: %s\n" "arcSiblingNum: %d\n" "namespaceDepth: %d\n" "directNodeShouldContributeSpecs: %s\n" "includeAncestralOpinions: %s\n" "requirePrimAtTarget: %s\n" "skipDuplicateNodes: %s\n" "skipImpliedSpecializesCompletedNodes: %s\n\n", origin ? Pcp_FormatSite(origin.GetSite()).c_str() : "<None>", arcSiblingNum, namespaceDepth, directNodeShouldContributeSpecs ? "true" : "false", includeAncestralOpinions ? "true" : "false", requirePrimAtTarget ? "true" : "false", skipDuplicateNodes ? "true" : "false", skipImpliedSpecializesCompletedNodes ? "true" : "false"); if (!TF_VERIFY(!mapExpr.IsNull())) { return PcpNodeRef(); } // Check for cycles. If found, report an error and bail. if (PcpErrorArcCyclePtr err = _CheckForCycle(parent, origin, arcType, site, indexer->previousFrame)) { indexer->RecordError(err); return PcpNodeRef(); } // We (may) want to determine whether adding this arc would cause the // final prim index to have nodes with the same site. If so, we need to // skip over it, as adding the arc would cause duplicate opinions in the // final prim index. // // This is tricky -- we need to search the current graph being built as // well as those in the previous recursive calls to Pcp_BuildPrimIndex. if (indexer->previousFrame) { skipDuplicateNodes |= indexer->previousFrame->skipDuplicateNodes; } if (skipDuplicateNodes) { PcpLayerStackSite siteToAddInCurrentGraph = site; bool foundDuplicateNode = false; for (PcpPrimIndex_StackFrameIterator it(parent, indexer->previousFrame); it.node; it.NextFrame()) { PcpPrimIndex_GraphPtr currentGraph = it.node.GetOwningGraph(); if (currentGraph->GetNodeUsingSite(siteToAddInCurrentGraph)) { foundDuplicateNode = true; break; } // The graph in the previous stack frame may be at a different // level of namespace than the current graph. In order to search // it for this new node's site, we have to figure out what this // node's site would be once it was added to the previous graph. // Let's say we're in a recursive call to Pcp_BuildPrimIndex for // prim /A/B, and that we're processing ancestral opinions for /A. // In doing so, we're adding an arc to site /C. That would be: // // - requestedPathForCurrentGraph = /A/B // currentPathForCurrentGraph = /A // siteToAddInCurrentGraph.path = /C // // When the recursive call to Pcp_BuildPrimIndex is all done, // the arc to site /C will have become /C/B. This is the path // we need to use to search the graph in the previous frame. We // compute this path using a simple prefix replacement. 
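// Continuing the example: /A/B.ReplacePrefix(/A, /C) yields /C/B, the site to search for in the previous frame's graph. 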
if (it.previousFrame) { const SdfPath& requestedPathForCurrentGraph = it.previousFrame->requestedSite.path; const SdfPath& currentPathForCurrentGraph = currentGraph->GetRootNode().GetPath(); siteToAddInCurrentGraph.path = requestedPathForCurrentGraph.ReplacePrefix( currentPathForCurrentGraph, siteToAddInCurrentGraph.path); } } if (foundDuplicateNode) { return PcpNodeRef(); } } // Local opinions are not allowed at the source of a relocation (or below). // This is colloquially known as the "salted earth" policy. We enforce // this policy here to ensure we examine all arcs as they're being added. // Optimizations: // - We only need to do this for non-root prims because root prims can't // be relocated. This is indicated by the includeAncestralOpinions flag. if (directNodeShouldContributeSpecs && includeAncestralOpinions) { const SdfRelocatesMap & layerStackRelocates = site.layerStack->GetRelocatesSourceToTarget(); SdfRelocatesMap::const_iterator i = layerStackRelocates.lower_bound( site.path ); if (i != layerStackRelocates.end() && i->first.HasPrefix(site.path)) { directNodeShouldContributeSpecs = false; } } // Set up the arc. PcpArc newArc; newArc.type = arcType; newArc.mapToParent = mapExpr; newArc.parent = parent; newArc.origin = origin; newArc.namespaceDepth = namespaceDepth; newArc.siblingNumAtOrigin = arcSiblingNum; // Create the new node. PcpNodeRef newNode; if (!includeAncestralOpinions) { // No ancestral opinions. Just add the single new site. newNode = parent.InsertChild(site, newArc); newNode.SetInert(!directNodeShouldContributeSpecs); // Compose the existence of primSpecs and update the HasSpecs field // accordingly. newNode.SetHasSpecs(PcpComposeSiteHasPrimSpecs(newNode)); if (!newNode.IsInert() && newNode.HasSpecs()) { if (!indexer->inputs.usd) { // Determine whether opinions from this site can be accessed // from other sites in the graph. newNode.SetPermission(PcpComposeSitePermission( site.layerStack, site.path)); // Determine whether this node has any symmetry information. newNode.SetHasSymmetry(PcpComposeSiteHasSymmetry( site.layerStack, site.path)); } } PCP_INDEXING_UPDATE( indexer, newNode, "Added new node for site %s to graph", TfStringify(site).c_str()); } else { // Ancestral opinions are those above the source site in namespace. // We only need to account for them if the site is not a root prim // (since root prims have no ancestors with scene description, only // the pseudo-root). This is why we do not need to handle ancestral // opinions for references, payloads, or global classes: they are // all restricted to root prims. // // Account for ancestral opinions by building out the graph for // that site and incorporating its root node as the new child. PCP_INDEXING_MSG( indexer, parent, "Need to build index for %s source at %s to " "pick up ancestral opinions", TfEnum::GetDisplayName(arcType).c_str(), Pcp_FormatSite(site).c_str()); // We don't want to evaluate implied specializes immediately when // building the index for this source site. Instead, we'll add // tasks to do this after we have merged the source index into // the final index. This allows any specializes arcs in the source // index to be propagated to the root of the graph for the correct // strength ordering. const bool evaluateImpliedSpecializes = false; // We don't want to evaluate variants immediately when building // the index for the source site. 
This is because Pcp_BuildPrimIndex // won't know anything about opinions outside of the source site, // which could cause stronger variant selections to be ignored. // (For instance, if a referencing layer stack had a stronger // opinion for the selection than what was authored at the source.) // // So, tell Pcp_BuildPrimIndex to skip variants; we'll add tasks // for that after inserting the source index into our index. That // way, the variant evaluation process will have enough context // to decide what the strongest variant selection is. const bool evaluateVariants = false; // Provide a linkage across recursive calls to the indexer. PcpPrimIndex_StackFrame frame(site, parent, &newArc, indexer->previousFrame, indexer->GetOriginatingIndex(), skipDuplicateNodes); PcpPrimIndexOutputs childOutputs; Pcp_BuildPrimIndex( site, indexer->rootSite, indexer->ancestorRecursionDepth, evaluateImpliedSpecializes, evaluateVariants, directNodeShouldContributeSpecs, &frame, indexer->inputs, &childOutputs ); // Join the subtree into this graph. newNode = parent.InsertChildSubgraph( childOutputs.primIndex.GetGraph(), newArc); PCP_INDEXING_UPDATE( indexer, newNode, "Added subtree for site %s to graph", TfStringify(site).c_str()); if (childOutputs.primIndex.GetGraph()->HasPayload()) { parent.GetOwningGraph()->SetHasPayload(true); } // Pass along the other outputs from the nested computation. indexer->outputs->allErrors.insert( indexer->outputs->allErrors.end(), childOutputs.allErrors.begin(), childOutputs.allErrors.end()); } // If culling is enabled, check whether the entire subtree rooted // at the new node can be culled. This doesn't have to recurse down // the new subtree; instead, it just needs to check the new node only. // This is because computing the source prim index above will have culled // everything it can *except* for the direct node. if (indexer->inputs.cull) { if (_NodeCanBeCulled(newNode, indexer->rootSite)) { newNode.SetCulled(true); } else { // Ancestor nodes that were previously marked as culled must // be updated because they now have a subtree that isn't culled. // This can happen during the propagation of implied inherits from // a class hierarchy. For instance, consider the graph: // // root.menva ref.menva // Model_1 (ref)--> Model (inh)--> ModelClass (inh)--> CharClass. // // Let's say there were specs for /CharClass but NOT for /ModelClass // in the root layer stack. In that case, propagating ModelClass to // the root layer stack would result in a culled node. However, when // we then propagate CharClass, we wind up with an unculled node // beneath a culled node, which violates the culling invariant. So, // we would need to fix up /ModelClass to indicate that it can no // longer be culled. for (PcpNodeRef p = parent; p && p.IsCulled(); p = p.GetParentNode()) { p.SetCulled(false); } } } // Enqueue tasks to evaluate the new nodes. // // If we evaluated ancestral opinions, it means the nested // call to Pcp_BuildPrimIndex() has already evaluated refs, payloads, // and inherits on this subgraph, so we can skip those tasks. const bool skipAncestralCompletedNodes = includeAncestralOpinions; indexer->AddTasksForNode( newNode, skipAncestralCompletedNodes, skipImpliedSpecializesCompletedNodes); // If requested, recursively check if there is a prim spec at the // targeted site or at any of its descendants. If there isn't, // we report an error. Note that we still return the new node in this // case because we want to propagate implied inherits, etc. in the graph. 
if (requirePrimAtTarget && !_PrimSpecExistsUnderNode(newNode, indexer)) { PcpErrorUnresolvedPrimPathPtr err = PcpErrorUnresolvedPrimPath::New(); err->rootSite = PcpSite(parent.GetRootNode().GetSite()); err->site = PcpSite(parent.GetSite()); err->unresolvedPath = newNode.GetPath(); err->arcType = arcType; indexer->RecordError(err); } // If the arc targets a site that is itself private, issue an error. if (newNode.GetPermission() == SdfPermissionPrivate) { PcpErrorArcPermissionDeniedPtr err = PcpErrorArcPermissionDenied::New(); err->rootSite = PcpSite(parent.GetRootNode().GetSite()); err->site = PcpSite(parent.GetSite()); err->privateSite = PcpSite(newNode.GetSite()); err->arcType = arcType; indexer->RecordError(err); // Mark the new child subtree as inert so that it does not // contribute specs, but keep the node(s) to track the // dependencies in order to support processing later changes // that relax the permissions. // // Note, this is a complementary form of permissions enforcement // to that done by _EnforcePermissions(). That function enforces // the constraint that once something is made private via an // ancestral arc, overrides are prohibited. This enforces the // equivalent constraint on direct arcs: you cannot employ an // arc directly to a private site. _InertSubtree(newNode); } // If the new node's path is the pseudo root, this is a special dependency // placeholder for unresolved default-target references/payloads. // Mark the node inert so it does not contribute opinions, but retain the // nodes to represent the dependency. if (newNode.GetPath() == SdfPath::AbsoluteRootPath()) { _InertSubtree(newNode); } return newNode; } static PcpNodeRef _AddArc( const PcpArcType arcType, PcpNodeRef parent, PcpNodeRef origin, const PcpLayerStackSite & site, PcpMapExpression mapExpr, int arcSiblingNum, bool directNodeShouldContributeSpecs, bool includeAncestralOpinions, bool requirePrimAtTarget, bool skipDuplicateNodes, Pcp_PrimIndexer *indexer ) { // Strip variant selections when determining namespace depth. // Variant selections are (unfortunately) represented as path // components, but do not represent additional levels of namespace, // just alternate storage locations for data. const int namespaceDepth = PcpNode_GetNonVariantPathElementCount( parent.GetPath() ); return _AddArc( arcType, parent, origin, site, mapExpr, arcSiblingNum, namespaceDepth, directNodeShouldContributeSpecs, includeAncestralOpinions, requirePrimAtTarget, skipDuplicateNodes, /* skipImpliedSpecializes = */ false, indexer); } //////////////////////////////////////////////////////////////////////// // References // Declare helper function for creating PcpPayloadContext, // implemented in payloadContext.cpp PcpPayloadContext Pcp_CreatePayloadContext(const PcpNodeRef&, PcpPrimIndex_StackFrame*); static SdfPath _GetDefaultPrimPath(SdfLayerHandle const &layer) { TfToken target = layer->GetDefaultPrim(); return SdfPath::IsValidIdentifier(target) ? SdfPath::AbsoluteRootPath().AppendChild(target) : SdfPath(); } static void _EvalNodeReferences( PcpPrimIndex *index, PcpNodeRef node, Pcp_PrimIndexer *indexer) { PCP_INDEXING_PHASE( indexer, node, "Evaluating references at %s", Pcp_FormatSite(node.GetSite()).c_str()); if (!node.CanContributeSpecs()) return; // Compose value for local references. SdfReferenceVector refArcs; PcpSourceReferenceInfoVector refInfo; PcpComposeSiteReferences(node, &refArcs, &refInfo); // Add each reference arc. 
const SdfPath & srcPath = node.GetPath(); for (size_t refArcNum=0; refArcNum < refArcs.size(); ++refArcNum) { const SdfReference & ref = refArcs[refArcNum]; const PcpSourceReferenceInfo& info = refInfo[refArcNum]; const SdfLayerHandle & srcLayer = info.layer; const SdfLayerOffset & srcLayerOffset = info.layerOffset; SdfLayerOffset layerOffset = ref.GetLayerOffset(); PCP_INDEXING_MSG( indexer, node, "Found reference to @%s@<%s>", info.authoredAssetPath.c_str(), ref.GetPrimPath().GetText()); bool fail = false; // Verify that the reference targets the default reference/payload // target or a root prim. if (!ref.GetPrimPath().IsEmpty() && !(ref.GetPrimPath().IsAbsolutePath() && ref.GetPrimPath().IsPrimPath())) { PcpErrorInvalidPrimPathPtr err = PcpErrorInvalidPrimPath::New(); err->rootSite = PcpSite(node.GetRootNode().GetSite()); err->site = PcpSite(node.GetSite()); err->primPath = ref.GetPrimPath(); err->arcType = PcpArcTypeReference; indexer->RecordError(err); fail = true; } // Validate layer offset in original reference (not the composed // layer offset stored in ref). if (!srcLayerOffset.IsValid() || !srcLayerOffset.GetInverse().IsValid()) { PcpErrorInvalidReferenceOffsetPtr err = PcpErrorInvalidReferenceOffset::New(); err->rootSite = PcpSite(node.GetRootNode().GetSite()); err->layer = srcLayer; err->sourcePath = srcPath; err->assetPath = info.authoredAssetPath; err->targetPath = ref.GetPrimPath(); err->offset = srcLayerOffset; indexer->RecordError(err); // Don't set fail, just reset the offset. layerOffset = SdfLayerOffset(); } // Go no further if we've found any problems with this reference. if (fail) { continue; } // Compute the reference layer stack // See Pcp_NeedToRecomputeDueToAssetPathChange SdfLayerRefPtr refLayer; PcpLayerStackRefPtr refLayerStack; const bool isInternalReference = ref.GetAssetPath().empty(); if (isInternalReference) { refLayer = node.GetLayerStack()->GetIdentifier().rootLayer; refLayerStack = node.GetLayerStack(); } else { std::string canonicalMutedLayerId; if (indexer->inputs.cache->IsLayerMuted( srcLayer, info.authoredAssetPath, &canonicalMutedLayerId)) { PcpErrorMutedAssetPathPtr err = PcpErrorMutedAssetPath::New(); err->rootSite = PcpSite(node.GetRootNode().GetSite()); err->site = PcpSite(node.GetSite()); err->targetPath = ref.GetPrimPath(); err->assetPath = info.authoredAssetPath; err->resolvedAssetPath = canonicalMutedLayerId; err->arcType = PcpArcTypeReference; err->layer = srcLayer; indexer->RecordError(err); continue; } TfErrorMark m; // Relative asset paths will already have been anchored to their // source layers in PcpComposeSiteReferences, so we can just call // SdfLayer::FindOrOpen instead of SdfFindOrOpenRelativeToLayer. 
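// (For instance -- hypothetical path -- a reference authored as @./asset.usda@ will already have been anchored to its source layer's location by this point.) 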
refLayer = SdfLayer::FindOrOpen( ref.GetAssetPath(), Pcp_GetArgumentsForTargetSchema(indexer->inputs.targetSchema)); if (!refLayer) { PcpErrorInvalidAssetPathPtr err = PcpErrorInvalidAssetPath::New(); err->rootSite = PcpSite(node.GetRootNode().GetSite()); err->site = PcpSite(node.GetSite()); err->targetPath = ref.GetPrimPath(); err->assetPath = info.authoredAssetPath; err->resolvedAssetPath = ref.GetAssetPath(); err->arcType = PcpArcTypeReference; err->layer = srcLayer; if (!m.IsClean()) { vector<string> commentary; for (auto const &err: m) { commentary.push_back(err.GetCommentary()); } m.Clear(); err->messages = TfStringJoin(commentary.begin(), commentary.end(), "; "); } indexer->RecordError(err); continue; } m.Clear(); const ArResolverContext& pathResolverContext = node.GetLayerStack()->GetIdentifier().pathResolverContext; PcpLayerStackIdentifier refLayerStackIdentifier( refLayer, SdfLayerHandle(), pathResolverContext ); refLayerStack = indexer->inputs.cache->ComputeLayerStack( refLayerStackIdentifier, &indexer->outputs->allErrors); } bool directNodeShouldContributeSpecs = true; // Determine the referenced prim path. This is either the one // explicitly specified in the SdfReference, or if that's empty, then // the one specified by DefaultPrim in the // referenced layer. SdfPath defaultRefPath; if (ref.GetPrimPath().IsEmpty()) { // Check the layer for a defaultPrim, and use // that if present. defaultRefPath = _GetDefaultPrimPath(refLayer); if (defaultRefPath.IsEmpty()) { PcpErrorUnresolvedPrimPathPtr err = PcpErrorUnresolvedPrimPath::New(); err->rootSite = PcpSite(node.GetRootNode().GetSite()); err->site = PcpSite(node.GetSite()); // Use a relative path with the field key for a hint. err->unresolvedPath = SdfPath::ReflexiveRelativePath(). AppendChild(SdfFieldKeys->DefaultPrim); err->arcType = PcpArcTypeReference; indexer->RecordError(err); // Set the refPath to the pseudo-root path. We'll still add an // arc to it as a special dependency placeholder, so we // correctly invalidate if/when the default target metadata gets // authored in the target layer. defaultRefPath = SdfPath::AbsoluteRootPath(); directNodeShouldContributeSpecs = false; } } // Final reference path to use. SdfPath const &refPath = defaultRefPath.IsEmpty() ? ref.GetPrimPath() : defaultRefPath; // References only map values under the source path, aka the // reference root. Any paths outside the reference root do // not map across. PcpMapExpression mapExpr = _CreateMapExpressionForArc( /* source */ refPath, /* targetNode */ node, indexer->inputs, layerOffset); // Only need to include ancestral opinions if the prim path is // not a root prim. 
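// (E.g., with hypothetical paths: a reference targeting </World/Chars/Bob> must also build out opinions from its ancestors /World and /World/Chars in the referenced layer stack, whereas a root-prim target like </Bob> has no such ancestors.) 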
const bool includeAncestralOpinions = !refPath.IsRootPrimPath(); _AddArc( PcpArcTypeReference, /* parent = */ node, /* origin = */ node, PcpLayerStackSite( refLayerStack, refPath ), mapExpr, /* arcSiblingNum = */ refArcNum, directNodeShouldContributeSpecs, includeAncestralOpinions, /* requirePrimAtTarget = */ true, /* skipDuplicateNodes = */ false, indexer ); } } //////////////////////////////////////////////////////////////////////// // Relocations static void _ElideSubtree( const Pcp_PrimIndexer& indexer, PcpNodeRef node) { if (indexer.inputs.cull) { node.SetCulled(true); } else { node.SetInert(true); } TF_FOR_ALL(child, Pcp_GetChildrenRange(node)) { _ElideSubtree(indexer, *child); } } static void _ElideRelocatedSubtrees( const Pcp_PrimIndexer& indexer, PcpNodeRef node) { TF_FOR_ALL(it, Pcp_GetChildrenRange(node)) { const PcpNodeRef& childNode = *it; // We can cut off the traversal if this is a relocate node, since we // would have done this work when the node was originally added to // the graph. if (childNode.GetArcType() == PcpArcTypeRelocate) { continue; } // Elide the subtree rooted at this node if there's a relocate // statement that would move its opinions to a different prim. if (childNode.CanContributeSpecs()) { const PcpLayerStackRefPtr& layerStack = childNode.GetLayerStack(); const SdfRelocatesMap& relocatesSrcToTarget = layerStack->GetIncrementalRelocatesSourceToTarget(); if (relocatesSrcToTarget.find(childNode.GetPath()) != relocatesSrcToTarget.end()) { _ElideSubtree(indexer, childNode); continue; } } _ElideRelocatedSubtrees(indexer, childNode); } } // Account for relocations that affect existing nodes in the graph. // This method is how we handle the effects of relocations, as we walk // down namespace. For each prim, we start by using the parent's graph, // then apply relocations here. For every relocation, we introduce a // new graph node for the relocation source, and recursively populate that // source via _AddArc(). static void _EvalNodeRelocations( PcpPrimIndex *index, const PcpNodeRef &node, Pcp_PrimIndexer *indexer ) { PCP_INDEXING_PHASE( indexer, node, "Evaluating relocations under %s", Pcp_FormatSite(node.GetSite()).c_str()); // Unlike other tasks, we skip processing if this node can't contribute // specs, but only if this node was introduced at this level of namespace. // This additional check is needed because a descendant node might not // have any specs and thus be marked as culled, but still have relocates // that affect that node. if (!node.CanContributeSpecs() && node.GetDepthBelowIntroduction() == 0) { return; } // Determine if this node was relocated, and from what source path. // // We need to use the incremental relocates map instead of the // fully-combined map to ensure we examine all sources of opinions // in the case where there are multiple relocations nested in different // levels of namespace that affect the same prim. The fully-combined // map collapses these relocations into a single entry, which would // cause us to skip looking at any intermediate sites. const SdfRelocatesMap & relocatesTargetToSource = node.GetLayerStack()->GetIncrementalRelocatesTargetToSource(); SdfRelocatesMap::const_iterator i = relocatesTargetToSource.find(node.GetPath()); if (i == relocatesTargetToSource.end()) { // This node was not relocated. return; } // This node was relocated. Add a relocation arc back to the source. 
    const SdfPath & relocSource = i->second;
    const SdfPath & relocTarget = i->first;

    PCP_INDEXING_MSG(
        indexer, node, "<%s> was relocated from source <%s>",
        relocTarget.GetText(), relocSource.GetText());

    // Determine how the opinions from the relocation source will compose
    // with opinions from ancestral arcs on the relocation target.
    // For certain nodes, we recursively mark their subtrees as
    // shouldContributeSpecs=false to indicate that they should not
    // contribute opinions.
    //
    // TODO: We do not remove them entirely, because the
    // nodes there may be used as the 'origin' of an implied inherit
    // for purposes of determining relative strength. Perhaps we can
    // remove all nodes that aren't used as an origin?
    //
    // TODO: We may also want to use these nodes as a basis
    // to check for and issue errors about opinions at relocation
    // sources across references. Today, Csd silently ignores these,
    // but it seems like we should check for opinion collisions,
    // and either report the current relocation arc as invalid, or
    // choose between the opinions somehow.
    //
    TF_FOR_ALL(childIt, Pcp_GetChildrenRange(node)) {
        const PcpNodeRef& child = *childIt;
        switch (child.GetArcType()) {
            // Ancestral arcs of these types should contribute opinions.
        case PcpArcTypeVariant:
            // Variants are allowed to provide overrides of relocated prims.
            continue;
        case PcpArcTypeRoot:
        case PcpNumArcTypes:
            // Cases we should never encounter.
            TF_VERIFY(false, "Unexpected child node encountered");
            continue;

            // Nodes of these types should NOT contribute opinions.
        case PcpArcTypeRelocate:
            // Ancestral relocation arcs are superseded by this relocation,
            // which is 'closer' to the actual prim we're trying to index.
            // So, contributions from the ancestral subtree should be ignored
            // in favor of the ones from the relocation arc we're about to
            // add. See TrickyMultipleRelocations for an example.
        case PcpArcTypeReference:
        case PcpArcTypePayload:
        case PcpArcTypeLocalInherit:
        case PcpArcTypeGlobalInherit:
        case PcpArcTypeLocalSpecializes:
        case PcpArcTypeGlobalSpecializes:
            // Ancestral opinions at a relocation target across a reference
            // or inherit are silently ignored. See TrickyRelocationSquatter
            // for an example.
            //
            // XXX: Since inherits are stronger than relocations, I wonder
            // if you could make the argument that classes should be
            // able to override relocated prims, just like variants.
            break;
        }

        _ElideSubtree(*indexer, child);

        PCP_INDEXING_UPDATE(
            indexer, child,
            "Elided subtree that will be superseded by relocation source <%s>",
            relocSource.GetText());
    }

    // The mapping for a relocation source node is identity.
    //
    // The reason is that relocation mappings are applied across the
    // specific arcs whose target path is affected by relocations.
    // In this approach, relocates source nodes do not need to apply
    // relocation mappings since they would be redundant.
    //
    // Instead of representing the namespace mappings for relocations,
    // relocation source nodes are primarily placeholders used to
    // incorporate the ancestral arcs from the relocation sources (spooky
    // ancestors). Using actual nodes for this lets us easily
    // incorporate spooky ancestral opinions, spooky implied inherits
    // etc. without needing special accommodation. However, it does
    // have some other ramifications; see XXX:RelocatesSourceNodes.
    //
    // XXX: It could be that a better design would be to only use
    // Relocates Source nodes during the temporary recursive indexing
    // of relocation sources, and then immediately transfer all of its
    // children to the relocates parent directly. To do this we would
    // need to decide how to resolve the relative arc strength of the
    // relocation target vs. source child nodes.
    const PcpMapExpression identityMapExpr = PcpMapExpression::Identity();

    // A prim can only be relocated from a single place -- our
    // expression of relocates as a map only allows for a single
    // entry -- so the arc number is always zero.
    const int arcSiblingNum = 0;

    PcpNodeRef newNode =
        _AddArc( PcpArcTypeRelocate,
                 /* parent = */ node,
                 /* origin = */ node,
                 PcpLayerStackSite( node.GetLayerStack(), relocSource ),
                 identityMapExpr,
                 arcSiblingNum,
                 /* The direct site of a relocation source is not allowed to
                    contribute opinions. However, note that it usually has
                    node-children that do contribute opinions via ancestral
                    arcs. */
                 /* directNodeShouldContributeSpecs = */ false,
                 /* includeAncestralOpinions = */ true,
                 /* requirePrimAtTarget = */ false,
                 /* skipDuplicateNodes = */ false,
                 indexer );

    if (newNode) {
        // Check for the existence of opinions at the relocation
        // source, and issue errors for any that are found.
        //
        // XXX: It's a little misleading to do this only here, as this won't
        // report relocation source errors for namespace children beneath
        // this site. (See the error message for /Group/Model_Renamed/B
        // in ErrorArcCycle for example; it cites invalid opinions at
        // /Group/Model, but doesn't cite invalid opinions at
        // /Group/Model/B.)
        SdfSiteVector sites;
        PcpComposeSitePrimSites(newNode, &sites);
        TF_FOR_ALL(site, sites) {
            PcpErrorOpinionAtRelocationSourcePtr err =
                PcpErrorOpinionAtRelocationSource::New();
            err->rootSite = PcpSite(node.GetRootNode().GetSite());
            err->layer = site->layer;
            err->path = site->path;
            indexer->RecordError(err);
        }

        // Scan the added subtree to see if it contains any opinions that
        // would be moved to a different prim by other relocate statements.
        // If so, we need to elide those opinions, or else we'll wind up
        // with multiple prims with opinions from the same site.
        //
        // See RelocatePrimsWithSameName test case for an example of this.
        _ElideRelocatedSubtrees(*indexer, newNode);
    }
}

static void
_EvalImpliedRelocations(
    PcpPrimIndex *index,
    const PcpNodeRef &node,
    Pcp_PrimIndexer *indexer )
{
    if (node.GetArcType() != PcpArcTypeRelocate || node.IsDueToAncestor()) {
        return;
    }

    PCP_INDEXING_PHASE(
        indexer, node,
        "Evaluating relocations implied by %s",
        Pcp_FormatSite(node.GetSite()).c_str());

    if (PcpNodeRef parent = node.GetParentNode()) {
        if (PcpNodeRef gp = parent.GetParentNode()) {
            SdfPath gpRelocSource =
                parent.GetMapToParent().MapSourceToTarget(node.GetPath());
            if (!TF_VERIFY(!gpRelocSource.IsEmpty())) {
                return;
            }

            PCP_INDEXING_PHASE(
                indexer, node,
                "Propagating relocate from %s to %s",
                Pcp_FormatSite(node.GetSite()).c_str(),
                gpRelocSource.GetText());

            // Check if this has already been propagated.
            TF_FOR_ALL(gpChildIt, Pcp_GetChildrenRange(gp)) {
                const PcpNodeRef& gpChild = *gpChildIt;
                if (gpChild.GetPath() == gpRelocSource &&
                    gpChild.GetArcType() == PcpArcTypeRelocate) {
                    PCP_INDEXING_PHASE(
                        indexer, node,
                        "Relocate already exists -- skipping");
                    return;
                }
            }

            _AddArc( PcpArcTypeRelocate,
                     /* parent = */ gp,
                     /* origin = */ node,
                     PcpLayerStackSite( gp.GetLayerStack(), gpRelocSource ),
                     PcpMapExpression::Identity(),
                     /* arcSiblingNum = */ 0,
                     /* directNodeShouldContributeSpecs = */ false,
                     /* includeAncestralOpinions = */ false,
                     /* requirePrimAtTarget = */ false,
                     /* skipDuplicateNodes = */ false,
                     indexer );
        }
    }
}

////////////////////////////////////////////////////////////////////////
// Class-based Arcs

// Walk over the child nodes of parent, looking for an existing inherit
// node.
static PcpNodeRef
_FindMatchingChild(const PcpNodeRef& parent,
                   const PcpArcType parentArcType,
                   const PcpLayerStackSite& site,
                   const PcpArcType arcType,
                   const PcpMapExpression & mapToParent,
                   int depthBelowIntroduction)
{
    // Arbitrary-order traversal.
    TF_FOR_ALL(childIt, Pcp_GetChildrenRange(parent)) {
        const PcpNodeRef& child = *childIt;

        // XXX:RelocatesSourceNodes: This somewhat arcane way of comparing
        // inherits arc "identity" is necessary to handle the way implied
        // inherits map across relocation source nodes. In particular,
        // comparing only the sites there would give us a collision, because
        // the sites for implied inherits under relocates sources are
        // not necessarily meaningful.
        if (parentArcType == PcpArcTypeRelocate) {
            if (child.GetArcType() == arcType &&
                child.GetMapToParent().Evaluate() == mapToParent.Evaluate() &&
                child.GetOriginNode().GetDepthBelowIntroduction() ==
                    depthBelowIntroduction) {
                return child;
            }
        }
        else {
            if (child.GetSite() == site) {
                return child;
            }
        }
    }
    return PcpNodeRef();
}

static SdfPath
_FindContainingVariantSelection(SdfPath p)
{
    while (!p.IsEmpty() && !p.IsPrimVariantSelectionPath()) {
        p = p.GetParentPath();
    }
    return p;
}

// Use the mapping function to figure out the path of the site to
// inherit, by mapping the parent's site back to the source.
static SdfPath
_DetermineInheritPath(
    const SdfPath & parentPath,
    const PcpMapExpression & inheritMap )
{
    // For example, given an inherit map like this:
    //    source: /Class
    //    target: /Model
    //
    // Say we are adding this inherit arc to </Model>; we'll map
    // the target path back to </Class>.
    //
    // Why don't we just use the source path directly?
    // The reason we use a mapping function to represent the arc,
    // rather than simply passing around the path of the class itself,
    // is to let us account for relocations that happened along the
    // way. See TrickySpookyInheritsInSymmetricRig for an example
    // where we reparent a rig's LArm/Anim scope out to the anim
    // interface, and we need to account for the "spooky inherit"
    // back to SymArm/Anim from the new location. The PcpMapFunction
    // lets us account for any relocations needed.
    //
    // We also have to handle variants here. PcpLayerStackSites for variant
    // arcs may contain variant selections. These variant selections are
    // purely used to address the appropriate section of opinion storage
    // in the layer; however, variant selections are *not* an aspect
    // of composed scene namespace, and must never appear in the paths
    // used in mapping functions. Therefore, to add a class arc to a
    // variant-selection site, we take additional measures to strip out
    // the variant selections before mapping the path and then re-add
    // them afterwards.
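    //
    // For illustration (hypothetical paths): suppose parentPath is
    // </Model{lod=high}/Rig> and the inherit maps source
    // </Model/_class_Rig> to target </Model/Rig>. Stripping gives
    // </Model/Rig>, mapping back across the inherit gives
    // </Model/_class_Rig>, and restoring the containing selection
    // yields </Model{lod=high}/_class_Rig>.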
    //
    if (!parentPath.ContainsPrimVariantSelection()) {
        // Easy case: Just map the site back across the inherit.
        return inheritMap.MapTargetToSource(parentPath);
    } else {
        // Harder case: The site path has variant selections.
        // We want to map the site's namespace back across the
        // inherit, but retain the embedded variant selections.

        // Find the nearest containing variant selection.
        SdfPath varPath = _FindContainingVariantSelection(parentPath);
        TF_VERIFY(!varPath.IsEmpty());

        // Strip the variant selections from the site path, apply the
        // inherit mapping, then re-add the variant selections.
        return inheritMap.MapTargetToSource(
                parentPath.StripAllVariantSelections() )
            .ReplacePrefix( varPath.StripAllVariantSelections(), varPath );
    }
}

// A helper that adds a single class-based arc below the given parent,
// returning the new node. If the arc already exists, this
// returns the existing node.
static PcpNodeRef
_AddClassBasedArc(
    PcpArcType arcType,
    PcpNodeRef parent,
    PcpNodeRef origin,
    const PcpMapExpression & inheritMap,
    const int inheritArcNum,
    const PcpLayerStackSite & ignoreIfSameAsSite,
    Pcp_PrimIndexer *indexer )
{
    PCP_INDEXING_PHASE(
        indexer, parent,
        "Preparing to add %s arc to %s",
        TfEnum::GetDisplayName(arcType).c_str(),
        Pcp_FormatSite(parent.GetSite()).c_str());

    PCP_INDEXING_MSG(
        indexer, parent,
        "origin: %s\n"
        "inheritArcNum: %d\n"
        "ignoreIfSameAsSite: %s\n",
        Pcp_FormatSite(origin.GetSite()).c_str(),
        inheritArcNum,
        ignoreIfSameAsSite == PcpLayerStackSite()
            ? "<none>"
            : Pcp_FormatSite(ignoreIfSameAsSite).c_str());

    // Use the inherit map to figure out the site path to inherit.
    SdfPath inheritPath =
        _DetermineInheritPath( parent.GetPath(), inheritMap );

    // We need to check the parent node's arc type in a few places
    // below. PcpNode::GetArcType is insufficient because we could be in a
    // recursive prim indexing call. In that case, we need to know what
    // the arc type will be once this node is incorporated into the parent
    // prim index. We can use the PcpPrimIndex_StackFrameIterator to
    // determine that.
    const PcpArcType parentArcType =
        PcpPrimIndex_StackFrameIterator(parent, indexer->previousFrame)
        .GetArcType();

    if (!inheritPath.IsEmpty()) {
        PCP_INDEXING_MSG(indexer, parent,
                         "Inheriting from path <%s>", inheritPath.GetText());
    }
    else {
        // The parentNode site is outside the co-domain of the inherit.
        // This means there is no appropriate site for the parent
        // to inherit opinions along this inherit arc.
        //
        // For example, this could be an inherit that reaches outside
        // a referenced root to another non-global class, which cannot
        // be mapped across that reference. Or it could be a global
        // inherit in the context of a variant: variants cannot contain
        // opinions about global classes.
        //
        // This is not an error; it just means the class arc is not
        // meaningful from this site.
        PCP_INDEXING_MSG(indexer, parent,
                         "No appropriate site for inheriting opinions");
        return PcpNodeRef();
    }

    PcpLayerStackSite inheritSite( parent.GetLayerStack(), inheritPath );

    // Check if there are multiple inherits with the same site.
    // For example, this might be an implied inherit that was also
    // broken down explicitly.
    if (PcpNodeRef child = _FindMatchingChild(
            parent, parentArcType, inheritSite, arcType, inheritMap,
            origin.GetDepthBelowIntroduction())) {

        PCP_INDEXING_MSG(
            indexer, parent, child,
            "A %s arc to <%s> already exists. Skipping.",
            TfEnum::GetDisplayName(arcType).c_str(), inheritPath.GetText());

        // TODO Need some policy to resolve multiple arcs.  Existing Csd
        //      prefers the weaker of the two.
        //      Currently, this just leaves the one that happened to get
        //      populated first in place, which is too loosey-goosey.
        return child;
    }

    // The class-based arc may map this path unchanged. For example,
    // consider an implied inherit being propagated from under a
    // reference node, that is in turn a child of a relocation node:
    //
    //   root -> relocation -> reference -> inherit
    //                     :
    //                     +--> implied inherit
    //
    // The reference node's mapToParent will apply the effect of the
    // relocations, because it is bringing opinions into a namespace
    // where relocations have been applied. As a result, as soon as
    // the inherit is transferred to become the implied inherit, the
    // implied inherit map function also includes the relocations.
    //
    // When we use it to _DetermineInheritPath() from the relocation node,
    // the relocation source site will end up hitting the identity
    // mapping (/ -> /) that every inherit has, and yield the same
    // path unchanged.
    //
    // We need to add these nodes to the graph to represent the logical
    // presence of the class arc, and to ensure that it continues to
    // be propagated further up the graph. However, we do not want to
    // contribute redundant opinions, so we mark the newly added node
    // with shouldContributeSpecs=false.
    //
    // XXX: This situation is a pretty subtle implication of the way
    // we use PcpNodes to represent (and propagate) inherits. Overall,
    // it seems like an opportunity to find a cleaner representation.
    //
    const bool shouldContributeSpecs =
        (inheritPath != parent.GetPath()) &&
        (inheritSite != ignoreIfSameAsSite);

    // If we hit the cases described above, we need to ensure the placeholder
    // duplicate nodes are added to the graph to ensure the continued
    // propagation of implied classes. Otherwise, duplicate nodes should
    // be skipped over to ensure we don't introduce different paths
    // to the same site.
    const bool skipDuplicateNodes = shouldContributeSpecs;

    // Only local classes need to compute ancestral opinions, since
    // global classes are root nodes.
    const bool includeAncestralOpinions =
        PcpIsLocalClassBasedArc(arcType) && shouldContributeSpecs;

    PcpNodeRef newNode =
        _AddArc( arcType, parent, origin,
                 inheritSite, inheritMap, inheritArcNum,
                 /* directNodeShouldContributeSpecs = */ shouldContributeSpecs,
                 includeAncestralOpinions,
                 /* requirePrimAtTarget = */ false,
                 skipDuplicateNodes,
                 indexer );

    return newNode;
}

// Helper function for adding a list of class-based arcs under the given
// node in the given prim index.
static void
_AddClassBasedArcs(
    PcpPrimIndex* index,
    const PcpNodeRef& node,
    const SdfPathVector& classArcs,
    PcpArcType globalArcType,
    PcpArcType localArcType,
    Pcp_PrimIndexer* indexer)
{
    for (size_t arcNum=0; arcNum < classArcs.size(); ++arcNum) {
        PcpArcType arcType =
            classArcs[arcNum].IsRootPrimPath() ? globalArcType : localArcType;

        PCP_INDEXING_MSG(indexer, node, "Found %s to <%s>",
            TfEnum::GetDisplayName(arcType).c_str(),
            classArcs[arcNum].GetText());

        // The mapping for a class arc maps the class to the instance.
        // Every other path maps to itself.
        PcpMapExpression mapExpr =
            _CreateMapExpressionForArc(
                /* source */ classArcs[arcNum], /* targetNode */ node,
                indexer->inputs)
            .AddRootIdentity();

        _AddClassBasedArc(arcType,
            /* parent = */ node,
            /* origin = */ node,
            mapExpr,
            arcNum,
            /* ignoreIfSameAsSite = */ PcpLayerStackSite(),
            indexer);
    }
}

/// Build the effective map function for an implied class arc.
/// /// \p classArc is the original class arc /// \p transfer is the function that maps the parent of the arc /// to the destination parent /// /// Here is an example: /// /// Say Sullivan_1 references Sullivan, and has a child rig scope Rig /// that inherits a child class _class_Rig: /// /// Sullivan_1 -----reference-----> Sullivan /// | | /// +---Rig +---Rig /// | : | | /// | implicit inherit | inherits /// | : | | /// | V | V /// +---_class_Rig +---_class_Rig /// /// The mapping for the inherit in Sullivan is /// /// source: /Sullivan/_class_Rig /// target: /Sullivan/Rig /// /// The mapping for the reference is: /// /// source: /Sullivan /// target: /Sullivan_1 /// /// The implied classes are determined by applying \p transfer to /// \p classArc. In the same way we apply MapFunctions to individual /// paths to move them between namespaces, we apply functions to other /// functions to move them as well, via PcpMapFunction::Compose(). In /// this example, we use the reference mapping as the function to /// figure out the equivalent implicit class mapping on the left side. /// This ends up giving us the implicit class result: /// /// source: /Sullivan_1/_class_Rig /// target: /Sullivan_1/Rig /// /// In more elaborate cases where relocations are at play, transferFunc /// accounts for the effect of the relocations, and the implied class /// function we return here will also reflect those relocations. /// static PcpMapExpression _GetImpliedClass( const PcpMapExpression & transfer, const PcpMapExpression & classArc ) { if (transfer.IsConstantIdentity()) { return classArc; } return transfer.Compose( classArc.Compose( transfer.Inverse() )) .AddRootIdentity(); } // Check the given node for class-based children, and add corresponding // implied classes to the parent node. static void _EvalImpliedClassTree( PcpPrimIndex *index, PcpNodeRef destNode, PcpNodeRef srcNode, const PcpMapExpression & transferFunc, bool srcNodeIsStartOfTree, Pcp_PrimIndexer *indexer) { // XXX:RelocatesSourceNodes: Avoid propagating implied classes to // relocates nodes here. Classes on relocate nodes only exist as // placeholders so that they can continue to be propagated after // the relocation source tree is added to the prim index in _AddArc. // We don't need to propagate classes to relocate nodes here because // we don't need them to serve as placeholders; instead, we can just // propagate them directly to the relocate node's parent. // // Doing this avoids having to work around path translation subtleties // in _AddClassBasedArc. if (destNode.GetArcType() == PcpArcTypeRelocate) { // Create a transfer function for the relocate node's parent by // composing the relocate node's mapToParent with the given transfer // function. See _EvalImpliedClasses for more details. const PcpMapExpression newTransferFunc = destNode.GetMapToParent().AddRootIdentity().Compose(transferFunc); _EvalImpliedClassTree( index, destNode.GetParentNode(), srcNode, newTransferFunc, srcNodeIsStartOfTree, indexer); // Ensure that any ancestral class hierarchies beginning under // destNode are propagated. This normally occurs naturally when // a new implied class arc is added under destNode. However, // since we're adding implied class arcs to destNode's parent // instead, we have to explicitly add a task to ensure this occurs. // See TrickyInheritsAndRelocates5 for a test case where this is // important. indexer->AddTask(Task(Task::Type::EvalImpliedClasses, destNode)); return; } // Visit all class arcs under srcNode, in arbitrary order. 
    // Walk over the tree below srcNode, pushing to the parent.
    //
    // NOTE: We need to grab a copy of the child list and not just
    //       a reference. The recursive call may cause more nodes to
    //       be added to the graph's node pool, which would invalidate
    //       the reference.
    for (const PcpNodeRef& srcChild : Pcp_GetChildren(srcNode)) {
        // Skip everything that isn't a class-based arc.
        if (!PcpIsClassBasedArc(srcChild.GetArcType()))
            continue;

        PCP_INDEXING_MSG(
            indexer, srcChild, destNode,
            "Attempting to propagate %s of %s to %s.",
            TfEnum::GetDisplayName(srcChild.GetArcType()).c_str(),
            Pcp_FormatSite(srcChild.GetSite()).c_str(),
            Pcp_FormatSite(destNode.GetSite()).c_str());

        // Now, the purpose of this entire function is to propagate an
        // entire class hierarchy below one node, to its parent:
        //
        //      destNode ---> srcNode
        //          :            :
        //          :            :
        //          :            :
        //          :            :
        //         (...classes...)
        //
        // However, consider what happens when destNode inherits
        // srcNode, which also inherits some otherNode:
        //
        //              i            i
        //    destNode ---> srcNode ---> otherNode
        //
        // As we are processing the class-based children of srcNode,
        // we need to somehow distinguish the true children (i.e.
        // namespace descendants) from the arc that continues
        // the destNode --> srcNode --> otherNode chain.
        // We do NOT want to add an implied class arc directly
        // from otherNode to destNode.
        //
        if (srcNodeIsStartOfTree &&
            PcpIsClassBasedArc(srcNode.GetArcType()) &&
            srcNode.GetDepthBelowIntroduction() ==
                srcChild.GetDepthBelowIntroduction()) {

            PCP_INDEXING_MSG(indexer, srcChild, destNode,
                             "Skipping ancestral class");
            continue;
        }

        // Determine the equivalent class mapping under destNode.
        PcpMapExpression destClassFunc =
            _GetImpliedClass(transferFunc, srcChild.GetMapToParent());

        PCP_INDEXING_MSG(
            indexer, srcChild, destNode,
            "Transfer function:\n%s", transferFunc.GetString().c_str());
        PCP_INDEXING_MSG(
            indexer, srcChild, destNode,
            "Implied class:\n%s", destClassFunc.GetString().c_str());

        PcpNodeRef destChild;

        // Check to see if an implied class for srcChild has already been
        // propagated to destNode by examining origin nodes. If we find a
        // child node whose origin matches srcChild, that node must be
        // the implied class for srcChild, so we don't need to redo
        // the work to process it.
        TF_FOR_ALL(destChildIt, Pcp_GetChildrenRange(destNode)) {
            if (destChildIt->GetOriginNode() == srcChild &&
                destChildIt->GetMapToParent().Evaluate() ==
                    destClassFunc.Evaluate()) {
                destChild = *destChildIt;

                PCP_INDEXING_MSG(
                    indexer, srcChild, destChild,
                    "Found previously added implied inherit node");
                break;
            }
        }

        // Try to add this implied class.
        //
        // This may fail if there's no equivalent site to inherit, due to
        // the namespace domains of the mappings involved. Or it may
        // return an existing node if destNode already inherits the site.
        //
        // We use the same origin and sibling number information
        // as the srcChild in order to properly account for the
        // effective strength of this implied class. For example,
        // there may be multiple class arcs from srcNode that
        // we are pushing to destNode, and we need to preserve
        // their relative strength. destNode may also end up
        // receiving implied classes from multiple different
        // sources; we rely on their distinct origins to reconcile
        // their strength.
        //
        // It is also possible that the newly added class arc would
        // represent a redundant arc in the scene, due to relocations
        // or variants. For example, this might be an inherit of
        // a class outside the scope of the relocation or variant.
        // We do not want to contribute redundant opinions to the
        // scene, but we still want to continue propagating the
        // inherit arc up the graph. To handle this, we provide
        // the ignoreIfSameAsSite (the inherit site we are propagating)
        // so that _AddClassBasedArc() can determine if this would be
        // a redundant inherit.
        //
        if (!destChild) {
            destChild = _AddClassBasedArc(
                srcChild.GetArcType(),
                /* parent = */ destNode,
                /* origin = */ srcChild,
                destClassFunc,
                srcChild.GetSiblingNumAtOrigin(),
                /* ignoreIfSameAsSite = */ srcChild.GetSite(),
                indexer);
        }

        // If we successfully added the arc (or found it already existed)
        // recurse on nested classes. This will build up the full
        // class hierarchy that we are inheriting.
        // Optimization: Recursion requires some cost to set up
        // childTransferFunc, below. Before we do that work,
        // check if there are any nested inherits.
        if (destChild && _HasClassBasedChild(srcChild)) {
            // Determine the transferFunc to use for the nested child,
            // by composing the functions to walk up from the srcChild,
            // across the transferFunc, and down to the destChild.
            // (Since we are walking down to destChild, we use the
            // inverse of its mapToParent.)
            //
            // This gives us a childTransferFunc that will map the
            // srcChild namespace to the destChild namespace, so
            // that we can continue propagating implied classes from there.
            //
            PcpMapExpression childTransferFunc =
                destClassFunc.Inverse()
                .Compose(transferFunc.Compose(srcChild.GetMapToParent()));

            _EvalImpliedClassTree(index, destChild, srcChild,
                                  childTransferFunc,
                                  /* srcNodeIsStartOfTree = */ false,
                                  indexer);
        }
    }
}

static bool
_IsPropagatedSpecializesNode(
    const PcpNodeRef& node);

static void
_EvalImpliedClasses(
    PcpPrimIndex *index,
    PcpNodeRef node,
    Pcp_PrimIndexer *indexer)
{
    PCP_INDEXING_PHASE(
        indexer, node,
        "Evaluating implied classes at %s",
        Pcp_FormatSite(node.GetSite()).c_str());

    // If this is the root node, there is no need to propagate classes.
    if (!node.GetParentNode())
        return;

    // Do not allow inherits to propagate from beneath propagated
    // specializes arcs. These inherits need to be propagated from
    // the origin of these specializes arcs -- this ensures the origin
    // nodes of the propagated inherits have a consistent strength
    // ordering. This is handled with the implied specializes task.
    if (_IsPropagatedSpecializesNode(node)) {
        return;
    }

    // Optimization: early-out if there are no class arcs to propagate.
    if (!_HasClassBasedChild(node)) {
        return;
    }

    // Grab the mapping to the parent node.
    // We will use it to map ("transfer") the class to the parent.
    // The mapping to the parent may have a restricted domain, such as
    // for a reference arc, which only maps the reference root prim.
    // To map global classes across such a mapping, we need to add
    // an identity (/->/) entry. This is not a violation of reference
    // namespace encapsulation: classes deliberately work this way.
    PcpMapExpression transferFunc = node.GetMapToParent().AddRootIdentity();

    _EvalImpliedClassTree( index, node.GetParentNode(), node,
                           transferFunc,
                           /* srcNodeIsStartOfTree = */ true,
                           indexer );
}

////////////////////////////////////////////////////////////////////////
// Inherits

// Evaluate any inherit arcs expressed directly at node.
static void
_EvalNodeInherits(
    PcpPrimIndex *index,
    PcpNodeRef node,
    Pcp_PrimIndexer *indexer)
{
    PCP_INDEXING_PHASE(
        indexer, node,
        "Evaluating inherits at %s",
        Pcp_FormatSite(node.GetSite()).c_str());

    if (!node.CanContributeSpecs())
        return;

    // Compose value for local inherits.
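    //
    // For illustration (hypothetical layer content): a prim spec with
    //
    //   inheritPaths = [ </_class_Model>, </Model/_class_Rig> ]
    //
    // composes those paths into inhArcs below; </_class_Model> is a
    // root prim path and becomes a global inherit arc, while
    // </Model/_class_Rig> becomes a local inherit arc.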
SdfPathVector inhArcs; PcpComposeSiteInherits(node, &inhArcs); // Add inherits arcs. _AddClassBasedArcs( index, node, inhArcs, PcpArcTypeGlobalInherit, PcpArcTypeLocalInherit, indexer); } //////////////////////////////////////////////////////////////////////// // Specializes // Evaluate any specializes arcs expressed directly at node. static void _EvalNodeSpecializes( PcpPrimIndex* index, const PcpNodeRef& node, Pcp_PrimIndexer* indexer) { PCP_INDEXING_PHASE( indexer, node, "Evaluating specializes at %s", Pcp_FormatSite(node.GetSite()).c_str()); if (!node.CanContributeSpecs()) return; // Compose value for local specializes. SdfPathVector specArcs; PcpComposeSiteSpecializes(node, &specArcs); // Add specializes arcs. _AddClassBasedArcs( index, node, specArcs, PcpArcTypeGlobalSpecializes, PcpArcTypeLocalSpecializes, indexer); } // Returns true if the given node is a specializes node that // has been propagated to the root of the graph for strength // ordering purposes in _EvalImpliedSpecializes. static bool _IsPropagatedSpecializesNode( const PcpNodeRef& node) { return (PcpIsSpecializesArc(node.GetArcType()) && node.GetParentNode() == node.GetRootNode() && node.GetSite() == node.GetOriginNode().GetSite()); } static bool _IsNodeInSubtree( const PcpNodeRef& node, const PcpNodeRef& subtreeRoot) { for (PcpNodeRef n = node; n; n = n.GetParentNode()) { if (n == subtreeRoot) { return true; } } return false; } static PcpNodeRef _PropagateNodeToParent( PcpNodeRef parentNode, PcpNodeRef srcNode, bool skipImpliedSpecializes, const PcpMapExpression& mapToParent, const PcpNodeRef& srcTreeRoot, Pcp_PrimIndexer* indexer) { PcpNodeRef newNode; if (srcNode.GetParentNode() == parentNode) { newNode = srcNode; } else { newNode = _FindMatchingChild( parentNode, parentNode.GetArcType(), srcNode.GetSite(), srcNode.GetArcType(), mapToParent, srcNode.GetDepthBelowIntroduction()); if (!newNode) { // Only propagate a node if it's a direct arc or if it's an // implied arc whose origin is outside the subgraph we're // propagating. If this is an implied arc whose origin is // within the subgraph, it will be handled when we evaluate // implied class arcs on the subgraph being propagated. if (!_IsImpliedClassBasedArc(srcNode) || !_IsNodeInSubtree(srcNode.GetOriginNode(), srcTreeRoot)) { const int namespaceDepth = (srcNode == srcTreeRoot ? PcpNode_GetNonVariantPathElementCount( parentNode.GetPath()) : srcNode.GetNamespaceDepth()); const PcpNodeRef originNode = (srcNode == srcTreeRoot || _IsImpliedClassBasedArc(srcNode) ? srcNode : parentNode); newNode = _AddArc(srcNode.GetArcType(), /* parent = */ parentNode, /* origin = */ originNode, srcNode.GetSite(), mapToParent, srcNode.GetSiblingNumAtOrigin(), namespaceDepth, /* directNodeShouldContributeSpecs = */ !srcNode.IsInert(), /* includeAncestralOpinions = */ false, /* requirePrimAtTarget = */ false, /* skipDuplicateNodes = */ false, skipImpliedSpecializes, indexer); } } if (newNode) { newNode.SetInert(srcNode.IsInert()); newNode.SetHasSymmetry(srcNode.HasSymmetry()); newNode.SetPermission(srcNode.GetPermission()); newNode.SetRestricted(srcNode.IsRestricted()); srcNode.SetInert(true); } else { _InertSubtree(srcNode); } } return newNode; } static PcpNodeRef _PropagateSpecializesTreeToRoot( PcpPrimIndex* index, PcpNodeRef parentNode, PcpNodeRef srcNode, PcpNodeRef originNode, const PcpMapExpression& mapToParent, const PcpNodeRef& srcTreeRoot, Pcp_PrimIndexer* indexer) { // Make sure to skip implied specializes tasks for the propagated // node. 
    // Otherwise, we'll wind up propagating this node back to its
    // originating subtree, which will leave it inert.
    const bool skipImpliedSpecializes = true;

    PcpNodeRef newNode = _PropagateNodeToParent(
        parentNode, srcNode, skipImpliedSpecializes,
        mapToParent, srcTreeRoot, indexer);
    if (!newNode) {
        return newNode;
    }

    for (PcpNodeRef childNode : Pcp_GetChildren(srcNode)) {
        if (!PcpIsSpecializesArc(childNode.GetArcType())) {
            _PropagateSpecializesTreeToRoot(
                index, newNode, childNode, newNode,
                childNode.GetMapToParent(), srcTreeRoot, indexer);
        }
    }

    return newNode;
}

static void
_FindSpecializesToPropagateToRoot(
    PcpPrimIndex* index,
    PcpNodeRef node,
    Pcp_PrimIndexer* indexer)
{
    // XXX:RelocatesSourceNodes: This node may be a placeholder
    // implied arc under a relocation node that is only present
    // to allow class-based arcs to be implied up the prim index.
    // These placeholders are not valid sources of opinions, so
    // we can cut off our search for specializes to propagate.
    const PcpNodeRef parentNode = node.GetParentNode();
    const bool nodeIsRelocatesPlaceholder =
        parentNode != node.GetOriginNode() &&
        parentNode.GetArcType() == PcpArcTypeRelocate &&
        parentNode.GetSite() == node.GetSite();
    if (nodeIsRelocatesPlaceholder) {
        return;
    }

    if (PcpIsSpecializesArc(node.GetArcType())) {
        PCP_INDEXING_MSG(
            indexer, node, node.GetRootNode(),
            "Propagating specializes arc %s to root",
            Pcp_FormatSite(node.GetSite()).c_str());

        // HACK: When we propagate specializes arcs from the root
        // to their origin in _PropagateArcsToOrigin, we will mark
        // them as inert=false. However, we will *not* do the same
        // for any of the implied specializes that originate from
        // that arc -- they will be left with inert=true.
        //
        // If we wind up having to propagate these implied specializes
        // back to the root, we will wind up copying the inert=true
        // flag, which isn't what we want. Instead of trying to fix
        // up the implied specializes in _PropagateArcsToOrigin,
        // it's much simpler if we just deal with that here by forcing
        // the specializes node to inert=false.
        node.SetInert(false);

        _PropagateSpecializesTreeToRoot(
            index, index->GetRootNode(), node, node,
            node.GetMapToRoot(), node, indexer);
    }

    for (PcpNodeRef childNode : Pcp_GetChildren(node)) {
        _FindSpecializesToPropagateToRoot(index, childNode, indexer);
    }
}

static void
_PropagateArcsToOrigin(
    PcpPrimIndex* index,
    PcpNodeRef parentNode,
    PcpNodeRef srcNode,
    const PcpMapExpression& mapToParent,
    const PcpNodeRef& srcTreeRoot,
    Pcp_PrimIndexer* indexer)
{
    // Don't skip implied specializes tasks as we propagate arcs back
    // to the origin. If one of the arcs we propagate back is another
    // specializes arc, we need to ensure that arc is propagated back
    // to the root later on.
    const bool skipImpliedSpecializes = false;

    PcpNodeRef newNode = _PropagateNodeToParent(
        parentNode, srcNode, skipImpliedSpecializes,
        mapToParent, srcTreeRoot, indexer);
    if (!newNode) {
        return;
    }

    for (PcpNodeRef childNode : Pcp_GetChildren(srcNode)) {
        _PropagateArcsToOrigin(
            index, newNode, childNode, childNode.GetMapToParent(),
            srcTreeRoot, indexer);
    }
}

static void
_FindArcsToPropagateToOrigin(
    PcpPrimIndex* index,
    const PcpNodeRef& node,
    Pcp_PrimIndexer* indexer)
{
    TF_VERIFY(PcpIsSpecializesArc(node.GetArcType()));

    for (PcpNodeRef childNode : Pcp_GetChildren(node)) {
        PCP_INDEXING_MSG(
            indexer, childNode, node.GetOriginNode(),
            "Propagating arcs under %s to specializes origin %s",
            Pcp_FormatSite(childNode.GetSite()).c_str(),
            Pcp_FormatSite(node.GetOriginNode().GetSite()).c_str());

        _PropagateArcsToOrigin(
            index, node.GetOriginNode(), childNode,
            childNode.GetMapToParent(), node, indexer);
    }
}

// Opinions from specializes arcs, including those that are implied across
// other arcs, are always weaker than the target of those arcs. Conceptually,
// this means that opinions from all specializes arcs (and any encapsulated
// arcs) come after all other opinions.
//
//                                   ref
// For instance,            Model ---------> Ref
// given this example:      |                |
//                          +- Instance      +- Instance
//                          |   :            |   :
//                          |   : implied    |   : specializes
//                          |   v            |   v
//                          +- Class         +- Class
//
// The intended strength ordering for /Model/Instance is:
//   [/Model/Instance, /Ref/Instance, /Model/Class, /Ref/Class].
//
// To achieve this, we propagate specializes subgraphs in the prim index
// to the root of the graph. Strength ordering will then place the
// specializes arcs at the end of the graph, after all other arcs.
//
// We need to reverse this process when we discover additional arcs
// beneath the specializes subgraphs that have been propagated to the
// root. This can happen if there are namespace children beneath the
// source of a specializes arc with their own arcs. This can also
// happen if we discover variants after processing implied specializes.
//
// When we encounter this situation, the specializes subgraph is
// propagated back to its origin. The primary purpose of this is to
// allow any implied arcs to be propagated to the necessary locations
// using the already-existing mechanisms. Once that's done,
// the subgraph will be propagated back to the root.
//
static void
_EvalImpliedSpecializes(
    PcpPrimIndex* index,
    const PcpNodeRef& node,
    Pcp_PrimIndexer* indexer)
{
    PCP_INDEXING_PHASE(
        indexer, node,
        "Evaluating implied specializes at %s",
        Pcp_FormatSite(node.GetSite()).c_str());

    // If this is the root node, there is no need to propagate specializes.
    if (!node.GetParentNode())
        return;

    if (_IsPropagatedSpecializesNode(node)) {
        _FindArcsToPropagateToOrigin(index, node, indexer);
    }
    else {
        _FindSpecializesToPropagateToRoot(index, node, indexer);
    }
}

////////////////////////////////////////////////////////////////////////
// Variants

static bool
_ComposeVariantSelectionForNode(
    const PcpNodeRef& node,
    const SdfPath& pathInNode,
    const std::string & vset,
    std::string *vsel,
    PcpNodeRef *nodeWithVsel,
    PcpPrimIndexOutputs *outputs)
{
    TF_VERIFY(!pathInNode.IsEmpty());

    // We are using path-translation to walk between nodes, so we
    // are working exclusively in namespace paths, which must have
    // no variant selection.
    TF_VERIFY(!pathInNode.ContainsPrimVariantSelection(),
              "Unexpected variant selection in namespace path <%s>",
              pathInNode.GetText());

    // If this node has an authored selection, use that.
// Note that we use this even if the authored selection is // the empty string, which explicitly selects no variant. if (node.CanContributeSpecs()) { PcpLayerStackSite site(node.GetLayerStack(), pathInNode); // pathInNode is a namespace path, not a storage path, // so it will contain no variant selection (as verified above). // To find the storage site, we need to insert any variant // selection for this node. if (node.GetArcType() == PcpArcTypeVariant) { site.path = pathInNode.ReplacePrefix( node.GetPath().StripAllVariantSelections(), node.GetPath()); } if (PcpComposeSiteVariantSelection( site.layerStack, site.path, vset, vsel)) { *nodeWithVsel = node; return true; } } return false; } // Check the tree of nodes rooted at the given node for any node // representing a prior selection for the given variant set. static bool _FindPriorVariantSelection( const PcpNodeRef& node, int ancestorRecursionDepth, const std::string & vset, std::string *vsel, PcpNodeRef *nodeWithVsel) { if (node.GetArcType() == PcpArcTypeVariant && node.GetDepthBelowIntroduction() == ancestorRecursionDepth) { // If this node represents a variant selection at the same // effective depth of namespace, check its selection. const std::pair<std::string, std::string> nodeVsel = node.GetPathAtIntroduction().GetVariantSelection(); if (nodeVsel.first == vset) { *vsel = nodeVsel.second; *nodeWithVsel = node; return true; } } TF_FOR_ALL(child, Pcp_GetChildrenRange(node)) { if (_FindPriorVariantSelection( *child, ancestorRecursionDepth, vset, vsel, nodeWithVsel)) { return true; } } return false; } typedef std::pair<PcpPrimIndex_StackFrame*, PcpNodeRef> _StackFrameAndChildNode; typedef std::vector<_StackFrameAndChildNode> _StackFrameAndChildNodeVector; static bool _ComposeVariantSelectionAcrossStackFrames( const PcpNodeRef& node, const SdfPath& pathInNode, const std::string & vset, std::string *vsel, _StackFrameAndChildNodeVector *stackFrames, PcpNodeRef *nodeWithVsel, PcpPrimIndexOutputs *outputs) { // Compose variant selection in strong-to-weak order. if (_ComposeVariantSelectionForNode( node, pathInNode, vset, vsel, nodeWithVsel, outputs)) { return true; } // If we're in recursive prim index construction and hit the end // of a graph produced by the current stack frame, we need to look // at the next stack frame to continue the traversal to the next // part of the graph. // // XXX: See XXX comment in _ComposeVariantSelection. This probably has // the same bug. The real fix would be to figure out where the // graph for the next stack frame would be inserted into the // current node's children in the below for loop and deal with it // there. 
const bool atEndOfStack = (!stackFrames->empty() && node == stackFrames->back().first->parentNode); if (atEndOfStack) { const _StackFrameAndChildNode nextFrame = stackFrames->back(); stackFrames->pop_back(); const PcpNodeRef& childNode = nextFrame.second; const SdfPath pathInChildNode = nextFrame.first->arcToParent->mapToParent .MapTargetToSource(pathInNode); if (!pathInChildNode.IsEmpty()) { return _ComposeVariantSelectionAcrossStackFrames( childNode, pathInChildNode, vset, vsel, stackFrames, nodeWithVsel, outputs); } return false; } TF_FOR_ALL(child, Pcp_GetChildrenRange(node)) { const PcpNodeRef& childNode = *child; const SdfPath pathInChildNode = childNode.GetMapToParent().MapTargetToSource(pathInNode); if (!pathInChildNode.IsEmpty() && _ComposeVariantSelectionAcrossStackFrames( *child, pathInChildNode, vset, vsel, stackFrames, nodeWithVsel, outputs)) { return true; } } return false; } static void _ComposeVariantSelection( int ancestorRecursionDepth, PcpPrimIndex_StackFrame *previousFrame, PcpNodeRef node, const SdfPath &pathInNode, const std::string &vset, std::string *vsel, PcpNodeRef *nodeWithVsel, PcpPrimIndexOutputs *outputs) { TRACE_FUNCTION(); TF_VERIFY(!pathInNode.IsEmpty()); TF_VERIFY(!pathInNode.ContainsPrimVariantSelection(), "%s", pathInNode.GetText()); // First check if we have already resolved this variant set. // Try all nodes in all parent frames; ancestorRecursionDepth // accounts for any ancestral recursion. { PcpNodeRef rootNode = node.GetRootNode(); PcpPrimIndex_StackFrame *prevFrame = previousFrame; while (rootNode) { if (_FindPriorVariantSelection(rootNode, ancestorRecursionDepth, vset, vsel, nodeWithVsel)) { return; } if (prevFrame) { rootNode = prevFrame->parentNode.GetRootNode(); prevFrame = prevFrame->previousFrame; } else { break; } } } // We want to look for variant selections in all nodes that have been // added up to this point. Note that Pcp may pick up variant // selections from weaker locations than the node for which // we are evaluating variants. // // See bug 106950 and TrickyVariantWeakerSelection for more details. // // This is really a simple strength-order traversal of the // current prim index. It is complicated by the fact that we // may be in the middle of recursive calls to Pcp_BuildPrimIndex // that are building up subgraphs that will eventually be joined // together. To deal with this, we need to keep track of the // stack frames for these recursive calls so that we can traverse // the prim index as if it were fully constructed. // // Translate the given path up to the root node of the *entire* // prim index under construction, keeping track of when we need // to hop across a stack frame. Note that we cannot use mapToRoot // here, since it is not valid until the graph is finalized. _StackFrameAndChildNodeVector previousStackFrames; PcpNodeRef rootNode = node; SdfPath pathInRoot = pathInNode; while (1) { while (rootNode.GetParentNode()) { pathInRoot = rootNode. GetMapToParent().MapSourceToTarget(pathInRoot); rootNode = rootNode.GetParentNode(); } if (!previousFrame) { break; } // There may not be a valid mapping for the current path across // the previous stack frame. For example, this may happen when // trying to compose ancestral variant selections on a sub-root // reference (see SubrootReferenceAndVariants for an example). // This failure means there are no further sites with relevant // variant selection opinions across this stack frame. 
        // In this case, we break out of the loop and only search the
        // portion of the prim index we've traversed.
        const SdfPath pathInPreviousFrame =
            previousFrame->arcToParent->mapToParent.MapSourceToTarget(
                pathInRoot);
        if (pathInPreviousFrame.IsEmpty()) {
            break;
        }

        previousStackFrames.push_back(
            _StackFrameAndChildNode(previousFrame, rootNode));
        pathInRoot = pathInPreviousFrame;
        rootNode = previousFrame->parentNode;
        previousFrame = previousFrame->previousFrame;
    }

    // Now recursively walk the prim index in strong-to-weak order
    // looking for a variant selection.
    _ComposeVariantSelectionAcrossStackFrames(
        rootNode, pathInRoot, vset, vsel, &previousStackFrames,
        nodeWithVsel, outputs);
}

static bool
_ShouldUseVariantFallback(
    const Pcp_PrimIndexer *indexer,
    const std::string& vset,
    const std::string& vsel,
    const std::string& vselFallback,
    const PcpNodeRef &nodeWithVsel)
{
    // Can't use fallback if we don't have one.
    if (vselFallback.empty()) {
        return false;
    }

    // If there's no variant selected then use the default.
    if (vsel.empty()) {
        return true;
    }

    // The "standin" variant set has special behavior, below.
    // All other variant sets default when there is no selection.
    //
    // XXX This logic can be simpler when we remove the old standin stuff
    if (vset != "standin") {
        return false;
    }

    // If we're using the new behavior then the preferences can't win over
    // the opinion in vsel.
    if (PcpIsNewDefaultStandinBehaviorEnabled()) {
        return false;
    }

    // From here down we're trying to match the Csd policy, which can
    // be rather peculiar. See bugs 29039 and 32264 for history that
    // led to some of these policies.

    // If nodeWithVsel is a variant node that makes a selection for vset,
    // it structurally represents the fact that we have already decided
    // which variant selection to use for vset in this primIndex. In
    // this case, we do not want to apply standin preferences, because
    // we will have already applied them.
    //
    // (Applying the policy again here could give us an incorrect result,
    // because this might be a different nodeWithVsel than was used
    // originally to apply the policy.)
    if (nodeWithVsel.GetArcType() == PcpArcTypeVariant &&
        nodeWithVsel.GetPath().IsPrimVariantSelectionPath() &&
        nodeWithVsel.GetPath().GetVariantSelection().first == vset) {
        return false;
    }

    // Use the standin preference if the authored selection came from
    // inside the payload.
    for (PcpNodeRef n = nodeWithVsel; n; n = n.GetParentNode()) {
        if (n.GetArcType() == PcpArcTypePayload) {
            return true;
        }
    }

    // Use vsel if it came from a session layer, otherwise check the
    // standin preferences. For efficiency, we iterate over the full
    // layer stack instead of using PcpLayerStack::GetSessionLayerStack.
    const SdfLayerHandle rootLayer =
        indexer->rootSite.layerStack->GetIdentifier().rootLayer;
    TF_FOR_ALL(layer, indexer->rootSite.layerStack->GetLayers()) {
        if (*layer == rootLayer) {
            break;
        }

        static const TfToken field = SdfFieldKeys->VariantSelection;
        const VtValue& value =
            (*layer)->GetField(indexer->rootSite.path, field);
        if (value.IsHolding<SdfVariantSelectionMap>()) {
            const SdfVariantSelectionMap & vselMap =
                value.UncheckedGet<SdfVariantSelectionMap>();
            SdfVariantSelectionMap::const_iterator i = vselMap.find(vset);
            if (i != vselMap.end() && i->second == vsel) {
                // Standin selection came from the session layer.
                return false;
            }
        }
    }

    // If we don't have a standin selection in the root node then check
    // the standin preferences.
if (nodeWithVsel.GetArcType() != PcpArcTypeRoot) { return true; } return false; } static std::string _ChooseBestFallbackAmongOptions( const std::string &vset, const std::set<std::string> &vsetOptions, const PcpVariantFallbackMap& variantFallbacks) { PcpVariantFallbackMap::const_iterator vsetIt = variantFallbacks.find(vset); if (vsetIt != variantFallbacks.end()) { for (const auto &vselIt: vsetIt->second) { if (vsetOptions.find(vselIt) != vsetOptions.end()) { return vselIt; } } } return std::string(); } static void _AddVariantArc(Pcp_PrimIndexer *indexer, const PcpNodeRef &node, const std::string &vset, int vsetNum, const std::string &vsel) { // Variants do not remap the scenegraph's namespace, they simply // represent a branch off into a different section of the layer // storage. For this reason, the source site includes the // variant selection but the mapping function is identity. SdfPath varPath = node.GetSite().path.AppendVariantSelection(vset, vsel); if (_AddArc(PcpArcTypeVariant, /* parent = */ node, /* origin = */ node, PcpLayerStackSite( node.GetLayerStack(), varPath ), /* mapExpression = */ PcpMapExpression::Identity(), /* arcSiblingNum = */ vsetNum, /* directNodeShouldContributeSpecs = */ true, /* includeAncestralOpinions = */ false, /* requirePrimAtTarget = */ false, /* skipDuplicateNodes = */ false, indexer )) { // If we expanded a variant set, it may have introduced new // authored variant selections, so we must retry any pending // variant tasks as authored tasks. indexer->RetryVariantTasks(); } } static void _EvalNodeVariantSets( PcpPrimIndex *index, const PcpNodeRef& node, Pcp_PrimIndexer *indexer) { PCP_INDEXING_PHASE( indexer, node, "Evaluating variant sets at %s", Pcp_FormatSite(node.GetSite()).c_str()); if (!node.CanContributeSpecs()) return; std::vector<std::string> vsetNames; PcpComposeSiteVariantSets(node, &vsetNames); for (int vsetNum=0, numVsets=vsetNames.size(); vsetNum < numVsets; ++vsetNum) { indexer->AddTask(Task(Task::Type::EvalNodeVariantAuthored, node, std::move(vsetNames[vsetNum]), vsetNum)); } } static void _EvalNodeAuthoredVariant( PcpPrimIndex *index, const PcpNodeRef& node, Pcp_PrimIndexer *indexer, const std::string &vset, int vsetNum) { PCP_INDEXING_PHASE( indexer, node, "Evaluating authored selections for variant set %s at %s", vset.c_str(), Pcp_FormatSite(node.GetSite()).c_str()); if (!node.CanContributeSpecs()) return; // Compose options. std::set<std::string> vsetOptions; PcpComposeSiteVariantSetOptions(node, vset, &vsetOptions); // Determine what the fallback selection would be. // Generally speaking, authoring opinions win over fallbacks, however if // MENV30_ENABLE_NEW_DEFAULT_STANDIN_BEHAVIOR==false then that is not // always the case, and we must check the fallback here first. // TODO Remove this once we phase out the old behavior! const std::string vselFallback = _ChooseBestFallbackAmongOptions( vset, vsetOptions, *indexer->inputs.variantFallbacks ); if (!vselFallback.empty()) { PCP_INDEXING_MSG( indexer, node, "Found fallback {%s=%s}", vset.c_str(), vselFallback.c_str()); } // Determine the authored variant selection for this set, if any. 
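    //
    // For illustration (hypothetical layer content): an authored
    // selection {standin=render} at some contributing site composes to
    // vsel = "render" below. Note that an authored selection may also
    // compose to the empty string, which explicitly selects no variant.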
    std::string vsel;
    PcpNodeRef nodeWithVsel;
    _ComposeVariantSelection(indexer->ancestorRecursionDepth,
                             indexer->previousFrame, node,
                             node.GetPath().StripAllVariantSelections(),
                             vset, &vsel, &nodeWithVsel,
                             indexer->outputs);
    if (!vsel.empty()) {
        PCP_INDEXING_MSG(
            indexer, node, "Found variant selection {%s=%s} at %s",
            vset.c_str(), vsel.c_str(),
            Pcp_FormatSite(nodeWithVsel.GetSite()).c_str());
    }
    // Check if we should use the fallback
    if (_ShouldUseVariantFallback(indexer, vset, vsel, vselFallback,
                                  nodeWithVsel)) {
        PCP_INDEXING_MSG(indexer, node, "Deferring to variant fallback");
        indexer->AddTask(Task(Task::Type::EvalNodeVariantFallback,
                              node, vset, vsetNum));
        return;
    }
    // If no variant was chosen, do not expand this variant set.
    if (vsel.empty()) {
        PCP_INDEXING_MSG(indexer, node,
                         "No variant selection found for set '%s'",
                         vset.c_str());
        indexer->AddTask(Task(Task::Type::EvalNodeVariantNoneFound,
                              node, vset, vsetNum));
        return;
    }

    _AddVariantArc(indexer, node, vset, vsetNum, vsel);
}

static void
_EvalNodeFallbackVariant(
    PcpPrimIndex *index,
    const PcpNodeRef& node,
    Pcp_PrimIndexer *indexer,
    const std::string &vset,
    int vsetNum)
{
    PCP_INDEXING_PHASE(
        indexer, node,
        "Evaluating fallback selections for variant set %s at %s",
        vset.c_str(),
        Pcp_FormatSite(node.GetSite()).c_str());

    if (!node.CanContributeSpecs())
        return;

    // Compose options.
    std::set<std::string> vsetOptions;
    PcpComposeSiteVariantSetOptions(node, vset, &vsetOptions);

    // Determine what the fallback selection would be.
    const std::string vsel =
        _ChooseBestFallbackAmongOptions( vset, vsetOptions,
                                         *indexer->inputs.variantFallbacks );
    // If no variant was chosen, do not expand this variant set.
    if (vsel.empty()) {
        PCP_INDEXING_MSG(indexer, node,
                         "No variant fallback found for set '%s'",
                         vset.c_str());
        indexer->AddTask(Task(Task::Type::EvalNodeVariantNoneFound,
                              node, vset, vsetNum));
        return;
    }

    _AddVariantArc(indexer, node, vset, vsetNum, vsel);
}

////////////////////////////////////////////////////////////////////////
// Payload

static void
_EvalNodePayload(
    PcpPrimIndex *index,
    const PcpNodeRef& node,
    Pcp_PrimIndexer *indexer)
{
    PCP_INDEXING_PHASE(
        indexer, node, "Evaluating payload for %s",
        Pcp_FormatSite(node.GetSite()).c_str());

    if (!node.CanContributeSpecs()) {
        return;
    }

    // Compose payload arc for node.
    //
    // XXX We currently only support a single arc per layer stack site,
    //     but we could potentially support multiple targets here, just
    //     like we do with references.
    //
    SdfPayload payload;
    SdfLayerHandle payloadSpecLayer;
    PcpComposeSitePayload(node, &payload, &payloadSpecLayer);
    if (!payload) {
        return;
    }

    PCP_INDEXING_MSG(
        indexer, node, "Found payload @%s@<%s>",
        payload.GetAssetPath().c_str(), payload.GetPrimPath().GetText());

    // Mark that this prim index contains a payload.
    // However, only process the payload if it's been requested.
    index->GetGraph()->SetHasPayload(true);

    const PcpPrimIndexInputs::PayloadSet* includedPayloads =
        indexer->inputs.includedPayloads;

    // If includedPayloads is nullptr, we never include payloads. Otherwise if
    // it does not have this path, we invoke the predicate. If the predicate
    // returns true we set the output bit includedDiscoveredPayload and we
    // compose it.
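    //
    // For illustration (hypothetical caller setup): a client that wants
    // payloads loaded under </World/Set> might add those prim paths to
    // inputs.includedPayloads up front, or install a predicate along
    // the lines of
    //
    //   [](SdfPath const &p) { return p.HasPrefix(SdfPath("/World/Set")); }
    //
    // Paths that pass neither check are skipped below.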
if (!includedPayloads) { PCP_INDEXING_MSG(indexer, node, "Payload was not included, skipping"); return; } SdfPath const &path = indexer->rootSite.path; tbb::spin_rw_mutex::scoped_lock lock; auto *mutex = indexer->inputs.includedPayloadsMutex; if (mutex) { lock.acquire(*mutex, /*write=*/false); } bool inIncludeSet = includedPayloads->count(path); if (mutex) { lock.release(); } if (!inIncludeSet) { auto const &pred = indexer->inputs.includePayloadPredicate; if (pred && pred(path)) { indexer->outputs->includedDiscoveredPayload = true; } else { PCP_INDEXING_MSG(indexer, node, "Payload <%s> was not included, skipping", path.GetText()); return; } } // Verify the payload prim path. if (!payload.GetPrimPath().IsEmpty() && !(payload.GetPrimPath().IsAbsolutePath() && payload.GetPrimPath().IsPrimPath())) { PcpErrorInvalidPrimPathPtr err = PcpErrorInvalidPrimPath::New(); err->rootSite = PcpSite(node.GetSite()); err->site = PcpSite(node.GetSite()); err->primPath = payload.GetPrimPath(); err->arcType = PcpArcTypePayload; indexer->RecordError(err); return; } // Resolve the payload asset path. std::string canonicalMutedLayerId; if (indexer->inputs.cache->IsLayerMuted( payloadSpecLayer, payload.GetAssetPath(), &canonicalMutedLayerId)) { PcpErrorMutedAssetPathPtr err = PcpErrorMutedAssetPath::New(); err->rootSite = PcpSite(node.GetSite()); err->site = PcpSite(node.GetSite()); err->targetPath = payload.GetPrimPath(); err->assetPath = payload.GetAssetPath(); err->resolvedAssetPath = canonicalMutedLayerId; err->arcType = PcpArcTypePayload; err->layer = payloadSpecLayer; indexer->RecordError(err); return; } // Apply payload decorators SdfLayer::FileFormatArguments args; if (indexer->inputs.payloadDecorator) { PcpPayloadContext payloadCtx = Pcp_CreatePayloadContext( node, indexer->previousFrame); indexer->inputs.payloadDecorator-> DecoratePayload(indexer->rootSite.path, payload, payloadCtx, &args); } Pcp_GetArgumentsForTargetSchema(indexer->inputs.targetSchema, &args); // Resolve asset path // See Pcp_NeedToRecomputeDueToAssetPathChange std::string resolvedAssetPath(payload.GetAssetPath()); TfErrorMark m; SdfLayerRefPtr payloadLayer = SdfFindOrOpenRelativeToLayer( payloadSpecLayer, &resolvedAssetPath, args); if (!payloadLayer) { PcpErrorInvalidAssetPathPtr err = PcpErrorInvalidAssetPath::New(); err->rootSite = PcpSite(node.GetRootNode().GetSite()); err->site = PcpSite(node.GetSite()); err->targetPath = payload.GetPrimPath(); err->assetPath = payload.GetAssetPath(); err->resolvedAssetPath = resolvedAssetPath; err->arcType = PcpArcTypePayload; err->layer = payloadSpecLayer; if (!m.IsClean()) { vector<string> commentary; for (auto const &err: m) { commentary.push_back(err.GetCommentary()); } m.Clear(); err->messages = TfStringJoin(commentary.begin(), commentary.end(), "; "); } indexer->RecordError(err); return; } m.Clear(); // Check if the payload layer is in the root node's layer stack. // If so, we report an error. (Internal payloads are disallowed.) const PcpLayerStackPtr rootLayerStack = node.GetLayerStack(); if (rootLayerStack->HasLayer(payloadLayer)) { PcpErrorInternalAssetPathPtr err = PcpErrorInternalAssetPath::New(); err->rootSite = PcpSite(node.GetRootNode().GetSite()); err->site = PcpSite(node.GetSite()); err->targetPath = payload.GetPrimPath(); err->assetPath = payload.GetAssetPath(); err->resolvedAssetPath = resolvedAssetPath; err->arcType = PcpArcTypePayload; indexer->RecordError(err); return; } // Create the layerStack for the payload. 
const ArResolverContext& payloadResolverContext = node.GetLayerStack()->GetIdentifier().pathResolverContext; PcpLayerStackIdentifier payloadLayerStackIdentifier( payloadLayer, SdfLayerHandle(), payloadResolverContext); PcpLayerStackRefPtr payloadLayerStack = indexer->inputs.cache->ComputeLayerStack( payloadLayerStackIdentifier, &indexer->outputs->allErrors); // Assume that we will insert the payload contents -- unless // we detect an error below. bool directNodeShouldContributeSpecs = true; // Determine the payload prim path. This is either the one explicitly // specified in the SdfPayload, or if that's empty, then the one // specified by DefaultPrim in the referenced layer. SdfPath defaultPayloadPath; if (payload.GetPrimPath().IsEmpty()) { // Check the layer for a defaultPrim, and use that if present. defaultPayloadPath = _GetDefaultPrimPath(payloadLayer); if (defaultPayloadPath.IsEmpty()) { PcpErrorUnresolvedPrimPathPtr err = PcpErrorUnresolvedPrimPath::New(); err->rootSite = PcpSite(node.GetRootNode().GetSite()); err->site = PcpSite(node.GetSite()); // Use a relative path with the field key for a hint. err->unresolvedPath = SdfPath::ReflexiveRelativePath(). AppendChild(SdfFieldKeys->DefaultPrim); err->arcType = PcpArcTypePayload; indexer->RecordError(err); // Set the payloadPath to the pseudo-root path. We'll still add // an arc to it as a special dependency placeholder, so we // correctly invalidate if/when the default target metadata gets // authored in the target layer. defaultPayloadPath = SdfPath::AbsoluteRootPath(); directNodeShouldContributeSpecs = false; } } // Final payload path to use. SdfPath const &payloadPath = defaultPayloadPath.IsEmpty() ? payload.GetPrimPath() : defaultPayloadPath; // Incorporate any layer offset from this site to the sublayer // where the payload was expressed. const SdfLayerOffset *maybeOffset = node.GetSite().layerStack-> GetLayerOffsetForLayer(payloadSpecLayer); const SdfLayerOffset offset = maybeOffset ? *maybeOffset : SdfLayerOffset(); PcpMapExpression mapExpr = _CreateMapExpressionForArc( /* source */ payloadPath, /* target */ node, indexer->inputs, offset); // Only need to include ancestral opinions if the prim path is // not a root prim. const bool includeAncestralOpinions = !payloadPath.IsRootPrimPath(); _AddArc( PcpArcTypePayload, /* parent = */ node, /* origin = */ node, PcpLayerStackSite( payloadLayerStack, payloadPath ), mapExpr, /* arcSiblingNum = */ 0, directNodeShouldContributeSpecs, includeAncestralOpinions, /* requirePrimAtTarget = */ true, /* skipDuplicateNodes = */ false, indexer ); } //////////////////////////////////////////////////////////////////////// // Prim Specs void _GatherNodesRecursively( const PcpNodeRef& node, std::vector<PcpNodeRef> *result) { result->push_back(node); // Strength-order (strong-to-weak) traversal. TF_FOR_ALL(child, Pcp_GetChildrenRange(node)) { _GatherNodesRecursively(*child, result); } } static void _EnforcePermissions( PcpPrimIndex *primIndex, PcpErrorVector *allErrors) { TRACE_FUNCTION(); PcpNodeRef rootNode = primIndex->GetRootNode(); TF_VERIFY(rootNode); // Gather all the nodes that may contribute prim specs. std::vector<PcpNodeRef> allNodes; _GatherNodesRecursively(rootNode, &allNodes); // Go backwards through the list of nodes, looking for prim specs. // If we find a node that isn't public, we stash it away, and then // issue an error for any stronger nodes, which violate permissions. 
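    //
    // For illustration (hypothetical setup): if a referenced layer marks
    // </Model> with permission = private, then prim specs for </Model>
    // authored in stronger sites (e.g. the referencing layer stack) are
    // marked restricted here and reported as PcpErrorPrimPermissionDenied.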
PcpNodeRef privateNode; TF_REVERSE_FOR_ALL(nodeIter, allNodes) { PcpNodeRef curNode = *nodeIter; if (!curNode.CanContributeSpecs()) { // XXX: Should we be setting permissionDenied? continue; } // If we previously found a private node, the current node is // not allowed to contribute specs. if (privateNode) { curNode.SetRestricted(true); // Check for prim specs in reverse strength order (weak-to-strong). // XXX: We should avoid collecting the prim specs here // and then again later when building the prim stack. // If we built the prim stack first we'd have to // discard stuff we discover to be private; that's // going to be rare so it's okay. if (curNode.HasSpecs()) { TF_REVERSE_FOR_ALL(layer, curNode.GetLayerStack()->GetLayers()) { if ((*layer)->HasSpec(curNode.GetPath())) { // The current node has a prim spec. Since this violates // permissions, we ignore this node's specs and report // an error. PcpErrorPrimPermissionDeniedPtr err = PcpErrorPrimPermissionDenied::New(); err->rootSite = PcpSite(curNode.GetRootNode().GetSite()); err->site = PcpSite(curNode.GetSite()); err->privateSite = PcpSite(privateNode.GetSite()); Pcp_PrimIndexer::RecordError(err, primIndex, allErrors); break; } } } } // If this node is private, any subsequent nodes will generate // errors (see above). if (!privateNode && curNode.GetPermission() != SdfPermissionPublic) { privateNode = curNode; } } } void Pcp_RescanForSpecs(PcpPrimIndex *index, bool usd, bool updateHasSpecs) { TfAutoMallocTag2 tag("Pcp", "Pcp_RescanForSpecs"); if (usd) { // USD does not retain prim stacks. // We do need to update the HasSpecs flag on nodes, however. if (updateHasSpecs) { TF_FOR_ALL(nodeIt, index->GetNodeRange()) { nodeIt->SetHasSpecs(PcpComposeSiteHasPrimSpecs(*nodeIt)); } } } else { Pcp_CompressedSdSiteVector primSites; TF_FOR_ALL(nodeIt, index->GetNodeRange()) { PcpNodeRef node = *nodeIt; bool nodeHasSpecs = false; if (!node.IsCulled() && node.CanContributeSpecs()) { // Add prim specs in strength order (strong-to-weak). const SdfLayerRefPtrVector& layers = node.GetLayerStack()->GetLayers(); const SdfPath& path = node.GetPath(); for (size_t i = 0, n = layers.size(); i != n; ++i) { if (layers[i]->HasSpec(path)) { nodeHasSpecs = true; primSites.push_back(node.GetCompressedSdSite(i)); } } } if (updateHasSpecs) { node.SetHasSpecs(nodeHasSpecs); } } index->_primStack.swap(primSites); } } //////////////////////////////////////////////////////////////////////// static std::pair< PcpNodeRef_PrivateChildrenConstIterator, PcpNodeRef_PrivateChildrenConstIterator> _GetDirectChildRange(const PcpNodeRef& node, PcpArcType arcType) { auto range = std::make_pair( PcpNodeRef_PrivateChildrenConstIterator(node), PcpNodeRef_PrivateChildrenConstIterator(node, /* end = */ true)); for (; range.first != range.second; ++range.first) { const PcpNodeRef& childNode = *range.first; if (childNode.GetArcType() == arcType && !childNode.IsDueToAncestor()) { break; } } auto end = range.second; for (range.second = range.first; range.second != end; ++range.second) { const PcpNodeRef& childNode = *range.second; if (childNode.GetArcType() != arcType || childNode.IsDueToAncestor()) { break; } } return range; } static bool _ComputedAssetPathWouldCreateDifferentNode( const PcpNodeRef& node, const std::string& newAssetPath) { // Get any file format arguments that were originally used to open the // layer so we can apply them to the new asset path. 
const SdfLayerRefPtr& nodeRootLayer = node.GetLayerStack()->GetIdentifier().rootLayer; std::string oldAssetPath; SdfLayer::FileFormatArguments oldArgs; if (!TF_VERIFY(SdfLayer::SplitIdentifier( nodeRootLayer->GetIdentifier(), &oldAssetPath, &oldArgs))) { return true; } // If no such layer is already open, this asset path must indicate a // layer that differs from the given node's root layer. const SdfLayerHandle newLayer = SdfLayer::Find(newAssetPath, oldArgs); if (!newLayer) { return true; } // Otherwise, if this layer differs from the given node's root layer, // this asset path would result in a different node during composition. return nodeRootLayer != newLayer; } bool Pcp_NeedToRecomputeDueToAssetPathChange(const PcpPrimIndex& index) { // Scan the index for any direct composition arcs that target another // layer. If any exist, try to determine if the asset paths that were // computed to load those layers would now target a different layer. // If so, this prim index needs to be recomputed to include that // new layer. for (const PcpNodeRef& node : index.GetNodeRange()) { if (!node.CanContributeSpecs()) { continue; } // Handle reference arcs. See _EvalNodeReferences. auto refNodeRange = _GetDirectChildRange(node, PcpArcTypeReference); if (refNodeRange.first != refNodeRange.second) { SdfReferenceVector refs; PcpSourceReferenceInfoVector sourceInfo; PcpComposeSiteReferences(node, &refs, &sourceInfo); TF_VERIFY(refs.size() == sourceInfo.size()); const size_t numReferenceArcs = std::distance(refNodeRange.first, refNodeRange.second) ; if (numReferenceArcs != refs.size()) { // This could happen if there was some scene description // change that added/removed references, but also if a // layer couldn't be opened when this index was computed. // We conservatively mark this index as needing recomputation // in the latter case to simplify things. return true; } for (size_t i = 0; i < refs.size(); ++i, ++refNodeRange.first) { // Skip internal references since there's no asset path // computation that occurs when processing them. if (refs[i].GetAssetPath().empty()) { continue; } // PcpComposeSiteReferences will have filled in each // SdfReference with the same asset path that would be used // during composition to open layers. const std::string& anchoredAssetPath = refs[i].GetAssetPath(); if (_ComputedAssetPathWouldCreateDifferentNode( *refNodeRange.first, anchoredAssetPath)) { return true; } } } // Handle payload arcs. See _EvalNodePayload. auto payloadNodeRange = _GetDirectChildRange(node, PcpArcTypePayload); if (payloadNodeRange.first != payloadNodeRange.second) { SdfPayload payload; SdfLayerHandle sourceLayer; PcpComposeSitePayload(node, &payload, &sourceLayer); if (!payload) { // This could happen if there was some scene description // change that removed the payload, which requires // recomputation. return true; } // Compute the same asset path that would be used during // composition to open layers via SdfFindOrOpenRelativeToLayer. const std::string& anchoredAssetPath = SdfComputeAssetPathRelativeToLayer( sourceLayer, payload.GetAssetPath()); if (_ComputedAssetPathWouldCreateDifferentNode( *payloadNodeRange.first, anchoredAssetPath)) { return true; } } } return false; } //////////////////////////////////////////////////////////////////////// // Index Construction static void _ConvertNodeForChild( PcpNodeRef node, const PcpPrimIndexInputs& inputs) { // Because the child site is at a deeper level of namespace than // the parent, there may no longer be any specs. 
if (node.HasSpecs()) { node.SetHasSpecs(PcpComposeSiteHasPrimSpecs(node)); } // Inert nodes are just placeholders, so we can skip computing these // bits of information since these nodes shouldn't have any opinions to // contribute. if (!node.IsInert() && node.HasSpecs()) { if (!inputs.usd) { // If the parent's permission is private, it will be inherited by // the child. Otherwise, we recompute it here. if (node.GetPermission() == SdfPermissionPublic) { node.SetPermission(PcpComposeSitePermission(node)); } // If the parent had symmetry, it will be inherited by the child. // Otherwise, we recompute it here. if (!node.HasSymmetry()) { node.SetHasSymmetry(PcpComposeSiteHasSymmetry(node)); } } } // Arbitrary-order traversal. TF_FOR_ALL(child, Pcp_GetChildrenRange(node)) { _ConvertNodeForChild(*child, inputs); } } // Returns true if the given node can be culled, false otherwise. // // In general, a node can be culled if no descendant nodes contribute // opinions, i.e., no specs are found in that subtree. There are some // exceptions that are documented in the function. static inline bool _NodeCanBeCulled( const PcpNodeRef& node, const PcpLayerStackSite& rootSite) { // Trivial case if this node has already been culled. // This could happen if this node was culled ancestrally. if (node.IsCulled()) { #ifdef PCP_DIAGNOSTIC_VALIDATION TF_VERIFY(!node.IsDirect()); #endif // PCP_DIAGNOSTIC_VALIDATION return true; } // The root node of a prim index is never culled. If needed, this // node will be culled when attached to another prim index in _AddArc. if (node.IsDirect()) { return false; } // We cannot cull any nodes that denote the addition of a new arc. // These nodes introduce dependencies and must be discoverable. // This usually isn't an issue -- arcs are generally added to sites // where prim specs exist, so even without this check these nodes // wouldn't be culled anyway. However, if an arc to a site with no prims // is added (e.g., a reference to a prim that doesn't exist), we need // to explicitly keep that around. if (node.GetDepthBelowIntroduction() == 0) { return false; } // XXX: The following are unfortunate cases where Pcp needs to keep // around nodes it would otherwise cull solely for consumers in Csd. // In theory, Csd would be able to generate this info by computing // unculled prim indices as needed, but in these cases, that // performance cost is too great. // Because of how Csd composes symmetry across namespace ancestors in a // layer stack before composing across arcs, Pcp needs to keep around // any node that directly OR ancestrally provides symmetry info. if (node.HasSymmetry()) { return false; } // CsdPrim::GetBases wants to return the path of all prims in the // composed scene from which this prim inherits opinions. To ensure // Csd has all the info it needs for this, Pcp has to avoid culling any // local inherit nodes in the root layer stack. To see why, consider: // // root layer stack ref layer stack // /GlobalClass <--+ (global inh) // /Model_1 (ref) ----> /Model ------+ // + SymArm <-+ // + LArm --+ (local inh) // // The prim index for /Model_1/LArm would normally have the inherit nodes // for /GlobalClass/LArm and /Model_1/SymArm culled, as there are no specs // for either in the root layer stack. The nature of global classes implies // that, if no specs for /GlobalClass exist in the root layer, there is // no /GlobalClass in the composed scene. So, we don't have to protect // global inherits from being culled. 
However, because of referencing, // the local inherit /Model_1/SymArm *does* exist in the composed scene. // So, we can't cull that node -- GetBases needs it. if (node.GetArcType() == PcpArcTypeLocalInherit && node.GetLayerStack() == rootSite.layerStack) { return false; } // If any subtree beneath this node wasn't culled, we can't cull // this node either. TF_FOR_ALL(it, Pcp_GetChildrenRange(node)) { const PcpNodeRef& child = *it; if (!child.IsCulled()) { return false; } } // If this node contributes any opinions, we can't cull it. if (node.HasSpecs() && node.CanContributeSpecs()) return false; return true; } // Helper that recursively culls subtrees at and under the given node. static void _CullSubtreesWithNoOpinions( PcpNodeRef node, const PcpLayerStackSite& rootSite) { // Recurse and attempt to cull all children first. Order doesn't matter. TF_FOR_ALL(child, Pcp_GetChildrenRange(node)) { // XXX: // We propagate and maintain duplicate node structure in the graph // for specializes arcs, so when we cull we need to ensure we do so // in both places consistently. For simplicity, we're going to skip // this for now and not cull beneath any specializes arcs. if (PcpIsSpecializesArc(child->GetArcType())) { continue; } _CullSubtreesWithNoOpinions(*child, rootSite); } // Now, mark this node as culled if we can. These nodes will be // removed from the prim index at the end of prim indexing. if (_NodeCanBeCulled(node, rootSite)) { node.SetCulled(true); } } // Helper that sets any nodes that cannot have overrides on name children // as inert. struct Pcp_DisableNonInstanceableNodesVisitor { bool Visit(PcpNodeRef node, bool nodeIsInstanceable) { if (!nodeIsInstanceable) { node.SetInert(true); return true; } return false; } }; const PcpPrimIndex & Pcp_ComputePrimIndexWithCompatibleInputs( PcpCache &cache, const SdfPath & path, const PcpPrimIndexInputs &inputs, PcpErrorVector *allErrors) { return cache._ComputePrimIndexWithCompatibleInputs(path, inputs, allErrors); } static void _BuildInitialPrimIndexFromAncestor( const PcpLayerStackSite &site, const PcpLayerStackSite &rootSite, int ancestorRecursionDepth, PcpPrimIndex_StackFrame *previousFrame, bool evaluateImpliedSpecializes, bool directNodeShouldContributeSpecs, const PcpPrimIndexInputs& inputs, PcpPrimIndexOutputs* outputs) { bool ancestorIsInstanceable = false; // If we're asking for a prim index in the cache's layer stack and // we're not excluding anything from the prim index then ask the // cache for the prim index. This will get it from the cache if // it's already there, and cache it and record dependencies if not. if (!previousFrame && evaluateImpliedSpecializes && inputs.cache->GetLayerStack() == site.layerStack && inputs.cache->GetPrimIndexInputs().IsEquivalentTo(inputs)) { // Get prim index through our cache. This ensures the lifetime // of layer stacks brought in by ancestors. const PcpPrimIndex& parentIndex = inputs.parentIndex ? *inputs.parentIndex : Pcp_ComputePrimIndexWithCompatibleInputs( *inputs.cache, site.path.GetParentPath(), inputs, &outputs->allErrors); // Clone the parent's graph.. outputs->primIndex.SetGraph( PcpPrimIndex_Graph::New(parentIndex.GetGraph())); ancestorIsInstanceable = parentIndex.IsInstanceable(); PCP_INDEXING_UPDATE( _GetOriginatingIndex(previousFrame, outputs), outputs->primIndex.GetRootNode(), "Retrieved index for <%s> from cache", site.path.GetParentPath().GetText()); } else { // First build the prim index for the given site's parent. 
// Note that variants and payloads are always evaluated to ensure // ancestral opinions are picked up. const PcpLayerStackSite parentSite(site.layerStack, site.path.GetParentPath()); Pcp_BuildPrimIndex(parentSite, parentSite, ancestorRecursionDepth+1, evaluateImpliedSpecializes, /* Always pick up ancestral opinions from variants evaluateVariants = */ true, /* directNodeShouldContributeSpecs = */ true, previousFrame, inputs, outputs); ancestorIsInstanceable = Pcp_PrimIndexIsInstanceable(outputs->primIndex); } // If the ancestor graph is an instance, mark every node that cannot // have opinions about name children as inert. This will cause any // opinions in restricted locations to be ignored. if (ancestorIsInstanceable) { Pcp_DisableNonInstanceableNodesVisitor visitor; Pcp_TraverseInstanceableStrongToWeak(outputs->primIndex, &visitor); } // Adjust the parent graph for this child. PcpPrimIndex_GraphPtr graph = outputs->primIndex.GetGraph(); graph->AppendChildNameToAllSites(site.path); // Reset the 'has payload' flag on this prim index. // This flag should only be set when a prim introduces a payload, // not when any of its parents introduced a payload. // // XXX: // Updating this flag may cause a new copy of the prim index // graph to be created, which is wasteful if this graph will // later set the flag back to its original value. It would be // better to defer setting this bit until we have the final // answer. graph->SetHasPayload(false); PcpNodeRef rootNode = outputs->primIndex.GetRootNode(); _ConvertNodeForChild(rootNode, inputs); if (inputs.cull) { _CullSubtreesWithNoOpinions(rootNode, rootSite); } // Force the root node to inert if the caller has specified that the // direct root node should not contribute specs. Note that the node // may already be set to inert when applying instancing restrictions // above. if (!directNodeShouldContributeSpecs) { rootNode.SetInert(true); } PCP_INDEXING_UPDATE( _GetOriginatingIndex(previousFrame, outputs), rootNode, "Adjusted ancestral index for %s", site.path.GetName().c_str()); } static void Pcp_BuildPrimIndex( const PcpLayerStackSite & site, const PcpLayerStackSite& rootSite, int ancestorRecursionDepth, bool evaluateImpliedSpecializes, bool evaluateVariants, bool directNodeShouldContributeSpecs, PcpPrimIndex_StackFrame *previousFrame, const PcpPrimIndexInputs& inputs, PcpPrimIndexOutputs* outputs ) { Pcp_PrimIndexingDebug debug(&outputs->primIndex, _GetOriginatingIndex(previousFrame, outputs), site); // We only index prims (including the pseudo-root) or variant-selection // paths, and only with absolute paths. if (!TF_VERIFY(site.path.IsAbsolutePath() && (site.path.IsAbsoluteRootOrPrimPath() || site.path.IsPrimVariantSelectionPath()), "%s", site.path.GetText())) { return; } // Establish initial PrimIndex contents. if (site.path.GetPathElementCount() == 0) { // Base case for the pseudo-root: just use the single site. outputs->primIndex.SetGraph(PcpPrimIndex_Graph::New(site, inputs.usd)); // Even though the pseudo root spec exists implicitly, don't // assume that here. PcpNodeRef node = outputs->primIndex.GetGraph()->GetRootNode(); node.SetHasSpecs(PcpComposeSiteHasPrimSpecs(node)); // Optimization: Since no composition arcs can live on the // pseudo-root, we can return early. return; } else if (site.path.IsPrimVariantSelectionPath()) { // For variant selection paths, unlike regular prim paths, we do not // recurse on the parent to obtain ancestral opinions. 
This is // because variant arcs are evaluated in the process of evaluating // the parent path site, which will already account for ancestral // opinions about the variant itself. outputs->primIndex.SetGraph(PcpPrimIndex_Graph::New(site, inputs.usd)); PcpNodeRef node = outputs->primIndex.GetGraph()->GetRootNode(); node.SetHasSpecs(PcpComposeSiteHasPrimSpecs(node)); node.SetInert(!directNodeShouldContributeSpecs); } else { // Start by building and cloning the namespace parent's index. // This is to account for ancestral opinions: references and // other arcs introduced by namespace ancestors that might // contribute opinions to this child. _BuildInitialPrimIndexFromAncestor( site, rootSite, ancestorRecursionDepth, previousFrame, evaluateImpliedSpecializes, directNodeShouldContributeSpecs, inputs, outputs); } // Initialize the task list. Pcp_PrimIndexer indexer(inputs, outputs, rootSite, ancestorRecursionDepth, previousFrame, evaluateImpliedSpecializes, evaluateVariants); indexer.AddTasksForNode( outputs->primIndex.GetRootNode() ); // Process task list. bool tasksAreLeft = true; while (tasksAreLeft) { Task task = indexer.PopTask(); switch (task.type) { case Task::Type::EvalNodeRelocations: _EvalNodeRelocations(&outputs->primIndex, task.node, &indexer); break; case Task::Type::EvalImpliedRelocations: _EvalImpliedRelocations(&outputs->primIndex, task.node, &indexer); break; case Task::Type::EvalNodeReferences: _EvalNodeReferences(&outputs->primIndex, task.node, &indexer); break; case Task::Type::EvalNodePayload: _EvalNodePayload(&outputs->primIndex, task.node, &indexer); break; case Task::Type::EvalNodeInherits: _EvalNodeInherits(&outputs->primIndex, task.node, &indexer); break; case Task::Type::EvalImpliedClasses: _EvalImpliedClasses(&outputs->primIndex, task.node, &indexer); break; case Task::Type::EvalNodeSpecializes: _EvalNodeSpecializes(&outputs->primIndex, task.node, &indexer); break; case Task::Type::EvalImpliedSpecializes: _EvalImpliedSpecializes(&outputs->primIndex, task.node, &indexer); break; case Task::Type::EvalNodeVariantSets: _EvalNodeVariantSets(&outputs->primIndex, task.node, &indexer); break; case Task::Type::EvalNodeVariantAuthored: _EvalNodeAuthoredVariant(&outputs->primIndex, task.node, &indexer, *task.vsetName, task.vsetNum); break; case Task::Type::EvalNodeVariantFallback: _EvalNodeFallbackVariant(&outputs->primIndex, task.node, &indexer, *task.vsetName, task.vsetNum); break; case Task::Type::EvalNodeVariantNoneFound: // No-op. These tasks are just markers for RetryVariantTasks(). break; case Task::Type::None: tasksAreLeft = false; break; } } } void PcpComputePrimIndex( const SdfPath& primPath, const PcpLayerStackPtr& layerStack, const PcpPrimIndexInputs& inputs, PcpPrimIndexOutputs* outputs, ArResolver* resolver) { TfAutoMallocTag2 tag("Pcp", "PcpComputePrimIndex"); TRACE_FUNCTION(); if (!(primPath.IsAbsolutePath() && (primPath.IsAbsoluteRootOrPrimPath() || primPath.IsPrimVariantSelectionPath()))) { TF_CODING_ERROR("Path <%s> must be an absolute path to a prim, " "a prim variant-selection, or the pseudo-root.", primPath.GetText()); return; } ArResolverContextBinder binder( resolver ? 
resolver : &ArGetResolver(), layerStack->GetIdentifier().pathResolverContext); const PcpLayerStackSite site(layerStack, primPath); Pcp_BuildPrimIndex(site, site, /* ancestorRecursionDepth = */ 0, /* evaluateImpliedSpecializes = */ true, /* evaluateVariants = */ true, /* directNodeShouldContributeSpecs = */ true, /* previousFrame = */ NULL, inputs, outputs); // Tag each node that's not allowed to contribute prim specs due to // permissions. Note that we do this as a post-processing pass here, // but not in Pcp_BuildPrimIndex(), which gets called recursively above. // We don't actually need to *enforce* permissions until after the node // graph has been built. While it's being built, we only need to make // sure each node's permission is set correctly, which is done in // _AddArc() and _ConvertNodeForChild(). So we can defer calling // _EnforcePermissions() until the very end, which saves us from // doing some redundant work. if (!inputs.usd) { _EnforcePermissions(&outputs->primIndex, &outputs->allErrors); } // Determine whether this prim index is instanceable and store that // information in the prim index. This requires composed metadata // values, so we do this here after the prim index is fully composed // instead of in Pcp_BuildPrimIndex. outputs->primIndex.GetGraph()->SetIsInstanceable( Pcp_PrimIndexIsInstanceable(outputs->primIndex)); // We're done modifying the graph, so finalize it. outputs->primIndex.GetGraph()->Finalize(); // Collect the prim stack and the node for each prim in the stack. // Also collect all prim specs found in any node -- this is different // from the prim stack when nodes don't contribute prim specs. // // Note that we *must* do this after the graph is finalized, as // finalization will cause outstanding PcpNodeRefs to be invalidated. Pcp_RescanForSpecs(&outputs->primIndex, inputs.usd, /* updateHasSpecs */false ); } //////////////////////////////////////////////////////////////////////// // Name children / property names // Walk the graph, strong-to-weak, composing prim child names. // Account for spec children in each layer, list-editing statements, // and relocations. static void _ComposePrimChildNamesAtNode( const PcpPrimIndex& primIndex, const PcpNodeRef& node, bool usd, TfTokenVector *nameOrder, PcpTokenSet *nameSet, PcpTokenSet *prohibitedNameSet) { if (!usd) { // Apply relocations from just this layer stack. // Classify them into three groups: names to add, remove, or replace. std::set<TfToken> namesToAdd, namesToRemove; std::map<TfToken, TfToken> namesToReplace; // Check for relocations with a child as source. // See _EvalNodeRelocations for why we use the incremental relocates. const SdfRelocatesMap & relocatesSourceToTarget = node.GetLayerStack()->GetIncrementalRelocatesSourceToTarget(); for (SdfRelocatesMap::const_iterator i = relocatesSourceToTarget.lower_bound(node.GetPath()); i != relocatesSourceToTarget.end() && i->first.HasPrefix(node.GetPath()); ++i) { const SdfPath & oldPath = i->first; const SdfPath & newPath = i->second; if (oldPath.GetParentPath() == node.GetPath()) { if (newPath.GetParentPath() == node.GetPath()) { // Target is the same parent, so this is a rename. namesToReplace[oldPath.GetNameToken()] = newPath.GetNameToken(); } else { // Target is not the same parent, so this is remove. namesToRemove.insert(oldPath.GetNameToken()); } // The source name is now prohibited. prohibitedNameSet->insert(oldPath.GetNameToken()); } } // Check for relocations with a child as target. 
// See _EvalNodeRelocations for why we use the incremental relocates. const SdfRelocatesMap & relocatesTargetToSource = node.GetLayerStack()->GetIncrementalRelocatesTargetToSource(); for (SdfRelocatesMap::const_iterator i = relocatesTargetToSource.lower_bound(node.GetPath()); i != relocatesTargetToSource.end() && i->first.HasPrefix(node.GetPath()); ++i) { const SdfPath & newPath = i->first; const SdfPath & oldPath = i->second; if (newPath.GetParentPath() == node.GetPath()) { if (oldPath.GetParentPath() == node.GetPath()) { // Source is the same parent, so this is a rename. // We will have already handled this above. } else { // Source is not the same parent, so this is an add. if (nameSet->find(newPath.GetNameToken()) == nameSet->end()) { namesToAdd.insert(newPath.GetNameToken()); } } } } // Apply the names to replace or remove. if (!namesToReplace.empty() || !namesToRemove.empty()) { // Do one pass, building a list of names to retain. TfTokenVector namesToRetain; namesToRetain.reserve( nameOrder->size() ); TF_FOR_ALL(name, *nameOrder) { std::map<TfToken, TfToken>::const_iterator i = namesToReplace.find(*name); if (i != namesToReplace.end()) { // This name was replaced. const TfToken & newName = i->second; nameSet->erase(*name); // Check if newName is already in the nameSet before adding // it to the new name order. newName may already be in // the nameSet (and nameOrder) if it was contributed by // a child spec from a weaker node. // // This can happen when a relocation renames X to Y and // there is also a child spec for Y across a reference. // The intended behavior of the relocation arc is that // that "shadow" child Y is silently ignored. PcpPrimIndex // already ignores it when composing Y, but we also need // to check for it here, when composing the child names // for Y's parent. See TrickyMultipleRelocations for a // test that exercises this. // // TODO: Although silently ignoring the duplicate // name is consistent with Csd's behavior, which we want // to preserve for the initial Pcp work, we think this // should perhaps be reported as a composition error, // since the relocation arc is introducing a name collision. // if (nameSet->insert(newName).second) { // Retain the new name in the same position as the // old name. namesToRetain.push_back(newName); } } else if (namesToRemove.find(*name) == namesToRemove.end()) { // Retain this name as-is. namesToRetain.push_back(*name); } else { // Do not retain this name. nameSet->erase(*name); } } nameOrder->swap(namesToRetain); } // Append children relocated to under this prim in lexicographic order. // // Semantics note: We use alphabetical order as a default ordering // because there is no required statement of ordering among prims // relocated here. (We will, however, subsequently apply // re-ordering restatements in this site's layer stack.) // nameOrder->insert(nameOrder->end(), namesToAdd.begin(), namesToAdd.end()); nameSet->insert(namesToAdd.begin(), namesToAdd.end()); } // Compose the site's local names over the current result. if (node.CanContributeSpecs()) { PcpComposeSiteChildNames( node.GetLayerStack()->GetLayers(), node.GetPath(), SdfChildrenKeys->PrimChildren, nameOrder, nameSet, &SdfFieldKeys->PrimOrder); } // Post-conditions, for debugging. // Disabled by default to avoid extra overhead. 
#ifdef PCP_DIAGNOSTIC_VALIDATION TF_VERIFY(nameSet->size() == nameOrder->size()); TF_VERIFY(*nameSet == PcpTokenSet(nameOrder->begin(), nameOrder->end())); #endif // PCP_DIAGNOSTIC_VALIDATION } static void _ComposePrimChildNames( const PcpPrimIndex& primIndex, const PcpNodeRef& node, bool usd, TfTokenVector *nameOrder, PcpTokenSet *nameSet, PcpTokenSet *prohibitedNameSet ) { if (node.IsCulled()) { return; } // Reverse strength-order traversal (weak-to-strong). TF_REVERSE_FOR_ALL(child, Pcp_GetChildrenRange(node)) { _ComposePrimChildNames(primIndex, *child, usd, nameOrder, nameSet, prohibitedNameSet); } _ComposePrimChildNamesAtNode( primIndex, node, usd, nameOrder, nameSet, prohibitedNameSet); } // Helper struct for _ComposePrimChildNamesForInstance, see comments // below. struct Pcp_PrimChildNameVisitor { Pcp_PrimChildNameVisitor( const PcpPrimIndex& primIndex, bool usd, TfTokenVector *nameOrder, PcpTokenSet *nameSet, PcpTokenSet *prohibitedNameSet ) : _primIndex(primIndex) , _usd(usd) , _nameOrder(nameOrder) , _nameSet(nameSet) , _prohibitedNameSet(prohibitedNameSet) { } void Visit(PcpNodeRef node, bool nodeIsInstanceable) { if (nodeIsInstanceable) { _ComposePrimChildNamesAtNode( _primIndex, node, _usd, _nameOrder, _nameSet, _prohibitedNameSet); } } private: const PcpPrimIndex& _primIndex; bool _usd; TfTokenVector* _nameOrder; PcpTokenSet* _nameSet; PcpTokenSet* _prohibitedNameSet; }; static void _ComposePrimChildNamesForInstance( const PcpPrimIndex& primIndex, bool usd, TfTokenVector *nameOrder, PcpTokenSet *nameSet, PcpTokenSet *prohibitedNameSet ) { Pcp_PrimChildNameVisitor visitor( primIndex, usd, nameOrder, nameSet, prohibitedNameSet); Pcp_TraverseInstanceableWeakToStrong(primIndex, &visitor); } static void _ComposePrimPropertyNames( const PcpPrimIndex& primIndex, const PcpNodeRef& node, bool isUsd, TfTokenVector *nameOrder, PcpTokenSet *nameSet ) { if (node.IsCulled()) { return; } // Reverse strength-order traversal (weak-to-strong). TF_REVERSE_FOR_ALL(child, Pcp_GetChildrenRange(node)) { _ComposePrimPropertyNames( primIndex, *child, isUsd, nameOrder, nameSet ); } // Compose the site's local names over the current result. if (node.CanContributeSpecs()) { PcpComposeSiteChildNames( node.GetLayerStack()->GetLayers(), node.GetPath(), SdfChildrenKeys->PropertyChildren, nameOrder, nameSet, isUsd ? nullptr : &SdfFieldKeys->PropertyOrder); } } void PcpPrimIndex::ComputePrimChildNames( TfTokenVector *nameOrder, PcpTokenSet *prohibitedNameSet ) const { if (!_graph) { return; } TRACE_FUNCTION(); // Provide a set with any existing nameOrder contents. PcpTokenSet nameSet(nameOrder->begin(), nameOrder->end()); // Walk the graph to compose prim child names. if (IsInstanceable()) { _ComposePrimChildNamesForInstance( *this, IsUsd(), nameOrder, &nameSet, prohibitedNameSet); } else { _ComposePrimChildNames( *this, GetRootNode(), IsUsd(), nameOrder, &nameSet, prohibitedNameSet); } // Remove prohibited names from the composed prim child names. if (!prohibitedNameSet->empty()) { nameOrder->erase( std::remove_if(nameOrder->begin(), nameOrder->end(), [prohibitedNameSet](const TfToken& name) { return prohibitedNameSet->find(name) != prohibitedNameSet->end(); }), nameOrder->end()); } } void PcpPrimIndex::ComputePrimPropertyNames( TfTokenVector *nameOrder ) const { if (!_graph) { return; } TRACE_FUNCTION(); // Provide a set with any existing nameOrder contents. PcpTokenSet nameSet; nameSet.insert_unique(nameOrder->begin(), nameOrder->end()); // Walk the graph to compose prim child names. 
_ComposePrimPropertyNames( *this, GetRootNode(), IsUsd(), nameOrder, &nameSet); } PXR_NAMESPACE_CLOSE_SCOPE
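The reverse strength-order (weak-to-strong) traversal above is the core composition pattern: weaker nodes contribute names first, and stronger nodes layer their opinions on top. The following minimal standalone sketch shows just that accumulation step; it is illustrative only — plain std::string/std::set stand in for TfToken/PcpTokenSet, and the real Pcp code additionally applies list-editing statements and relocates on top of this.

#include <set>
#include <string>
#include <vector>

// Hypothetical helper mirroring _ComposePrimChildNames' accumulation step:
// per-node name lists are given in weak-to-strong order; the set de-duplicates,
// so a name keeps the position where it was first (most weakly) contributed.
std::vector<std::string>
ComposeChildNamesWeakToStrong(
    const std::vector<std::vector<std::string>>& perNodeNames)
{
    std::vector<std::string> nameOrder;
    std::set<std::string> nameSet;
    for (const auto& names : perNodeNames) {
        for (const auto& name : names) {
            if (nameSet.insert(name).second) {
                nameOrder.push_back(name);
            }
        }
    }
    return nameOrder;
}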
38.328047
81
0.616787
navefx
4fe9c225e69fafe57d715f9a24c4ab3a36656d64
18,866
hpp
C++
src/Interface/hiopInterface.hpp
pelesh/hiop
26bf95fc380dfee6d251d6c870cf1b4c76841828
[ "BSD-3-Clause" ]
null
null
null
src/Interface/hiopInterface.hpp
pelesh/hiop
26bf95fc380dfee6d251d6c870cf1b4c76841828
[ "BSD-3-Clause" ]
null
null
null
src/Interface/hiopInterface.hpp
pelesh/hiop
26bf95fc380dfee6d251d6c870cf1b4c76841828
[ "BSD-3-Clause" ]
null
null
null
// Copyright (c) 2017, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory (LLNL). // Written by Cosmin G. Petra, petra1@llnl.gov. // LLNL-CODE-742473. All rights reserved. // // This file is part of HiOp. For details, see https://github.com/LLNL/hiop. HiOp // is released under the BSD 3-clause license (https://opensource.org/licenses/BSD-3-Clause). // Please also read “Additional BSD Notice” below. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // i. Redistributions of source code must retain the above copyright notice, this list // of conditions and the disclaimer below. // ii. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the disclaimer (as noted below) in the documentation and/or // other materials provided with the distribution. // iii. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to // endorse or promote products derived from this software without specific prior written // permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS // OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED // AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Additional BSD Notice // 1. This notice is required to be provided under our contract with the U.S. Department // of Energy (DOE). This work was produced at Lawrence Livermore National Laboratory under // Contract No. DE-AC52-07NA27344 with the DOE. // 2. Neither the United States Government nor Lawrence Livermore National Security, LLC // nor any of their employees, makes any warranty, express or implied, or assumes any // liability or responsibility for the accuracy, completeness, or usefulness of any // information, apparatus, product, or process disclosed, or represents that its use would // not infringe privately-owned rights. // 3. Also, reference herein to any specific commercial products, process, or services by // trade name, trademark, manufacturer or otherwise does not necessarily constitute or // imply its endorsement, recommendation, or favoring by the United States Government or // Lawrence Livermore National Security, LLC. The views and opinions of authors expressed // herein do not necessarily state or reflect those of the United States Government or // Lawrence Livermore National Security, LLC, and shall not be used for advertising or // product endorsement purposes. #ifndef HIOP_INTERFACE_BASE #define HIOP_INTERFACE_BASE #include "hiop_defs.hpp" #ifdef HIOP_USE_MPI #include "mpi.h" #else #ifndef MPI_Comm #define MPI_Comm int #endif #ifndef MPI_COMM_WORLD #define MPI_COMM_WORLD 0 #endif #endif namespace hiop { /** Solver status codes. 
*/
enum hiopSolveStatus {
  //(partial) success
  Solve_Success=0,
  Solve_Success_RelTol=1,
  Solve_Acceptable_Level=2,
  Infeasible_Problem=5,
  Iterates_Diverging=6,
  Feasible_Not_Optimal = 7,
  //solver stopped based on user-defined criteria that are not related to optimality
  Max_Iter_Exceeded=10,
  Max_CpuTime_Exceeded=11,
  User_Stopped=12,

  //NLP algorithm/solver reports issues in solving the problem and stops without being certain
  //that it solved the problem to optimality or that the problem is infeasible.
  //Feasible_Point_Found,
  NlpAlgorithm_failure=-1,
  Diverging_Iterates=-2,
  Search_Dir_Too_Small=-3,
  Steplength_Too_Small=-4,
  Err_Step_Computation=-5,

  //errors related to user-provided data (e.g., inconsistent problem specification, 'nans' in the
  //function/sensitivity evaluations, invalid options)
  Invalid_Problem_Definition=-11,
  Invalid_Parallelization=-12,
  Invalid_UserOption=-13,
  Invalid_Number=-14,
  Error_In_User_Function=-15,

  //ungraceful errors and returns
  Exception_Unrecoverable=-100,
  Memory_Alloc_Problem=-101,
  SolverInternal_Error=-199,

  //unknown NLP solver errors or return codes
  UnknownNLPSolveStatus=-1000,
  SolveInitializationError=-1001,

  //intermediary statuses for the solver
  NlpSolve_IncompleteInit=-10001,
  NlpSolve_SolveNotCalled=-10002,
  NlpSolve_Pending=-10003
};

/** The base interface class */
class hiopInterfaceBase
{
  /** Base class for the solver's interface that has no assumptions about how the
   *  matrices are stored. The vectors are dense and distributed row-wise.
   *  The data distribution is decided by the calling code (that implements
   *  this interface) and specified to the optimization via 'get_vecdistrib_info'
   *
   *  Two possible implementations are for sparse NLPs and NLPs with a small
   *  number of global constraints.
   */
public:
  enum NonlinearityType{ hiopLinear=0, hiopQuadratic, hiopNonlinear};
public:
  hiopInterfaceBase() {};
  virtual ~hiopInterfaceBase() {};

  /** problem dimensions: n number of variables, m number of constraints */
  virtual bool get_prob_sizes(long long& n, long long& m)=0;
  /** bounds on the variables
   *  (xlow<=-1e20 means no lower bound, xupp>=1e20 means no upper bound) */
  virtual bool get_vars_info(const long long& n, double *xlow, double* xupp, NonlinearityType* type)=0;
  /** bounds on the constraints
   *  (clow<=-1e20 means no lower bound, cupp>=1e20 means no upper bound) */
  virtual bool get_cons_info(const long long& m, double* clow, double* cupp, NonlinearityType* type)=0;

  /** Objective function evaluation
   *  When MPI enabled, each rank returns the obj. value. Also, x points to the local entries and
   *  the function is responsible for knowing the local buffer size.
   */
  virtual bool eval_f(const long long& n, const double* x, bool new_x, double& obj_value)=0;
  /** Gradient of objective.
   *  When MPI enabled, each rank works only with local buffers x and gradf.
   */
  virtual bool eval_grad_f(const long long& n, const double* x, bool new_x, double* gradf)=0;

  /** Evaluates a subset of the constraints cons(x) (where clow<=cons(x)<=cupp). The subset is of size
   *  'num_cons' and is described by indexes in the 'idx_cons' array. The method will be called at each
   *  iteration separately for the equality constraints subset and for the inequality constraints subset.
   *  This is done for performance considerations, to avoid auxiliary/temporary storage and copying.
   *
   *  Parameters:
   *   - n, m: the global number of variables and constraints
   *   - num_cons: the number of constraints/size of the subset to be evaluated
   *   - idx_cons: indexes in {1,2,...,m} of the constraints to be evaluated
   *   - x: the point where the constraints are to be evaluated
   *   - new_x: whether x has been changed from the previous call to f, grad_f, or Jac
   *   - cons: array of size num_cons containing the value of the constraints indicated by idx_cons
   *
   *  When MPI enabled, every rank populates 'cons' since the constraints are not distributed.
   */
  virtual bool eval_cons(const long long& n, const long long& m,
                         const long long& num_cons, const long long* idx_cons,
                         const double* x, bool new_x, double* cons)=0;

  /** Evaluates the constraints cons(x), both equalities and inequalities in one call.
   *
   *  Parameters:
   *   - n, m: the global number of variables and constraints
   *   - x: the point where the constraints are to be evaluated
   *   - new_x: whether x has been changed from the previous call to f, grad_f, or Jac
   *   - cons: array of size m containing the values of the constraints
   *
   *  When MPI enabled, every rank populates 'cons' since the constraints are not distributed.
   *
   *  HiOp will first call 'eval_cons' above (twice). If the implementer/user wants the functionality
   *  of the one-call 'eval_cons' below, he should return false from 'eval_cons' above; then HiOp will
   *  call the method below.
   */
  virtual bool eval_cons(const long long& n, const long long& m,
                         const double* x, bool new_x, double* cons) { return false; }

  /** pass the communicator, defaults to MPI_COMM_WORLD (dummy for non-MPI builds) */
  virtual bool get_MPI_comm(MPI_Comm& comm_out) { comm_out=MPI_COMM_WORLD; return true;}

  /** column partitioning specification for distributed memory vectors
   *  Process P owns cols[P], cols[P]+1, ..., cols[P+1]-1, P={0,1,...,NumRanks-1}.
   *  Example: for a vector x of 6 elements on 3 ranks, the col partitioning is cols=[0,2,4,6].
   *  The caller manages memory associated with 'cols', an array of size NumRanks+1
   */
  virtual bool get_vecdistrib_info(long long global_n, long long* cols) {
    return false; //defaults to serial
  }

  /* Method providing a primal starting point. This point is subject to internal adjustments in HiOp.
   * The method should return true (and populate x0) or return false, in which case HiOp will set
   * x0 to all zeros (still subject to internal adjustment).
   *
   * TODO: provide API for a full, primal-dual restart.
   */
  virtual bool get_starting_point(const long long& n, double* x0) { return false; }

  /** callback for the optimal solution.
   *  Note that:
   *   i. x, z_L, z_U contain only the array slice that is local to the calling process
   *   ii. g, lambda are replicated across all processes, which means they can be used as-is, without reducing them.
   *   iii. all other scalar quantities are replicated across all processes, which means they can be used as-is,
   *   without reducing them.
   */
  virtual void solution_callback(hiopSolveStatus status,
                                 int n, const double* x, const double* z_L, const double* z_U,
                                 int m, const double* g, const double* lambda,
                                 double obj_value) { };

  /** Callback for the iteration: at the end of each iteration. This is NOT called during the line-searches.
   *  Note: all the notes for @solution_callback apply.
   */
  virtual bool iterate_callback(int iter, double obj_value,
                                int n, const double* x, const double* z_L, const double* z_U,
                                int m, const double* g, const double* lambda,
                                double inf_pr, double inf_du, double mu,
                                double alpha_du, double alpha_pr, int ls_trials) {return true;}
private:
  hiopInterfaceBase(const hiopInterfaceBase& ) {};
  void operator=(const hiopInterfaceBase&) {};
};

/** Specialized interface for NLPs with 'global' but few constraints. */
class hiopInterfaceDenseConstraints : public hiopInterfaceBase
{
public:
  hiopInterfaceDenseConstraints() {};
  virtual ~hiopInterfaceDenseConstraints() {};

  /** Evaluates the Jacobian of the subset of constraints indicated by idx_cons and of size num_cons.
   *  Example: Assuming idx_cons[k]=i, which means that the gradient of the (i+1)th constraint is
   *  to be evaluated, one needs to do Jac[k][0]=d/dx_0 con_i(x), Jac[k][1]=d/dx_1 con_i(x), ...
   *  When MPI enabled, each rank computes only the local columns of the Jacobian, that is the partials
   *  with respect to local variables.
   *
   *  Parameters: see eval_cons
   */
  virtual bool eval_Jac_cons(const long long& n, const long long& m,
                             const long long& num_cons, const long long* idx_cons,
                             const double* x, bool new_x, double** Jac) = 0;

  /** Evaluates the Jacobian of equality and inequality constraints in one call.
   *
   *  The main difference from the above 'eval_Jac_cons' is that the implementer/user of this
   *  method does not have to split the constraints into equalities and inequalities; instead,
   *  HiOp does this internally.
   *
   *  TODO: build an example (new one-call Nlp formulation derived from ex2) to illustrate this
   *  feature and to test HiOp's internal implementation of eq.-ineq. splitting.
   */
  virtual bool eval_Jac_cons(const long long& n, const long long& m,
                             const double* x, bool new_x, double** Jac) { return false; }
};

/** Specialized interface for NLPs having mixed DENSE and sparse (MDS) blocks in the
 *  Jacobian and Hessian.
 *
 *  More specifically, this interface is for specifying an optimization problem in x
 *  split as (xs,xd), the rule of thumb being that xs have sparse derivatives and
 *  xd have dense derivatives
 *
 *  min f(x) s.t. g(x) <= or = 0, lb<=x<=ub
 *
 *  such that
 *   - Jacobian w.r.t. xs and LagrHessian w.r.t. (xs,xs) are sparse
 *   - Jacobian w.r.t. xd and LagrHessian w.r.t. (xd,xd) are dense
 *   - LagrHessian w.r.t (xs,xd) is zero (later this assumption will be relaxed)
 *
 *  Notes
 *  1) HiOp expects the sparse variables first and then the dense variables. In many cases,
 *  the implementer has to (inconveniently) keep a map between its internal variable
 *  indexes and the indexes HiOp expects
 *  2) this interface is 'local' in the sense that data is not assumed to be
 *  distributed across MPI ranks ('get_vecdistrib_info' should return 'false')
 */
class hiopInterfaceMDS : public hiopInterfaceBase
{
public:
  hiopInterfaceMDS() {};
  virtual ~hiopInterfaceMDS() {};

  virtual bool get_sparse_dense_blocks_info(int& nx_sparse, int& nx_dense,
                                            int& nnz_sparse_Jaceq, int& nnz_sparse_Jacineq,
                                            int& nnz_sparse_Hess_Lagr_SS, int& nnz_sparse_Hess_Lagr_SD) = 0;

  /** Evaluates the Jacobian of constraints split in the sparse (triplet format) and
   *  dense matrices (rows storage)
   *
   *  This method is called twice per Jacobian evaluation, once for equalities and once for
   *  inequalities (see 'eval_cons' for more information). It is advantageous to provide
   *  this method when the underlying NLP's constraints come naturally split in equalities
   *  and inequalities. When it is not convenient to do so, use 'eval_Jac_cons' below.
   *
   *  Parameters:
   *   - first six: see eval_cons (in parent class)
   *   - nnzJacS, iJacS, jJacS, MJacS: number of nonzeros, (i,j) indexes, and values of
   *     the sparse Jacobian
   *   - JacD: dense Jacobian as a contiguous array storing the matrix by rows; array is
   *     "primed" to support double indexing JacD[i][j]
   *
   *  Notes for the implementer of this method:
   *  1) the 'JacD' parameter will always be non-null
   *  2) When 'iJacS' and 'jJacS' are non-null, the implementer should provide the (i,j)
   *  indexes.
   *  3) When 'MJacS' is non-null, the implementer should provide the values corresponding to
   *  entries specified by 'iJacS' and 'jJacS'
   *  4) 'iJacS' and 'jJacS' are both either non-null or null during a call.
   *  5) Both 'iJacS'/'jJacS' and 'MJacS' can be non-null during the same call or only one of them
   *  non-null; but they will not be both null.
   */
  virtual bool eval_Jac_cons(const long long& n, const long long& m,
                             const long long& num_cons, const long long* idx_cons,
                             const double* x, bool new_x,
                             const long long& nsparse, const long long& ndense,
                             const int& nnzJacS, int* iJacS, int* jJacS, double* MJacS,
                             double** JacD) = 0;

  /** Evaluates the Jacobian of equality and inequality constraints in one call. This Jacobian is
   *  mixed dense-sparse (MDS), which means it is structurally split in the sparse (triplet format) and
   *  dense matrices (rows storage)
   *
   *  The main difference from the above 'eval_Jac_cons' is that the implementer/user of this
   *  method does not have to split the constraints into equalities and inequalities; instead,
   *  HiOp does this internally.
   *
   *  Parameters:
   *   - first four: number of variables, number of constraints, (primal) variables at which the
   *     Jacobian should be evaluated, and boolean flag indicating whether the variables 'x' have
   *     changed since a previous call to any of the function and derivative evaluations.
   *   - nsparse and ndense: number of sparse and dense variables, respectively; must add
   *     up to 'n'
   *   - nnzJacS, iJacS, jJacS, MJacS: number of nonzeros, (i,j) indexes, and values of
   *     the sparse Jacobian block; indexes are within the sparse Jacobian block (not within
   *     the entire Jacobian)
   *   - JacD: dense Jacobian block as a contiguous array storing the matrix by rows; array is
   *     "primed" to support double indexing JacD[i][j]
   *
   *  Notes for the implementer of this method:
   *  1) the 'JacD' parameter will always be non-null
   *  2) When 'iJacS' and 'jJacS' are non-null, the implementer should provide the (i,j)
   *  indexes.
   *  3) When 'MJacS' is non-null, the implementer should provide the values corresponding to
   *  entries specified by 'iJacS' and 'jJacS'
   *  4) 'iJacS' and 'jJacS' are both either non-null or null during a call.
   *  5) Both 'iJacS'/'jJacS' and 'MJacS' can be non-null during the same call or only one of them
   *  non-null; but they will not be both null.
   *
   *  HiOp will call this method whenever the implementer/user returns false from the 'eval_Jac_cons'
   *  (which is called for equalities and inequalities separately) above.
   */
  virtual bool eval_Jac_cons(const long long& n, const long long& m,
                             const double* x, bool new_x,
                             const long long& nsparse, const long long& ndense,
                             const int& nnzJacS, int* iJacS, int* jJacS, double* MJacS,
                             double** JacD){ return false; }

  /** Evaluates the Hessian of the Lagrangian function in 3 structural blocks
   *   - HSS is the Hessian w.r.t. (xs,xs)
   *   - HDD is the Hessian w.r.t. (xd,xd)
   *   - HSD is the Hessian w.r.t. (xs,xd)
   *
   *  Note: HSD is for now assumed to be zero. The implementer should return nnzHSD=0
   *  during the first call to 'eval_Hess_Lagr'.
   *  On subsequent calls, HiOp will pass the
   *  triplet arrays for HSD set to NULL and the implementer (obviously) should not use them.
   *
   *  Notes
   *  1)-5) from 'eval_Jac_cons' apply to the xxxHSS and HDD arrays
   *  6) The order of multipliers is: lambda=[lambda_eq, lambda_ineq]
   */
  virtual bool eval_Hess_Lagr(const long long& n, const long long& m,
                              const double* x, bool new_x, const double& obj_factor,
                              const double* lambda, bool new_lambda,
                              const long long& nsparse, const long long& ndense,
                              const int& nnzHSS, int* iHSS, int* jHSS, double* MHSS,
                              double** HDD,
                              int& nnzHSD, int* iHSD, int* jHSD, double* MHSD) = 0;
};

} //end of namespace
#endif
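To make the dense-constraints interface concrete, here is a minimal serial sketch of an implementer for a toy problem — min sum_i x_i^2 subject to sum_i x_i = 1 and 0 <= x_i <= 10. The class name DenseConsToyNlp and the problem itself are illustrative assumptions, not part of HiOp; only the overridden signatures come from the header above.

#include "hiopInterface.hpp"

// Hypothetical toy NLP: min sum_i x_i^2  s.t.  sum_i x_i = 1,  0 <= x_i <= 10.
// Serial only (get_vecdistrib_info is left at its default 'false').
class DenseConsToyNlp : public hiop::hiopInterfaceDenseConstraints
{
public:
  bool get_prob_sizes(long long& n, long long& m) { n = 4; m = 1; return true; }
  bool get_vars_info(const long long& n, double* xlow, double* xupp, NonlinearityType* type)
  {
    for (long long i = 0; i < n; ++i) { xlow[i] = 0.; xupp[i] = 10.; type[i] = hiopNonlinear; }
    return true;
  }
  bool get_cons_info(const long long& m, double* clow, double* cupp, NonlinearityType* type)
  {
    clow[0] = cupp[0] = 1.; // equality constraint: sum_i x_i = 1
    type[0] = hiopLinear;
    return true;
  }
  bool eval_f(const long long& n, const double* x, bool new_x, double& obj_value)
  {
    obj_value = 0.;
    for (long long i = 0; i < n; ++i) obj_value += x[i] * x[i];
    return true;
  }
  bool eval_grad_f(const long long& n, const double* x, bool new_x, double* gradf)
  {
    for (long long i = 0; i < n; ++i) gradf[i] = 2. * x[i];
    return true;
  }
  bool eval_cons(const long long& n, const long long& m, const long long& num_cons,
                 const long long* idx_cons, const double* x, bool new_x, double* cons)
  {
    // Called separately for equalities and inequalities; this problem has a
    // single equality, so idx_cons can only ever select that one constraint.
    for (long long k = 0; k < num_cons; ++k) {
      cons[k] = 0.;
      for (long long i = 0; i < n; ++i) cons[k] += x[i];
    }
    return true;
  }
  bool eval_Jac_cons(const long long& n, const long long& m, const long long& num_cons,
                     const long long* idx_cons, const double* x, bool new_x, double** Jac)
  {
    // d/dx_i (sum_j x_j) = 1 for every variable.
    for (long long k = 0; k < num_cons; ++k)
      for (long long i = 0; i < n; ++i) Jac[k][i] = 1.;
    return true;
  }
};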
45.902676
115
0.716262
pelesh
4fecebbbb9b3f3cab920782731e176f84c039690
2,366
cpp
C++
ex2/matmultran/src/main.cpp
akowalew/rim-lab
4ffc992c543e1ed7fcaa7c88a0fcd94d09daa829
[ "MIT" ]
null
null
null
ex2/matmultran/src/main.cpp
akowalew/rim-lab
4ffc992c543e1ed7fcaa7c88a0fcd94d09daa829
[ "MIT" ]
null
null
null
ex2/matmultran/src/main.cpp
akowalew/rim-lab
4ffc992c543e1ed7fcaa7c88a0fcd94d09daa829
[ "MIT" ]
null
null
null
#include <cmath>
#include <cstdio>
#include <cstdlib> // for malloc/free, which were used without this include
#include <ctime>

#include <chrono>
#include <thread>

#include "matmultran.hpp"

void alloc_mem(int m, int n, float **A_ptr, float **C_ptr, float **D_ptr)
{
    *A_ptr = (float *) malloc(m * n * sizeof(float));
    *C_ptr = (float *) malloc(m * m * sizeof(float));
    *D_ptr = (float *) malloc(m * m * sizeof(float));
}

void free_mem(float *A, float *C, float *D)
{
    free(A);
    free(C);
    free(D);
}

void read_data(int *m_ptr, int *n_ptr, float **A_ptr, float **C_ptr, float **D_ptr)
{
    FILE *f = fopen("matmultran.dat", "rb");
    fread(m_ptr, sizeof(int), 1, f);
    fread(n_ptr, sizeof(int), 1, f);
    alloc_mem(*m_ptr, *n_ptr, A_ptr, C_ptr, D_ptr);
    fread(*A_ptr, sizeof(float), *m_ptr * *n_ptr, f);
    fread(*D_ptr, sizeof(float), *m_ptr * *m_ptr, f);
    fclose(f);
}

void matcmp(float *C, float *D, int m, int n)
{
    int k;
    float d, e = -1.0f;
    for (k = 0; k < m * n; k++) {
        if ((d = fabsf(C[k] - D[k])) > e) {
            e = d;
        }
    }
    printf("max. abs. err. = %.1e\n", e);
}

#ifdef _WIN32
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
typedef LARGE_INTEGER app_timer_t;
#define timer(t_ptr) QueryPerformanceCounter(t_ptr)
void elapsed_time(app_timer_t start, app_timer_t stop, double flop)
{
    double etime;
    LARGE_INTEGER clk_freq;
    QueryPerformanceFrequency(&clk_freq);
    etime = (stop.QuadPart - start.QuadPart) / (double)clk_freq.QuadPart;
    printf("CPU (total!) time = %.3f ms (%6.3f GFLOP/s)\n",
           etime * 1e3, 1e-9 * flop / etime);
}
#else
using app_timer_t = std::chrono::time_point<std::chrono::steady_clock>;
#define timer(t_ptr) *t_ptr = std::chrono::steady_clock::now()
void elapsed_time(app_timer_t start, app_timer_t stop, double flop)
{
    const auto diff = stop - start;
    const auto diff_ms = std::chrono::duration_cast<std::chrono::milliseconds>(diff);
    const auto diff_ms_count = diff_ms.count();
    // FLOP per millisecond -> GFLOP/s: flop / (ms * 1e-3) * 1e-9 == 1e-6 * flop / ms.
    // (The original printed flop/diff_ms_count, which is off by a factor of 1e6.)
    printf("CPU (total!) time = %lld ms (%6.3f GFLOP/s)\n",
           static_cast<long long>(diff_ms_count),
           1e-6 * flop / diff_ms_count);
}
#endif

int main(int argc, char *argv[])
{
    app_timer_t start, stop;
    int m, n;
    float *A, *C, *D;
    read_data(&m, &n, &A, &C, &D);
    timer(&start);
    matmultran(C, A, m, n);
    timer(&stop);
    // Compute the FLOP count in floating point to avoid int overflow for large m, n.
    elapsed_time(start, stop, 2.0 * m * m * n);
    matcmp(C, D, m, m);
    free_mem(A, C, D);
    return 0;
}
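The kernel itself lives behind matmultran.hpp and is not shown here. Judging from the m x m result compared against D and the 2*m*m*n FLOP count, it presumably computes C = A * A^T for a row-major m x n matrix A; a naive reference version under that assumption (the real implementation may well be blocked or vectorized) could look like:

// Naive C = A * A^T for row-major A (m x n), C (m x m) -- an assumed
// reference semantic for matmultran(), useful for validating its output.
void matmultran_reference(float *C, const float *A, int m, int n)
{
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < m; ++j) {
            float sum = 0.0f;
            for (int k = 0; k < n; ++k) {
                sum += A[i * n + k] * A[j * n + k]; // dot(row i, row j)
            }
            C[i * m + j] = sum; // 2*n flops per element => 2*m*m*n total
        }
    }
}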
24.391753
94
0.609045
akowalew
4fedc7a154eb5284125d41a1b45c3a29855af6fe
3,387
cpp
C++
src/day12.cpp
beached/aoc_2017
553d42e50b81384ad93aae6e0aec624ca7c8bf58
[ "MIT" ]
1
2017-12-11T16:17:18.000Z
2017-12-11T16:17:18.000Z
src/day12.cpp
beached/aoc_2017
553d42e50b81384ad93aae6e0aec624ca7c8bf58
[ "MIT" ]
null
null
null
src/day12.cpp
beached/aoc_2017
553d42e50b81384ad93aae6e0aec624ca7c8bf58
[ "MIT" ]
null
null
null
// The MIT License (MIT) // // Copyright (c) 2017 Darrell Wright // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files( the "Software" ), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and / or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. #include <algorithm> #include <cstdint> #include <cstdlib> #include <iostream> #include <map> #include <numeric> #include <set> #include <string> #include <vector> #include <daw/daw_string.h> #include <daw/daw_string_view.h> #include "day12.h" #include "str_splitter.h" namespace daw { namespace aoc_2017 { namespace day12 { using node_name_t = std::string; using node_t = std::set<node_name_t>; using graph_t = std::map<node_name_t, node_t>; namespace { std::pair<node_name_t, node_t> parse_line( daw::string_view line ) { std::pair<node_name_t, node_t> result{}; result.first = line.pop_front( " " ).to_string( ); line.pop_front( "> " ); while( !line.empty( ) ) { result.second.insert( line.pop_front( ", " ).to_string( ) ); } return result; } std::set<node_name_t> get_group( graph_t const &nodes, node_name_t cur_node ) { std::set<node_name_t> visited{}; std::vector<node_name_t> to_visit{}; to_visit.push_back( cur_node ); while( !to_visit.empty( ) ) { auto tmp = to_visit.back( ); to_visit.pop_back( ); visited.insert( tmp ); for( auto const &node : nodes.find( tmp )->second ) { if( visited.count( node ) == 0 ) { to_visit.push_back( node ); } } } return visited; } graph_t parse_input( std::vector<std::string> lines ) { graph_t graph{}; for( auto const &line : lines ) { auto tmp = parse_line( line ); graph[tmp.first].insert( tmp.second.begin( ), tmp.second.end( ) ); for( auto const &node : tmp.second ) { graph[node].insert( tmp.first ); } } return graph; } } // namespace intmax_t count_connections_to_zero( std::vector<std::string> lines ) { auto graph = parse_input( lines ); return static_cast<intmax_t>( get_group( graph, "0" ).size( ) ); } intmax_t num_groups( std::vector<std::string> lines ) { auto graph = parse_input( lines ); std::set<node_t> groups{}; for( auto const &node : graph ) { groups.insert( get_group( graph, node.first ) ); } return static_cast<intmax_t>( groups.size( ) ); } } // namespace day12 } // namespace aoc_2017 } // namespace daw
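A small usage sketch (assuming day12.h declares the two entry points defined above), driven by the sample graph from the puzzle statement in the "x <-> y, z" adjacency format that parse_line expects; with this input the group containing "0" has 6 programs and there are 2 groups in total:

#include <iostream>
#include <string>
#include <vector>

#include "day12.h"

int main( ) {
	// Sample adjacency lines from the puzzle description.
	std::vector<std::string> const lines = {
	  "0 <-> 2",       "1 <-> 1",
	  "2 <-> 0, 3, 4", "3 <-> 2, 4",
	  "4 <-> 2, 3, 6", "5 <-> 6",
	  "6 <-> 4, 5"};
	std::cout << daw::aoc_2017::day12::count_connections_to_zero( lines )
	          << '\n'; // expected: 6
	std::cout << daw::aoc_2017::day12::num_groups( lines )
	          << '\n'; // expected: 2
	return 0;
}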
32.883495
83
0.663714
beached
4fef4ce7a337887fc210a91cdf8e90794590e7fe
33,522
cpp
C++
main.cpp
abitrolly/watchman
658e9ec680fe7691e1fe1d136180b282511a301a
[ "Apache-2.0" ]
1
2019-10-18T12:35:33.000Z
2019-10-18T12:35:33.000Z
main.cpp
MedRedha/watchman
073fdd5c13d73c66563a8f07acebc53a1c6d7dde
[ "Apache-2.0" ]
null
null
null
main.cpp
MedRedha/watchman
073fdd5c13d73c66563a8f07acebc53a1c6d7dde
[ "Apache-2.0" ]
1
2019-12-19T01:16:22.000Z
2019-12-19T01:16:22.000Z
/* Copyright 2012-present Facebook, Inc. * Licensed under the Apache License, Version 2.0 */ #include "watchman.h" #include "ChildProcess.h" #include "Logging.h" #include "ThreadPool.h" #ifndef _WIN32 #include <poll.h> #endif #include <folly/Exception.h> #include <folly/ScopeGuard.h> #include <folly/Singleton.h> using watchman::ChildProcess; using watchman::FileDescriptor; using Options = ChildProcess::Options; using namespace watchman; static int show_help = 0; static int show_version = 0; static enum w_pdu_type server_pdu = is_bser; static enum w_pdu_type output_pdu = is_json_pretty; static uint32_t server_capabilities = 0; static uint32_t output_capabilities = 0; static char* server_encoding = NULL; static char* output_encoding = NULL; static char* test_state_dir = NULL; static char* pid_file = NULL; static char** daemon_argv = NULL; static int persistent = 0; static int foreground = 0; static int no_pretty = 0; static int no_spawn = 0; static int no_local = 0; static int no_site_spawner = 0; #ifndef _WIN32 static int inetd_style = 0; static struct sockaddr_un un; #endif static int json_input_arg = 0; #ifdef __APPLE__ #include <mach-o/dyld.h> #endif static const char* compute_user_name(void); static void compute_file_name( char** strp, const char* user, const char* suffix, const char* what); static bool lock_pidfile(void) { // We defer computing this path until we're in the server context because // eager evaluation can trigger integration test failures unless all clients // are aware of both the pidfile and the sockpath being used in the tests. compute_file_name(&pid_file, compute_user_name(), "pid", "pidfile"); #if !defined(USE_GIMLI) && !defined(_WIN32) struct flock lock; pid_t mypid; mypid = getpid(); memset(&lock, 0, sizeof(lock)); lock.l_type = F_WRLCK; lock.l_start = 0; lock.l_whence = SEEK_SET; lock.l_len = 0; FileDescriptor fd(open(pid_file, O_RDWR | O_CREAT, 0644)); if (!fd) { w_log( W_LOG_ERR, "Failed to open pidfile %s for write: %s\n", pid_file, strerror(errno)); return false; } // Ensure that no children inherit the locked pidfile descriptor fd.setCloExec(); if (fcntl(fd.fd(), F_SETLK, &lock) != 0) { char pidstr[32]; int len; len = read(fd.fd(), pidstr, sizeof(pidstr) - 1); pidstr[len] = '\0'; w_log( W_LOG_ERR, "Failed to lock pidfile %s: process %s owns it: %s\n", pid_file, pidstr, strerror(errno)); return false; } // Replace contents of the pidfile with our pid string if (ftruncate(fd.fd(), 0)) { w_log( W_LOG_ERR, "Failed to truncate pidfile %s: %s\n", pid_file, strerror(errno)); return false; } auto pidString = watchman::to<std::string>(mypid); ignore_result(write(fd.fd(), pidString.data(), pidString.size())); fsync(fd.fd()); /* We are intentionally not closing the fd and intentionally not storing * a reference to it anywhere: the intention is that it remain locked * for the rest of the lifetime of our process. * close(fd); // NOPE! */ fd.release(); return true; #else // One does not simply, and without risk of races, write a pidfile // on win32. Instead we're using a named mutex in the global namespace. // This gives us a very simple way to exclusively claim ownership of // the lock for this user. To make things a little more complicated, // since we scope our locks based on the state dir location and require // this to work for our integration tests, we need to create a unique // name per state dir. 
This is made even more interesting because // we are forbidden from using windows directory separator characters // in the name, so we cannot simply concatenate the state dir path // with a watchman specific prefix. Instead we iterate the path // and rewrite any backslashes with forward slashes and use that // for the name. // Using a mutex for this does make it more awkward to discover // the process id of the exclusive owner, but that's not critically // important; it is possible to connect to the instance and issue // a get-pid command if that is needed. // We use the global namespace so that we ensure that we have one // watchman process per user per state dir location. If we didn't // use the Global namespace we'd end using a local namespace scoped // to the user session and that might cause confusion/insanity if // they are doing something elaborate like being logged in via // ssh in multiple sessions and expecting to share state. std::string name("Global\\Watchman-"); const auto* it = pid_file; while (*it != 0) { if (*it == '\\') { // We're not allowed to use backslash in the name, so normalize // to forward slashes. name.append("/"); } else { name.append(it, 1); } ++it; } auto mutex = CreateMutexA(nullptr, true, name.c_str()); if (!mutex) { watchman::log( watchman::ERR, "Failed to create mutex named: ", name, ": ", GetLastError(), "\n"); return false; } if (GetLastError() == ERROR_ALREADY_EXISTS) { watchman::log( watchman::ERR, "Failed to acquire mutex named: ", name, "; watchman is already running for this context\n"); return false; } /* We are intentionally not closing the mutex and intentionally not storing * a reference to it anywhere: the intention is that it remain locked * for the rest of the lifetime of our process. * CloseHandle(mutex); // NOPE! */ return true; #endif } #ifndef _WIN32 // Returns the current process priority aka `nice` level. // Since `-1` is a valid nice level, in order to detect an // error we clear errno first and then test whether it is // non-zero after we have retrieved the nice value. static int get_nice_value() { errno = 0; auto value = nice(0); folly::checkPosixError(errno, "failed to get `nice` value"); return value; } static void check_nice_value() { if (get_nice_value() > cfg_get_int("min_acceptable_nice_value", 0)) { watchman::log( watchman::FATAL, "Watchman is running at a lower than normal priority. Since that " "results in poor performance that is otherwise very difficult to " "trace, diagnose and debug, Watchman is refusing to start.\n"); } } #endif static void run_service(void) { int fd; bool res; #ifndef _WIN32 // Before we redirect stdin/stdout to the log files, move any inetd-provided // socket to a different descriptor number. if (inetd_style) { w_listener_prep_inetd(); } if (isatty(0)) { // This case can happen when a user is running watchman using // the `--foreground` switch. // Check and raise this error before we detach from the terminal check_nice_value(); } #endif // redirect std{in,out,err} fd = ::open("/dev/null", O_RDONLY); if (fd != -1) { ignore_result(::dup2(fd, STDIN_FILENO)); ::close(fd); } fd = open(log_name, O_WRONLY | O_APPEND | O_CREAT, 0600); if (fd != -1) { ignore_result(::dup2(fd, STDOUT_FILENO)); ignore_result(::dup2(fd, STDERR_FILENO)); ::close(fd); } #ifndef _WIN32 // If we weren't attached to a tty, check this now that we've opened // the log files so that we can log the problem there. 
check_nice_value(); #endif if (!lock_pidfile()) { return; } #ifndef _WIN32 /* we are the child, let's set things up */ ignore_result(chdir("/")); #endif w_set_thread_name("listener"); { char hostname[256]; gethostname(hostname, sizeof(hostname)); hostname[sizeof(hostname) - 1] = '\0'; w_log( W_LOG_ERR, "Watchman %s %s starting up on %s\n", PACKAGE_VERSION, #ifdef WATCHMAN_BUILD_INFO WATCHMAN_BUILD_INFO, #else "<no build info set>", #endif hostname); } #ifndef _WIN32 // Block SIGCHLD by default; we only want it to be delivered // to the reaper thread and only when it is ready to reap. // This MUST happen before we spawn any threads so that they // can pick up our default blocked signal mask. { sigset_t sigset; sigemptyset(&sigset); sigaddset(&sigset, SIGCHLD); sigprocmask(SIG_BLOCK, &sigset, NULL); } #endif watchman::getThreadPool().start( cfg_get_int("thread_pool_worker_threads", 16), cfg_get_int("thread_pool_max_items", 1024 * 1024)); ClockSpec::init(); w_state_load(); res = w_start_listener(sock_name); w_root_free_watched_roots(); cfg_shutdown(); watchman::log(watchman::ERR, "Exiting from service with res=", res, "\n"); if (res) { exit(0); } exit(1); } #ifndef _WIN32 // close any random descriptors that we may have inherited, // leaving only the main stdio descriptors open, if we execute a // child process. static void close_random_fds(void) { struct rlimit limit; long open_max = 0; int max_fd; // Deduce the upper bound for number of descriptors limit.rlim_cur = 0; #ifdef RLIMIT_NOFILE if (getrlimit(RLIMIT_NOFILE, &limit) != 0) { limit.rlim_cur = 0; } #elif defined(RLIM_OFILE) if (getrlimit(RLIMIT_OFILE, &limit) != 0) { limit.rlim_cur = 0; } #endif #ifdef _SC_OPEN_MAX open_max = sysconf(_SC_OPEN_MAX); #endif if (open_max <= 0) { open_max = 36; /* POSIX_OPEN_MAX (20) + some padding */ } if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > INT_MAX) { // "no limit", which seems unlikely limit.rlim_cur = INT_MAX; } // Take the larger of the two values we compute if (limit.rlim_cur > (rlim_t)open_max) { open_max = limit.rlim_cur; } for (max_fd = open_max; max_fd > STDERR_FILENO; --max_fd) { close(max_fd); } } #endif #if !defined(USE_GIMLI) && !defined(_WIN32) static void daemonize(void) { // Make sure we're not about to inherit an undesirable nice value check_nice_value(); close_random_fds(); // the double-fork-and-setsid trick establishes a // child process that runs in its own process group // with its own session and that won't get killed // off when your shell exits (for example). if (fork()) { // The parent of the first fork is the client // process that is being run by the user, and // we want to allow that to continue. return; } setsid(); if (fork()) { // The parent of the second fork has served its // purpose, so we simply exit here, otherwise // we'll duplicate the effort of either the // client or the server depending on if we // return or not. 
_exit(0); } // we are the child, let's set things up run_service(); } #endif #ifdef _WIN32 static void spawn_win32(void) { char module_name[WATCHMAN_NAME_MAX]; GetModuleFileName(NULL, module_name, sizeof(module_name)); Options opts; opts.setFlags(POSIX_SPAWN_SETPGROUP); opts.open(STDIN_FILENO, "/dev/null", O_RDONLY, 0666); opts.open(STDOUT_FILENO, log_name, O_WRONLY | O_CREAT | O_APPEND, 0600); opts.dup2(STDOUT_FILENO, STDERR_FILENO); std::vector<w_string_piece> args{module_name, "--foreground"}; for (size_t i = 0; daemon_argv[i]; i++) { args.push_back(daemon_argv[i]); } ChildProcess proc(args, std::move(opts)); proc.disown(); } #endif #ifdef USE_GIMLI static void spawn_via_gimli(void) { std::vector<w_string_piece> args{ GIMLI_MONITOR_PATH, #ifdef WATCHMAN_STATE_DIR "--trace-dir=" WATCHMAN_STATE_DIR "/traces", #endif "--pidfile", pid_file, "watchman", "--foreground", }; for (size_t i = 0; daemon_argv[i]; i++) { args.push_back(daemon_argv[i]); } close_random_fds(); Options opts; opts.open(STDIN_FILENO, "/dev/null", O_RDONLY, 0666); opts.open(STDOUT_FILENO, log_name, O_WRONLY | O_CREAT | O_APPEND, 0600); opts.dup2(STDOUT_FILENO, STDERR_FILENO); ChildProcess proc(args, std::move(opts)); proc.disown(); } #endif #ifndef _WIN32 // Spawn watchman via a site-specific spawn helper program. // We'll pass along any daemon-appropriate arguments that // we noticed during argument parsing. static void spawn_site_specific(const char* spawner) { std::vector<w_string_piece> args{ spawner, }; for (size_t i = 0; daemon_argv[i]; i++) { args.push_back(daemon_argv[i]); } close_random_fds(); // Note that we're not setting up the output to go to the log files // here. This is intentional; we'd like any failures in the spawner // to bubble up to the user as having things silently fail and get // logged to the server log doesn't provide any obvious cues to the // user about what went wrong. Watchman will open and redirect output // to its log files when it ultimately is launched and enters the // run_service() function above. // However, we do need to make sure that any output from both stdout // and stderr goes to stderr of the end user. Options opts; opts.open(STDIN_FILENO, "/dev/null", O_RDONLY, 0666); opts.dup2(STDERR_FILENO, STDOUT_FILENO); opts.dup2(STDERR_FILENO, STDERR_FILENO); try { ChildProcess proc(args, std::move(opts)); auto res = proc.wait(); if (WIFEXITED(res) && WEXITSTATUS(res) == 0) { return; } if (WIFEXITED(res)) { log(FATAL, spawner, ": exited with status ", WEXITSTATUS(res), "\n"); } else if (WIFSIGNALED(res)) { log(FATAL, spawner, ": signaled with ", WTERMSIG(res), "\n"); } log(FATAL, spawner, ": failed to start, exit status ", res, "\n"); } catch (const std::exception& exc) { log(FATAL, "Failed to spawn watchman via `", spawner, "': ", exc.what(), "\n"); } } #endif #ifdef __APPLE__ static void spawn_via_launchd(void) { char watchman_path[WATCHMAN_NAME_MAX]; uint32_t size = sizeof(watchman_path); char plist_path[WATCHMAN_NAME_MAX]; FILE* fp; struct passwd* pw; uid_t uid; close_random_fds(); if (_NSGetExecutablePath(watchman_path, &size) == -1) { log(FATAL, "_NSGetExecutablePath: path too long; size ", size, "\n"); } uid = getuid(); pw = getpwuid(uid); if (!pw) { log(FATAL, "getpwuid(", uid, ") failed: ", strerror(errno), ". I don't know who you are\n"); } snprintf( plist_path, sizeof(plist_path), "%s/Library/LaunchAgents", pw->pw_dir); // Best effort attempt to ensure that the agents dir exists. We'll detect // and report the failure in the fopen call below. 
mkdir(plist_path, 0755); snprintf( plist_path, sizeof(plist_path), "%s/Library/LaunchAgents/com.github.facebook.watchman.plist", pw->pw_dir); if (access(plist_path, R_OK) == 0) { // Unload any that may already exist, as it is likely wrong ChildProcess unload_proc( {"/bin/launchctl", "unload", "-F", plist_path}, Options()); unload_proc.wait(); // Forcibly remove the plist. In some cases it may have some attributes // set that prevent launchd from loading it. This can happen where // the system was re-imaged or restored from a backup unlink(plist_path); } fp = fopen(plist_path, "w"); if (!fp) { log(FATAL, "Failed to open ", plist_path, " for write: ", strerror(errno), "\n"); } compute_file_name(&pid_file, compute_user_name(), "pid", "pidfile"); auto plist_content = watchman::to<std::string>( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" "<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" " "\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n" "<plist version=\"1.0\">\n" "<dict>\n" " <key>Label</key>\n" " <string>com.github.facebook.watchman</string>\n" " <key>Disabled</key>\n" " <false/>\n" " <key>ProgramArguments</key>\n" " <array>\n" " <string>", watchman_path, "</string>\n" " <string>--foreground</string>\n" " <string>--logfile=", log_name, "</string>\n" " <string>--log-level=", log_level, "</string>\n" " <string>--sockname=", sock_name, "</string>\n" " <string>--statefile=", watchman_state_file, "</string>\n" " <string>--pidfile=", pid_file, "</string>\n" " </array>\n" " <key>KeepAlive</key>\n" " <dict>\n" " <key>Crashed</key>\n" " <true/>\n" " </dict>\n" " <key>RunAtLoad</key>\n" " <true/>\n" " <key>EnvironmentVariables</key>\n" " <dict>\n" " <key>PATH</key>\n" " <string><![CDATA[", getenv("PATH"), "]]></string>\n" " </dict>\n" " <key>ProcessType</key>\n" " <string>Interactive</string>\n" " <key>Nice</key>\n" " <integer>-5</integer>\n" "</dict>\n" "</plist>\n"); fwrite(plist_content.data(), 1, plist_content.size(), fp); fclose(fp); // Don't rely on umask, ensure we have the correct perms chmod(plist_path, 0644); ChildProcess load_proc( {"/bin/launchctl", "load", "-F", plist_path}, Options()); auto res = load_proc.wait(); if (WIFEXITED(res) && WEXITSTATUS(res) == 0) { return; } // Most likely cause is "headless" operation with no GUI context if (WIFEXITED(res)) { w_log(W_LOG_ERR, "launchctl: exited with status %d\n", WEXITSTATUS(res)); } else if (WIFSIGNALED(res)) { w_log(W_LOG_ERR, "launchctl: signaled with %d\n", WTERMSIG(res)); } w_log(W_LOG_ERR, "Falling back to daemonize\n"); daemonize(); } #endif static void parse_encoding(const char* enc, enum w_pdu_type* pdu) { if (!enc) { return; } if (!strcmp(enc, "json")) { *pdu = is_json_compact; return; } if (!strcmp(enc, "bser")) { *pdu = is_bser; return; } if (!strcmp(enc, "bser-v2")) { *pdu = is_bser_v2; return; } w_log( W_LOG_ERR, "Invalid encoding '%s', use one of json, bser or bser-v2\n", enc); exit(EX_USAGE); } static const char* get_env_with_fallback( const char* name1, const char* name2, const char* fallback) { const char* val; val = getenv(name1); if (!val || *val == 0) { val = getenv(name2); } if (!val || *val == 0) { val = fallback; } return val; } static void compute_file_name( char** strp, const char* user, const char* suffix, const char* what) { char* str = NULL; str = *strp; if (!str) { /* We'll put our various artifacts in a user specific dir * within the state dir location */ char* state_dir = NULL; const char* state_parent = test_state_dir ? 
test_state_dir : #ifdef WATCHMAN_STATE_DIR WATCHMAN_STATE_DIR #else watchman_tmp_dir #endif ; ignore_result(asprintf(&state_dir, "%s/%s-state", state_parent, user)); if (!state_dir) { log(FATAL, "out of memory computing ", what, "\n"); } if (mkdir(state_dir, 0700) == 0 || errno == EEXIST) { #ifndef _WIN32 // verify ownership struct stat st; int dir_fd; int ret = 0; uid_t euid = geteuid(); // TODO: also allow a gid to be specified here const char* sock_group_name = cfg_get_string("sock_group", nullptr); // S_ISGID is set so that files inside this directory inherit the group // name mode_t dir_perms = cfg_get_perms( "sock_access", false /* write bits */, true /* execute bits */) | S_ISGID; auto dirp = w_dir_open(state_dir, false /* don't need strict symlink rules */); dir_fd = dirp->getFd(); if (dir_fd == -1) { w_log(W_LOG_ERR, "dirfd(%s): %s\n", state_dir, strerror(errno)); goto bail; } if (fstat(dir_fd, &st) != 0) { w_log(W_LOG_ERR, "fstat(%s): %s\n", state_dir, strerror(errno)); ret = 1; goto bail; } if (euid != st.st_uid) { w_log( W_LOG_ERR, "the owner of %s is uid %d and doesn't match your euid %d\n", state_dir, st.st_uid, euid); ret = 1; goto bail; } if (st.st_mode & 0022) { w_log( W_LOG_ERR, "the permissions on %s allow others to write to it. " "Verify that you own the contents and then fix its " "permissions by running `chmod 0700 %s`\n", state_dir, state_dir); ret = 1; goto bail; } if (sock_group_name) { const struct group* sock_group = w_get_group(sock_group_name); if (!sock_group) { ret = 1; goto bail; } if (fchown(dir_fd, -1, sock_group->gr_gid) == -1) { w_log( W_LOG_ERR, "setting up group '%s' failed: %s\n", sock_group_name, strerror(errno)); ret = 1; goto bail; } } // Depending on group and world accessibility, change permissions on the // directory. We can't leave the directory open and set permissions on the // socket because not all POSIX systems respect permissions on UNIX domain // sockets, but all POSIX systems respect permissions on the containing // directory. w_log(W_LOG_DBG, "Setting permissions on state dir to 0%o\n", dir_perms); if (fchmod(dir_fd, dir_perms) == -1) { w_log( W_LOG_ERR, "fchmod(%s, %#o): %s\n", state_dir, dir_perms, strerror(errno)); ret = 1; goto bail; } bail: if (ret) { exit(ret); } #endif } else { w_log( W_LOG_ERR, "while computing %s: failed to create %s: %s\n", what, state_dir, strerror(errno)); exit(1); } ignore_result(asprintf(&str, "%s/%s", state_dir, suffix)); if (!str) { log(FATAL, "out of memory computing ", what, "\n"); } free(state_dir); } #ifndef _WIN32 if (str[0] != '/') { log(FATAL, "invalid ", what, ": ", str, "\n"); } #endif *strp = str; } static const char* compute_user_name(void) { const char* user = get_env_with_fallback("USER", "LOGNAME", NULL); #ifdef _WIN32 static char user_buf[256]; #endif if (!user) { #ifdef _WIN32 DWORD size = sizeof(user_buf); if (GetUserName(user_buf, &size)) { user_buf[size] = 0; user = user_buf; } else { log(FATAL, "GetUserName failed: ", win32_strerror(GetLastError()), ". I don't know who you are\n"); } #else uid_t uid = getuid(); struct passwd* pw; pw = getpwuid(uid); if (!pw) { log(FATAL, "getpwuid(", uid, ") failed: ", strerror(errno), ". 
I don't know who you are\n"); } user = pw->pw_name; #endif if (!user) { log(FATAL, "watchman requires that you set $USER in your env\n"); } } return user; } static void setup_sock_name(void) { const char* user = compute_user_name(); watchman_tmp_dir = get_env_with_fallback("TMPDIR", "TMP", "/tmp"); #ifdef _WIN32 if (!sock_name) { asprintf(&sock_name, "\\\\.\\pipe\\watchman-%s", user); } #else compute_file_name(&sock_name, user, "sock", "sockname"); #endif compute_file_name(&watchman_state_file, user, "state", "statefile"); compute_file_name(&log_name, user, "log", "logname"); #ifdef USE_GIMLI compute_file_name(&pid_file, user, "pid", "pidfile"); #endif #ifndef _WIN32 if (strlen(sock_name) >= sizeof(un.sun_path) - 1) { log(FATAL, sock_name, ": path is too long\n"); } un.sun_family = PF_LOCAL; memcpy(un.sun_path, sock_name, strlen(sock_name) + 1); #endif } static bool should_start(int err) { if (err == ECONNREFUSED) { return true; } if (err == ENOENT) { return true; } return false; } static bool try_command(json_t* cmd, int timeout) { w_jbuffer_t buffer; w_jbuffer_t output_pdu_buffer; int err; auto client = w_stm_connect(sock_name, timeout * 1000); if (!client) { return false; } if (!cmd) { return true; } // Send command if (!buffer.pduEncodeToStream( server_pdu, server_capabilities, cmd, client.get())) { err = errno; w_log(W_LOG_ERR, "error sending PDU to server\n"); errno = err; return false; } buffer.clear(); do { if (!buffer.passThru( output_pdu, output_capabilities, &output_pdu_buffer, client.get())) { return false; } } while (persistent); return true; } static struct watchman_getopt opts[] = { {"help", 'h', "Show this help", OPT_NONE, &show_help, NULL, NOT_DAEMON}, #ifndef _WIN32 {"inetd", 0, "Spawning from an inetd style supervisor", OPT_NONE, &inetd_style, NULL, IS_DAEMON}, #endif {"no-site-spawner", 'S', "Don't use the site or system spawner", OPT_NONE, &no_site_spawner, NULL, IS_DAEMON}, {"version", 'v', "Show version number", OPT_NONE, &show_version, NULL, NOT_DAEMON}, {"sockname", 'U', "Specify alternate sockname", REQ_STRING, &sock_name, "PATH", IS_DAEMON}, {"logfile", 'o', "Specify path to logfile", REQ_STRING, &log_name, "PATH", IS_DAEMON}, {"log-level", 0, "set the log level (0 = off, default is 1, verbose = 2)", REQ_INT, &log_level, NULL, IS_DAEMON}, #ifdef USE_GIMLI {"pidfile", 0, "Specify path to gimli monitor pidfile", REQ_STRING, &pid_file, "PATH", NOT_DAEMON}, #else {"pidfile", 0, "Specify path to pidfile", REQ_STRING, &pid_file, "PATH", IS_DAEMON}, #endif {"persistent", 'p', "Persist and wait for further responses", OPT_NONE, &persistent, NULL, NOT_DAEMON}, {"no-save-state", 'n', "Don't save state between invocations", OPT_NONE, &dont_save_state, NULL, IS_DAEMON}, {"statefile", 0, "Specify path to file to hold watch and trigger state", REQ_STRING, &watchman_state_file, "PATH", IS_DAEMON}, {"json-command", 'j', "Instead of parsing CLI arguments, take a single " "json object from stdin", OPT_NONE, &json_input_arg, NULL, NOT_DAEMON}, {"output-encoding", 0, "CLI output encoding. json (default) or bser", REQ_STRING, &output_encoding, NULL, NOT_DAEMON}, {"server-encoding", 0, "CLI<->server encoding. 
bser (default) or json", REQ_STRING, &server_encoding, NULL, NOT_DAEMON}, {"foreground", 'f', "Run the service in the foreground", OPT_NONE, &foreground, NULL, NOT_DAEMON}, {"no-pretty", 0, "Don't pretty print JSON", OPT_NONE, &no_pretty, NULL, NOT_DAEMON}, {"no-spawn", 0, "Don't try to start the service if it is not available", OPT_NONE, &no_spawn, NULL, NOT_DAEMON}, {"no-local", 0, "When no-spawn is enabled, don't try to handle request" " in client mode if service is unavailable", OPT_NONE, &no_local, NULL, NOT_DAEMON}, // test-state-dir is for testing only and should not be used in production: // instead, use the compile-time WATCHMAN_STATE_DIR option {"test-state-dir", 0, NULL, REQ_STRING, &test_state_dir, "DIR", NOT_DAEMON}, {0, 0, 0, OPT_NONE, 0, 0, 0}}; static void parse_cmdline(int* argcp, char*** argvp) { cfg_load_global_config_file(); w_getopt(opts, argcp, argvp, &daemon_argv); if (show_help) { usage(opts, stdout); } if (show_version) { printf("%s\n", PACKAGE_VERSION); exit(0); } watchman::getLog().setStdErrLoggingLevel( static_cast<enum watchman::LogLevel>(log_level)); setup_sock_name(); parse_encoding(server_encoding, &server_pdu); parse_encoding(output_encoding, &output_pdu); if (!output_encoding) { output_pdu = no_pretty ? is_json_compact : is_json_pretty; } // Prevent integration tests that call the watchman cli from // accidentally spawning a server. if (getenv("WATCHMAN_NO_SPAWN")) { no_spawn = true; } } static json_ref build_command(int argc, char** argv) { int i; // Read blob from stdin if (json_input_arg) { auto err = json_error_t(); w_jbuffer_t buf; auto cmd = buf.decodeNext(w_stm_stdin(), &err); if (buf.pdu_type == is_bser) { // If they used bser for the input, select bser for output // unless they explicitly requested something else if (!server_encoding) { server_pdu = is_bser; } if (!output_encoding) { output_pdu = is_bser; } } else if (buf.pdu_type == is_bser_v2) { // If they used bser v2 for the input, select bser v2 for output // unless they explicitly requested something else if (!server_encoding) { server_pdu = is_bser_v2; } if (!output_encoding) { output_pdu = is_bser_v2; } } if (!cmd) { fprintf( stderr, "failed to parse command from stdin: " "line %d, column %d, position %d: %s\n", err.line, err.column, err.position, err.text); exit(1); } return cmd; } // Special case: no arguments means that we just want // to verify that the service is up, starting it if // needed if (argc == 0) { return nullptr; } auto cmd = json_array(); for (i = 0; i < argc; i++) { json_array_append_new(cmd, typed_string_to_json(argv[i], W_STRING_UNICODE)); } return cmd; } static void spawn_watchman(void) { #ifndef _WIN32 if (no_site_spawner) { // The astute reader will notice this we're calling daemonize() here // and not the various other platform spawning functions in the block // further below in this function. This is deliberate: we want // to do the most simple background running possible when the // no_site_spawner flag is used. In the future we plan to // migrate the platform spawning functions to use the site_spawn // functionality. daemonize(); return; } // If we have a site-specific spawning requirement, then we'll // invoke that spawner rather than using any of the built-in // spawning functionality. 
const char* site_spawn = cfg_get_string("spawn_watchman_service", nullptr); if (site_spawn) { spawn_site_specific(site_spawn); return; } #endif #ifdef USE_GIMLI spawn_via_gimli(); #elif defined(__APPLE__) spawn_via_launchd(); #elif defined(_WIN32) spawn_win32(); #else daemonize(); #endif } int main(int argc, char** argv) { bool ran; // Since we don't fully integrate with folly, but may pull // in dependencies that do, we need to perform a little bit // of bootstrapping. We don't want to run the full folly // init today because it will interfere with our own signal // handling. In the future we will integrate this properly. folly::SingletonVault::singleton()->registrationComplete(); SCOPE_EXIT { folly::SingletonVault::singleton()->destroyInstances(); }; parse_cmdline(&argc, &argv); if (foreground) { run_service(); return 0; } w_set_thread_name("cli"); auto cmd = build_command(argc, argv); preprocess_command(cmd, output_pdu, output_capabilities); ran = try_command(cmd, 0); if (!ran && should_start(errno)) { if (no_spawn) { if (!no_local) { ran = try_client_mode_command(cmd, !no_pretty); } } else { spawn_watchman(); // Some site spawner scripts will asynchronously launch the service. // When that happens we may encounter ECONNREFUSED. We need to // tolerate this, so we add some retries. int attempts = 10; std::chrono::milliseconds interval(10); while (true) { ran = try_command(cmd, 10); if (!ran && should_start(errno) && attempts-- > 0) { /* sleep override */ std::this_thread::sleep_for(interval); interval *= 2; continue; } // Success or terminal failure break; } } } if (ran) { return 0; } if (!no_spawn) { w_log( W_LOG_ERR, "unable to talk to your watchman on %s! (%s)\n", sock_name, strerror(errno)); #ifdef __APPLE__ if (getenv("TMUX")) { w_log( W_LOG_ERR, "\n" "You may be hitting a tmux related session issue.\n" "An immediate workaround is to run:\n" "\n" " watchman version\n" "\n" "just once, from *outside* your tmux session, to allow the launchd\n" "registration to be setup. Once done, you can continue to access\n" "watchman from inside your tmux sessions as usual.\n" "\n" "Longer term, you may wish to install this tool:\n" "\n" " https://github.com/ChrisJohnsen/tmux-MacOSX-pasteboard\n" "\n" "and configure tmux to use `reattach-to-user-namespace`\n" "when it launches your shell.\n"); } #endif } return 1; } /* vim:ts=2:sw=2:et: */
26.27116
80
0.614373
abitrolly
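The single most load-bearing trick in lock_pidfile() above is that a POSIX fcntl(F_SETLK) write lock is released by the kernel only when the owning process exits, so deliberately leaking the descriptor keeps the pidfile locked for the life of the process. A minimal standalone sketch of that same pattern (function name and error handling are ours, not Watchman's):

#include <fcntl.h>
#include <unistd.h>
#include <cstring>
#include <cstdio>

// Returns true if we now own the pidfile lock, false if another
// process already holds it. The descriptor is leaked on purpose:
// the kernel drops the lock only when this process exits.
static bool acquire_pidfile(const char* path) {
  int fd = open(path, O_RDWR | O_CREAT, 0644);
  if (fd == -1) return false;

  struct flock lock;
  std::memset(&lock, 0, sizeof(lock));
  lock.l_type = F_WRLCK;     // exclusive write lock
  lock.l_whence = SEEK_SET;  // l_start = l_len = 0 covers the whole file
  if (fcntl(fd, F_SETLK, &lock) != 0) {
    close(fd);               // somebody else owns the lock
    return false;
  }

  // Replace any stale contents with our pid.
  char buf[32];
  int len = std::snprintf(buf, sizeof(buf), "%d\n", (int)getpid());
  if (ftruncate(fd, 0) == 0) {
    (void)write(fd, buf, len);
  }
  return true;  // fd intentionally not closed
}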
4ff03862e9080e0d503f1a21891f33cd504b5ef4
3,303
hpp
C++
cppcache/src/FixedPartitionAttributesImpl.hpp
austxcodemonkey/geode-native
a816ac99cbbac557629686cb2542fdc74d464338
[ "Apache-2.0" ]
1
2018-09-08T05:05:22.000Z
2018-09-08T05:05:22.000Z
cppcache/src/FixedPartitionAttributesImpl.hpp
austxcodemonkey/geode-native
a816ac99cbbac557629686cb2542fdc74d464338
[ "Apache-2.0" ]
1
2021-02-23T12:27:00.000Z
2021-02-23T12:27:00.000Z
cppcache/src/FixedPartitionAttributesImpl.hpp
isabella232/geode-native
0d9a99d5e0632de62df17921950cf3f6640efb33
[ "Apache-2.0" ]
null
null
null
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#ifndef GEODE_FIXEDPARTITIONATTRIBUTESIMPL_H_
#define GEODE_FIXEDPARTITIONATTRIBUTESIMPL_H_

#include <geode/CacheableBuiltins.hpp>
#include <geode/CacheableString.hpp>
#include <geode/DataInput.hpp>
#include <geode/DataOutput.hpp>
#include <geode/Serializable.hpp>

namespace apache {
namespace geode {
namespace client {

class FixedPartitionAttributesImpl
    : public internal::DataSerializableInternal {
 private:
  std::string m_partitionName;
  bool m_isPrimary;
  int m_numBuckets;
  int m_startingBucketId;

 public:
  // Note: the name is default-constructed as an empty string here;
  // initializing a std::string from nullptr is undefined behavior.
  FixedPartitionAttributesImpl()
      : Serializable(),
        m_partitionName(),
        m_isPrimary(false),
        m_numBuckets(1),
        m_startingBucketId(-1) {}

  const std::string& getPartitionName() { return m_partitionName; }

  int getNumBuckets() const { return m_numBuckets; }

  int isPrimary() const { return m_isPrimary; }

  void toData(DataOutput& output) const override {
    output.writeString(m_partitionName);
    output.writeBoolean(m_isPrimary);
    output.writeInt(m_numBuckets);
    output.writeInt(m_startingBucketId);
  }

  void fromData(DataInput& input) override {
    m_partitionName = input.readString();
    m_isPrimary = input.readBoolean();
    m_numBuckets = input.readInt32();
    m_startingBucketId = input.readInt32();
  }

  size_t objectSize() const override {
    return sizeof(int) + sizeof(int) + sizeof(bool) +
           (m_partitionName.length() *
            sizeof(decltype(m_partitionName)::value_type));
  }

  FixedPartitionAttributesImpl& operator=(
      const FixedPartitionAttributesImpl& rhs) {
    if (this == &rhs) return *this;
    this->m_partitionName = rhs.m_partitionName;
    this->m_isPrimary = rhs.m_isPrimary;
    this->m_numBuckets = rhs.m_numBuckets;
    this->m_startingBucketId = rhs.m_startingBucketId;
    return *this;
  }

  FixedPartitionAttributesImpl(const FixedPartitionAttributesImpl& rhs) {
    this->m_partitionName = rhs.m_partitionName;
    this->m_isPrimary = rhs.m_isPrimary;
    this->m_numBuckets = rhs.m_numBuckets;
    this->m_startingBucketId = rhs.m_startingBucketId;
  }

  int getStartingBucketID() const { return m_startingBucketId; }

  int getLastBucketID() const { return m_startingBucketId + m_numBuckets - 1; }

  bool hasBucket(int bucketId) {
    return getStartingBucketID() <= bucketId && bucketId <= getLastBucketID();
  }
};

}  // namespace client
}  // namespace geode
}  // namespace apache

#endif  // GEODE_FIXEDPARTITIONATTRIBUTESIMPL_H_
31.457143
80
0.735695
austxcodemonkey
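Most of this header is serialization plumbing; the actual partition logic is an inclusive bucket range: a partition starting at bucket s with n buckets owns ids s through s + n - 1. A tiny self-contained restatement of that arithmetic (struct and field names are ours, not Geode's):

#include <cassert>

struct FixedPartitionRange {
  int startingBucketId;
  int numBuckets;
  int lastBucketId() const { return startingBucketId + numBuckets - 1; }
  bool hasBucket(int id) const {
    return startingBucketId <= id && id <= lastBucketId();
  }
};

int main() {
  FixedPartitionRange r{10, 4};  // owns buckets 10, 11, 12, 13
  assert(r.hasBucket(10) && r.hasBucket(13));
  assert(!r.hasBucket(9) && !r.hasBucket(14));
}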
4ff27ab12280b56abdf72056fe69ec713f2f2f46
8,695
cc
C++
paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
shippingwang/Paddle
55a785bb10c9b494e6256855cbb1f73a63bb36e7
[ "Apache-2.0" ]
1
2019-03-14T02:29:12.000Z
2019-03-14T02:29:12.000Z
paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
shippingwang/Paddle
55a785bb10c9b494e6256855cbb1f73a63bb36e7
[ "Apache-2.0" ]
null
null
null
paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
shippingwang/Paddle
55a785bb10c9b494e6256855cbb1f73a63bb36e7
[ "Apache-2.0" ]
1
2019-11-27T11:58:44.000Z
2019-11-27T11:58:44.000Z
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/operators/lrn_op.h" #include "paddle/fluid/platform/mkldnn_helper.h" namespace paddle { namespace operators { using paddle::framework::Tensor; using paddle::platform::MKLDNNDeviceContext; namespace { template <typename T, typename... Args> std::shared_ptr<T> insert_to_context(const std::string& key, const MKLDNNDeviceContext& dev_ctx, Args&&... args) { auto p = std::static_pointer_cast<T, void>(dev_ctx.GetBlob(key)); if (!p) { p = std::make_shared<T>(args...); dev_ctx.SetBlob(key, std::static_pointer_cast<void, T>(p)); } return p; } template <typename... Args> void run_primitive(Args&&... args) { auto forward_op = mkldnn::lrn_forward{args...}; std::vector<mkldnn::primitive> pipeline = {forward_op}; mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); } } // namespace template <typename T> class LRNMKLDNNOpKernel : public paddle::framework::OpKernel<T> { public: void Compute(const paddle::framework::ExecutionContext& ctx) const override { const bool is_float_type = std::is_same<T, float>::value; PADDLE_ENFORCE(is_float_type, "MKLDNN LRN must use float data."); PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), "MKLDNN LRN must use CPUPlace."); auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>(); const auto& mkldnn_engine = dev_ctx.GetEngine(); auto x = ctx.Input<Tensor>("X"); auto out = ctx.Output<Tensor>("Out"); auto mid = ctx.Output<Tensor>("MidOut"); auto input_data = x->data<T>(); auto output_data = out->mutable_data<T>(ctx.GetPlace()); mid->mutable_data<T>(ctx.GetPlace()); const int n = ctx.Attr<int>("n"); // MKL-DNN implements LRN in a caffe way: // http://caffe.berkeleyvision.org/tutorial/layers/lrn.html // Where sum of squares is divided by size of normalization window // this is not the case for PaddlePaddle LRN. 
// Hence we need to compensate for this diffrence by // multipliing alpha by size of window(n) const float alpha = ctx.Attr<float>("alpha") * static_cast<float>(n); const float beta = ctx.Attr<float>("beta"); const float k = ctx.Attr<float>("k"); const bool is_test = ctx.Attr<bool>("is_test"); auto e_mid = framework::EigenTensor<T, 4>::From(*mid); e_mid = e_mid.constant(k); auto src_md = x->get_mkldnn_prim_desc().desc(); auto forward_desc = mkldnn::lrn_forward::desc{mkldnn::prop_kind::forward, mkldnn::lrn_across_channels, src_md, n, alpha, beta, k}; auto src_memory_pd = x->get_mkldnn_prim_desc(); if (!is_test) { const std::string key = ctx.op().Output("Out"); const std::string key_src_memory = key + "@lrn_src_memory"; const std::string key_pd = key + "@lrn_pd"; const std::string key_workspace_memory = key + "@lrn_workspace_memory"; auto forward_pd = insert_to_context<mkldnn::lrn_forward::primitive_desc>( key_pd, dev_ctx, forward_desc, mkldnn_engine); auto src_memory = insert_to_context<mkldnn::memory>( key_src_memory, dev_ctx, src_memory_pd); src_memory->set_data_handle( static_cast<void*>(const_cast<T*>(input_data))); auto dst_memory_pd = forward_pd->dst_primitive_desc(); auto dst_memory = mkldnn::memory(dst_memory_pd, static_cast<void*>(output_data)); auto workspace_memory = insert_to_context<mkldnn::memory>( key_workspace_memory, dev_ctx, forward_pd->workspace_primitive_desc()); run_primitive(*forward_pd, *src_memory, *workspace_memory, dst_memory); out->set_mkldnn_prim_desc(dst_memory_pd); } else { auto forward_pd = mkldnn::lrn_forward::primitive_desc{forward_desc, mkldnn_engine}; auto src_memory = mkldnn::memory{ src_memory_pd, static_cast<void*>(const_cast<T*>(input_data))}; auto workspace_memory = mkldnn::memory{forward_pd.workspace_primitive_desc()}; auto dst_memory_pd = forward_pd.dst_primitive_desc(); auto dst_memory = mkldnn::memory(forward_pd.dst_primitive_desc(), static_cast<void*>(output_data)); run_primitive(forward_pd, src_memory, workspace_memory, dst_memory); out->set_mkldnn_prim_desc(dst_memory_pd); } } }; template <typename T> class LRNMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> { public: void Compute(const paddle::framework::ExecutionContext& ctx) const override { const bool is_float_type = std::is_same<T, float>::value; PADDLE_ENFORCE(is_float_type, "MKLDNN LRN must use float data."); PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), "MKLDNN LRN must use CPUPlace."); PADDLE_ENFORCE( !ctx.Attr<bool>("is_test"), "is_test attribute should be set to False in training phase."); auto x = ctx.Input<Tensor>("X"); auto out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto x_grad = ctx.Output<Tensor>(framework::GradVarName("X")); const std::string key = ctx.op().Input("Out"); const std::string key_src_memory = key + "@lrn_src_memory"; const std::string key_pd = key + "@lrn_pd"; const std::string key_workspace_memory = key + "@lrn_workspace_memory"; const int n = ctx.Attr<int>("n"); const float alpha = ctx.Attr<float>("alpha") * static_cast<float>(n); const float beta = ctx.Attr<float>("beta"); const float k = ctx.Attr<float>("k"); auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>(); const auto& mkldnn_engine = dev_ctx.GetEngine(); auto x_grad_data = x_grad->mutable_data<T>(ctx.GetPlace()); auto out_grad_data = out_grad->data<T>(); auto dims = paddle::framework::vectorize2int(x->dims()); auto src_md = paddle::platform::MKLDNNMemDesc( dims, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); auto diff_src_md = 
paddle::platform::MKLDNNMemDesc( dims, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); auto diff_dst_md = paddle::platform::MKLDNNMemDesc( dims, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); auto diff_dst_memory = mkldnn::memory{{diff_dst_md, mkldnn_engine}, static_cast<void*>(const_cast<float*>(out_grad_data))}; auto diff_src_memory = mkldnn::memory{{diff_src_md, mkldnn_engine}, static_cast<void*>(x_grad_data)}; auto backward_desc = mkldnn::lrn_backward::desc{ mkldnn::lrn_across_channels, src_md, diff_src_md, n, alpha, beta, k}; auto forward_pd = dev_ctx.GetBlob(key_pd); auto backward_pd = mkldnn::lrn_backward::primitive_desc{ backward_desc, mkldnn_engine, *static_cast<mkldnn::lrn_forward::primitive_desc*>(forward_pd.get())}; std::shared_ptr<void> workspace_memory = dev_ctx.GetBlob(key_workspace_memory); auto src_memory = dev_ctx.GetBlob(key_src_memory); auto backward_op = mkldnn::lrn_backward{ backward_pd, *static_cast<mkldnn::memory*>(src_memory.get()), diff_dst_memory, *static_cast<mkldnn::memory*>(workspace_memory.get()), diff_src_memory}; std::vector<mkldnn::primitive> pipeline = {backward_op}; mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_KERNEL(lrn, MKLDNN, paddle::platform::CPUPlace, ops::LRNMKLDNNOpKernel<float>); REGISTER_OP_KERNEL(lrn_grad, MKLDNN, paddle::platform::CPUPlace, ops::LRNMKLDNNGradOpKernel<float>);
39.703196
79
0.658309
shippingwang
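The alpha * n rescaling in the kernel above is easy to sanity-check numerically: caffe/MKL-DNN style LRN divides the sum of squares by the window size before applying alpha, while PaddlePaddle applies alpha directly, so feeding alpha * n to the former reproduces the latter. A small sketch under those formula assumptions (values are arbitrary):

#include <cmath>
#include <cassert>

// Denominator of caffe/MKL-DNN style LRN: alpha is divided by the
// window size n before scaling the sum of squares.
double caffe_denom(double sumsq, int n, double alpha, double beta, double k) {
  return std::pow(k + (alpha / n) * sumsq, beta);
}

// Denominator of PaddlePaddle style LRN: alpha scales the sum directly.
double paddle_denom(double sumsq, int n, double alpha, double beta, double k) {
  (void)n;
  return std::pow(k + alpha * sumsq, beta);
}

int main() {
  const int n = 5;
  const double sumsq = 3.7, alpha = 1e-4, beta = 0.75, k = 1.0;
  // Passing alpha * n to the caffe-style formula reproduces Paddle's
  // result, which is exactly the compensation the kernel applies.
  assert(std::abs(caffe_denom(sumsq, n, alpha * n, beta, k) -
                  paddle_denom(sumsq, n, alpha, beta, k)) < 1e-12);
}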
4ff51b38cbe2ae34b8f6913080ab6f8a0936a4db
4,295
hpp
C++
lib/boost_1.78.0/boost/geometry/strategies/simplify/spherical.hpp
LaudateCorpus1/math
990a66b3cccd27a5fd48626360bb91093a48278b
[ "BSD-3-Clause" ]
326
2015-02-08T13:47:49.000Z
2022-03-16T02:13:59.000Z
lib/boost_1.78.0/boost/geometry/strategies/simplify/spherical.hpp
LaudateCorpus1/math
990a66b3cccd27a5fd48626360bb91093a48278b
[ "BSD-3-Clause" ]
623
2015-01-02T23:45:23.000Z
2022-03-09T11:15:23.000Z
lib/boost_1.78.0/boost/geometry/strategies/simplify/spherical.hpp
LaudateCorpus1/math
990a66b3cccd27a5fd48626360bb91093a48278b
[ "BSD-3-Clause" ]
215
2015-01-14T15:50:38.000Z
2022-02-23T03:58:36.000Z
// Boost.Geometry // Copyright (c) 2021, Oracle and/or its affiliates. // Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle // Licensed under the Boost Software License version 1.0. // http://www.boost.org/users/license.html #ifndef BOOST_GEOMETRY_STRATEGIES_SIMPLIFY_SPHERICAL_HPP #define BOOST_GEOMETRY_STRATEGIES_SIMPLIFY_SPHERICAL_HPP #include <boost/geometry/strategies/detail.hpp> #include <boost/geometry/strategies/distance/comparable.hpp> #include <boost/geometry/strategies/distance/detail.hpp> #include <boost/geometry/strategies/simplify/services.hpp> #include <boost/geometry/strategies/agnostic/simplify_douglas_peucker.hpp> #include <boost/geometry/strategies/spherical/distance_haversine.hpp> #include <boost/geometry/strategies/spherical/distance_cross_track.hpp> #include <boost/geometry/strategies/spherical/point_in_point.hpp> #include <boost/geometry/strategy/spherical/area.hpp> namespace boost { namespace geometry { namespace strategies { namespace simplify { template < typename RadiusTypeOrSphere = double, typename CalculationType = void > class spherical : public strategies::detail::spherical_base<RadiusTypeOrSphere> { using base_t = strategies::detail::spherical_base<RadiusTypeOrSphere>; public: spherical() = default; template <typename RadiusOrSphere> explicit spherical(RadiusOrSphere const& radius_or_sphere) : base_t(radius_or_sphere) {} // TODO: Replace this if calculate_point_order() is used in simplify template <typename Geometry> auto area(Geometry const&) const { return strategy::area::spherical < typename base_t::radius_type, CalculationType >(base_t::radius()); } // For perimeter() template <typename Geometry1, typename Geometry2> auto distance(Geometry1 const&, Geometry2 const&, distance::detail::enable_if_pp_t<Geometry1, Geometry2> * = nullptr) const { return strategy::distance::haversine < typename base_t::radius_type, CalculationType >(base_t::radius()); } // For douglas_peucker template <typename Geometry1, typename Geometry2> auto distance(Geometry1 const&, Geometry2 const&, distance::detail::enable_if_ps_t<Geometry1, Geometry2> * = nullptr) const { return strategy::distance::cross_track < CalculationType, strategy::distance::haversine<typename base_t::radius_type, CalculationType> >(base_t::radius()); } // For equals() template <typename Geometry1, typename Geometry2> static auto relate(Geometry1 const&, Geometry2 const&, std::enable_if_t < util::is_pointlike<Geometry1>::value && util::is_pointlike<Geometry2>::value > * = nullptr) { return strategy::within::spherical_point_point(); } }; namespace services { template <typename Geometry> struct default_strategy<Geometry, spherical_equatorial_tag> { using type = strategies::simplify::spherical<>; }; template <typename P, typename CT, typename S> struct strategy_converter < strategy::simplify::douglas_peucker < P, strategy::distance::cross_track<CT, S> > > { template <typename Strategy> static auto get(Strategy const& ) { return strategies::simplify::spherical<typename S::radius_type, CT>(); } }; template <typename P, typename CT, typename S> struct strategy_converter < strategy::simplify::douglas_peucker < P, strategy::distance::comparable::cross_track<CT, S> > > { template <typename Strategy> static auto get(Strategy const& ) { return strategies::distance::detail::comparable < strategies::simplify::spherical<typename S::radius_type, CT> >(); } }; } // namespace services }} // namespace strategies::simplify }} // namespace boost::geometry #endif // BOOST_GEOMETRY_STRATEGIES_SIMPLIFY_SPHERICAL_HPP
28.256579
92
0.655879
LaudateCorpus1
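For context, strategies like this one are normally reached indirectly through boost::geometry::simplify, which resolves the spherical default via the services::default_strategy specialization above. A minimal usage sketch, assuming Boost.Geometry is on the include path; the tolerance is a distance in radians on a unit sphere:

#include <boost/geometry.hpp>
#include <iostream>

namespace bg = boost::geometry;

int main() {
  using point = bg::model::point<double, 2,
      bg::cs::spherical_equatorial<bg::degree>>;
  bg::model::linestring<point> in{{0.0, 0.0}, {1.0, 0.01}, {2.0, 0.0}};
  bg::model::linestring<point> out;

  // Douglas-Peucker with a ~0.001 rad tolerance: the middle point
  // deviates by roughly 1.7e-4 rad, so it should be dropped.
  bg::simplify(in, out, 0.001);
  std::cout << bg::wkt(out) << "\n";
}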
4ff77fb96ce84be8fc4866cf29b0a17cd6a21fb0
5,312
cpp
C++
OOP/OOP-Homework-2/BigInteger.cpp
Rossoner40/NBU-Classwork-and-Homework
823e5eab2da616ae6d965da9c0a22fa0212d7887
[ "MIT" ]
null
null
null
OOP/OOP-Homework-2/BigInteger.cpp
Rossoner40/NBU-Classwork-and-Homework
823e5eab2da616ae6d965da9c0a22fa0212d7887
[ "MIT" ]
null
null
null
OOP/OOP-Homework-2/BigInteger.cpp
Rossoner40/NBU-Classwork-and-Homework
823e5eab2da616ae6d965da9c0a22fa0212d7887
[ "MIT" ]
null
null
null
#include "BigInteger.h" BigInteger::BigInteger():n(0), val(NULL), pos(false){ } BigInteger::BigInteger(std::string s){ n = s.length(); pos = (s[0]=='-')?false:true; if(!pos) n--; val = new int[n]; for (int i = 0; i < n; i++) { if(s[i]!='-' || s[i]!='+'){ if(pos) val[i] = s[i]-'0'; else val[i] = s[i+1]-'0'; } } } BigInteger::BigInteger(const BigInteger & r){ n = r.n; val = new int[n]; pos = r.pos; for (int i = 0; i < n; i++) { val[i] = r.val[i]; } } BigInteger::~BigInteger(){ if(n!=0) delete [] val; } BigInteger& BigInteger::operator=(const BigInteger & r){ if(this!=&r){ if(n!=0) delete [] val; n = r.n; val = new int[n]; pos = r.pos; for (int i = 0; i < n; i++) { val[i] = r.val[i]; } } return *this; } BigInteger BigInteger::operator+(const BigInteger & r) const{ if(pos == r.pos){ int * t = reverse(); int * k = r.reverse(); int length = (n>r.n)? n:r.n; int add_val = 0, curr1, curr2; int * ans = new int[length+1]; for (int i = 0; i < length; i++) { curr1 = (i<n)?t[i]:0; curr2 = (i<r.n)?k[i]:0; ans[i] = (curr1+curr2+add_val)%10; add_val = (curr1+curr2+add_val)/10; } if(add_val) ans[length] = add_val; else ans[length] = 0; BigInteger answer; answer.pos = pos; int cnt; if(add_val){ cnt = length+1; } else cnt = length; answer.n = cnt; answer.val = new int[cnt]; for(int i=cnt-1, j=0; i>=0; i--,j++){ answer.val[j] = ans[i]; } delete [] ans; return answer; } else{ int sub_val=0; BigInteger max = abs_max(r)?*this:r; BigInteger min = !abs_max(r)?*this:r; int * t = max.reverse(); int * k = min.reverse(); int length = max.n, curr1, curr2; int * ans = new int[length]; for(int i=0; i<length; i++){ curr1 = (i<n)?t[i]:0; curr1 -= sub_val; curr2 = (i<r.n)?k[i]:0; if(curr1 < curr2){ ans[i] = curr1+10-curr2; sub_val = 1; } else{ ans[i] = curr1-curr2; sub_val = 0; } } BigInteger answer; answer.pos = max.pos; int cnt=0; bool flag = true; for(int i=length-1; i>=0; i--){ if(!(ans[i] == 0 && flag)){ cnt++; flag = false; } } answer.n = cnt; answer.val = new int[cnt]; for(int i=cnt-1, j=0; i>=0; i--,j++){ answer.val[j] = ans[i]; } delete [] ans; return answer; } return BigInteger(0); } BigInteger BigInteger::operator-(const BigInteger & r) const{ if(pos == r.pos){ int sub_val=0; BigInteger max = abs_max(r)?*this:r; BigInteger min = !abs_max(r)?*this:r; int * t = max.reverse(); int * k = min.reverse(); int length = max.n, curr1, curr2; int * ans = new int[length]; for(int i=0; i<length; i++){ curr1 = (i<n)?t[i]:0; curr1 -= sub_val; curr2 = (i<r.n)?k[i]:0; if(curr1 < curr2){ ans[i] = curr1+10-curr2; sub_val = 1; } else{ ans[i] = curr1-curr2; sub_val = 0; } } BigInteger answer; answer.pos = max.pos; int cnt=0; bool flag = true; for(int i=length-1; i>=0; i--){ if(!(ans[i] == 0 && flag)){ cnt++; flag = false; } } answer.n = cnt; answer.val = new int[cnt]; for(int i=cnt-1, j=0; i>=0; i--,j++){ answer.val[j] = ans[i]; } delete [] ans; return answer; } else{ int * t = reverse(); int * k = r.reverse(); int length = (n>r.n)? 
n:r.n;
        int add_val = 0, curr1, curr2;
        int * ans = new int[length+1];
        for (int i = 0; i < length; i++) {
            curr1 = (i<n)?t[i]:0;
            curr2 = (i<r.n)?k[i]:0;
            ans[i] = (curr1+curr2+add_val)%10;
            add_val = (curr1+curr2+add_val)/10;
        }
        if(add_val) ans[length] = add_val;
        else ans[length] = 0;
        BigInteger answer;
        answer.pos = pos;
        int cnt;
        if(add_val){
            cnt = length+1;
        }
        else cnt = length;
        answer.n = cnt;
        answer.val = new int[cnt];
        for(int i=cnt-1, j=0; i>=0; i--,j++){
            answer.val[j] = ans[i];
        }
        delete [] t;   // reverse() allocates scratch copies; free them
        delete [] k;
        delete [] ans;
        return answer;
    }
    // Unreachable; default-construct instead of BigInteger(0), which
    // would build a std::string from a null pointer.
    return BigInteger();
}

bool BigInteger::operator >(const BigInteger & r)const{
    if(pos != r.pos){
        return pos;
    }
    // Same sign: compare magnitudes. For negatives the larger magnitude
    // is the smaller value, so the comparison flips.
    return pos ? abs_max(r) : r.abs_max(*this);
}

bool BigInteger::operator <(const BigInteger & r)const{
    // Strict less-than; the old !(*this>r) also returned true on equality.
    return r > *this;
}

bool BigInteger::abs_max(const BigInteger & r)const{
    if(n>r.n) return true;
    else if(n<r.n) return false;
    for (int i = 0; i < n; i++) {
        if(val[i]>r.val[i]) return true;
        else if(val[i]<r.val[i]) return false; // first differing digit decides
    }
    return false; // equal magnitudes
}

int BigInteger::getN()const{
    return n;
}

void BigInteger::setN(int length){
    n = length;
}

int * BigInteger::reverse() const{
    int * r = new int[n];
    int ind=0;
    for(int i=n-1; i>=0; i--){
        r[ind] = val[i];
        ind++;
    }
    return r;
}

std::ostream& operator << (std::ostream& out, const BigInteger & r){
    if(!r.pos) out << "-";
    for (int i = 0; i < r.n; i++) {
        out<<r.val[i];
    }
    return out;
}
20.274809
68
0.492282
Rossoner40
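A short driver showing the intended use of the class above, assuming BigInteger.h declares the same constructors and operators as the implementation (expected output noted in comments; the values are ours):

#include <iostream>
#include "BigInteger.h"

int main() {
  BigInteger a("123456789123456789");
  BigInteger b("-987654321");
  std::cout << (a + b) << "\n";  // 123456788135802468
  std::cout << (a - b) << "\n";  // 123456790111111110
  std::cout << (a > b) << "\n";  // 1 (positive beats negative)
}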
4ff94fda49ac35645dadad0f002bc31ff25dc5d3
9,075
cc
C++
src/solver.cc
CHEN-Lin/OpenMoor
f463f586487b9023e7f3678c9d851000558b14d7
[ "Apache-2.0" ]
7
2019-02-10T07:03:45.000Z
2022-03-04T16:09:38.000Z
src/solver.cc
CHEN-Lin/OpenMoor
f463f586487b9023e7f3678c9d851000558b14d7
[ "Apache-2.0" ]
null
null
null
src/solver.cc
CHEN-Lin/OpenMoor
f463f586487b9023e7f3678c9d851000558b14d7
[ "Apache-2.0" ]
4
2021-01-25T23:33:11.000Z
2022-03-27T13:22:56.000Z
// This file is part of OpenMOOR, an Open-source simulation program for MOORing // systems in offshore renewable energy applications. // // Created by Lin Chen on Sep 15, 2017. // // Copyright 2018 Lin Chen <l.chen.tj@gmail.com> & Biswajit Basu <basub@tcd.ie> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //////////////////////////////////////////////////////////////////////////////// #include "solver.h" namespace moor { //////////////////////////////////////////////////////////////////////////////// /// When basic parameters are obatined from the input data. Initialize the other /// parameters. //////////////////////////////////////////////////////////////////////////////// void Solver::initialize(const int n, const int n_b) { n_nodal_state = n; n_bound_constraint = n_b; n_iteration = 0; relaxation_factor = initial_relaxation_factor; alpha_k = lambda_infinity/(lambda_infinity - 1.0); alpha_m = (3*lambda_infinity+1)/2/(lambda_infinity - 1.0); gamma = 0.5 - alpha_m + alpha_k; // Initialization constants for use. alpha_k1 = 1-alpha_k; alpha_k_square = pow(alpha_k, 2.0); alpha_k1_square = pow(alpha_k1, 2.0); alpha_k_cross = alpha_k * alpha_k1; alpha_m1 = 1 - alpha_m; alpha_m_square = pow(alpha_m, 2.0); alpha_m1_square = pow(alpha_m1, 2.0); alpha_m_cross = alpha_m * alpha_m1; gamma1 = 1 - gamma; } //////////////////////////////////////////////////////////////////////////////// /// Solve the augmented matrix for state increment, by three steps: /// - Reduction; /// - Gauss-Jordan elimination; /// - Back substitution. //////////////////////////////////////////////////////////////////////////////// int Solver::solve(std::vector< Eigen::MatrixXd >& aug_mat) { int fail = 0, n_node = aug_mat.size() - 1; fail = gauss_jordan_eliminate(aug_mat[0], n_bound_constraint, n_nodal_state-1, n_nodal_state); int k = 1; for (k=1; k<n_node; k++) { reduce(aug_mat, k); fail = gauss_jordan_eliminate(aug_mat[k], 0, n_nodal_state-1, n_bound_constraint); } reduce(aug_mat,n_node); fail = gauss_jordan_eliminate(aug_mat[n_node], 0, (n_nodal_state-n_bound_constraint)-1, n_bound_constraint); back_substitute(aug_mat); // Sort the last column of the augmented matrix. for (k=0; k<n_node; k++) { aug_mat[k].block(0,2*n_nodal_state,n_bound_constraint,1) = aug_mat[k].block(n_nodal_state-n_bound_constraint,2*n_nodal_state,n_bound_constraint,1); aug_mat[k].block(n_bound_constraint,2*n_nodal_state,n_nodal_state-n_bound_constraint,1) = aug_mat[k+1].block(0,2*n_nodal_state,n_nodal_state-n_bound_constraint,1); } return fail; } //////////////////////////////////////////////////////////////////////////////// /// Adjust the relaxation factor according to the error change trend. 
//////////////////////////////////////////////////////////////////////////////// void Solver::adjust_relaxation(double present_error, double prev_error) { if (present_error > prev_error || prev_error == 0.0) relaxation_factor = relaxation_factor / decrement_factor; else relaxation_factor = relaxation_factor * increment_factor; relaxation_factor = (relaxation_factor < initial_relaxation_factor ? relaxation_factor : initial_relaxation_factor); relaxation_factor = relaxation_factor > 1E-5 ? relaxation_factor : 1E-5; } //////////////////////////////////////////////////////////////////////////////// /// Diagonalize the square block of augmented matrix by Gauss Jordan Elimination /// using pivoting. /// <pre> /// 0 0 0 X X X X X X X B 0 0 0 1 0 0 0 0 S S C /// 0 0 0 X X X X X X X B 0 0 0 0 1 0 0 0 S S C /// 0 0 0 X X X X X X X B => 0 0 0 0 0 1 0 0 S S C /// 0 0 0 X X X X X X X B 0 0 0 0 0 0 1 0 S S C /// 0 0 0 X X X X X X X B 0 0 0 0 0 0 0 1 S S C /// </pre> /// The rows are swapped to form a block diagonal matrix. //////////////////////////////////////////////////////////////////////////////// int Solver::gauss_jordan_eliminate(Eigen::MatrixXd& aug_mat, int i_start, int i_end, int j_start) { int n_dim = i_end - i_start + 1; int i_pivot, j_pivot; double pivot; Eigen::RowVectorXd temp_row; for (int k=0; k<n_dim; k++) { pivot = aug_mat.block(i_start+k,j_start+k,n_dim-k,1).array(). abs().maxCoeff(&i_pivot,&j_pivot); if (pivot <= 1E-20) return 1; // Singularity. // assert(pivot > 1E-20); // Swap rows. if (i_pivot!=0) { temp_row = aug_mat.row(i_start+k+i_pivot); aug_mat.row(i_start+k+i_pivot) = aug_mat.row(i_start+k); aug_mat.row(i_start+k) = temp_row; } aug_mat.row(i_start+k) = aug_mat.row(i_start+k) * (1.0/aug_mat(i_start+k,j_start+k)); aug_mat(i_start+k,j_start+k) = 1.0; // Elimination. for (int i=k+1; i<n_dim; i++) { aug_mat.row(i_start+i) -= aug_mat.row(i_start+k) * aug_mat(i_start+i,j_start+k); } // Set zeros. if (n_dim > 1) aug_mat.block(i_start+k+1,j_start+k,n_dim-k-1,1).setZero(); } // Back substitution. for (int j=n_dim-1; j>0; j--) { for (int i=j-1; i>=0; i--) { aug_mat.row(i_start+i) -= aug_mat.row(i_start+j) * aug_mat(i_start+i,j_start+j); } aug_mat.block(i_start,j_start+j,j,1).setZero(); } return 0; } //////////////////////////////////////////////////////////////////////////////// /// Reduce columns jz1 .. jz2-1 of the s matrix, using previous results as /// stored in the c matrix. Only colums jm1 .. jm2-1 and jmf are affected by /// prior results. /// <pre> /// X X X X X X X X X X B 0 0 0 S S S S S S S C /// X X X X X X X X X X B 0 0 0 S S S S S S S C /// X X X X X X X X X X B => 0 0 0 S S S S S S S C /// X X X X X X X X X X B 0 0 0 S S S S S S S C /// X X X X X X X X X X B 0 0 0 S S S S S S S C /// </pre> //////////////////////////////////////////////////////////////////////////////// void Solver::reduce(std::vector< Eigen::MatrixXd >& s,int i) { // Alter the columns of the coefficient matrix. s[i].block(0,n_bound_constraint,n_nodal_state,n_nodal_state-n_bound_constraint) -= (s[i].block(0,0,n_nodal_state,n_bound_constraint) * s[i-1].block(n_nodal_state-n_bound_constraint, n_nodal_state+n_bound_constraint, n_bound_constraint,n_nodal_state-n_bound_constraint)); // Alter the b column. s[i].col(2*n_nodal_state) -= (s[i].block(0,0,n_nodal_state,n_bound_constraint) * s[i-1].block(n_nodal_state-n_bound_constraint, 2*n_nodal_state,n_bound_constraint,1)); // For testing. 
s[i].block(0,0,n_nodal_state,n_bound_constraint) -= (s[i].block(0,0,n_nodal_state,n_bound_constraint) * s[i-1].block(n_nodal_state-n_bound_constraint, n_nodal_state,n_bound_constraint,n_bound_constraint)); } //////////////////////////////////////////////////////////////////////////////// /// Back substitute to dealing with the following structure /// <pre> /// 1 X X V B /// 1 X X V B /// 1 X X V B /// 1 X X V B /// 1 X X V B /// 1 X X V B /// 1 V B /// 1 V B /// </pre> /// Note: Values of B after back substitution are the solution. //////////////////////////////////////////////////////////////////////////////// void Solver::back_substitute(std::vector< Eigen::MatrixXd >& s) { int n = s.size(); for (int i=n-2; i>=0; i--) { s[i].col(2*n_nodal_state) -= (s[i].block(0,n_nodal_state+n_bound_constraint, n_nodal_state, n_nodal_state-n_bound_constraint) * s[i+1].block(0,2*n_nodal_state,n_nodal_state-n_bound_constraint,1)); } } } // End of namespace moor.
37.970711
98
0.513388
CHEN-Lin
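The pivoting scheme in Solver::gauss_jordan_eliminate is standard Gauss-Jordan on an augmented block; a compact standalone version of the same idea on a plain n x (n+1) Eigen matrix may make the bookkeeping easier to follow (our own simplified signature, without the solver's block offsets):

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// Diagonalize the left n x n block of an n x (n+1) augmented matrix in
// place with partial pivoting, leaving the solution in the last column.
bool gauss_jordan(Eigen::MatrixXd& a) {
  const int n = a.rows();
  for (int k = 0; k < n; ++k) {
    int piv;
    a.col(k).tail(n - k).cwiseAbs().maxCoeff(&piv);
    piv += k;
    if (std::abs(a(piv, k)) < 1e-20) return false;  // singular
    a.row(k).swap(a.row(piv));                      // bring pivot up
    a.row(k) /= a(k, k);                            // normalize pivot row
    for (int i = 0; i < n; ++i)
      if (i != k) a.row(i) -= a(i, k) * a.row(k);   // eliminate column k
  }
  return true;
}

int main() {
  Eigen::MatrixXd a(3, 4);
  a << 2, 1, -1,  8,
      -3,-1,  2,-11,
      -2, 1,  2, -3;
  if (gauss_jordan(a))
    std::cout << a.col(3).transpose() << "\n";  // expect: 2 3 -1
}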
4ff9e38e38ce9ed38cef419716d27168344d17b6
235
hpp
C++
include/mpi/core/enums/comparison.hpp
acdemiralp/mpi
c3d445404ed129f1f0dee61fa7b36033a11801d5
[ "MIT" ]
9
2021-11-09T06:07:00.000Z
2022-02-06T12:03:56.000Z
include/mpi/core/enums/comparison.hpp
acdemiralp/mpi
c3d445404ed129f1f0dee61fa7b36033a11801d5
[ "MIT" ]
3
2021-10-19T00:04:53.000Z
2021-11-10T07:26:34.000Z
include/mpi/core/enums/comparison.hpp
acdemiralp/mpi
c3d445404ed129f1f0dee61fa7b36033a11801d5
[ "MIT" ]
null
null
null
#pragma once #include <cstdint> #include <mpi/core/mpi.hpp> namespace mpi { enum class comparison : std::int32_t { identical = MPI_IDENT , congruent = MPI_CONGRUENT, similar = MPI_SIMILAR , unequal = MPI_UNEQUAL }; }
14.6875
36
0.685106
acdemiralp
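The enum values mirror what MPI_Comm_compare reports, so a raw result can be cast straight to mpi::comparison. A minimal sketch, assuming an MPI implementation and this header are available:

#include <mpi.h>
#include <mpi/core/enums/comparison.hpp>
#include <cstdio>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  MPI_Comm dup;
  MPI_Comm_dup(MPI_COMM_WORLD, &dup);

  int result;
  MPI_Comm_compare(MPI_COMM_WORLD, dup, &result);

  // Same group and ranks, different context: MPI_CONGRUENT.
  if (static_cast<mpi::comparison>(result) == mpi::comparison::congruent)
    std::printf("communicators are congruent\n");

  MPI_Comm_free(&dup);
  MPI_Finalize();
}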
4ffa16bfcf8e2aa794f27ed2543621bbad9bd3f5
2,989
cpp
C++
tests/test_logmanager.cpp
DeanoC/simple_logmanager
406317ae0f0a3b2b187a505ba6694f1902f63124
[ "Apache-2.0" ]
null
null
null
tests/test_logmanager.cpp
DeanoC/simple_logmanager
406317ae0f0a3b2b187a505ba6694f1902f63124
[ "Apache-2.0" ]
null
null
null
tests/test_logmanager.cpp
DeanoC/simple_logmanager
406317ae0f0a3b2b187a505ba6694f1902f63124
[ "Apache-2.0" ]
null
null
null
#include "al2o3_platform/platform.h" #include "al2o3_catch2/catch2.hpp" #include "utils_simple_logmanager/logmanager.h" #include "al2o3_os/file.h" #include "al2o3_os/filesystem.h" TEST_CASE("Alloc/Free", "[SimpleLogManager]") { auto slm = SimpleLogManager_Alloc(); REQUIRE(slm); SimpleLogManager_Free(slm); } TEST_CASE("Quiet settings", "[SimpleLogManager]") { auto slm = SimpleLogManager_Alloc(); REQUIRE(slm); // defaults REQUIRE(!SimpleLogManager_IsFailedAssertQuiet(slm)); REQUIRE(!SimpleLogManager_IsInfoQuiet(slm)); REQUIRE(!SimpleLogManager_IsDebugMsgQuiet(slm)); REQUIRE(!SimpleLogManager_IsErrorQuiet(slm)); REQUIRE(!SimpleLogManager_IsWarningQuiet(slm)); REQUIRE(SimpleLogManager_IsInfoFileLineQuiet(slm)); REQUIRE(!SimpleLogManager_IsWarningFileLineQuiet(slm)); REQUIRE(!SimpleLogManager_IsErrorFileLineQuiet(slm)); SimpleLogManager_SetInfoFileLineQuiet(slm, true); REQUIRE(SimpleLogManager_IsInfoFileLineQuiet(slm)); SimpleLogManager_SetWarningFileLineQuiet(slm, true); REQUIRE(SimpleLogManager_IsWarningFileLineQuiet(slm)); SimpleLogManager_SetErrorFileLineQuiet(slm, true); REQUIRE(SimpleLogManager_IsErrorFileLineQuiet(slm)); SimpleLogManager_SetInfoFileLineQuiet(slm, false); REQUIRE(!SimpleLogManager_IsInfoFileLineQuiet(slm)); SimpleLogManager_SetWarningFileLineQuiet(slm, false); REQUIRE(!SimpleLogManager_IsWarningFileLineQuiet(slm)); SimpleLogManager_SetErrorFileLineQuiet(slm, false); REQUIRE(!SimpleLogManager_IsErrorFileLineQuiet(slm)); SimpleLogManager_SetInfoQuiet(slm, true); SimpleLogManager_SetDebugMsgQuiet(slm, true); SimpleLogManager_SetErrorQuiet(slm, true); SimpleLogManager_SetWarningQuiet(slm, true); REQUIRE(SimpleLogManager_IsInfoQuiet(slm)); REQUIRE(SimpleLogManager_IsDebugMsgQuiet(slm)); REQUIRE(SimpleLogManager_IsErrorQuiet(slm)); REQUIRE(SimpleLogManager_IsWarningQuiet(slm)); SimpleLogManager_Free(slm); } TEST_CASE("Default log file OK", "[SimpleLogManager]") { char filePath[2048]; char const logFilename[] = "log.log"; Os_GetCurrentDir(filePath, sizeof(filePath)); ASSERT( strlen(filePath) + sizeof(logFilename) < sizeof(filePath)); strcat(filePath, logFilename); // delete any old log first if( Os_FileExists(filePath) ) { Os_FileDelete(filePath); } auto slm = SimpleLogManager_Alloc(); REQUIRE(slm); LOGINFO("test default"); REQUIRE(Os_FileExists(filePath)); SimpleLogManager_Free(slm); } TEST_CASE("Custom log file OK", "[SimpleLogManager]") { char filePath[2048]; char const logFilename[] = "custom_test.log"; Os_GetCurrentDir(filePath, sizeof(filePath)); ASSERT( strlen(filePath) + sizeof(logFilename) < sizeof(filePath)); strcat(filePath, logFilename); // delete any old log first if( Os_FileExists(filePath) ) { Os_FileDelete(filePath); } auto slm = SimpleLogManager_Alloc(); REQUIRE(slm); LOGINFO("test default"); SimpleLogManager_UseFileForLog(slm, filePath); LOGINFO("test custom"); REQUIRE(Os_FileExists(filePath)); SimpleLogManager_Free(slm); }
31.135417
68
0.79458
DeanoC
4ffb7b971e0ea1a7dae67d8a598fab3e9e8e4e7f
6,357
cpp
C++
src/Emulators/nestopiaue/common/nstini.cpp
slajerek/RetroDebugger
e761e4f9efd103a05e65ef283423b142fa4324c7
[ "Apache-2.0", "MIT" ]
34
2021-05-29T07:04:17.000Z
2022-03-10T20:16:03.000Z
src/Emulators/nestopiaue/common/nstini.cpp
slajerek/RetroDebugger
e761e4f9efd103a05e65ef283423b142fa4324c7
[ "Apache-2.0", "MIT" ]
6
2021-12-25T13:05:21.000Z
2022-01-19T17:35:17.000Z
src/Emulators/nestopiaue/common/nstini.cpp
slajerek/RetroDebugger
e761e4f9efd103a05e65ef283423b142fa4324c7
[ "Apache-2.0", "MIT" ]
6
2021-12-24T18:37:41.000Z
2022-02-06T23:06:02.000Z
/* inih -- simple .INI file parser Copyright (c) 2009, Brush Technology All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Brush Technology nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY BRUSH TECHNOLOGY ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BRUSH TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <ctype.h> #include <string.h> #include "nstini.h" #if !INI_USE_STACK #include <stdlib.h> #endif #define MAX_SECTION 50 #define MAX_NAME 50 /* Strip whitespace chars off end of given string, in place. Return s. */ static char* rstrip(char* s) { char* p = s + strlen(s); while (p > s && isspace((unsigned char)(*--p))) *p = '\0'; return s; } /* Return pointer to first non-whitespace char in given string. */ static char* lskip(const char* s) { while (*s && isspace((unsigned char)(*s))) s++; return (char*)s; } /* Return pointer to first char c or ';' comment in given string, or pointer to null at end of string if neither found. ';' must be prefixed by a whitespace character to register as a comment. */ static char* find_char_or_comment(const char* s, char c) { int was_whitespace = 0; while (*s && *s != c && !(was_whitespace && *s == ';')) { was_whitespace = isspace((unsigned char)(*s)); s++; } return (char*)s; } /* Version of strncpy that ensures dest (size bytes) is null-terminated. */ static char* strncpy0(char* dest, const char* src, size_t size) { strncpy(dest, src, size); dest[size - 1] = '\0'; return dest; } /* See documentation in header file. 
*/ int ini_parse_file(FILE* file, int (*handler)(void*, const char*, const char*, const char*), void* user) { /* Uses a fair bit of stack (use heap instead if you need to) */ #if INI_USE_STACK char line[INI_MAX_LINE]; #else char* line; #endif char section[MAX_SECTION] = ""; char prev_name[MAX_NAME] = ""; char* start; char* end; char* name; char* value; int lineno = 0; int error = 0; #if !INI_USE_STACK line = (char*)malloc(INI_MAX_LINE); if (!line) { return -2; } #endif /* Scan through file line by line */ while (fgets(line, INI_MAX_LINE, file) != NULL) { lineno++; start = line; #if INI_ALLOW_BOM if (lineno == 1 && (unsigned char)start[0] == 0xEF && (unsigned char)start[1] == 0xBB && (unsigned char)start[2] == 0xBF) { start += 3; } #endif start = lskip(rstrip(start)); if (*start == ';' || *start == '#') { /* Per Python ConfigParser, allow '#' comments at start of line */ } #if INI_ALLOW_MULTILINE else if (*prev_name && *start && start > line) { /* Non-blank line with leading whitespace, treat as continuation of previous name's value (as per Python ConfigParser). */ if (!handler(user, section, prev_name, start) && !error) error = lineno; } #endif else if (*start == '[') { /* A "[section]" line */ end = find_char_or_comment(start + 1, ']'); if (*end == ']') { *end = '\0'; strncpy0(section, start + 1, sizeof(section)); *prev_name = '\0'; } else if (!error) { /* No ']' found on section line */ error = lineno; } } else if (*start && *start != ';') { /* Not a comment, must be a name[=:]value pair */ end = find_char_or_comment(start, '='); if (*end != '=') { end = find_char_or_comment(start, ':'); } if (*end == '=' || *end == ':') { *end = '\0'; name = rstrip(start); value = lskip(end + 1); end = find_char_or_comment(value, '\0'); if (*end == ';') *end = '\0'; rstrip(value); /* Valid name[=:]value pair found, call handler */ strncpy0(prev_name, name, sizeof(prev_name)); if (!handler(user, section, name, value) && !error) error = lineno; } else if (!error) { /* No '=' or ':' found on name[=:]value line */ error = lineno; } } } #if !INI_USE_STACK free(line); #endif return error; } /* See documentation in header file. */ int ini_parse(const char* filename, int (*handler)(void*, const char*, const char*, const char*), void* user) { FILE* file; int error; file = fopen(filename, "r"); if (!file) return -1; error = ini_parse_file(file, handler, user); fclose(file); return error; }
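A minimal usage sketch for the parser above; the section/key names and config struct are hypothetical. The handler returns nonzero to signal success, since a zero return records the current line as the first error:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "nstini.h"

struct config { int volume; };

static int handler(void* user, const char* section,
                   const char* name, const char* value)
{
    struct config* cfg = (struct config*)user;
    if (strcmp(section, "audio") == 0 && strcmp(name, "volume") == 0)
        cfg->volume = atoi(value);
    return 1; /* nonzero = handled; 0 would mark this line as an error */
}

int main(void)
{
    struct config cfg = { 0 };
    int err = ini_parse("settings.ini", handler, &cfg);
    /* err: 0 on success, -1 if the file cannot be opened,
       otherwise the line number of the first parse error */
    printf("volume=%d (err=%d)\n", cfg.volume, err);
    return 0;
}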
32.269036
80
0.55608
slajerek
4ffbdf4569166734a20b6794b81d291056b33c2a
2,232
cpp
C++
35-minimum-spanning-tree/minimumtree.cpp
wlep/cp-course
9e52788e8f6a76752149b74d06d0272e16c3b528
[ "MIT" ]
null
null
null
35-minimum-spanning-tree/minimumtree.cpp
wlep/cp-course
9e52788e8f6a76752149b74d06d0272e16c3b528
[ "MIT" ]
null
null
null
35-minimum-spanning-tree/minimumtree.cpp
wlep/cp-course
9e52788e8f6a76752149b74d06d0272e16c3b528
[ "MIT" ]
null
null
null
#include <iostream> #include <vector> #include <algorithm> using namespace std; template<typename T> class UnionFind { public: struct Node { T data; int rank; int forestIndex; Node* parent; Node(T _data, int _forestIndex) { data = _data; rank = 0; forestIndex = _forestIndex; parent = this; } }; UnionFind() { } Node* MakeSet(T data) { Node* newNode = new Node(data, forest.size()); forest.push_back(newNode); return newNode; } // Union by Rank, if equal y becomes root void Union(Node* x, Node* y) { Node* rootX = Find(x); Node* rootY = Find(y); if (rootX == rootY) { return; } if (rootX->rank > rootY->rank) { rootY->parent = rootX; return; } rootX->parent = rootY; if (rootX->rank == rootY->rank) rootY->rank++; } // Find with Path Compression Node* Find(Node* x) { if (x->parent != x) x->parent = Find(x->parent); return x->parent; } vector<Node*> Forest() { return forest; } private: vector<Node*> forest; }; struct Edge { int i; int j; int w; Edge(int _i, int _j, int _w) { i = _i; j = _j; w = _w; } }; int main() { std::ios_base::sync_with_stdio(false); int n, m; cin >> n >> m; vector<Edge*> edges; for (int e = 0; e < m; e++) { int i, j, w; cin >> i >> j >> w; edges.push_back(new Edge(i,j,w)); } sort(edges.begin(), edges.end(), [](Edge* e1, Edge* e2) {return e1->w < e2->w;}); UnionFind<int> uf; vector<UnionFind<int>::Node*> nodes; for (int i = 0; i < n; i++) { nodes.push_back(uf.MakeSet(i)); } uint64_t sum = 0; for (int i = 0; i < m; i++) { Edge* e = edges[i]; UnionFind<int>::Node* u = nodes[e->i-1]; UnionFind<int>::Node* v = nodes[e->j-1]; if (uf.Find(u)->data != uf.Find(v)->data) { uf.Union(u,v); sum += e->w; } } cout << sum << endl; return 0; }
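A worked example of this program's stdin/stdout contract (the graph is hypothetical). Vertices are 1-indexed and each edge line reads `i j w`; after sorting by weight, the scan unions (1,2) at cost 1 and (2,3) at cost 2, then rejects (1,3) because both endpoints already share a root:

3 3
1 2 1
2 3 2
1 3 3

Expected output: 3 (the total weight of the minimum spanning tree).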
19.578947
85
0.46595
wlep
4ffcb0753f8cacdf4f3dc3f4d649e96f8a39b82e
2,852
cpp
C++
Code/Engine/Foundation/Utilities/Implementation/Node.cpp
asheraryam/ezEngine
bfe6d109b72d8fd6f13d043b11980796625c648e
[ "MIT" ]
null
null
null
Code/Engine/Foundation/Utilities/Implementation/Node.cpp
asheraryam/ezEngine
bfe6d109b72d8fd6f13d043b11980796625c648e
[ "MIT" ]
null
null
null
Code/Engine/Foundation/Utilities/Implementation/Node.cpp
asheraryam/ezEngine
bfe6d109b72d8fd6f13d043b11980796625c648e
[ "MIT" ]
null
null
null
#include <FoundationPCH.h> #include <Foundation/Utilities/Node.h> // EZ_CHECK_AT_COMPILETIME(sizeof(ezNodePin) == 4); // clang-format off EZ_BEGIN_DYNAMIC_REFLECTED_TYPE(ezNode, 1, ezRTTINoAllocator) EZ_END_DYNAMIC_REFLECTED_TYPE; EZ_BEGIN_STATIC_REFLECTED_TYPE(ezNodePin, ezNoBase, 1, ezRTTINoAllocator) { EZ_BEGIN_ATTRIBUTES { new ezHiddenAttribute(), } EZ_END_ATTRIBUTES; } EZ_END_STATIC_REFLECTED_TYPE; EZ_BEGIN_STATIC_REFLECTED_TYPE(ezInputNodePin, ezNodePin, 1, ezRTTINoAllocator) EZ_END_STATIC_REFLECTED_TYPE; EZ_BEGIN_STATIC_REFLECTED_TYPE(ezOutputNodePin, ezNodePin, 1, ezRTTINoAllocator) EZ_END_STATIC_REFLECTED_TYPE; EZ_BEGIN_STATIC_REFLECTED_TYPE(ezPassThroughNodePin, ezNodePin, 1, ezRTTINoAllocator) EZ_END_STATIC_REFLECTED_TYPE; // clang-format on void ezNode::InitializePins() { m_InputPins.Clear(); m_OutputPins.Clear(); m_NameToPin.Clear(); const ezRTTI* pType = GetDynamicRTTI(); ezHybridArray<ezAbstractProperty*, 32> properties; pType->GetAllProperties(properties); for (auto pProp : properties) { if (pProp->GetCategory() != ezPropertyCategory::Member || !pProp->GetSpecificType()->IsDerivedFrom(ezGetStaticRTTI<ezNodePin>())) continue; auto pPinProp = static_cast<ezAbstractMemberProperty*>(pProp); ezNodePin* pPin = static_cast<ezNodePin*>(pPinProp->GetPropertyPointer(this)); pPin->m_pParent = this; if (pPin->m_Type == ezNodePin::Type::Unknown) { EZ_REPORT_FAILURE( "Pin '{0}' has an invalid type. Do not use ezNodePin directly as member but one of its derived types", pProp->GetPropertyName()); continue; } if (pPin->m_Type == ezNodePin::Type::Input || pPin->m_Type == ezNodePin::Type::PassThrough) { pPin->m_uiInputIndex = static_cast<ezUInt8>(m_InputPins.GetCount()); m_InputPins.PushBack(pPin); } if (pPin->m_Type == ezNodePin::Type::Output || pPin->m_Type == ezNodePin::Type::PassThrough) { pPin->m_uiOutputIndex = static_cast<ezUInt8>(m_OutputPins.GetCount()); m_OutputPins.PushBack(pPin); } ezHashedString sHashedName; sHashedName.Assign(pProp->GetPropertyName()); m_NameToPin.Insert(sHashedName, pPin); } } ezHashedString ezNode::GetPinName(const ezNodePin* pPin) const { for (auto it = m_NameToPin.GetIterator(); it.IsValid(); ++it) { if (it.Value() == pPin) { return it.Key(); } } return ezHashedString(); } const ezNodePin* ezNode::GetPinByName(const char* szName) const { ezHashedString sHashedName; sHashedName.Assign(szName); return GetPinByName(sHashedName); } const ezNodePin* ezNode::GetPinByName(ezHashedString sName) const { const ezNodePin* pin; if (m_NameToPin.TryGetValue(sName, pin)) { return pin; } return nullptr; } EZ_STATICLINK_FILE(Foundation, Foundation_Utilities_Implementation_Node);
26.407407
137
0.73317
asheraryam
4fffb2cad6e15713572254da23a904b649a21e5b
2,393
cpp
C++
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/multi/gpu_remote_blob_tests.cpp
ElenaGvozdeva/openvino
084aa4e5916fa2ed3e353dcd45d081ab11d9c75a
[ "Apache-2.0" ]
null
null
null
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/multi/gpu_remote_blob_tests.cpp
ElenaGvozdeva/openvino
084aa4e5916fa2ed3e353dcd45d081ab11d9c75a
[ "Apache-2.0" ]
23
2021-03-12T07:34:43.000Z
2022-02-21T13:06:03.000Z
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/multi/gpu_remote_blob_tests.cpp
ElenaGvozdeva/openvino
084aa4e5916fa2ed3e353dcd45d081ab11d9c75a
[ "Apache-2.0" ]
1
2020-07-22T15:53:40.000Z
2020-07-22T15:53:40.000Z
// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include <string> #include <vector> #include "gpu/gpu_config.hpp" #include "multi/multi_remote_blob_tests.hpp" #include "common_test_utils/test_constants.hpp" const std::vector<DevicesNamesAndSupportPair> device_names_and_support_for_remote_blobs { {{GPU}, true}, // GPU via MULTI, #ifdef ENABLE_MKL_DNN {{GPU, CPU}, true}, // GPU+CPU {{CPU, GPU}, true}, // CPU+GPU #endif }; INSTANTIATE_TEST_CASE_P(smoke_RemoteBlobMultiGPU, MultiDevice_SupportTest, ::testing::ValuesIn(device_names_and_support_for_remote_blobs), MultiDevice_SupportTest::getTestCaseName); TEST_P(MultiDevice_Test, cannotInferRemoteBlobIfNotInitializedForDevice) { InferenceEngine::CNNNetwork net(fn_ptr); auto ie = PluginCache::get().ie(); // load a network to the GPU to make sure we have a remote context auto exec_net = ie->LoadNetwork(net, GPU); auto ctx = exec_net.GetContext(); const InferenceEngine::ConstInputsDataMap inputInfo = exec_net.GetInputsInfo(); auto& first_input_name = inputInfo.begin()->first; auto& first_input = inputInfo.begin()->second; auto rblob = InferenceEngine::make_shared_blob(first_input->getTensorDesc(), ctx); rblob->allocate(); InferenceEngine::ExecutableNetwork exec_net_multi; try { exec_net_multi = ie->LoadNetwork(net, device_names); } catch(...) { // device is unavailable (e.g. for the "second GPU" test) or other (e.g. env) issues not related to the test return; } InferenceEngine::InferRequest req = exec_net_multi.CreateInferRequest(); ASSERT_TRUE(req); ASSERT_NO_THROW(req.SetBlob(first_input_name, rblob)); ASSERT_NO_THROW(req.StartAsync()); ASSERT_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY), InferenceEngine::Exception); } const std::vector<DevicesNames> device_names_and_support_for_remote_blobs2 { #ifdef ENABLE_MKL_DNN {CPU}, // stand-alone CPU via MULTI (no GPU), no OCL context #endif {"GPU.1"}, // another GPU (the test will test its presence), different OCL contexts }; INSTANTIATE_TEST_CASE_P(smoke_RemoteBlobMultiInitializedWithoutGPU, MultiDevice_Test, ::testing::ValuesIn(device_names_and_support_for_remote_blobs2), MultiDevice_Test::getTestCaseName);
41.258621
130
0.728374
ElenaGvozdeva
8b01a9285a5ca03ea5b64552a3b85ac5e45dc213
1,494
hh
C++
dune/ax1/acme1MD/configurations/bigmac/bigmac_solution_con.hh
pederpansen/dune-ax1
152153824d95755a55bdd4fba80686863e928196
[ "BSD-3-Clause" ]
null
null
null
dune/ax1/acme1MD/configurations/bigmac/bigmac_solution_con.hh
pederpansen/dune-ax1
152153824d95755a55bdd4fba80686863e928196
[ "BSD-3-Clause" ]
null
null
null
dune/ax1/acme1MD/configurations/bigmac/bigmac_solution_con.hh
pederpansen/dune-ax1
152153824d95755a55bdd4fba80686863e928196
[ "BSD-3-Clause" ]
null
null
null
/*
 * bigmac_solution_con.hh
 *
 *  Created on: Jan 17, 2012
 *      Author: jpods
 */

#ifndef DUNE_AX1_BIGMAC_SOLUTION_CON_HH
#define DUNE_AX1_BIGMAC_SOLUTION_CON_HH

#include <dune/pdelab/common/function.hh>

#include <dune/ax1/common/constants.hh>
#include <dune/ax1/acme1MD/common/acme1MD_parametertree.hh>

template<typename GV, typename RF, int dim>
class BigmacCon : public Dune::PDELab::AnalyticGridFunctionBase<
                           Dune::PDELab::AnalyticGridFunctionTraits<GV,RF,dim>,
                           BigmacCon<GV,RF,dim> >
{
  public:
    typedef Dune::PDELab::AnalyticGridFunctionTraits<GV,RF,dim> Traits;
    typedef Dune::PDELab::AnalyticGridFunctionBase<Traits, BigmacCon<GV,RF,dim> > BaseT;

    typedef typename Traits::DomainType DomainType;
    typedef typename Traits::RangeType RangeType;

    BigmacCon(const GV& gv_, const Acme1MDParameters& params_)
    : BaseT(gv_),
      gv(gv_),
      params(params_),
      time(0.0)
    {}

    inline void evaluateGlobal(const DomainType & x, RangeType & y) const
    {
      //
      double A = 1.0;
      double B = 0.0;
      double v = 1.0;

      y[0] = - 2.0 * A * A * ( 1.0 - std::pow(tanh( A * ( x - v * time ) + B ),2) );
    }

    inline const GV& getGridView () const
    {
      return gv;
    }

    // set time for subsequent evaluation
    virtual void setTime (double t)
    {
      time = t;
    }

  private:
    const GV& gv;
    const Acme1MDParameters& params;

  protected:
    RF time;
};

#endif /* DUNE_AX1_BIGMAC_SOLUTION_CON_HH */
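Using the identity $1 - \tanh^2 z = \operatorname{sech}^2 z$, the value assembled in evaluateGlobal is a travelling wave of permanent shape moving at speed $v$ (with the hard-coded constants $A = 1$, $B = 0$, $v = 1$):

$$y_0(x,t) = -2A^2\left(1 - \tanh^2\!\big(A(x - vt) + B\big)\right) = -2A^2\,\operatorname{sech}^2\!\big(A(x - vt) + B\big)$$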
22.636364
88
0.65328
pederpansen
8b022a18324bcac46b891a0dd9b0b2c361d698d7
40,091
cpp
C++
extras/jbcoin-libpp/extras/jbcoind/src/jbcoin/shamap/impl/SHAMap.cpp
trongnmchainos/validator-keys-tool
cae131d6ab46051c0f47509b79b6efc47a70eec0
[ "BSL-1.0" ]
2
2020-03-03T12:46:29.000Z
2020-11-14T09:52:14.000Z
extras/jbcoin-libpp/extras/jbcoind/src/jbcoin/shamap/impl/SHAMap.cpp
trongnmchainos/validator-keys-tool
cae131d6ab46051c0f47509b79b6efc47a70eec0
[ "BSL-1.0" ]
null
null
null
extras/jbcoin-libpp/extras/jbcoind/src/jbcoin/shamap/impl/SHAMap.cpp
trongnmchainos/validator-keys-tool
cae131d6ab46051c0f47509b79b6efc47a70eec0
[ "BSL-1.0" ]
1
2020-03-03T12:46:30.000Z
2020-03-03T12:46:30.000Z
//------------------------------------------------------------------------------ /* This file is part of jbcoind: https://github.com/jbcoin/jbcoind Copyright (c) 2012, 2013 Jbcoin Labs Inc. Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ //============================================================================== #include <BeastConfig.h> #include <jbcoin/basics/contract.h> #include <jbcoin/shamap/SHAMap.h> namespace jbcoin { SHAMap::SHAMap ( SHAMapType t, Family& f, version v) : f_ (f) , journal_(f.journal()) , seq_ (1) , state_ (SHAMapState::Modifying) , type_ (t) { if (v == version{2}) root_ = std::make_shared<SHAMapInnerNodeV2>(seq_, 0); else root_ = std::make_shared<SHAMapInnerNode>(seq_); } SHAMap::SHAMap ( SHAMapType t, uint256 const& hash, Family& f, version v) : f_ (f) , journal_(f.journal()) , seq_ (1) , state_ (SHAMapState::Synching) , type_ (t) { if (v == version{2}) root_ = std::make_shared<SHAMapInnerNodeV2>(seq_, 0); else root_ = std::make_shared<SHAMapInnerNode>(seq_); } SHAMap::~SHAMap () { state_ = SHAMapState::Invalid; } std::shared_ptr<SHAMap> SHAMap::snapShot (bool isMutable) const { auto ret = std::make_shared<SHAMap> (type_, f_, get_version()); SHAMap& newMap = *ret; if (!isMutable) newMap.state_ = SHAMapState::Immutable; newMap.seq_ = seq_ + 1; newMap.root_ = root_; newMap.backed_ = backed_; if ((state_ != SHAMapState::Immutable) || !isMutable) { // If either map may change, they cannot share nodes newMap.unshare (); } return ret; } std::shared_ptr<SHAMap> SHAMap::make_v2() const { assert(!is_v2()); auto ret = std::make_shared<SHAMap>(type_, f_, version{2}); ret->seq_ = seq_ + 1; SharedPtrNodeStack stack; for (auto leaf = peekFirstItem(stack); leaf != nullptr; leaf = peekNextItem(leaf->peekItem()->key(), stack)) { auto node_type = leaf->getType(); ret->addGiveItem(leaf->peekItem(), node_type != SHAMapTreeNode::tnACCOUNT_STATE, node_type == SHAMapTreeNode::tnTRANSACTION_MD); } NodeObjectType t; switch (type_) { case SHAMapType::TRANSACTION: t = hotTRANSACTION_NODE; break; case SHAMapType::STATE: t = hotACCOUNT_NODE; break; default: t = hotUNKNOWN; break; } ret->flushDirty(t, ret->seq_); ret->unshare(); return ret; } std::shared_ptr<SHAMap> SHAMap::make_v1() const { assert(is_v2()); auto ret = std::make_shared<SHAMap>(type_, f_, version{1}); ret->seq_ = seq_ + 1; SharedPtrNodeStack stack; for (auto leaf = peekFirstItem(stack); leaf != nullptr; leaf = peekNextItem(leaf->peekItem()->key(), stack)) { auto node_type = leaf->getType(); ret->addGiveItem(leaf->peekItem(), node_type != SHAMapTreeNode::tnACCOUNT_STATE, node_type == SHAMapTreeNode::tnTRANSACTION_MD); } NodeObjectType t; switch (type_) { case SHAMapType::TRANSACTION: t = hotTRANSACTION_NODE; break; case SHAMapType::STATE: t = hotACCOUNT_NODE; break; default: t = hotUNKNOWN; break; } ret->flushDirty(t, ret->seq_); ret->unshare(); return ret; } void SHAMap::dirtyUp 
(SharedPtrNodeStack& stack, uint256 const& target, std::shared_ptr<SHAMapAbstractNode> child) { // walk the tree up from through the inner nodes to the root_ // update hashes and links // stack is a path of inner nodes up to, but not including, child // child can be an inner node or a leaf assert ((state_ != SHAMapState::Synching) && (state_ != SHAMapState::Immutable)); assert (child && (child->getSeq() == seq_)); while (!stack.empty ()) { auto node = std::dynamic_pointer_cast<SHAMapInnerNode>(stack.top ().first); SHAMapNodeID nodeID = stack.top ().second; stack.pop (); assert (node != nullptr); int branch = nodeID.selectBranch (target); assert (branch >= 0); node = unshareNode(std::move(node), nodeID); node->setChild (branch, child); child = std::move (node); } } SHAMapTreeNode* SHAMap::walkTowardsKey(uint256 const& id, SharedPtrNodeStack* stack) const { assert(stack == nullptr || stack->empty()); auto inNode = root_; SHAMapNodeID nodeID; auto const isv2 = is_v2(); while (inNode->isInner()) { if (stack != nullptr) stack->push({inNode, nodeID}); if (isv2) { auto n = std::static_pointer_cast<SHAMapInnerNodeV2>(inNode); if (!n->has_common_prefix(id)) return nullptr; } auto const inner = std::static_pointer_cast<SHAMapInnerNode>(inNode); auto const branch = nodeID.selectBranch (id); if (inner->isEmptyBranch (branch)) return nullptr; inNode = descendThrow (inner, branch); if (isv2) { if (inNode->isInner()) { auto n = std::dynamic_pointer_cast<SHAMapInnerNodeV2>(inNode); if (n == nullptr) { assert (false); return nullptr; } nodeID = SHAMapNodeID{n->depth(), n->common()}; } else { nodeID = SHAMapNodeID{64, inNode->key()}; } } else { nodeID = nodeID.getChildNodeID (branch); } } if (stack != nullptr) stack->push({inNode, nodeID}); return static_cast<SHAMapTreeNode*>(inNode.get()); } SHAMapTreeNode* SHAMap::findKey(uint256 const& id) const { SHAMapTreeNode* leaf = walkTowardsKey(id); if (leaf && leaf->peekItem()->key() != id) leaf = nullptr; return leaf; } std::shared_ptr<SHAMapAbstractNode> SHAMap::fetchNodeFromDB (SHAMapHash const& hash) const { std::shared_ptr<SHAMapAbstractNode> node; if (backed_) { std::shared_ptr<NodeObject> obj = f_.db().fetch (hash.as_uint256()); if (obj) { try { node = SHAMapAbstractNode::make(makeSlice(obj->getData()), 0, snfPREFIX, hash, true, f_.journal()); if (node && node->isInner()) { bool isv2 = std::dynamic_pointer_cast<SHAMapInnerNodeV2>(node) != nullptr; if (isv2 != is_v2()) { auto root = std::dynamic_pointer_cast<SHAMapInnerNode>(root_); assert(root); assert(root->isEmpty()); if (isv2) { auto temp = make_v2(); swap(temp->root_, const_cast<std::shared_ptr<SHAMapAbstractNode>&>(root_)); } else { auto temp = make_v1(); swap(temp->root_, const_cast<std::shared_ptr<SHAMapAbstractNode>&>(root_)); } } } if (node) canonicalize (hash, node); } catch (std::exception const&) { JLOG(journal_.warn()) << "Invalid DB node " << hash; return std::shared_ptr<SHAMapTreeNode> (); } } else if (ledgerSeq_ != 0) { f_.missing_node(ledgerSeq_); const_cast<std::uint32_t&>(ledgerSeq_) = 0; } } return node; } // See if a sync filter has a node std::shared_ptr<SHAMapAbstractNode> SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const { std::shared_ptr<SHAMapAbstractNode> node; if (auto nodeData = filter->getNode (hash)) { node = SHAMapAbstractNode::make( makeSlice(*nodeData), 0, snfPREFIX, hash, true, f_.journal ()); if (node) { filter->gotNode (true, hash, std::move(*nodeData), node->getType ()); if (backed_) canonicalize (hash, node); } } return node; } // Get a node 
without throwing // Used on maps where missing nodes are expected std::shared_ptr<SHAMapAbstractNode> SHAMap::fetchNodeNT( SHAMapHash const& hash, SHAMapSyncFilter* filter) const { std::shared_ptr<SHAMapAbstractNode> node = getCache (hash); if (node) return node; if (backed_) { node = fetchNodeFromDB (hash); if (node) { canonicalize (hash, node); return node; } } if (filter) node = checkFilter (hash, filter); return node; } std::shared_ptr<SHAMapAbstractNode> SHAMap::fetchNodeNT (SHAMapHash const& hash) const { auto node = getCache (hash); if (!node && backed_) node = fetchNodeFromDB (hash); return node; } // Throw if the node is missing std::shared_ptr<SHAMapAbstractNode> SHAMap::fetchNode (SHAMapHash const& hash) const { auto node = fetchNodeNT (hash); if (!node) Throw<SHAMapMissingNode> (type_, hash); return node; } SHAMapAbstractNode* SHAMap::descendThrow (SHAMapInnerNode* parent, int branch) const { SHAMapAbstractNode* ret = descend (parent, branch); if (! ret && ! parent->isEmptyBranch (branch)) Throw<SHAMapMissingNode> (type_, parent->getChildHash (branch)); return ret; } std::shared_ptr<SHAMapAbstractNode> SHAMap::descendThrow (std::shared_ptr<SHAMapInnerNode> const& parent, int branch) const { std::shared_ptr<SHAMapAbstractNode> ret = descend (parent, branch); if (! ret && ! parent->isEmptyBranch (branch)) Throw<SHAMapMissingNode> (type_, parent->getChildHash (branch)); return ret; } SHAMapAbstractNode* SHAMap::descend (SHAMapInnerNode* parent, int branch) const { SHAMapAbstractNode* ret = parent->getChildPointer (branch); if (ret || !backed_) return ret; std::shared_ptr<SHAMapAbstractNode> node = fetchNodeNT (parent->getChildHash (branch)); if (!node || isInconsistentNode(node)) return nullptr; node = parent->canonicalizeChild (branch, std::move(node)); return node.get (); } std::shared_ptr<SHAMapAbstractNode> SHAMap::descend (std::shared_ptr<SHAMapInnerNode> const& parent, int branch) const { std::shared_ptr<SHAMapAbstractNode> node = parent->getChild (branch); if (node || !backed_) return node; node = fetchNode (parent->getChildHash (branch)); if (!node || isInconsistentNode(node)) return nullptr; node = parent->canonicalizeChild (branch, std::move(node)); return node; } // Gets the node that would be hooked to this branch, // but doesn't hook it up. 
std::shared_ptr<SHAMapAbstractNode> SHAMap::descendNoStore (std::shared_ptr<SHAMapInnerNode> const& parent, int branch) const { std::shared_ptr<SHAMapAbstractNode> ret = parent->getChild (branch); if (!ret && backed_) ret = fetchNode (parent->getChildHash (branch)); return ret; } std::pair <SHAMapAbstractNode*, SHAMapNodeID> SHAMap::descend (SHAMapInnerNode * parent, SHAMapNodeID const& parentID, int branch, SHAMapSyncFilter * filter) const { assert (parent->isInner ()); assert ((branch >= 0) && (branch < 16)); assert (!parent->isEmptyBranch (branch)); SHAMapAbstractNode* child = parent->getChildPointer (branch); auto const& childHash = parent->getChildHash (branch); if (!child) { std::shared_ptr<SHAMapAbstractNode> childNode = fetchNodeNT (childHash, filter); if (childNode) { childNode = parent->canonicalizeChild (branch, std::move(childNode)); child = childNode.get (); } if (child && isInconsistentNode(childNode)) child = nullptr; } if (child && is_v2()) { if (child->isInner()) { auto n = static_cast<SHAMapInnerNodeV2*>(child); return std::make_pair(child, SHAMapNodeID{n->depth(), n->key()}); } return std::make_pair(child, SHAMapNodeID{64, child->key()}); } return std::make_pair (child, parentID.getChildNodeID (branch)); } SHAMapAbstractNode* SHAMap::descendAsync (SHAMapInnerNode* parent, int branch, SHAMapSyncFilter * filter, bool & pending) const { pending = false; SHAMapAbstractNode* ret = parent->getChildPointer (branch); if (ret) return ret; auto const& hash = parent->getChildHash (branch); std::shared_ptr<SHAMapAbstractNode> ptr = getCache (hash); if (!ptr) { if (filter) ptr = checkFilter (hash, filter); if (!ptr && backed_) { std::shared_ptr<NodeObject> obj; if (! f_.db().asyncFetch (hash.as_uint256(), obj)) { pending = true; return nullptr; } if (!obj) return nullptr; ptr = SHAMapAbstractNode::make(makeSlice(obj->getData()), 0, snfPREFIX, hash, true, f_.journal()); if (ptr && backed_) canonicalize (hash, ptr); } } if (ptr && isInconsistentNode(ptr)) ptr = nullptr; if (ptr) ptr = parent->canonicalizeChild (branch, std::move(ptr)); return ptr.get (); } template <class Node> std::shared_ptr<Node> SHAMap::unshareNode (std::shared_ptr<Node> node, SHAMapNodeID const& nodeID) { // make sure the node is suitable for the intended operation (copy on write) assert (node->isValid ()); assert (node->getSeq () <= seq_); if (node->getSeq () != seq_) { // have a CoW assert (state_ != SHAMapState::Immutable); node = std::static_pointer_cast<Node>(node->clone(seq_)); assert (node->isValid ()); if (nodeID.isRoot ()) root_ = node; } return node; } SHAMapTreeNode* SHAMap::firstBelow(std::shared_ptr<SHAMapAbstractNode> node, SharedPtrNodeStack& stack, int branch) const { // Return the first item at or below this node if (node->isLeaf()) { auto n = std::static_pointer_cast<SHAMapTreeNode>(node); stack.push({node, {64, n->peekItem()->key()}}); return n.get(); } auto inner = std::static_pointer_cast<SHAMapInnerNode>(node); if (stack.empty()) stack.push({inner, SHAMapNodeID{}}); else { if (is_v2()) { auto inner2 = std::dynamic_pointer_cast<SHAMapInnerNodeV2>(inner); assert(inner2 != nullptr); stack.push({inner2, {inner2->depth(), inner2->common()}}); } else { stack.push({inner, stack.top().second.getChildNodeID(branch)}); } } for (int i = 0; i < 16;) { if (!inner->isEmptyBranch(i)) { node = descendThrow(inner, i); assert(!stack.empty()); if (node->isLeaf()) { auto n = std::static_pointer_cast<SHAMapTreeNode>(node); stack.push({n, {64, n->peekItem()->key()}}); return n.get(); } inner = 
std::static_pointer_cast<SHAMapInnerNode>(node); if (is_v2()) { auto inner2 = std::static_pointer_cast<SHAMapInnerNodeV2>(inner); stack.push({inner2, {inner2->depth(), inner2->common()}}); } else { stack.push({inner, stack.top().second.getChildNodeID(branch)}); } i = 0; // scan all 16 branches of this new node } else ++i; // scan next branch } return nullptr; } static const std::shared_ptr<SHAMapItem const> no_item; std::shared_ptr<SHAMapItem const> const& SHAMap::onlyBelow (SHAMapAbstractNode* node) const { // If there is only one item below this node, return it while (!node->isLeaf ()) { SHAMapAbstractNode* nextNode = nullptr; auto inner = static_cast<SHAMapInnerNode*>(node); for (int i = 0; i < 16; ++i) { if (!inner->isEmptyBranch (i)) { if (nextNode) return no_item; nextNode = descendThrow (inner, i); } } if (!nextNode) { assert (false); return no_item; } node = nextNode; } // An inner node must have at least one leaf // below it, unless it's the root_ auto leaf = static_cast<SHAMapTreeNode*>(node); assert (leaf->hasItem () || (leaf == root_.get ())); return leaf->peekItem (); } static std::shared_ptr< SHAMapItem const> const nullConstSHAMapItem; SHAMapTreeNode const* SHAMap::peekFirstItem(SharedPtrNodeStack& stack) const { assert(stack.empty()); SHAMapTreeNode* node = firstBelow(root_, stack); if (!node) { while (!stack.empty()) stack.pop(); return nullptr; } return node; } SHAMapTreeNode const* SHAMap::peekNextItem(uint256 const& id, SharedPtrNodeStack& stack) const { assert(!stack.empty()); assert(stack.top().first->isLeaf()); stack.pop(); while (!stack.empty()) { auto node = stack.top().first; auto nodeID = stack.top().second; assert(!node->isLeaf()); auto inner = std::static_pointer_cast<SHAMapInnerNode>(node); for (auto i = nodeID.selectBranch(id) + 1; i < 16; ++i) { if (!inner->isEmptyBranch(i)) { node = descendThrow(inner, i); auto leaf = firstBelow(node, stack, i); if (!leaf) Throw<SHAMapMissingNode> (type_, id); assert(leaf->isLeaf()); return leaf; } } stack.pop(); } // must be last item return nullptr; } std::shared_ptr<SHAMapItem const> const& SHAMap::peekItem (uint256 const& id) const { SHAMapTreeNode* leaf = findKey(id); if (!leaf) return no_item; return leaf->peekItem (); } std::shared_ptr<SHAMapItem const> const& SHAMap::peekItem (uint256 const& id, SHAMapTreeNode::TNType& type) const { SHAMapTreeNode* leaf = findKey(id); if (!leaf) return no_item; type = leaf->getType (); return leaf->peekItem (); } std::shared_ptr<SHAMapItem const> const& SHAMap::peekItem (uint256 const& id, SHAMapHash& hash) const { SHAMapTreeNode* leaf = findKey(id); if (!leaf) return no_item; hash = leaf->getNodeHash (); return leaf->peekItem (); } SHAMap::const_iterator SHAMap::upper_bound(uint256 const& id) const { // Get a const_iterator to the next item in the tree after a given item // item need not be in tree SharedPtrNodeStack stack; walkTowardsKey(id, &stack); std::shared_ptr<SHAMapAbstractNode> node; SHAMapNodeID nodeID; auto const isv2 = is_v2(); while (!stack.empty()) { std::tie(node, nodeID) = stack.top(); if (node->isLeaf()) { auto leaf = static_cast<SHAMapTreeNode*>(node.get()); if (leaf->peekItem()->key() > id) return const_iterator(this, leaf->peekItem().get(), std::move(stack)); } else { auto inner = std::static_pointer_cast<SHAMapInnerNode>(node); int branch; if (isv2) { auto n = std::static_pointer_cast<SHAMapInnerNodeV2>(inner); if (n->has_common_prefix(id)) branch = nodeID.selectBranch(id) + 1; else if (id < n->common()) branch = 0; else branch = 16; } else { branch = 
nodeID.selectBranch(id) + 1; } for (; branch < 16; ++branch) { if (!inner->isEmptyBranch(branch)) { node = descendThrow(inner, branch); auto leaf = firstBelow(node, stack, branch); if (!leaf) Throw<SHAMapMissingNode> (type_, id); return const_iterator(this, leaf->peekItem().get(), std::move(stack)); } } } stack.pop(); } return end(); } bool SHAMap::hasItem (uint256 const& id) const { // does the tree have an item with this ID SHAMapTreeNode* leaf = findKey(id); return (leaf != nullptr); } bool SHAMap::delItem (uint256 const& id) { // delete the item with this ID assert (state_ != SHAMapState::Immutable); SharedPtrNodeStack stack; walkTowardsKey(id, &stack); if (stack.empty ()) Throw<SHAMapMissingNode> (type_, id); auto leaf = std::dynamic_pointer_cast<SHAMapTreeNode>(stack.top ().first); stack.pop (); if (!leaf || (leaf->peekItem ()->key() != id)) return false; SHAMapTreeNode::TNType type = leaf->getType (); // What gets attached to the end of the chain // (For now, nothing, since we deleted the leaf) std::shared_ptr<SHAMapAbstractNode> prevNode; while (!stack.empty ()) { auto node = std::static_pointer_cast<SHAMapInnerNode>(stack.top().first); SHAMapNodeID nodeID = stack.top().second; stack.pop(); node = unshareNode(std::move(node), nodeID); node->setChild(nodeID.selectBranch(id), prevNode); if (!nodeID.isRoot ()) { // we may have made this a node with 1 or 0 children // And, if so, we need to remove this branch int bc = node->getBranchCount(); if (is_v2()) { assert(bc != 0); if (bc == 1) { for (int i = 0; i < 16; ++i) { if (!node->isEmptyBranch (i)) { prevNode = descendThrow(node, i); break; } } } else // bc >= 2 { // This node is now the end of the branch prevNode = std::move(node); } } else { if (bc == 0) { // no children below this branch prevNode.reset (); } else if (bc == 1) { // If there's only one item, pull up on the thread auto item = onlyBelow (node.get ()); if (item) { for (int i = 0; i < 16; ++i) { if (!node->isEmptyBranch (i)) { node->setChild (i, nullptr); break; } } prevNode = std::make_shared<SHAMapTreeNode>(item, type, node->getSeq()); } else { prevNode = std::move (node); } } else { // This node is now the end of the branch prevNode = std::move (node); } } } } return true; } static uint256 prefix(unsigned depth, uint256 const& key) { uint256 r{}; auto x = r.begin(); auto y = key.begin(); for (auto i = 0; i < depth/2; ++i, ++x, ++y) *x = *y; if (depth & 1) *x = *y & 0xF0; return r; } bool SHAMap::addGiveItem (std::shared_ptr<SHAMapItem const> const& item, bool isTransaction, bool hasMeta) { // add the specified item, does not update uint256 tag = item->key(); SHAMapTreeNode::TNType type = !isTransaction ? SHAMapTreeNode::tnACCOUNT_STATE : (hasMeta ? 
SHAMapTreeNode::tnTRANSACTION_MD : SHAMapTreeNode::tnTRANSACTION_NM); assert (state_ != SHAMapState::Immutable); SharedPtrNodeStack stack; walkTowardsKey(tag, &stack); if (stack.empty ()) Throw<SHAMapMissingNode> (type_, tag); auto node = stack.top ().first; auto nodeID = stack.top ().second; stack.pop (); if (node->isLeaf()) { auto leaf = std::static_pointer_cast<SHAMapTreeNode>(node); if (leaf->peekItem()->key() == tag) return false; } node = unshareNode(std::move(node), nodeID); if (is_v2()) { if (node->isInner()) { auto inner = std::static_pointer_cast<SHAMapInnerNodeV2>(node); if (inner->has_common_prefix(tag)) { int branch = nodeID.selectBranch(tag); assert(inner->isEmptyBranch(branch)); auto newNode = std::make_shared<SHAMapTreeNode>(item, type, seq_); inner->setChild(branch, newNode); } else { assert(!stack.empty()); auto parent = unshareNode( std::static_pointer_cast<SHAMapInnerNodeV2>(stack.top().first), stack.top().second); stack.top().first = parent; auto parent_depth = parent->depth(); auto depth = inner->get_common_prefix(tag); auto new_inner = std::make_shared<SHAMapInnerNodeV2>(seq_); nodeID = SHAMapNodeID{depth, prefix(depth, inner->common())}; new_inner->setChild(nodeID.selectBranch(inner->common()), inner); nodeID = SHAMapNodeID{depth, prefix(depth, tag)}; new_inner->setChild(nodeID.selectBranch(tag), std::make_shared<SHAMapTreeNode>(item, type, seq_)); new_inner->set_common(depth, prefix(depth, tag)); nodeID = SHAMapNodeID{parent_depth, prefix(parent_depth, tag)}; parent->setChild(nodeID.selectBranch(tag), new_inner); node = new_inner; } } else { auto leaf = std::static_pointer_cast<SHAMapTreeNode>(node); auto inner = std::make_shared<SHAMapInnerNodeV2>(seq_); inner->setChildren(leaf, std::make_shared<SHAMapTreeNode>(item, type, seq_)); assert(!stack.empty()); auto parent = unshareNode( std::static_pointer_cast<SHAMapInnerNodeV2>(stack.top().first), stack.top().second); stack.top().first = parent; node = inner; } } else // !is_v2() { if (node->isInner ()) { // easy case, we end on an inner node auto inner = std::static_pointer_cast<SHAMapInnerNode>(node); int branch = nodeID.selectBranch (tag); assert (inner->isEmptyBranch (branch)); auto newNode = std::make_shared<SHAMapTreeNode> (item, type, seq_); inner->setChild (branch, newNode); } else { // this is a leaf node that has to be made an inner node holding two items auto leaf = std::static_pointer_cast<SHAMapTreeNode>(node); std::shared_ptr<SHAMapItem const> otherItem = leaf->peekItem (); assert (otherItem && (tag != otherItem->key())); node = std::make_shared<SHAMapInnerNode>(node->getSeq()); int b1, b2; while ((b1 = nodeID.selectBranch (tag)) == (b2 = nodeID.selectBranch (otherItem->key()))) { stack.push ({node, nodeID}); // we need a new inner node, since both go on same branch at this level nodeID = nodeID.getChildNodeID (b1); node = std::make_shared<SHAMapInnerNode> (seq_); } // we can add the two leaf nodes here assert (node->isInner ()); std::shared_ptr<SHAMapTreeNode> newNode = std::make_shared<SHAMapTreeNode> (item, type, seq_); assert (newNode->isValid () && newNode->isLeaf ()); auto inner = std::static_pointer_cast<SHAMapInnerNode>(node); inner->setChild (b1, newNode); newNode = std::make_shared<SHAMapTreeNode> (otherItem, type, seq_); assert (newNode->isValid () && newNode->isLeaf ()); inner->setChild (b2, newNode); } } dirtyUp (stack, tag, node); return true; } bool SHAMap::addItem(SHAMapItem&& i, bool isTransaction, bool hasMetaData) { return addGiveItem(std::make_shared<SHAMapItem const>(std::move(i)), 
isTransaction, hasMetaData); } SHAMapHash SHAMap::getHash () const { auto hash = root_->getNodeHash(); if (hash.isZero()) { const_cast<SHAMap&>(*this).unshare(); hash = root_->getNodeHash(); } return hash; } bool SHAMap::updateGiveItem (std::shared_ptr<SHAMapItem const> const& item, bool isTransaction, bool hasMeta) { // can't change the tag but can change the hash uint256 tag = item->key(); assert (state_ != SHAMapState::Immutable); SharedPtrNodeStack stack; walkTowardsKey(tag, &stack); if (stack.empty ()) Throw<SHAMapMissingNode> (type_, tag); auto node = std::dynamic_pointer_cast<SHAMapTreeNode>(stack.top().first); auto nodeID = stack.top ().second; stack.pop (); if (!node || (node->peekItem ()->key() != tag)) { assert (false); return false; } node = unshareNode(std::move(node), nodeID); if (!node->setItem (item, !isTransaction ? SHAMapTreeNode::tnACCOUNT_STATE : (hasMeta ? SHAMapTreeNode::tnTRANSACTION_MD : SHAMapTreeNode::tnTRANSACTION_NM))) { JLOG(journal_.trace()) << "SHAMap setItem, no change"; return true; } dirtyUp (stack, tag, node); return true; } bool SHAMap::fetchRoot (SHAMapHash const& hash, SHAMapSyncFilter* filter) { if (hash == root_->getNodeHash ()) return true; if (auto stream = journal_.trace()) { if (type_ == SHAMapType::TRANSACTION) { stream << "Fetch root TXN node " << hash; } else if (type_ == SHAMapType::STATE) { stream << "Fetch root STATE node " << hash; } else { stream << "Fetch root SHAMap node " << hash; } } auto newRoot = fetchNodeNT (hash, filter); if (newRoot) { root_ = newRoot; assert (root_->getNodeHash () == hash); return true; } return false; } // Replace a node with a shareable node. // // This code handles two cases: // // 1) An unshared, unshareable node needs to be made shareable // so immutable SHAMap's can have references to it. // // 2) An unshareable node is shared. This happens when you make // a mutable snapshot of a mutable SHAMap. std::shared_ptr<SHAMapAbstractNode> SHAMap::writeNode ( NodeObjectType t, std::uint32_t seq, std::shared_ptr<SHAMapAbstractNode> node) const { // Node is ours, so we can just make it shareable assert (node->getSeq() == seq_); assert (backed_); node->setSeq (0); canonicalize (node->getNodeHash(), node); Serializer s; node->addRaw (s, snfPREFIX); f_.db().store (t, std::move (s.modData ()), node->getNodeHash ().as_uint256()); return node; } // We can't modify an inner node someone else might have a // pointer to because flushing modifies inner nodes -- it // makes them point to canonical/shared nodes. 
template <class Node> std::shared_ptr<Node> SHAMap::preFlushNode (std::shared_ptr<Node> node) const { // A shared node should never need to be flushed // because that would imply someone modified it assert (node->getSeq() != 0); if (node->getSeq() != seq_) { // Node is not uniquely ours, so unshare it before // possibly modifying it node = std::static_pointer_cast<Node>(node->clone(seq_)); } return node; } int SHAMap::unshare () { // Don't share nodes with parent map return walkSubTree (false, hotUNKNOWN, 0); } /** Convert all modified nodes to shared nodes */ // If requested, write them to the node store int SHAMap::flushDirty (NodeObjectType t, std::uint32_t seq) { return walkSubTree (true, t, seq); } int SHAMap::walkSubTree (bool doWrite, NodeObjectType t, std::uint32_t seq) { int flushed = 0; Serializer s; if (!root_ || (root_->getSeq() == 0)) return flushed; if (root_->isLeaf()) { // special case -- root_ is leaf root_ = preFlushNode (std::move(root_)); root_->updateHash(); if (doWrite && backed_) root_ = writeNode(t, seq, std::move(root_)); else root_->setSeq (0); return 1; } auto node = std::static_pointer_cast<SHAMapInnerNode>(root_); if (node->isEmpty ()) { // replace empty root with a new empty root if (is_v2()) root_ = std::make_shared<SHAMapInnerNodeV2>(0, 0); else root_ = std::make_shared<SHAMapInnerNode>(0); return 1; } // Stack of {parent,index,child} pointers representing // inner nodes we are in the process of flushing using StackEntry = std::pair <std::shared_ptr<SHAMapInnerNode>, int>; std::stack <StackEntry, std::vector<StackEntry>> stack; node = preFlushNode(std::move(node)); int pos = 0; // We can't flush an inner node until we flush its children while (1) { while (pos < 16) { if (node->isEmptyBranch (pos)) { ++pos; } else { // No need to do I/O. 
If the node isn't linked, // it can't need to be flushed int branch = pos; auto child = node->getChild(pos++); if (child && (child->getSeq() != 0)) { // This is a node that needs to be flushed child = preFlushNode(std::move(child)); if (child->isInner ()) { // save our place and work on this node stack.emplace (std::move (node), branch); node = std::static_pointer_cast<SHAMapInnerNode>(std::move(child)); pos = 0; } else { // flush this leaf ++flushed; assert (node->getSeq() == seq_); child->updateHash(); if (doWrite && backed_) child = writeNode(t, seq, std::move(child)); else child->setSeq (0); node->shareChild (branch, child); } } } } // update the hash of this inner node node->updateHashDeep(); // This inner node can now be shared if (doWrite && backed_) node = std::static_pointer_cast<SHAMapInnerNode>(writeNode(t, seq, std::move(node))); else node->setSeq (0); ++flushed; if (stack.empty ()) break; auto parent = std::move (stack.top().first); pos = stack.top().second; stack.pop(); // Hook this inner node to its parent assert (parent->getSeq() == seq_); parent->shareChild (pos, node); // Continue with parent's next child, if any node = std::move (parent); ++pos; } // Last inner node is the new root_ root_ = std::move (node); return flushed; } void SHAMap::dump (bool hash) const { int leafCount = 0; JLOG(journal_.info()) << " MAP Contains"; std::stack <std::pair <SHAMapAbstractNode*, SHAMapNodeID> > stack; stack.push ({root_.get (), SHAMapNodeID ()}); do { auto node = stack.top().first; auto nodeID = stack.top().second; stack.pop(); JLOG(journal_.info()) << node->getString (nodeID); if (hash) { JLOG(journal_.info()) << "Hash: " << node->getNodeHash(); } if (node->isInner ()) { auto inner = static_cast<SHAMapInnerNode*>(node); for (int i = 0; i < 16; ++i) { if (!inner->isEmptyBranch (i)) { auto child = inner->getChildPointer (i); if (child) { assert (child->getNodeHash() == inner->getChildHash (i)); stack.push ({child, nodeID.getChildNodeID (i)}); } } } } else ++leafCount; } while (!stack.empty ()); JLOG(journal_.info()) << leafCount << " resident leaves"; } std::shared_ptr<SHAMapAbstractNode> SHAMap::getCache (SHAMapHash const& hash) const { auto ret = f_.treecache().fetch (hash.as_uint256()); assert (!ret || !ret->getSeq()); return ret; } void SHAMap::canonicalize(SHAMapHash const& hash, std::shared_ptr<SHAMapAbstractNode>& node) const { assert (backed_); assert (node->getSeq() == 0); assert (node->getNodeHash() == hash); f_.treecache().canonicalize (hash.as_uint256(), node); } SHAMap::version SHAMap::get_version() const { if (is_v2()) return version{2}; return version{1}; } void SHAMap::invariants() const { (void)getHash(); // update node hashes auto node = root_.get(); assert(node != nullptr); assert(!node->isLeaf()); SharedPtrNodeStack stack; for (auto leaf = peekFirstItem(stack); leaf != nullptr; leaf = peekNextItem(leaf->peekItem()->key(), stack)) ; node->invariants(is_v2(), true); } bool SHAMap::isInconsistentNode(std::shared_ptr<SHAMapAbstractNode> const& node) const { assert(root_); assert(node); if (std::dynamic_pointer_cast<SHAMapTreeNode>(node) != nullptr) return false; bool is_node_v2 = std::dynamic_pointer_cast<SHAMapInnerNodeV2>(node) != nullptr; assert (! is_node_v2 || (std::dynamic_pointer_cast<SHAMapInnerNodeV2>(node)->depth() != 0)); if (is_v2() == is_node_v2) return false; state_ = SHAMapState::Invalid; return true; } } // jbcoin
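unshareNode and preFlushNode above implement copy-on-write keyed on a sequence number: seq 0 marks a node as shared and immutable, and a map mutates only nodes stamped with its own seq_, cloning on first touch. A generic sketch of the same pattern, with illustrative names rather than the SHAMap API:

#include <cstdint>
#include <memory>

struct Node
{
    std::uint32_t seq = 0;  // 0 => shared/immutable snapshot node
    std::shared_ptr<Node> clone(std::uint32_t s) const
    {
        auto n = std::make_shared<Node>(*this);
        n->seq = s;
        return n;
    }
};

// Clone-on-first-write: a map may only mutate nodes carrying its own seq.
std::shared_ptr<Node> unshare(std::shared_ptr<Node> node, std::uint32_t mySeq)
{
    if (node->seq != mySeq)
        node = node->clone(mySeq);  // private copy; the shared original is untouched
    return node;
}

Flushing runs the inverse direction: walkSubTree stamps finished nodes back to seq 0 (via setSeq(0) or writeNode) so later snapshots can share them again.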
28.988431
105
0.541269
trongnmchainos
8b03845050e6020e129e3ddd7fa42fc94e38c713
3,146
cpp
C++
src/module/base/ServiceStatusManager/src/ServiceStatusManager.cpp
403712387/cgf
f26d7fa16ec8c7ca7565109b0d7f483cc7ad6288
[ "MIT" ]
2
2020-03-04T06:54:45.000Z
2021-07-21T05:59:08.000Z
src/module/base/ServiceStatusManager/src/ServiceStatusManager.cpp
403712387/cgf
f26d7fa16ec8c7ca7565109b0d7f483cc7ad6288
[ "MIT" ]
null
null
null
src/module/base/ServiceStatusManager/src/ServiceStatusManager.cpp
403712387/cgf
f26d7fa16ec8c7ca7565109b0d7f483cc7ad6288
[ "MIT" ]
3
2019-12-23T02:13:27.000Z
2021-12-09T08:28:50.000Z
#include <stdio.h>
#include <sstream>
#include "ServiceStatusInfo.h"
#include "GetServiceStatusMessage.h"
#include "ServiceStatusManager.h"
#include "curl/curl.h"
#include "jsoncpp/json.h"
#include "libmicrohttpd/microhttpd.h"
#include "log4cplus/version.h"
#include "cryptopp/cryptlib.h"

#define _STR(x) _VAL(x)
#define _VAL(x) #x

ServiceStatusManager::ServiceStatusManager(MessageRoute *messageRoute)
    :BaseProcess(messageRoute, "ServiceStatusManager")
{
    // initialize the status information
    initServiceStatusInfo();

    // collect third-party library information
    initLibraryVersionInfo();

    // subscribe to messages
    subscribeMessage(Service_Status_Message);
}

// initialize the service's status information
void ServiceStatusManager::initServiceStatusInfo()
{
    // the service's status information
    mServiceStatusInfo = std::make_shared<ServiceStatusInfo>();
    mServiceStatusInfo->setStartupTime(QDateTime::currentDateTime());
#ifdef GIT_BRANCH
    mServiceStatusInfo->setGitBranch(_STR(GIT_BRANCH));
#endif
#ifdef GIT_COMMIT_ID
    mServiceStatusInfo->setGitCommitId(_STR(GIT_COMMIT_ID));
#endif
}

// initialize the version information of the bundled libraries
void ServiceStatusManager::initLibraryVersionInfo()
{
    std::string curlVersion = LIBCURL_VERSION;
    std::string jsoncppVersion = JSONCPP_VERSION_STRING;
    std::string libmicrohttpdVersion = MHD_get_version();
    std::string log4cplusVersion = LOG4CPLUS_VERSION_STR;
    std::string qtVersion = qVersion();

    mServiceStatusInfo->setLibraryVersion("curl", curlVersion);
    mServiceStatusInfo->setLibraryVersion("jsoncpp", jsoncppVersion);
    mServiceStatusInfo->setLibraryVersion("libmicrohttpd", libmicrohttpdVersion);
    mServiceStatusInfo->setLibraryVersion("log4cplus", log4cplusVersion);
    mServiceStatusInfo->setLibraryVersion("Qt", qtVersion);
}

bool ServiceStatusManager::init()
{
    LOG_I(mClassName, "init module " << getModuleName());

    // print the git info
    LOG_I(mClassName, "git info:" << mServiceStatusInfo->gitInfoToString());

    // print the library info
    LOG_I(mClassName, "library version info:" << mServiceStatusInfo->libraryInfoToString());
    return true;
}

void ServiceStatusManager::beginWork()
{
    LOG_I(mClassName, "begin work, module " << getModuleName());
}

// uninitialize the module
void ServiceStatusManager::uninit()
{
    LOG_I(mClassName, "begin uninit");
    BaseProcess::uninit();
    LOG_I(mClassName, "end uninit");
}

// message handler
std::shared_ptr<BaseResponse> ServiceStatusManager::onProcessMessage(std::shared_ptr<BaseMessage> &message)
{
    std::shared_ptr<BaseResponse> response;
    switch(message->getMessageType())
    {
    case Service_Status_Message:    // fetch the service status information
        response = onProcessGetServiceStatusMessage(message);
    }

    return response;
}

// handler for peeked (foreseen) messages
bool ServiceStatusManager::onForeseeMessage(std::shared_ptr<BaseMessage> &message)
{
    return false;
}

// handle responses to messages
void ServiceStatusManager::onProcessResponse(std::shared_ptr<BaseResponse> &response)
{
}

// handle the get-service-status message
std::shared_ptr<BaseResponse> ServiceStatusManager::onProcessGetServiceStatusMessage(std::shared_ptr<BaseMessage> &message)
{
    std::shared_ptr<GetServiceStatusResponse> response = std::make_shared<GetServiceStatusResponse>(mServiceStatusInfo, message, Common::noError());
    return response;
}
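The `_STR`/`_VAL` pair is the standard two-level stringification idiom: the outer macro forces expansion of its argument before `#` stringizes it. Assuming the value arrives on the compiler command line:

// g++ -DGIT_COMMIT_ID=abc123 ...
// _STR(GIT_COMMIT_ID) -> _VAL(abc123) -> "abc123"
// A single-level #x would yield the literal "GIT_COMMIT_ID" instead.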
27.596491
148
0.75143
403712387
8b03c5272547f3929145061f550d3e75e9897b76
4,405
cpp
C++
src/appleseedmaya/attributeutils.cpp
wielandrochel/appleseed-maya
913b8e3212e81fd5993d30691c22d1d39f70bf53
[ "MIT" ]
1
2018-07-17T21:57:01.000Z
2018-07-17T21:57:01.000Z
src/appleseedmaya/attributeutils.cpp
wielandrochel/appleseed-maya
913b8e3212e81fd5993d30691c22d1d39f70bf53
[ "MIT" ]
null
null
null
src/appleseedmaya/attributeutils.cpp
wielandrochel/appleseed-maya
913b8e3212e81fd5993d30691c22d1d39f70bf53
[ "MIT" ]
null
null
null
// // This source file is part of appleseed. // Visit https://appleseedhq.net/ for additional information and resources. // // This software is released under the MIT license. // // Copyright (c) 2016-2018 Esteban Tovagliari, The appleseedhq Organization // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // // Interface header. #include "attributeutils.h" // Maya headers. #include "appleseedmaya/_beginmayaheaders.h" #include <maya/MFnMatrixData.h> #include "appleseedmaya/_endmayaheaders.h" namespace { template <typename T> MStatus get3(const MPlug& plug, T& x, T& y, T& z) { if (!plug.isCompound()) return MS::kFailure; if (plug.numChildren() != 3) return MS::kFailure; MStatus status; if (status) status = plug.child(0).getValue(x); if (status) status = plug.child(1).getValue(y); if (status) status = plug.child(2).getValue(z); return status; } } namespace AttributeUtils { MStatus get(const MPlug& plug, MAngle& value) { return plug.getValue(value); } MStatus get(const MPlug& plug, MColor& value) { value.a = 1.0f; return get3(plug, value.r, value.g, value.b); } MStatus get(const MPlug& plug, MPoint& value) { return get3(plug, value.x, value.y, value.z); } MStatus get(const MPlug& plug, MVector& value) { return get3(plug, value.x, value.y, value.z); } MStatus get(const MPlug& plug, MMatrix& value) { value.setToIdentity(); MObject matrixObject; MStatus status = plug.getValue(matrixObject); if (!status) return status; MFnMatrixData matrixDataFn(matrixObject); value = matrixDataFn.matrix(&status); return status; } MStatus getPlugConnectedTo(const MPlug& dstPlug, MPlug& srcPlug) { if (!dstPlug.isConnected()) return MS::kFailure; MStatus status; MPlugArray inputConnections; dstPlug.connectedTo(inputConnections, true, false, &status); if (status) { if (inputConnections.length() == 0) return MS::kFailure; srcPlug = inputConnections[0]; } return status; } bool hasConnections(const MPlug& plug, bool input) { MStatus status; if (!plug.isConnected(&status)) return false; MPlugArray connections; plug.connectedTo( connections, input ? true : false, input ? 
false : true, &status); if (status) return connections.length() != 0; return false; } bool anyChildPlugConnected(const MPlug& plug, bool input) { MStatus status; if (!plug.isCompound(&status)) return false; if (!status) return false; int numChildren = plug.numChildren(&status); if (!status) return false; for (int i = 0, e = plug.numChildren(); i < e; ++i) { MPlug c = plug.child(i, &status); if (!status) continue; if (hasConnections(c, input)) return true; } return false; } MStatus makeInput(MFnAttribute& attr) { attr.setStorable(true); attr.setReadable(false); attr.setWritable(true); attr.setKeyable(true); return MS::kSuccess; } MStatus makeOutput(MFnAttribute& attr) { attr.setStorable(false); attr.setReadable(true); attr.setWritable(false); attr.setKeyable(false); //attr.setHidden(true); return MS::kSuccess; } }
23.810811
80
0.665153
wielandrochel
8b0b446d2c887a1218d2efe816f881484b09fa7d
31,000
cc
C++
library/common/extensions/filters/http/platform_bridge/filter.cc
Yannic/envoy-mobile
27fd74c88d71b2c91f484e3660c936948b2eb481
[ "Apache-2.0" ]
1
2021-06-24T15:10:49.000Z
2021-06-24T15:10:49.000Z
library/common/extensions/filters/http/platform_bridge/filter.cc
Yannic/envoy-mobile
27fd74c88d71b2c91f484e3660c936948b2eb481
[ "Apache-2.0" ]
null
null
null
library/common/extensions/filters/http/platform_bridge/filter.cc
Yannic/envoy-mobile
27fd74c88d71b2c91f484e3660c936948b2eb481
[ "Apache-2.0" ]
null
null
null
#include "library/common/extensions/filters/http/platform_bridge/filter.h" #include "envoy/server/filter_config.h" #include "source/common/buffer/buffer_impl.h" #include "source/common/common/assert.h" #include "source/common/common/dump_state_utils.h" #include "source/common/common/scope_tracker.h" #include "source/common/common/utility.h" #include "library/common/api/external.h" #include "library/common/buffer/bridge_fragment.h" #include "library/common/data/utility.h" #include "library/common/extensions/filters/http/platform_bridge/c_type_definitions.h" #include "library/common/http/header_utility.h" #include "library/common/http/headers.h" namespace Envoy { namespace Extensions { namespace HttpFilters { namespace PlatformBridge { namespace { // TODO: https://github.com/envoyproxy/envoy-mobile/issues/1287 void replaceHeaders(Http::HeaderMap& headers, envoy_headers c_headers) { headers.clear(); for (envoy_map_size_t i = 0; i < c_headers.length; i++) { headers.addCopy(Http::LowerCaseString(Data::Utility::copyToString(c_headers.entries[i].key)), Data::Utility::copyToString(c_headers.entries[i].value)); } // The C envoy_headers struct can be released now because the headers have been copied. release_envoy_headers(c_headers); } } // namespace static void envoy_filter_release_callbacks(const void* context) { PlatformBridgeFilterWeakPtr* weak_filter = static_cast<PlatformBridgeFilterWeakPtr*>(const_cast<void*>(context)); delete weak_filter; } static void envoy_filter_callback_resume_decoding(const void* context) { PlatformBridgeFilterWeakPtr* weak_filter = static_cast<PlatformBridgeFilterWeakPtr*>(const_cast<void*>(context)); if (auto filter = weak_filter->lock()) { filter->resumeDecoding(); } } static void envoy_filter_callback_resume_encoding(const void* context) { PlatformBridgeFilterWeakPtr* weak_filter = static_cast<PlatformBridgeFilterWeakPtr*>(const_cast<void*>(context)); if (auto filter = weak_filter->lock()) { filter->resumeEncoding(); } } static void envoy_filter_reset_idle(const void* context) { PlatformBridgeFilterWeakPtr* weak_filter = static_cast<PlatformBridgeFilterWeakPtr*>(const_cast<void*>(context)); if (auto filter = weak_filter->lock()) { filter->resetIdleTimer(); } } PlatformBridgeFilterConfig::PlatformBridgeFilterConfig( const envoymobile::extensions::filters::http::platform_bridge::PlatformBridge& proto_config) : filter_name_(proto_config.platform_filter_name()), platform_filter_(static_cast<envoy_http_filter*>( Api::External::retrieveApi(proto_config.platform_filter_name()))) {} PlatformBridgeFilter::PlatformBridgeFilter(PlatformBridgeFilterConfigSharedPtr config, Event::Dispatcher& dispatcher) : dispatcher_(dispatcher), filter_name_(config->filter_name()), platform_filter_(*config->platform_filter()) { // The initialization above sets platform_filter_ to a copy of the struct stored on the config. // In the typical case, this will represent a filter implementation that needs to be intantiated. // static_context will contain the necessary platform-specific mechanism to produce a filter // instance. instance_context will initially be null, but after initialization, set to the // context needed for actual filter invocations. ENVOY_LOG(trace, "PlatformBridgeFilter({})::PlatformBridgeFilter", filter_name_); if (platform_filter_.init_filter) { // Set the instance_context to the result of the initialization call. Cleanup will ultimately // occur within the onDestroy() invocation below. 
ENVOY_LOG(trace, "PlatformBridgeFilter({})->init_filter", filter_name_); platform_filter_.instance_context = platform_filter_.init_filter(&platform_filter_); ASSERT(platform_filter_.instance_context, fmt::format("PlatformBridgeFilter({}): init_filter unsuccessful", filter_name_)); } else { // If init_filter is missing, zero out the rest of the struct for safety. ENVOY_LOG(debug, "PlatformBridgeFilter({}): missing initializer", filter_name_); platform_filter_ = {}; } // Set directional filters now that the platform_filter_ has been updated (initialized or zero'ed // out). request_filter_base_ = std::make_unique<RequestFilterBase>(*this); response_filter_base_ = std::make_unique<ResponseFilterBase>(*this); } void PlatformBridgeFilter::setDecoderFilterCallbacks( Http::StreamDecoderFilterCallbacks& callbacks) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::setDecoderCallbacks", filter_name_); decoder_callbacks_ = &callbacks; // TODO(goaway): currently both platform APIs unconditionally set this field, meaning that the // heap allocation below occurs when it could be avoided. if (platform_filter_.set_request_callbacks) { platform_request_callbacks_.resume_iteration = envoy_filter_callback_resume_decoding; platform_request_callbacks_.reset_idle = envoy_filter_reset_idle; platform_request_callbacks_.release_callbacks = envoy_filter_release_callbacks; // We use a weak_ptr wrapper for the filter to ensure presence before dispatching callbacks. // The weak_ptr is heap-allocated, because it must be managed (and eventually released) by // platform code. platform_request_callbacks_.callback_context = new PlatformBridgeFilterWeakPtr{shared_from_this()}; ENVOY_LOG(trace, "PlatformBridgeFilter({})->set_request_callbacks", filter_name_); platform_filter_.set_request_callbacks(platform_request_callbacks_, platform_filter_.instance_context); } } void PlatformBridgeFilter::setEncoderFilterCallbacks( Http::StreamEncoderFilterCallbacks& callbacks) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::setEncoderCallbacks", filter_name_); encoder_callbacks_ = &callbacks; // TODO(goaway): currently both platform APIs unconditionally set this field, meaning that the // heap allocation below occurs when it could be avoided. if (platform_filter_.set_response_callbacks) { platform_response_callbacks_.resume_iteration = envoy_filter_callback_resume_encoding; platform_response_callbacks_.reset_idle = envoy_filter_reset_idle; platform_response_callbacks_.release_callbacks = envoy_filter_release_callbacks; // We use a weak_ptr wrapper for the filter to ensure presence before dispatching callbacks. // The weak_ptr is heap-allocated, because it must be managed (and eventually released) by // platform code. platform_response_callbacks_.callback_context = new PlatformBridgeFilterWeakPtr{shared_from_this()}; ENVOY_LOG(trace, "PlatformBridgeFilter({})->set_response_callbacks", filter_name_); platform_filter_.set_response_callbacks(platform_response_callbacks_, platform_filter_.instance_context); } } void PlatformBridgeFilter::onDestroy() { ENVOY_LOG(trace, "PlatformBridgeFilter({})::onDestroy", filter_name_); alive_ = false; // If the filter chain is destroyed before a response is received, treat as cancellation. if (!response_filter_base_->state_.stream_complete_ && platform_filter_.on_cancel) { ENVOY_LOG(trace, "PlatformBridgeFilter({})->on_cancel", filter_name_); platform_filter_.on_cancel(platform_filter_.instance_context); } // Allow nullptr as no-op only if nothing was initialized. 
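  // If the filter was initialized, the platform must also supply release_filter; skipping it
  // would silently leak whatever state the platform attached to instance_context.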
if (platform_filter_.release_filter == nullptr) { ASSERT(!platform_filter_.instance_context, fmt::format("PlatformBridgeFilter({}): release_filter required", filter_name_)); return; } ENVOY_LOG(trace, "PlatformBridgeFilter({})->release_filter", filter_name_); platform_filter_.release_filter(platform_filter_.instance_context); platform_filter_.instance_context = nullptr; } void PlatformBridgeFilter::dumpState(std::ostream& os, int indent_level) const { std::stringstream ss; const char* spaces = spacesForLevel(indent_level); ss << spaces << "PlatformBridgeFilter" << DUMP_MEMBER(filter_name_) << DUMP_MEMBER(error_response_) << std::endl; const char* inner_spaces = spacesForLevel(indent_level + 1); if (request_filter_base_) { ss << inner_spaces << "Request Filter"; request_filter_base_->dumpState(ss, 0); } if (response_filter_base_) { ss << inner_spaces << "Response Filter"; response_filter_base_->dumpState(ss, 0); } // TODO(junr03): only output to ostream arg // https://github.com/envoyproxy/envoy-mobile/issues/1497. ENVOY_LOG(error, "\n{}", ss.str()); os << ss.str(); } Http::FilterHeadersStatus PlatformBridgeFilter::FilterBase::onHeaders(Http::HeaderMap& headers, bool end_stream) { ScopeTrackerScopeState scope(&parent_, parent_.scopeTracker()); state_.stream_complete_ = end_stream; // Allow nullptr to act as no-op. if (on_headers_ == nullptr) { state_.headers_forwarded_ = true; return Http::FilterHeadersStatus::Continue; } envoy_headers in_headers = Http::Utility::toBridgeHeaders(headers); ENVOY_LOG(trace, "PlatformBridgeFilter({})->on_*_headers", parent_.filter_name_); envoy_filter_headers_status result = on_headers_(in_headers, end_stream, parent_.platform_filter_.instance_context); state_.on_headers_called_ = true; switch (result.status) { case kEnvoyFilterHeadersStatusContinue: replaceHeaders(headers, result.headers); state_.headers_forwarded_ = true; return Http::FilterHeadersStatus::Continue; case kEnvoyFilterHeadersStatusStopIteration: pending_headers_ = &headers; state_.iteration_state_ = IterationState::Stopped; ASSERT(result.headers.length == 0 && result.headers.entries == NULL); return Http::FilterHeadersStatus::StopIteration; default: PANIC("invalid filter state: unsupported status for platform filters"); } NOT_REACHED_GCOVR_EXCL_LINE; } Http::FilterDataStatus PlatformBridgeFilter::FilterBase::onData(Buffer::Instance& data, bool end_stream) { ScopeTrackerScopeState scope(&parent_, parent_.scopeTracker()); state_.stream_complete_ = end_stream; // Allow nullptr to act as no-op. if (on_data_ == nullptr) { state_.data_forwarded_ = true; return Http::FilterDataStatus::Continue; } auto internal_buffer = buffer(); envoy_data in_data; // Decide whether to preemptively buffer data to present aggregate to platform. 
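  // Prebuffering applies only when all of the following hold: this filter previously stopped
  // iteration, a mutable internal buffer exists, the incoming slice is not already that buffer,
  // and the buffer has accumulated bytes to aggregate.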
  bool prebuffer_data = state_.iteration_state_ == IterationState::Stopped && internal_buffer &&
                        &data != internal_buffer && internal_buffer->length() > 0;
  if (prebuffer_data) {
    internal_buffer->move(data);
    in_data = Data::Utility::copyToBridgeData(*internal_buffer);
  } else {
    in_data = Data::Utility::copyToBridgeData(data);
  }
  ENVOY_LOG(trace, "PlatformBridgeFilter({})->on_*_data", parent_.filter_name_);
  envoy_filter_data_status result =
      on_data_(in_data, end_stream, parent_.platform_filter_.instance_context);
  state_.on_data_called_ = true;

  switch (result.status) {
  case kEnvoyFilterDataStatusContinue:
    RELEASE_ASSERT(state_.iteration_state_ != IterationState::Stopped,
                   "invalid filter state: filter iteration must be resumed with ResumeIteration");
    data.drain(data.length());
    data.addBufferFragment(*Buffer::BridgeFragment::createBridgeFragment(result.data));
    state_.data_forwarded_ = true;
    return Http::FilterDataStatus::Continue;
  case kEnvoyFilterDataStatusStopIterationAndBuffer:
    if (prebuffer_data) {
      // Data will already have been added to the internal buffer (above).
      return Http::FilterDataStatus::StopIterationNoBuffer;
    }
    // Data will be buffered on return.
    state_.iteration_state_ = IterationState::Stopped;
    return Http::FilterDataStatus::StopIterationAndBuffer;
  case kEnvoyFilterDataStatusStopIterationNoBuffer:
    // In this context all previously buffered data can/should be dropped. If no data has been
    // buffered, this is a no-op. If data was previously buffered, the most likely case is
    // that a filter has decided to handle generating a response itself and no longer needs it.
    // We opt for making this assumption since it's otherwise ambiguous how we should handle
    // buffering when switching between the two stopped states, and since data can be arbitrarily
    // interleaved, it's unclear that there's any legitimate case to support any more complex
    // behavior.
    if (internal_buffer) {
      internal_buffer->drain(internal_buffer->length());
    }
    state_.iteration_state_ = IterationState::Stopped;
    return Http::FilterDataStatus::StopIterationNoBuffer;
  // Resume previously-stopped iteration, possibly forwarding headers if iteration was stopped
  // during an on*Headers invocation.
  case kEnvoyFilterDataStatusResumeIteration:
    RELEASE_ASSERT(state_.iteration_state_ == IterationState::Stopped,
                   "invalid filter state: ResumeIteration may only be used when filter iteration "
                   "is stopped");
    // Update pending headers before resuming iteration, if needed.
    if (result.pending_headers) {
      replaceHeaders(*pending_headers_, *result.pending_headers);
      pending_headers_ = nullptr;
      free(result.pending_headers);
    }
    // We've already moved data into the internal buffer and presented it to the platform. Replace
    // the internal buffer with any modifications returned by the platform filter prior to
    // resumption.
    if (internal_buffer) {
      internal_buffer->drain(internal_buffer->length());
      internal_buffer->addBufferFragment(
          *Buffer::BridgeFragment::createBridgeFragment(result.data));
    } else {
      data.drain(data.length());
      data.addBufferFragment(*Buffer::BridgeFragment::createBridgeFragment(result.data));
    }
    state_.iteration_state_ = IterationState::Ongoing;
    state_.data_forwarded_ = true;
    return Http::FilterDataStatus::Continue;
  default:
    PANIC("invalid filter state: unsupported status for platform filters");
  }
  NOT_REACHED_GCOVR_EXCL_LINE;
}

Http::FilterTrailersStatus PlatformBridgeFilter::FilterBase::onTrailers(Http::HeaderMap& trailers) {
  ScopeTrackerScopeState scope(&parent_, parent_.scopeTracker());
  state_.stream_complete_ = true;

  // Allow nullptr to act as no-op.
  if (on_trailers_ == nullptr) {
    state_.trailers_forwarded_ = true;
    return Http::FilterTrailersStatus::Continue;
  }

  auto internal_buffer = buffer();
  envoy_headers in_trailers = Http::Utility::toBridgeHeaders(trailers);
  ENVOY_LOG(trace, "PlatformBridgeFilter({})->on_*_trailers", parent_.filter_name_);
  envoy_filter_trailers_status result =
      on_trailers_(in_trailers, parent_.platform_filter_.instance_context);
  state_.on_trailers_called_ = true;

  switch (result.status) {
  case kEnvoyFilterTrailersStatusContinue:
    RELEASE_ASSERT(state_.iteration_state_ != IterationState::Stopped,
                   "invalid filter state: filter iteration must be resumed with ResumeIteration");
    replaceHeaders(trailers, result.trailers);
    state_.trailers_forwarded_ = true;
    return Http::FilterTrailersStatus::Continue;
  case kEnvoyFilterTrailersStatusStopIteration:
    pending_trailers_ = &trailers;
    state_.iteration_state_ = IterationState::Stopped;
    ASSERT(result.trailers.length == 0 && result.trailers.entries == NULL);
    return Http::FilterTrailersStatus::StopIteration;
  // Resume previously-stopped iteration, possibly forwarding headers and data if iteration was
  // stopped during an on*Headers or on*Data invocation.
  case kEnvoyFilterTrailersStatusResumeIteration:
    RELEASE_ASSERT(state_.iteration_state_ == IterationState::Stopped,
                   "invalid filter state: ResumeIteration may only be used when filter iteration "
                   "is stopped");
    // Update pending headers before resuming iteration, if needed.
    if (result.pending_headers) {
      replaceHeaders(*pending_headers_, *result.pending_headers);
      pending_headers_ = nullptr;
      free(result.pending_headers);
    }
    // We've already moved data into the internal buffer and presented it to the platform. Replace
    // the internal buffer with any modifications returned by the platform filter prior to
    // resumption.
    if (result.pending_data) {
      internal_buffer->drain(internal_buffer->length());
      internal_buffer->addBufferFragment(
          *Buffer::BridgeFragment::createBridgeFragment(*result.pending_data));
      free(result.pending_data);
    }
    replaceHeaders(trailers, result.trailers);
    state_.iteration_state_ = IterationState::Ongoing;
    state_.trailers_forwarded_ = true;
    return Http::FilterTrailersStatus::Continue;
  default:
    PANIC("invalid filter state: unsupported status for platform filters");
  }
  NOT_REACHED_GCOVR_EXCL_LINE;
}

Http::FilterHeadersStatus PlatformBridgeFilter::decodeHeaders(Http::RequestHeaderMap& headers,
                                                              bool end_stream) {
  ENVOY_LOG(trace, "PlatformBridgeFilter({})::decodeHeaders(end_stream:{})", filter_name_,
            end_stream);

  // Delegate to base implementation for request and response path.
return request_filter_base_->onHeaders(headers, end_stream); } Http::FilterHeadersStatus PlatformBridgeFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::encodeHeaders(end_stream:{})", filter_name_, end_stream); // Presence of internal error header indicates an error that should be surfaced as an // error callback (rather than an HTTP response). const auto error_code_header = headers.get(Http::InternalHeaders::get().ErrorCode); if (error_code_header.empty()) { // No error, so delegate to base implementation for request and response path. return response_filter_base_->onHeaders(headers, end_stream); } // Update stream state, since we won't be delegating to FilterBase. response_filter_base_->state_.stream_complete_ = end_stream; error_response_ = true; envoy_error_code_t error_code; bool parsed_code = absl::SimpleAtoi(error_code_header[0]->value().getStringView(), &error_code); RELEASE_ASSERT(parsed_code, "parse error reading error code"); envoy_data error_message = envoy_nodata; const auto error_message_header = headers.get(Http::InternalHeaders::get().ErrorMessage); if (!error_message_header.empty()) { error_message = Data::Utility::copyToBridgeData(error_message_header[0]->value().getStringView()); } int32_t attempt_count = 1; if (headers.EnvoyAttemptCount()) { bool parsed_attempts = absl::SimpleAtoi(headers.EnvoyAttemptCount()->value().getStringView(), &attempt_count); RELEASE_ASSERT(parsed_attempts, "parse error reading attempt count"); } if (platform_filter_.on_error) { platform_filter_.on_error({error_code, error_message, attempt_count}, platform_filter_.instance_context); } else { release_envoy_data(error_message); } response_filter_base_->state_.headers_forwarded_ = true; return Http::FilterHeadersStatus::Continue; } Http::FilterDataStatus PlatformBridgeFilter::decodeData(Buffer::Instance& data, bool end_stream) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::decodeData(length:{}, end_stream:{})", filter_name_, data.length(), end_stream); // Delegate to base implementation for request and response path. return request_filter_base_->onData(data, end_stream); } Http::FilterDataStatus PlatformBridgeFilter::encodeData(Buffer::Instance& data, bool end_stream) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::encodeData(length:{}, end_stream:{})", filter_name_, data.length(), end_stream); // Pass through if already mapped to error response. if (error_response_) { response_filter_base_->state_.data_forwarded_ = true; return Http::FilterDataStatus::Continue; } // Delegate to base implementation for request and response path. return response_filter_base_->onData(data, end_stream); } Http::FilterTrailersStatus PlatformBridgeFilter::decodeTrailers(Http::RequestTrailerMap& trailers) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::decodeTrailers", filter_name_); // Delegate to base implementation for request and response path. return request_filter_base_->onTrailers(trailers); } Http::FilterTrailersStatus PlatformBridgeFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::encodeTrailers", filter_name_); // Pass through if already mapped to error response. if (error_response_) { response_filter_base_->state_.trailers_forwarded_ = true; return Http::FilterTrailersStatus::Continue; } // Delegate to base implementation for request and response path. 
return response_filter_base_->onTrailers(trailers); } void PlatformBridgeFilter::resumeDecoding() { ENVOY_LOG(trace, "PlatformBridgeFilter({})::resumeDecoding", filter_name_); auto weak_self = weak_from_this(); // TODO(goaway): There's a potential shutdown race here, due to the fact that the shared // reference that now holds the filter does not retain the dispatcher. In the future we should // make this safer by, e.g.: // 1) adding support to Envoy for (optionally) retaining the dispatcher, or // 2) retaining the engine to transitively retain the dispatcher via Envoy's ownership graph, or // 3) dispatching via a safe intermediary // Relevant: https://github.com/lyft/envoy-mobile/issues/332 dispatcher_.post([weak_self]() -> void { if (auto self = weak_self.lock()) { // Delegate to base implementation for request and response path. self->request_filter_base_->onResume(); } }); } void PlatformBridgeFilter::resumeEncoding() { ENVOY_LOG(trace, "PlatformBridgeFilter({})::resumeEncoding", filter_name_); auto weak_self = weak_from_this(); dispatcher_.post([weak_self]() -> void { if (auto self = weak_self.lock()) { // Delegate to base implementation for request and response path. self->response_filter_base_->onResume(); } }); } void PlatformBridgeFilter::resetIdleTimer() { ENVOY_LOG(trace, "PlatformBridgeFilter({})::resetIdleTimer", filter_name_); auto weak_self = weak_from_this(); dispatcher_.post([weak_self]() -> void { if (auto self = weak_self.lock()) { // Stream idle timeout is nondirectional. self->decoder_callbacks_->resetIdleTimer(); } }); } void PlatformBridgeFilter::FilterBase::onResume() { ScopeTrackerScopeState scope(&parent_, parent_.scopeTracker()); ENVOY_LOG(debug, "PlatformBridgeFilter({})::onResume", parent_.filter_name_); if (!parent_.isAlive()) { return; } if (state_.iteration_state_ == IterationState::Ongoing) { return; } auto internal_buffer = buffer(); envoy_headers bridged_headers; envoy_data bridged_data; envoy_headers bridged_trailers; envoy_headers* pending_headers = nullptr; envoy_data* pending_data = nullptr; envoy_headers* pending_trailers = nullptr; if (pending_headers_) { bridged_headers = Http::Utility::toBridgeHeaders(*pending_headers_); pending_headers = &bridged_headers; } if (internal_buffer) { bridged_data = Data::Utility::copyToBridgeData(*internal_buffer); pending_data = &bridged_data; } if (pending_trailers_) { bridged_trailers = Http::Utility::toBridgeHeaders(*pending_trailers_); pending_trailers = &bridged_trailers; } ENVOY_LOG(trace, "PlatformBridgeFilter({})->on_resume_*", parent_.filter_name_); envoy_filter_resume_status result = on_resume_(pending_headers, pending_data, pending_trailers, state_.stream_complete_, parent_.platform_filter_.instance_context); state_.on_resume_called_ = true; if (result.status == kEnvoyFilterResumeStatusStopIteration) { RELEASE_ASSERT(!result.pending_headers, "invalid filter state: headers must not be present on " "stopping filter iteration on async resume"); RELEASE_ASSERT(!result.pending_data, "invalid filter state: data must not be present on " "stopping filter iteration on async resume"); RELEASE_ASSERT(!result.pending_trailers, "invalid filter state: trailers must not be present on" " stopping filter iteration on async resume"); return; } if (pending_headers_) { RELEASE_ASSERT(result.pending_headers, "invalid filter state: headers are pending and must be " "returned to resume filter iteration"); replaceHeaders(*pending_headers_, *result.pending_headers); pending_headers_ = nullptr; ENVOY_LOG(debug, 
"PlatformBridgeFilter({})->on_resume_ process headers free#1", parent_.filter_name_); if (pending_headers != result.pending_headers) { free(result.pending_headers); } } if (internal_buffer) { RELEASE_ASSERT( result.pending_data, "invalid filter state: data is pending and must be returned to resume filter iteration"); internal_buffer->drain(internal_buffer->length()); internal_buffer->addBufferFragment( *Buffer::BridgeFragment::createBridgeFragment(*result.pending_data)); ENVOY_LOG(debug, "PlatformBridgeFilter({})->on_resume_ process data free#1", parent_.filter_name_); if (pending_data != result.pending_data) { free(result.pending_data); } } else if (result.pending_data) { addData(*result.pending_data); ENVOY_LOG(debug, "PlatformBridgeFilter({})->on_resume_ process data free#2", parent_.filter_name_); if (pending_data != result.pending_data) { free(result.pending_data); } } if (pending_trailers_) { RELEASE_ASSERT(result.pending_trailers, "invalid filter state: trailers are pending and must " "be returned to resume filter iteration"); replaceHeaders(*pending_trailers_, *result.pending_trailers); pending_trailers_ = nullptr; ENVOY_LOG(debug, "PlatformBridgeFilter({})->on_resume_ process trailers free#1", parent_.filter_name_); if (pending_trailers != result.pending_trailers) { free(result.pending_trailers); } } else if (result.pending_trailers) { addTrailers(*result.pending_trailers); ENVOY_LOG(debug, "PlatformBridgeFilter({})->on_resume_ process trailers free#2", parent_.filter_name_); if (pending_trailers != result.pending_trailers) { free(result.pending_trailers); } } state_.iteration_state_ = IterationState::Ongoing; resumeIteration(); } void PlatformBridgeFilter::FilterBase::dumpState(std::ostream& os, int indent_level) { Buffer::Instance* buffer = this->buffer(); const char* spaces = spacesForLevel(indent_level); os << spaces << DUMP_MEMBER_AS(state_.iteration_state_, (state_.iteration_state_ == IterationState::Ongoing ? 
"ongoing" : "stopped")) << DUMP_MEMBER(state_.on_headers_called_) << DUMP_MEMBER(state_.headers_forwarded_) << DUMP_MEMBER(state_.on_data_called_) << DUMP_MEMBER(state_.data_forwarded_) << DUMP_MEMBER(state_.on_trailers_called_) << DUMP_MEMBER(state_.trailers_forwarded_) << DUMP_MEMBER(state_.on_resume_called_) << DUMP_NULLABLE_MEMBER(pending_headers_, "pending") << DUMP_NULLABLE_MEMBER(buffer, fmt::format("{} bytes", buffer->length())) << DUMP_NULLABLE_MEMBER(pending_trailers_, "pending") << DUMP_MEMBER(state_.stream_complete_) << std::endl; }; void PlatformBridgeFilter::RequestFilterBase::addData(envoy_data data) { Buffer::OwnedImpl inject_data; inject_data.addBufferFragment(*Buffer::BridgeFragment::createBridgeFragment(data)); parent_.decoder_callbacks_->addDecodedData(inject_data, /* watermark */ false); } void PlatformBridgeFilter::ResponseFilterBase::addData(envoy_data data) { Buffer::OwnedImpl inject_data; inject_data.addBufferFragment(*Buffer::BridgeFragment::createBridgeFragment(data)); parent_.encoder_callbacks_->addEncodedData(inject_data, /* watermark */ false); } void PlatformBridgeFilter::RequestFilterBase::addTrailers(envoy_headers trailers) { Http::HeaderMap& inject_trailers = parent_.decoder_callbacks_->addDecodedTrailers(); replaceHeaders(inject_trailers, trailers); } void PlatformBridgeFilter::ResponseFilterBase::addTrailers(envoy_headers trailers) { Http::HeaderMap& inject_trailers = parent_.encoder_callbacks_->addEncodedTrailers(); replaceHeaders(inject_trailers, trailers); } void PlatformBridgeFilter::RequestFilterBase::resumeIteration() { parent_.decoder_callbacks_->continueDecoding(); } void PlatformBridgeFilter::ResponseFilterBase::resumeIteration() { parent_.encoder_callbacks_->continueEncoding(); } // Technically-speaking to align with Envoy's internal API this method should take // a closure to execute with the available buffer, but since we control all usage, // this shortcut works for now. Buffer::Instance* PlatformBridgeFilter::RequestFilterBase::buffer() { Buffer::Instance* internal_buffer = nullptr; // This only exists to provide a mutable buffer, and that buffer is only used when iteration is // stopped. We check iteration state here before returning the buffer, to ensure this filter is // the one that stopped iteration. if (state_.iteration_state_ == IterationState::Stopped && parent_.decoder_callbacks_->decodingBuffer()) { parent_.decoder_callbacks_->modifyDecodingBuffer( [&internal_buffer](Buffer::Instance& mutable_buffer) { internal_buffer = &mutable_buffer; }); } return internal_buffer; } // Technically-speaking to align with Envoy's internal API this method should take // a closure to execute with the available buffer, but since we control all usage, // this shortcut works for now. Buffer::Instance* PlatformBridgeFilter::ResponseFilterBase::buffer() { Buffer::Instance* internal_buffer = nullptr; // This only exists to provide a mutable buffer, and that buffer is only used when iteration is // stopped. We check iteration state here before returning the buffer, to ensure this filter is // the one that stopped iteration. if (state_.iteration_state_ == IterationState::Stopped && parent_.encoder_callbacks_->encodingBuffer()) { parent_.encoder_callbacks_->modifyEncodingBuffer( [&internal_buffer](Buffer::Instance& mutable_buffer) { internal_buffer = &mutable_buffer; }); } return internal_buffer; } } // namespace PlatformBridge } // namespace HttpFilters } // namespace Extensions } // namespace Envoy
43.175487
100
0.734839
Yannic
8b0c422928ab095f72a9c9e4c116577beb2722c0
12,545
cpp
C++
homework2/prt/src/accel.cpp
QRWells/Games-202-Homework
5308f57ffe4a1b2d011e43bd0e9890ad6501146d
[ "MIT" ]
1
2022-03-12T11:48:30.000Z
2022-03-12T11:48:30.000Z
homework2/prt/src/accel.cpp
QRWells/Games-202-Homework
5308f57ffe4a1b2d011e43bd0e9890ad6501146d
[ "MIT" ]
null
null
null
homework2/prt/src/accel.cpp
QRWells/Games-202-Homework
5308f57ffe4a1b2d011e43bd0e9890ad6501146d
[ "MIT" ]
null
null
null
/* This file is part of Nori, a simple educational ray tracer Copyright (c) 2015 by Wenzel Jakob Nori is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License Version 3 as published by the Free Software Foundation. Nori is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <nori/accel.h> #include <Eigen/Geometry> #include <chrono> using namespace std::chrono; NORI_NAMESPACE_BEGIN void Accel::addMesh(Mesh *mesh) { if (m_num_meshes >= MAX_NUM_MESHES) throw NoriException("Accel: only %d meshes are supported!", MAX_NUM_MESHES); m_meshes[m_num_meshes] = mesh; m_bbox.expandBy(mesh->getBoundingBox()); m_num_meshes++; } void Accel::build() { if (m_num_meshes == 0) throw NoriException("No mesh found, could not build acceleration structure"); auto start = high_resolution_clock::now(); // delete old hierarchy if present delete m_root; uint32_t num_triangles = 0; for (uint32_t mesh_idx = 0; mesh_idx < m_num_meshes; mesh_idx++) { num_triangles += m_meshes[mesh_idx]->getTriangleCount(); } std::vector<uint32_t> triangles(num_triangles); std::vector<uint32_t> mesh_indices(num_triangles); uint32_t offset = 0; for (uint32_t current_mesh_idx = 0; current_mesh_idx < m_num_meshes; current_mesh_idx++) { uint32_t num_triangles_mesh = m_meshes[current_mesh_idx]->getTriangleCount(); for (uint32_t i = 0; i < num_triangles_mesh; i++) { triangles[offset + i] = i; mesh_indices[offset + i] = current_mesh_idx; } offset += num_triangles_mesh; } m_root = buildRecursive(m_bbox, triangles, mesh_indices, 0); printf("Octree build time: %ldms \n", duration_cast<milliseconds>(high_resolution_clock::now() - start).count()); printf("Num nodes: %d \n", m_num_nodes); printf("Num leaf nodes: %d \n", m_num_leaf_nodes); printf("Num non-empty leaf nodes: %d \n", m_num_nonempty_leaf_nodes); printf("Total number of saved triangles: %d \n", m_num_triangles_saved); printf("Avg triangles per node: %f \n", (float)m_num_triangles_saved / (float)m_num_nodes); printf("Recursion depth: %d \n", m_recursion_depth); } bool Accel::rayIntersect(const Ray3f &ray_, Intersection &its, bool shadowRay) const { bool foundIntersection; // Was an intersection found so far? uint32_t f = (uint32_t) -1; // Triangle index of the closest intersection Ray3f ray(ray_); /// Make a copy of the ray (we will need to update its '.maxt' value) foundIntersection = traverseRecursive(*m_root, ray, its, shadowRay, f); if (shadowRay) return foundIntersection; if (foundIntersection) { /* At this point, we now know that there is an intersection, and we know the triangle index of the closest such intersection. The following computes a number of additional properties which characterize the intersection (normals, texture coordinates, etc..) 
         */

        /* Find the barycentric coordinates */
        Vector3f bary;
        bary << 1-its.uv.sum(), its.uv;

        /* References to all relevant mesh buffers */
        const Mesh *mesh   = its.mesh;
        const MatrixXf &V  = mesh->getVertexPositions();
        const MatrixXf &N  = mesh->getVertexNormals();
        const MatrixXf &UV = mesh->getVertexTexCoords();
        const MatrixXu &F  = mesh->getIndices();

        /* Vertex indices of the triangle */
        uint32_t idx0 = F(0, f), idx1 = F(1, f), idx2 = F(2, f);

        Point3f p0 = V.col(idx0), p1 = V.col(idx1), p2 = V.col(idx2);

        its.bary = bary;
        its.tri_index = Point3f(idx0, idx1, idx2);

        /* Compute the intersection position accurately using barycentric coordinates */
        its.p = bary.x() * p0 + bary.y() * p1 + bary.z() * p2;

        /* Compute proper texture coordinates if provided by the mesh */
        if (UV.size() > 0)
            its.uv = bary.x() * UV.col(idx0) +
                bary.y() * UV.col(idx1) +
                bary.z() * UV.col(idx2);

        /* Compute the geometry frame */
        its.geoFrame = Frame((p1-p0).cross(p2-p0).normalized());

        if (N.size() > 0) {
            /* Compute the shading frame. Note that for simplicity,
               the current implementation doesn't attempt to provide
               tangents that are continuous across the surface. That
               means that this code will need to be modified to be
               able to use anisotropic BRDFs, which need tangent continuity */

            its.shFrame = Frame(
                (bary.x() * N.col(idx0) +
                 bary.y() * N.col(idx1) +
                 bary.z() * N.col(idx2)).normalized());
        } else {
            its.shFrame = its.geoFrame;
        }
    }

    return foundIntersection;
}

Accel::Node* Accel::buildRecursive(const BoundingBox3f& bbox,
                                   std::vector<uint32_t>& triangle_indices,
                                   std::vector<uint32_t>& mesh_indices,
                                   uint32_t recursion_depth) {
    // a node is created in any case
    m_num_nodes++;

    uint32_t num_triangles = triangle_indices.size();

    // return empty node if no triangles are left
    if (num_triangles == 0) {
        Node* node = new Node();
        node->bbox = BoundingBox3f(bbox);
        // add to statistics
        m_num_leaf_nodes++;
        return node;
    }

    // create a leaf node if 10 or fewer triangles are left or if the max recursion depth is
    // reached.
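    // (The depth cap is essential: a triangle that straddles several children is duplicated
    // into each of them, so pathological geometry may never drop below the per-node triangle
    // threshold on its own.)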
    if (num_triangles <= MAX_TRIANGLES_PER_NODE || recursion_depth >= MAX_RECURSION_DEPTH) {
        Node* node = new Node();
        node->num_triangles = num_triangles;
        node->triangle_indices = new uint32_t[num_triangles];
        node->mesh_indices = new uint32_t[num_triangles];
        for (uint32_t i = 0; i < num_triangles; i++) {
            node->triangle_indices[i] = triangle_indices[i];
            node->mesh_indices[i] = mesh_indices[i];
        }
        node->bbox = BoundingBox3f(bbox);
        // add to statistics
        m_num_leaf_nodes++;
        m_num_nonempty_leaf_nodes++;
        m_num_triangles_saved += num_triangles;
        return node;
    }

    // create new parent node
    Node* node = new Node();
    node->bbox = BoundingBox3f(bbox);

    BoundingBox3f child_bboxes[8] = {};
    subdivideBBox(bbox, child_bboxes);

    std::vector<std::vector<uint32_t>> child_triangle_indices(8);
    std::vector<std::vector<uint32_t>> child_mesh_indices(8);
    uint32_t child_num_triangles[8] = {};

    // place every triangle in the children it overlaps with
    // for every child bbox
    for (uint32_t i = 0; i < 8; i++) {
        // for every triangle inside of the parent
        for (uint32_t j = 0; j < num_triangles; j++) {
            // fetch the triangle's bounding box from its mesh
            uint32_t triangle_idx = triangle_indices[j];
            uint32_t mesh_idx = mesh_indices[j];
            BoundingBox3f triangle_bbox = m_meshes[mesh_idx]->getBoundingBox(triangle_idx);
            // check if triangle is in bbox, if so put triangle index into triangle list of child
            if (child_bboxes[i].overlaps(triangle_bbox)) {
                child_triangle_indices[i].emplace_back(triangle_idx);
                child_mesh_indices[i].emplace_back(mesh_idx);
                child_num_triangles[i]++;
            }
        }
    }

    // release the parent's triangle lists early to cap peak heap usage during deep recursion
    triangle_indices = std::vector<uint32_t>();
    mesh_indices = std::vector<uint32_t>();

    // for every child bbox
    Node* last_child = nullptr;
    for (uint32_t i = 0; i < 8; i++) {
        // first child
        if (i == 0) {
            node->child = buildRecursive(child_bboxes[i], child_triangle_indices[i],
                                         child_mesh_indices[i], recursion_depth + 1);
            last_child = node->child;
        // neighbour children
        } else {
            last_child->next = buildRecursive(child_bboxes[i], child_triangle_indices[i],
                                              child_mesh_indices[i], recursion_depth + 1);
            last_child = last_child->next;
        }
        m_recursion_depth = std::max(m_recursion_depth, recursion_depth + 1);
    }

    return node;
}

bool Accel::traverseRecursive(const Node& node, Ray3f &ray, Intersection &its,
                              bool shadowRay, uint32_t& hit_idx) const {
    bool foundIntersection = false;

    // only check triangles of node and its children if ray intersects with node bbox
    if (!node.bbox.rayIntersect(ray)) {
        return false;
    }

    // search through all triangles in node
    for (uint32_t i = 0; i < node.num_triangles; ++i) {
        float u, v, t;
        uint32_t triangle_idx = node.triangle_indices[i];
        uint32_t mesh_idx = node.mesh_indices[i];
        if (m_meshes[mesh_idx]->rayIntersect(triangle_idx, ray, u, v, t) && t < ray.maxt) {
            /* An intersection was found!
Can terminate immediately if this is a shadow ray query */ if (shadowRay) return true; ray.maxt = t; its.t = t; its.uv = Point2f(u, v); its.mesh = m_meshes[mesh_idx]; hit_idx = triangle_idx; foundIntersection = true; } } if (node.child) { std::pair<Node*, float> children[8]; Node* current_child = node.child; int i = 0; do { children[i] = std::pair<Node*, float>(current_child, current_child->bbox.distanceTo(ray.o)); current_child = current_child->next; i++; } while (current_child); std::sort(children, children + 8, [ray](const std::pair<Node*, float>& l, const std::pair<Node*, float>& r) { return l.second < r.second; }); for (auto child: children) { foundIntersection = traverseRecursive(*child.first, ray, its, shadowRay, hit_idx) || foundIntersection; if (shadowRay && foundIntersection) return true; } } return foundIntersection; } void Accel::subdivideBBox(const nori::BoundingBox3f &parent, nori::BoundingBox3f *bboxes) { Point3f extents = parent.getExtents(); Point3f x0_y0_z0 = parent.min; Point3f x1_y0_z0 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y(), parent.min.z()); Point3f x0_y1_z0 = Point3f(parent.min.x(), parent.min.y() + extents.y() / 2.f, parent.min.z()); Point3f x1_y1_z0 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y() + extents.y() / 2.f, parent.min.z()); Point3f x0_y0_z1 = Point3f(parent.min.x(), parent.min.y(), parent.min.z() + extents.z() / 2.f); Point3f x1_y0_z1 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y(), parent.min.z() + extents.z() / 2.f); Point3f x0_y1_z1 = Point3f(parent.min.x(), parent.min.y() + extents.y() / 2.f, parent.min.z() + extents.z() / 2.f); Point3f x1_y1_z1 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y() + extents.y() / 2.f, parent.min.z() + extents.z() / 2.f); Point3f x2_y1_z1 = Point3f(parent.max.x(), parent.min.y() + extents.y() / 2.f, parent.min.z() + extents.z() / 2.f); Point3f x1_y2_z1 = Point3f(parent.min.x() + extents.x() / 2.f, parent.max.y(), parent.min.z() + extents.z() / 2.f); Point3f x2_y2_z1 = Point3f(parent.max.x(), parent.max.y(), parent.min.z() + extents.z() / 2.f); Point3f x1_y1_z2 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y() + extents.y() / 2.f, parent.max.z()); Point3f x2_y1_z2 = Point3f(parent.max.x(), parent.min.y() + extents.y() / 2.f, parent.max.z()); Point3f x1_y2_z2 = Point3f(parent.min.x() + extents.x() / 2.f, parent.max.y(), parent.max.z()); Point3f x2_y2_z2 = Point3f(parent.max.x(), parent.max.y(), parent.max.z()); bboxes[0] = BoundingBox3f(x0_y0_z0, x1_y1_z1); bboxes[1] = BoundingBox3f(x1_y0_z0, x2_y1_z1); bboxes[2] = BoundingBox3f(x0_y1_z0, x1_y2_z1); bboxes[3] = BoundingBox3f(x1_y1_z0, x2_y2_z1); bboxes[4] = BoundingBox3f(x0_y0_z1, x1_y1_z2); bboxes[5] = BoundingBox3f(x1_y0_z1, x2_y1_z2); bboxes[6] = BoundingBox3f(x0_y1_z1, x1_y2_z2); bboxes[7] = BoundingBox3f(x1_y1_z1, x2_y2_z2); } NORI_NAMESPACE_END
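As an illustrative aside on subdivideBBox() above: it spells out fifteen corner points by hand, but the same eight octants can be generated from just the parent's min, center and max using a 3-bit child index. Below is a standalone sketch of that alternative with plain structs (Nori's Point3f/BoundingBox3f are deliberately not assumed, so it compiles on its own):

#include <array>

struct Vec3 { float x, y, z; };
struct Box  { Vec3 min, max; };

// Octant i uses bits 0/1/2 of i to pick the lower or upper half along x/y/z.
std::array<Box, 8> subdivide(const Box& p) {
    const Vec3 c{(p.min.x + p.max.x) * 0.5f,
                 (p.min.y + p.max.y) * 0.5f,
                 (p.min.z + p.max.z) * 0.5f};
    std::array<Box, 8> out{};
    for (int i = 0; i < 8; ++i) {
        out[i].min = {(i & 1) ? c.x : p.min.x,
                      (i & 2) ? c.y : p.min.y,
                      (i & 4) ? c.z : p.min.z};
        out[i].max = {(i & 1) ? p.max.x : c.x,
                      (i & 2) ? p.max.y : c.y,
                      (i & 4) ? p.max.z : c.z};
    }
    return out;
}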
40.208333
139
0.626305
QRWells
8b0d0d5092ad2c186dc45eff5b1fc496f9fe7987
20,236
cpp
C++
test/entt/meta/meta_data.cpp
matthew-nagy/entt
2ef98e1787d150fb2561edf3cc61270f46a41386
[ "MIT" ]
77
2021-09-22T17:00:44.000Z
2022-03-18T03:47:47.000Z
test/entt/meta/meta_data.cpp
matthew-nagy/entt
2ef98e1787d150fb2561edf3cc61270f46a41386
[ "MIT" ]
63
2021-09-26T05:24:55.000Z
2022-03-27T04:15:52.000Z
test/entt/meta/meta_data.cpp
matthew-nagy/entt
2ef98e1787d150fb2561edf3cc61270f46a41386
[ "MIT" ]
18
2021-09-26T17:52:07.000Z
2022-02-19T12:25:15.000Z
#include <cstdlib> #include <string> #include <utility> #include <gtest/gtest.h> #include <entt/core/hashed_string.hpp> #include <entt/core/type_traits.hpp> #include <entt/meta/factory.hpp> #include <entt/meta/meta.hpp> #include <entt/meta/node.hpp> #include <entt/meta/resolve.hpp> struct base_t { virtual ~base_t() = default; static void destroy(base_t &) { ++counter; } inline static int counter = 0; int value{3}; }; struct derived_t: base_t { derived_t() {} }; struct clazz_t { clazz_t() : i{0}, j{1}, base{} {} operator int() const { return h; } int i{0}; const int j{1}; base_t base{}; inline static int h{2}; inline static const int k{3}; }; struct setter_getter_t { setter_getter_t() : value{0} {} int setter(double val) { return value = static_cast<int>(val); } int getter() { return value; } int setter_with_ref(const int &val) { return value = val; } const int &getter_with_ref() { return value; } static int static_setter(setter_getter_t &type, int value) { return type.value = value; } static int static_getter(const setter_getter_t &type) { return type.value; } int value; }; struct multi_setter_t { multi_setter_t() : value{0} {} void from_double(double val) { value = val; } void from_string(const char *val) { value = std::atoi(val); } int value; }; struct array_t { static inline int global[3]; int local[5]; }; enum class property_t { random, value }; struct MetaData: ::testing::Test { void SetUp() override { using namespace entt::literals; entt::meta<double>() .type("double"_hs); entt::meta<base_t>() .type("base"_hs) .dtor<base_t::destroy>() .data<&base_t::value>("value"_hs); entt::meta<derived_t>() .type("derived"_hs) .base<base_t>() .dtor<derived_t::destroy>() .data<&base_t::value>("value_from_base"_hs); entt::meta<clazz_t>() .type("clazz"_hs) .data<&clazz_t::i, entt::as_ref_t>("i"_hs) .prop(3, 0) .data<&clazz_t::i, entt::as_cref_t>("ci"_hs) .data<&clazz_t::j>("j"_hs) .prop(true, 1) .data<&clazz_t::h>("h"_hs) .prop(property_t::random, 2) .data<&clazz_t::k>("k"_hs) .prop(property_t::value, 3) .data<&clazz_t::base>("base"_hs) .data<&clazz_t::i, entt::as_void_t>("void"_hs) .conv<int>(); entt::meta<setter_getter_t>() .type("setter_getter"_hs) .data<&setter_getter_t::static_setter, &setter_getter_t::static_getter>("x"_hs) .data<&setter_getter_t::setter, &setter_getter_t::getter>("y"_hs) .data<&setter_getter_t::static_setter, &setter_getter_t::getter>("z"_hs) .data<&setter_getter_t::setter_with_ref, &setter_getter_t::getter_with_ref>("w"_hs) .data<nullptr, &setter_getter_t::getter>("z_ro"_hs) .data<nullptr, &setter_getter_t::value>("value"_hs); entt::meta<multi_setter_t>() .type("multi_setter"_hs) .data<entt::value_list<&multi_setter_t::from_double, &multi_setter_t::from_string>, &multi_setter_t::value>("value"_hs); entt::meta<array_t>() .type("array"_hs) .data<&array_t::global>("global"_hs) .data<&array_t::local>("local"_hs); base_t::counter = 0; } void TearDown() override { entt::meta_reset(); } }; using MetaDataDeathTest = MetaData; TEST_F(MetaData, Functionalities) { using namespace entt::literals; auto data = entt::resolve<clazz_t>().data("i"_hs); clazz_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "i"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 42); for(auto curr: data.prop()) { ASSERT_EQ(curr.key(), 3); 
ASSERT_EQ(curr.value(), 0); } ASSERT_FALSE(data.prop(2)); ASSERT_FALSE(data.prop('c')); auto prop = data.prop(3); ASSERT_TRUE(prop); ASSERT_EQ(prop.key(), 3); ASSERT_EQ(prop.value(), 0); } TEST_F(MetaData, Const) { using namespace entt::literals; auto data = entt::resolve<clazz_t>().data("j"_hs); clazz_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "j"_hs); ASSERT_TRUE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 1); ASSERT_FALSE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 1); for(auto curr: data.prop()) { ASSERT_EQ(curr.key(), true); ASSERT_EQ(curr.value(), 1); } ASSERT_FALSE(data.prop(false)); ASSERT_FALSE(data.prop('c')); auto prop = data.prop(true); ASSERT_TRUE(prop); ASSERT_EQ(prop.key(), true); ASSERT_EQ(prop.value(), 1); } TEST_F(MetaData, Static) { using namespace entt::literals; auto data = entt::resolve<clazz_t>().data("h"_hs); ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "h"_hs); ASSERT_FALSE(data.is_const()); ASSERT_TRUE(data.is_static()); ASSERT_EQ(data.get({}).cast<int>(), 2); ASSERT_TRUE(data.set({}, 42)); ASSERT_EQ(data.get({}).cast<int>(), 42); for(auto curr: data.prop()) { ASSERT_EQ(curr.key(), property_t::random); ASSERT_EQ(curr.value(), 2); } ASSERT_FALSE(data.prop(property_t::value)); ASSERT_FALSE(data.prop('c')); auto prop = data.prop(property_t::random); ASSERT_TRUE(prop); ASSERT_EQ(prop.key(), property_t::random); ASSERT_EQ(prop.value(), 2); } TEST_F(MetaData, ConstStatic) { using namespace entt::literals; auto data = entt::resolve<clazz_t>().data("k"_hs); ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "k"_hs); ASSERT_TRUE(data.is_const()); ASSERT_TRUE(data.is_static()); ASSERT_EQ(data.get({}).cast<int>(), 3); ASSERT_FALSE(data.set({}, 42)); ASSERT_EQ(data.get({}).cast<int>(), 3); for(auto curr: data.prop()) { ASSERT_EQ(curr.key(), property_t::value); ASSERT_EQ(curr.value(), 3); } ASSERT_FALSE(data.prop(property_t::random)); ASSERT_FALSE(data.prop('c')); auto prop = data.prop(property_t::value); ASSERT_TRUE(prop); ASSERT_EQ(prop.key(), property_t::value); ASSERT_EQ(prop.value(), 3); } TEST_F(MetaData, GetMetaAnyArg) { using namespace entt::literals; entt::meta_any any{clazz_t{}}; any.cast<clazz_t &>().i = 99; const auto value = entt::resolve<clazz_t>().data("i"_hs).get(any); ASSERT_TRUE(value); ASSERT_TRUE(static_cast<bool>(value.cast<int>())); ASSERT_EQ(value.cast<int>(), 99); } TEST_F(MetaData, GetInvalidArg) { using namespace entt::literals; auto instance = 0; ASSERT_FALSE(entt::resolve<clazz_t>().data("i"_hs).get(instance)); } TEST_F(MetaData, SetMetaAnyArg) { using namespace entt::literals; entt::meta_any any{clazz_t{}}; entt::meta_any value{42}; ASSERT_EQ(any.cast<clazz_t>().i, 0); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, value)); ASSERT_EQ(any.cast<clazz_t>().i, 42); } TEST_F(MetaData, SetInvalidArg) { using namespace entt::literals; ASSERT_FALSE(entt::resolve<clazz_t>().data("i"_hs).set({}, 'c')); } TEST_F(MetaData, SetCast) { using namespace entt::literals; clazz_t instance{}; ASSERT_EQ(base_t::counter, 0); ASSERT_TRUE(entt::resolve<clazz_t>().data("base"_hs).set(instance, derived_t{})); ASSERT_EQ(base_t::counter, 
1); } TEST_F(MetaData, SetConvert) { using namespace entt::literals; clazz_t instance{}; instance.h = 42; ASSERT_EQ(instance.i, 0); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(instance, instance)); ASSERT_EQ(instance.i, 42); } TEST_F(MetaData, SetByRef) { using namespace entt::literals; entt::meta_any any{clazz_t{}}; int value{42}; ASSERT_EQ(any.cast<clazz_t>().i, 0); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, entt::make_meta<int &>(value))); ASSERT_EQ(any.cast<clazz_t>().i, 42); value = 3; auto wrapper = entt::make_meta<int &>(value); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, wrapper.as_ref())); ASSERT_EQ(any.cast<clazz_t>().i, 3); } TEST_F(MetaData, SetByConstRef) { using namespace entt::literals; entt::meta_any any{clazz_t{}}; int value{42}; ASSERT_EQ(any.cast<clazz_t>().i, 0); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, entt::make_meta<const int &>(value))); ASSERT_EQ(any.cast<clazz_t>().i, 42); value = 3; auto wrapper = entt::make_meta<const int &>(value); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, wrapper.as_ref())); ASSERT_EQ(any.cast<clazz_t>().i, 3); } TEST_F(MetaData, SetterGetterAsFreeFunctions) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("x"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "x"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 42); } TEST_F(MetaData, SetterGetterAsMemberFunctions) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("y"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<double>()); ASSERT_EQ(data.id(), "y"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42.)); ASSERT_EQ(data.get(instance).cast<int>(), 42); ASSERT_TRUE(data.set(instance, 3)); ASSERT_EQ(data.get(instance).cast<int>(), 3); } TEST_F(MetaData, SetterGetterWithRefAsMemberFunctions) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("w"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "w"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 42); } TEST_F(MetaData, SetterGetterMixed) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("z"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "z"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 42); } TEST_F(MetaData, SetterGetterReadOnly) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("z_ro"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); 
ASSERT_EQ(data.arity(), 0u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::meta_type{}); ASSERT_EQ(data.id(), "z_ro"_hs); ASSERT_TRUE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_FALSE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 0); } TEST_F(MetaData, SetterGetterReadOnlyDataMember) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("value"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 0u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::meta_type{}); ASSERT_EQ(data.id(), "value"_hs); ASSERT_TRUE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_FALSE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 0); } TEST_F(MetaData, MultiSetter) { using namespace entt::literals; auto data = entt::resolve<multi_setter_t>().data("value"_hs); multi_setter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 2u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<double>()); ASSERT_EQ(data.arg(1u), entt::resolve<const char *>()); ASSERT_EQ(data.arg(2u), entt::meta_type{}); ASSERT_EQ(data.id(), "value"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 42); ASSERT_TRUE(data.set(instance, 3.)); ASSERT_EQ(data.get(instance).cast<int>(), 3); ASSERT_FALSE(data.set(instance, std::string{"99"})); ASSERT_TRUE(data.set(instance, std::string{"99"}.c_str())); ASSERT_EQ(data.get(instance).cast<int>(), 99); } TEST_F(MetaData, ConstInstance) { using namespace entt::literals; clazz_t instance{}; ASSERT_NE(entt::resolve<clazz_t>().data("i"_hs).get(instance).try_cast<int>(), nullptr); ASSERT_NE(entt::resolve<clazz_t>().data("i"_hs).get(instance).try_cast<const int>(), nullptr); ASSERT_EQ(entt::resolve<clazz_t>().data("i"_hs).get(std::as_const(instance)).try_cast<int>(), nullptr); // as_ref_t adapts to the constness of the passed object and returns const references in case ASSERT_NE(entt::resolve<clazz_t>().data("i"_hs).get(std::as_const(instance)).try_cast<const int>(), nullptr); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).get(instance)); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(instance, 3)); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).get(std::as_const(instance))); ASSERT_FALSE(entt::resolve<clazz_t>().data("i"_hs).set(std::as_const(instance), 3)); ASSERT_TRUE(entt::resolve<clazz_t>().data("ci"_hs).get(instance)); ASSERT_TRUE(entt::resolve<clazz_t>().data("ci"_hs).set(instance, 3)); ASSERT_TRUE(entt::resolve<clazz_t>().data("ci"_hs).get(std::as_const(instance))); ASSERT_FALSE(entt::resolve<clazz_t>().data("ci"_hs).set(std::as_const(instance), 3)); ASSERT_TRUE(entt::resolve<clazz_t>().data("j"_hs).get(instance)); ASSERT_FALSE(entt::resolve<clazz_t>().data("j"_hs).set(instance, 3)); ASSERT_TRUE(entt::resolve<clazz_t>().data("j"_hs).get(std::as_const(instance))); ASSERT_FALSE(entt::resolve<clazz_t>().data("j"_hs).set(std::as_const(instance), 3)); } TEST_F(MetaData, ArrayStatic) { using namespace entt::literals; auto data = entt::resolve<array_t>().data("global"_hs); ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int[3]>()); ASSERT_EQ(data.arg(0u), entt::resolve<int[3]>()); ASSERT_EQ(data.id(), "global"_hs); 
ASSERT_FALSE(data.is_const()); ASSERT_TRUE(data.is_static()); ASSERT_TRUE(data.type().is_array()); ASSERT_FALSE(data.get({})); } TEST_F(MetaData, Array) { using namespace entt::literals; auto data = entt::resolve<array_t>().data("local"_hs); array_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int[5]>()); ASSERT_EQ(data.arg(0u), entt::resolve<int[5]>()); ASSERT_EQ(data.id(), "local"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_TRUE(data.type().is_array()); ASSERT_FALSE(data.get(instance)); } TEST_F(MetaData, AsVoid) { using namespace entt::literals; auto data = entt::resolve<clazz_t>().data("void"_hs); clazz_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(instance.i, 42); ASSERT_EQ(data.get(instance), entt::meta_any{std::in_place_type<void>}); } TEST_F(MetaData, AsRef) { using namespace entt::literals; clazz_t instance{}; auto data = entt::resolve<clazz_t>().data("i"_hs); ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(instance.i, 0); data.get(instance).cast<int &>() = 3; ASSERT_EQ(instance.i, 3); } TEST_F(MetaData, AsConstRef) { using namespace entt::literals; clazz_t instance{}; auto data = entt::resolve<clazz_t>().data("ci"_hs); ASSERT_EQ(instance.i, 0); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.get(instance).cast<const int &>(), 0); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_EQ(instance.i, 0); } TEST_F(MetaDataDeathTest, AsConstRef) { using namespace entt::literals; clazz_t instance{}; auto data = entt::resolve<clazz_t>().data("ci"_hs); ASSERT_DEATH(data.get(instance).cast<int &>() = 3, ""); } TEST_F(MetaData, SetGetBaseData) { using namespace entt::literals; auto type = entt::resolve<derived_t>(); derived_t instance{}; ASSERT_TRUE(type.data("value"_hs)); ASSERT_EQ(instance.value, 3); ASSERT_TRUE(type.data("value"_hs).set(instance, 42)); ASSERT_EQ(type.data("value"_hs).get(instance).cast<int>(), 42); ASSERT_EQ(instance.value, 42); } TEST_F(MetaData, SetGetFromBase) { using namespace entt::literals; auto type = entt::resolve<derived_t>(); derived_t instance{}; ASSERT_TRUE(type.data("value_from_base"_hs)); ASSERT_EQ(instance.value, 3); ASSERT_TRUE(type.data("value_from_base"_hs).set(instance, 42)); ASSERT_EQ(type.data("value_from_base"_hs).get(instance).cast<int>(), 42); ASSERT_EQ(instance.value, 42); } TEST_F(MetaData, ReRegistration) { using namespace entt::literals; SetUp(); auto *node = entt::internal::meta_node<base_t>::resolve(); auto type = entt::resolve<base_t>(); ASSERT_NE(node->data, nullptr); ASSERT_EQ(node->data->next, nullptr); ASSERT_TRUE(type.data("value"_hs)); entt::meta<base_t>().data<&base_t::value>("field"_hs); ASSERT_NE(node->data, nullptr); ASSERT_EQ(node->data->next, nullptr); ASSERT_FALSE(type.data("value"_hs)); ASSERT_TRUE(type.data("field"_hs)); } TEST_F(MetaData, NameCollision) { using namespace entt::literals; ASSERT_NO_FATAL_FAILURE(entt::meta<clazz_t>().data<&clazz_t::j>("j"_hs)); ASSERT_TRUE(entt::resolve<clazz_t>().data("j"_hs)); ASSERT_NO_FATAL_FAILURE(entt::meta<clazz_t>().data<&clazz_t::j>("cj"_hs)); ASSERT_FALSE(entt::resolve<clazz_t>().data("j"_hs)); ASSERT_TRUE(entt::resolve<clazz_t>().data("cj"_hs)); } 
TEST_F(MetaDataDeathTest, NameCollision) { using namespace entt::literals; ASSERT_DEATH(entt::meta<clazz_t>().data<&clazz_t::j>("i"_hs), ""); }
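For readers skimming these fixtures, the bare registration/lookup round trip they all build on can be reduced to the sketch below. It uses only APIs already exercised in this file (entt::meta, entt::resolve, data get/set); point_t and its identifiers are invented for illustration.

#include <cassert>

#include <entt/core/hashed_string.hpp>
#include <entt/meta/factory.hpp>
#include <entt/meta/meta.hpp>
#include <entt/meta/resolve.hpp>

struct point_t { int x{0}; };

int main() {
    using namespace entt::literals;

    // Register the type and expose one data member under a hashed identifier.
    entt::meta<point_t>().type("point"_hs).data<&point_t::x>("x"_hs);

    point_t instance{};
    auto data = entt::resolve<point_t>().data("x"_hs);

    assert(data);                                // lookup succeeded
    assert(data.set(instance, 7));               // writes through the reflected member
    assert(data.get(instance).cast<int>() == 7); // reads it back
    return 0;
}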
29.200577
132
0.636687
matthew-nagy
8b0f1c8242522b4f5d8cb091c79f98b379c39b34
7,208
cc
C++
third_party/nucleus/io/reference_test.cc
fo40225/deepvariant
c2167e7c90f016905f309f118eb3897935ee7c5f
[ "BSD-3-Clause" ]
1
2019-05-20T11:55:45.000Z
2019-05-20T11:55:45.000Z
third_party/nucleus/io/reference_test.cc
fo40225/deepvariant
c2167e7c90f016905f309f118eb3897935ee7c5f
[ "BSD-3-Clause" ]
null
null
null
third_party/nucleus/io/reference_test.cc
fo40225/deepvariant
c2167e7c90f016905f309f118eb3897935ee7c5f
[ "BSD-3-Clause" ]
1
2017-12-06T17:30:18.000Z
2017-12-06T17:30:18.000Z
/*
 * Copyright 2018 Google LLC.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "third_party/nucleus/io/reference.h"

#include <vector>

#include <gmock/gmock-generated-matchers.h>
#include <gmock/gmock-matchers.h>
#include <gmock/gmock-more-matchers.h>

#include "tensorflow/core/platform/test.h"
#include "third_party/nucleus/io/reference_test.h"
#include "third_party/nucleus/util/utils.h"
#include "third_party/nucleus/vendor/status_matchers.h"
#include "tensorflow/core/platform/logging.h"

namespace nucleus {

using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::UnorderedElementsAre;

TEST_P(GenomeReferenceTest, TestBasic) {
  EXPECT_THAT(Ref().ContigNames(),
              UnorderedElementsAre("chrM", "chr1", "chr2"));
  EXPECT_THAT(Ref().Contigs().size(), 3);

  const auto& chrm = *Ref().Contig("chrM").ValueOrDie();
  EXPECT_EQ(100, chrm.n_bases());
  EXPECT_EQ("chrM", chrm.name());
  EXPECT_EQ(0, chrm.pos_in_fasta());

  const auto& chr1 = *Ref().Contig("chr1").ValueOrDie();
  EXPECT_EQ(76, chr1.n_bases());
  EXPECT_EQ("chr1", chr1.name());
  EXPECT_EQ(1, chr1.pos_in_fasta());

  const auto& chr2 = *Ref().Contig("chr2").ValueOrDie();
  EXPECT_EQ(121, chr2.n_bases());
  EXPECT_EQ("chr2", chr2.name());
  EXPECT_EQ(2, chr2.pos_in_fasta());
}

TEST_P(GenomeReferenceTest, TestIsValidInterval) {
  // Checks that an interval on an unknown chromosome is reported as invalid.
  EXPECT_FALSE(Ref().IsValidInterval(MakeRange("unknown_chr", 0, 1)));

  for (const auto& chr : Ref().ContigNames()) {
    const auto n_bases = Ref().Contig(chr).ValueOrDie()->n_bases();
    EXPECT_TRUE(Ref().IsValidInterval(MakeRange(chr, 0, n_bases)));
    for (int i = 0; i < n_bases; ++i) {
      EXPECT_TRUE(Ref().IsValidInterval(MakeRange(chr, 0, i+1)));
      EXPECT_TRUE(Ref().IsValidInterval(MakeRange(chr, i, i+1)));
    }
    EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, -10, 0)));
    EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, -1, 0)));
    EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, 10, 9)));
    EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, 0, n_bases + 1)));
    EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, 0, n_bases + 100)));
    EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, n_bases, n_bases)));
    EXPECT_FALSE(
        Ref().IsValidInterval(MakeRange(chr, n_bases + 100, n_bases + 100)));
  }
}

TEST_P(GenomeReferenceTest, NotOKIfContigCalledWithBadName) {
  EXPECT_THAT(Ref().Contig("missing"),
              IsNotOKWithMessage("Unknown contig missing"));
}

TEST_P(GenomeReferenceTest, NotOKIfIntervalIsInvalid) {
  // Asking for a bad chromosome name produces an error status.
  StatusOr<string> result = Ref().GetBases(MakeRange("missing", 0, 1));
  EXPECT_THAT(result,
              IsNotOKWithCodeAndMessage(tensorflow::error::INVALID_ARGUMENT,
                                        "Invalid interval"));

  // Starting before 0 is detected.
  EXPECT_THAT(Ref().GetBases(MakeRange("chrM", -1, 1)),
              IsNotOKWithMessage("Invalid interval"));

  // chr1 exists, but this range's start is beyond the chr.
  EXPECT_THAT(Ref().GetBases(MakeRange("chr1", 1000, 1010)),
              IsNotOKWithMessage("Invalid interval"));

  // chr1 exists, but this range's end is beyond the chr.
  EXPECT_THAT(Ref().GetBases(MakeRange("chr1", 0, 1010)),
              IsNotOKWithMessage("Invalid interval"));
}

TEST_P(GenomeReferenceTest, TestHasContig) {
  EXPECT_TRUE(Ref().HasContig("chrM"));
  EXPECT_TRUE(Ref().HasContig("chr1"));
  EXPECT_TRUE(Ref().HasContig("chr2"));
  EXPECT_FALSE(Ref().HasContig("chr3"));
  EXPECT_FALSE(Ref().HasContig("chr"));
  EXPECT_FALSE(Ref().HasContig(""));
}

// Checks that GetBases works in all its forms for the given arguments.
void CheckGetBases(const GenomeReference& ref,
                   const string& chrom, const int64 start, const int64 end,
                   const string& expected_bases) {
  StatusOr<string> query = ref.GetBases(MakeRange(chrom, start, end));
  ASSERT_THAT(query, IsOK());
  EXPECT_THAT(query.ValueOrDie(), expected_bases);
}

TEST_P(GenomeReferenceTest, TestReferenceBases) {
  CheckGetBases(Ref(), "chrM", 0, 100,
                "GATCACAGGTCTATCACCCTATTAACCACTCACGGGAGCTCTCCATGCATTTGGTATTTTC"
                "GTCTGGGGGGTGTGCACGCGATAGCATTGCGAGACGCTG");

  CheckGetBases(Ref(), "chr1", 0, 76,
                "ACCACCATCCTCCGTGAAATCAATATCCCGCACAAGAGTGCTACTCTCCTAAATCCCTTCT"
                "CGTCCCCATGGATGA");

  CheckGetBases(Ref(), "chr2", 0, 121,
                "CGCTNCGGGCCCATAACACTTGGGGGTAGCTAAAGTGAACTGTATCCGAC"
                "ATCTGGTTCCTACTTCAGGGCCATAAAGCCTAAATAGCCCACACGTTCCC"
                "CTTAAATAAGACATCACGATG");
}

TEST_P(GenomeReferenceTest, TestGetBasesParts) {
  CheckGetBases(Ref(), "chrM", 0, 10, "GATCACAGGT");
  CheckGetBases(Ref(), "chrM", 0, 9, "GATCACAGG");
  CheckGetBases(Ref(), "chrM", 1, 9, "ATCACAGG");
  CheckGetBases(Ref(), "chrM", 3, 7, "CACA");
  CheckGetBases(Ref(), "chrM", 90, 100, "CGAGACGCTG");
  CheckGetBases(Ref(), "chrM", 90, 99, "CGAGACGCT");
  CheckGetBases(Ref(), "chrM", 91, 100, "GAGACGCTG");
  CheckGetBases(Ref(), "chrM", 92, 100, "AGACGCTG");
  CheckGetBases(Ref(), "chrM", 92, 99, "AGACGCT");
  CheckGetBases(Ref(), "chrM", 92, 98, "AGACGC");

  CheckGetBases(Ref(), "chrM", 0, 1, "G");
  CheckGetBases(Ref(), "chrM", 1, 2, "A");
  CheckGetBases(Ref(), "chrM", 2, 3, "T");
  CheckGetBases(Ref(), "chrM", 3, 4, "C");
  CheckGetBases(Ref(), "chrM", 4, 5, "A");
  CheckGetBases(Ref(), "chrM", 5, 6, "C");

  // crosses the boundary of the index when max_bin_size is 5
  CheckGetBases(Ref(), "chrM", 4, 6, "AC");

  // 0-bp interval requests should return the empty string.
  CheckGetBases(Ref(), "chrM", 0, 0, "");
  CheckGetBases(Ref(), "chrM", 10, 10, "");
}

}  // namespace nucleus
38.752688
79
0.694784
fo40225
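The GetBases tests pin down the interval convention: ranges are 0-based and half-open, so MakeRange(chrom, start, end) covers [start, end) and a zero-width range yields the empty string. A caller sketch under those semantics; the names are taken from the test file, the status accessors follow its vendored StatusOr, and how the concrete GenomeReference behind Ref() gets opened depends on the backend and is assumed away here:

#include <string>

#include "third_party/nucleus/io/reference.h"
#include "third_party/nucleus/util/utils.h"

// Fetch the first ten bases of `chrom`: the half-open range [0, 10).
std::string FirstTenBases(const nucleus::GenomeReference& ref,
                          const std::string& chrom) {
  nucleus::StatusOr<std::string> bases =
      ref.GetBases(nucleus::MakeRange(chrom, 0, 10));
  if (!bases.ok()) {
    return "";  // e.g. unknown contig or invalid interval
  }
  return bases.ValueOrDie();
}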
8b118d5923b101a336a65df34cb52e1d0087371b
36,090
hpp
C++
ReactNativeFrontend/ios/Pods/boost/boost/phoenix/support/preprocessed/vector_20.hpp
Harshitha91/Tmdb-react-native-node
e06e3f25a7ee6946ef07a1f524fdf62e48424293
[ "Apache-2.0" ]
12,278
2015-01-29T17:11:33.000Z
2022-03-31T21:12:00.000Z
ios/Pods/boost-for-react-native/boost/phoenix/support/preprocessed/vector_20.hpp
c7yrus/alyson-v3
5ad95a8f782f5f5d2fd543d44ca6a8b093395965
[ "Apache-2.0" ]
9,469
2015-01-30T05:33:07.000Z
2022-03-31T16:17:21.000Z
ios/Pods/boost-for-react-native/boost/phoenix/support/preprocessed/vector_20.hpp
c7yrus/alyson-v3
5ad95a8f782f5f5d2fd543d44ca6a8b093395965
[ "Apache-2.0" ]
892
2015-01-29T16:26:19.000Z
2022-03-20T07:44:30.000Z
/*============================================================================== Copyright (c) 2005-2010 Joel de Guzman Copyright (c) 2010 Thomas Heller Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ==============================================================================*/ namespace boost { namespace phoenix { template <typename Dummy = void> struct vector0 { typedef mpl::int_<0> size_type; static const int size_value = 0; }; template <int> struct vector_chooser; template <> struct vector_chooser<0> { template <typename Dummy = void> struct apply { typedef vector0<> type; }; }; }} namespace boost { namespace phoenix { template <typename A0> struct vector1 { typedef A0 member_type0; A0 a0; typedef mpl::int_<1> size_type; static const int size_value = 1; typedef vector0<> args_type; args_type args() const { args_type r = {}; return r; } }; template <> struct vector_chooser<1> { template <typename A0> struct apply { typedef vector1<A0> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) , ( boost::phoenix::vector1 ) (A0) , (A0, a0) ) namespace boost { namespace phoenix { template <typename A0 , typename A1> struct vector2 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef mpl::int_<2> size_type; static const int size_value = 2; typedef vector1<A1> args_type; args_type args() const { args_type r = {a1}; return r; } }; template <> struct vector_chooser<2> { template <typename A0 , typename A1> struct apply { typedef vector2<A0 , A1> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) , ( boost::phoenix::vector2 ) (A0) (A1) , (A0, a0) (A1, a1) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2> struct vector3 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef mpl::int_<3> size_type; static const int size_value = 3; typedef vector2<A1 , A2> args_type; args_type args() const { args_type r = {a1 , a2}; return r; } }; template <> struct vector_chooser<3> { template <typename A0 , typename A1 , typename A2> struct apply { typedef vector3<A0 , A1 , A2> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) , ( boost::phoenix::vector3 ) (A0) (A1) (A2) , (A0, a0) (A1, a1) (A2, a2) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3> struct vector4 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef mpl::int_<4> size_type; static const int size_value = 4; typedef vector3<A1 , A2 , A3> args_type; args_type args() const { args_type r = {a1 , a2 , a3}; return r; } }; template <> struct vector_chooser<4> { template <typename A0 , typename A1 , typename A2 , typename A3> struct apply { typedef vector4<A0 , A1 , A2 , A3> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) , ( boost::phoenix::vector4 ) (A0) (A1) (A2) (A3) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4> struct vector5 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef mpl::int_<5> size_type; static const int size_value = 5; typedef vector4<A1 , A2 , A3 , A4> args_type; args_type args() const { 
args_type r = {a1 , a2 , a3 , a4}; return r; } }; template <> struct vector_chooser<5> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4> struct apply { typedef vector5<A0 , A1 , A2 , A3 , A4> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) , ( boost::phoenix::vector5 ) (A0) (A1) (A2) (A3) (A4) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5> struct vector6 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef mpl::int_<6> size_type; static const int size_value = 6; typedef vector5<A1 , A2 , A3 , A4 , A5> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5}; return r; } }; template <> struct vector_chooser<6> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5> struct apply { typedef vector6<A0 , A1 , A2 , A3 , A4 , A5> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) , ( boost::phoenix::vector6 ) (A0) (A1) (A2) (A3) (A4) (A5) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6> struct vector7 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef mpl::int_<7> size_type; static const int size_value = 7; typedef vector6<A1 , A2 , A3 , A4 , A5 , A6> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6}; return r; } }; template <> struct vector_chooser<7> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6> struct apply { typedef vector7<A0 , A1 , A2 , A3 , A4 , A5 , A6> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) , ( boost::phoenix::vector7 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7> struct vector8 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef mpl::int_<8> size_type; static const int size_value = 8; typedef vector7<A1 , A2 , A3 , A4 , A5 , A6 , A7> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7}; return r; } }; template <> struct vector_chooser<8> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7> struct apply { typedef vector8<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) , ( boost::phoenix::vector8 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) ) namespace boost { namespace phoenix { template <typename A0 , 
typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8> struct vector9 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef mpl::int_<9> size_type; static const int size_value = 9; typedef vector8<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8}; return r; } }; template <> struct vector_chooser<9> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8> struct apply { typedef vector9<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) , ( boost::phoenix::vector9 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9> struct vector10 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef mpl::int_<10> size_type; static const int size_value = 10; typedef vector9<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9}; return r; } }; template <> struct vector_chooser<10> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9> struct apply { typedef vector10<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) , ( boost::phoenix::vector10 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10> struct vector11 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef mpl::int_<11> size_type; static const int size_value = 11; typedef vector10<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10}; return r; } }; template <> struct vector_chooser<11> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10> struct apply { typedef vector11<A0 , A1 , A2 , A3 , A4 
, A5 , A6 , A7 , A8 , A9 , A10> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) , ( boost::phoenix::vector11 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11> struct vector12 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef mpl::int_<12> size_type; static const int size_value = 12; typedef vector11<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11}; return r; } }; template <> struct vector_chooser<12> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11> struct apply { typedef vector12<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) , ( boost::phoenix::vector12 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12> struct vector13 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef mpl::int_<13> size_type; static const int size_value = 13; typedef vector12<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12}; return r; } }; template <> struct vector_chooser<13> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12> struct apply { typedef vector13<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) , ( boost::phoenix::vector13 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) ) namespace boost { namespace phoenix { template 
<typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13> struct vector14 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef mpl::int_<14> size_type; static const int size_value = 14; typedef vector13<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13}; return r; } }; template <> struct vector_chooser<14> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13> struct apply { typedef vector14<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) , ( boost::phoenix::vector14 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14> struct vector15 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef mpl::int_<15> size_type; static const int size_value = 15; typedef vector14<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14}; return r; } }; template <> struct vector_chooser<15> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14> struct apply { typedef vector15<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) , ( boost::phoenix::vector15 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) ) 
namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15> struct vector16 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef mpl::int_<16> size_type; static const int size_value = 16; typedef vector15<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15}; return r; } }; template <> struct vector_chooser<16> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15> struct apply { typedef vector16<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) , ( boost::phoenix::vector16 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16> struct vector17 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef mpl::int_<17> size_type; static const int size_value = 17; typedef vector16<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16}; return r; } }; template <> struct vector_chooser<17> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16> struct apply { typedef vector17<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 
, A13 , A14 , A15 , A16> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) , ( boost::phoenix::vector17 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17> struct vector18 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef A17 member_type17; A17 a17; typedef mpl::int_<18> size_type; static const int size_value = 18; typedef vector17<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17}; return r; } }; template <> struct vector_chooser<18> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17> struct apply { typedef vector18<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) , ( boost::phoenix::vector18 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18> struct vector19 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; 
typedef A17 member_type17; A17 a17; typedef A18 member_type18; A18 a18; typedef mpl::int_<19> size_type; static const int size_value = 19; typedef vector18<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17 , a18}; return r; } }; template <> struct vector_chooser<19> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18> struct apply { typedef vector19<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) , ( boost::phoenix::vector19 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) (A18, a18) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19> struct vector20 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef A17 member_type17; A17 a17; typedef A18 member_type18; A18 a18; typedef A19 member_type19; A19 a19; typedef mpl::int_<20> size_type; static const int size_value = 20; typedef vector19<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17 , a18 , a19}; return r; } }; template <> struct vector_chooser<20> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19> struct apply { typedef vector20<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19) , ( boost::phoenix::vector20 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19) , 
(A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) (A18, a18) (A19, a19) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19 , typename A20> struct vector21 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef A17 member_type17; A17 a17; typedef A18 member_type18; A18 a18; typedef A19 member_type19; A19 a19; typedef A20 member_type20; A20 a20; typedef mpl::int_<21> size_type; static const int size_value = 21; typedef vector20<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19 , A20> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17 , a18 , a19 , a20}; return r; } }; template <> struct vector_chooser<21> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19 , typename A20> struct apply { typedef vector21<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19 , A20> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19) (A20) , ( boost::phoenix::vector21 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19) (A20) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) (A18, a18) (A19, a19) (A20, a20) )
41.626298
723
0.577279
Harshitha91
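One structural idea underlies all of the generated code above: vectorN<A0, ..., A(N-1)>::args() peels off the head slot and returns a vector(N-1) aggregate holding a1..a(N-1); in Phoenix the head slot conventionally carries the actor/environment while the tail holds the call arguments. A small illustration of that head/tail split using the generated aggregates (the include path is assumed from Boost's layout, where boost/phoenix/support/vector.hpp pulls in this preprocessed file):

#include <boost/phoenix/support/vector.hpp>

int main() {
    // Head slot a0 plus a two-element tail; vectorN types are plain aggregates.
    boost::phoenix::vector3<int, char, double> v = {1, 'x', 2.5};

    // args() drops a0: the result's a0/a1 are the original a1/a2.
    boost::phoenix::vector2<char, double> tail = v.args();
    return (tail.a0 == 'x' && tail.a1 == 2.5) ? 0 : 1;
}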
8b1419c5048c61d29f73ce4bead4e87922947b39
2,330
hpp
C++
include/glm/gtx/scalar_multiplication.hpp
Tumurtogtokh/FLAMEGPU
466a2ddc34091b034bbfb968b2a3561c90b969f7
[ "MIT" ]
55
2018-11-22T05:09:21.000Z
2021-12-20T05:15:41.000Z
include/glm/gtx/scalar_multiplication.hpp
Tumurtogtokh/FLAMEGPU
466a2ddc34091b034bbfb968b2a3561c90b969f7
[ "MIT" ]
41
2015-08-25T07:50:55.000Z
2022-03-21T16:20:37.000Z
include/glm/gtx/scalar_multiplication.hpp
Tumurtogtokh/FLAMEGPU
466a2ddc34091b034bbfb968b2a3561c90b969f7
[ "MIT" ]
12
2016-01-15T23:20:19.000Z
2021-02-10T06:18:00.000Z
/// @ref gtx
/// @file glm/gtx/scalar_multiplication.hpp
/// @author Joshua Moerman
///
/// Include <glm/gtx/scalar_multiplication.hpp> to use the features of this extension.
///
/// Enables scalar multiplication for all types
///
/// Since GLSL is very strict about types, the following (often used) combinations do not work:
///    double * vec4
///    int * vec4
///    vec4 / int
/// So we'll fix that! Of course "float * vec4" should remain the same (hence the enable_if magic)

#pragma once

#include "../detail/setup.hpp"

#ifndef GLM_ENABLE_EXPERIMENTAL
#	error "GLM: GLM_GTX_scalar_multiplication is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it."
#endif

#if !GLM_HAS_TEMPLATE_ALIASES && !(GLM_COMPILER & GLM_COMPILER_GCC)
#	error "GLM_GTX_scalar_multiplication requires C++11 support or alias templates and if not support for GCC"
#endif

#include "../vec2.hpp"
#include "../vec3.hpp"
#include "../vec4.hpp"
#include "../mat2x2.hpp"
#include <type_traits>

namespace glm
{
	template<typename T, typename Vec>
	using return_type_scalar_multiplication = typename std::enable_if<
		!std::is_same<T, float>::value       // T may not be a float
		&& std::is_arithmetic<T>::value, Vec // But it may be an int or double (no vec3 or mat3, ...)
	>::type;

#define GLM_IMPLEMENT_SCAL_MULT(Vec) \
	template<typename T> \
	return_type_scalar_multiplication<T, Vec> \
	operator*(T const& s, Vec rh){ \
		return rh *= static_cast<float>(s); \
	} \
	 \
	template<typename T> \
	return_type_scalar_multiplication<T, Vec> \
	operator*(Vec lh, T const& s){ \
		return lh *= static_cast<float>(s); \
	} \
	 \
	template<typename T> \
	return_type_scalar_multiplication<T, Vec> \
	operator/(Vec lh, T const& s){ \
		return lh *= 1.0f / s; \
	}

	GLM_IMPLEMENT_SCAL_MULT(vec2)
	GLM_IMPLEMENT_SCAL_MULT(vec3)
	GLM_IMPLEMENT_SCAL_MULT(vec4)

	GLM_IMPLEMENT_SCAL_MULT(mat2)
	GLM_IMPLEMENT_SCAL_MULT(mat2x3)
	GLM_IMPLEMENT_SCAL_MULT(mat2x4)
	GLM_IMPLEMENT_SCAL_MULT(mat3x2)
	GLM_IMPLEMENT_SCAL_MULT(mat3)
	GLM_IMPLEMENT_SCAL_MULT(mat3x4)
	GLM_IMPLEMENT_SCAL_MULT(mat4x2)
	GLM_IMPLEMENT_SCAL_MULT(mat4x3)
	GLM_IMPLEMENT_SCAL_MULT(mat4)
#undef GLM_IMPLEMENT_SCAL_MULT
} // namespace glm
30.657895
191
0.71588
Tumurtogtokh
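Under this extension, a non-float arithmetic scalar on either side converts to float and reuses GLM's own compound operators, while float * vec4 keeps resolving to the stock operator thanks to the enable_if constraint. A short usage sketch (GLM_ENABLE_EXPERIMENTAL must be defined before the include, per the guard above):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/vec4.hpp>
#include <glm/gtx/scalar_multiplication.hpp>

int main() {
    glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);

    glm::vec4 a = 2.0 * v;  // double * vec4 now resolves via the extension
    glm::vec4 b = 3 * v;    // int * vec4 likewise
    glm::vec4 c = v / 2;    // vec4 / int divides componentwise

    return (a.x == 2.0f && b.y == 6.0f && c.z == 1.5f) ? 0 : 1;
}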
8b1435f20761c2c9c1d907efb4f2e50052a91d47
2,602
hpp
C++
inference-engine/thirdparty/mkl-dnn/src/cpu/gemm_convolution_utils.hpp
tdp2110/dldt
87f321c5365ed813e849ea0ed987354ef2c39743
[ "Apache-2.0" ]
null
null
null
inference-engine/thirdparty/mkl-dnn/src/cpu/gemm_convolution_utils.hpp
tdp2110/dldt
87f321c5365ed813e849ea0ed987354ef2c39743
[ "Apache-2.0" ]
null
null
null
inference-engine/thirdparty/mkl-dnn/src/cpu/gemm_convolution_utils.hpp
tdp2110/dldt
87f321c5365ed813e849ea0ed987354ef2c39743
[ "Apache-2.0" ]
null
null
null
/*******************************************************************************
* Copyright 2016-2019 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#ifndef CPU_JIT_GEMM_CONVOLUTION_UTILS_HPP
#define CPU_JIT_GEMM_CONVOLUTION_UTILS_HPP

#include "c_types_map.hpp"
#include "memory_tracking.hpp"
#include "mkldnn_thread.hpp"

#include "cpu_convolution_pd.hpp"
#include "cpu_engine.hpp"
#include "jit_primitive_conf.hpp"

namespace mkldnn {
namespace impl {
namespace cpu {

namespace jit_gemm_convolution_utils {

template <typename data_type_t>
void im2col_3d(const jit_gemm_conv_conf_t &jcp, const data_type_t *im,
        data_type_t *col, int od);

template <typename data_type_t>
void im2col(const jit_gemm_conv_conf_t &jcp, const data_type_t *__restrict im,
        data_type_t *__restrict col, int hs, int hb, int ws, int wb);

template <typename T>
void im2col_u8(const jit_gemm_conv_conf_t &jcp, const T *__restrict im,
        T* __restrict imtr, uint8_t *__restrict col,
        int hs, int hb, int ws, int wb);

template <typename T>
void im2col_u8_3d(const jit_gemm_conv_conf_t &jcp, const T *__restrict im,
        uint8_t *__restrict col, int od);

void col2im_s32(const jit_gemm_conv_conf_t &jcp,
        const int32_t *__restrict col, int32_t *__restrict im);

void col2im_3d(const jit_gemm_conv_conf_t &jcp, const float *col, float *im,
        int od);

void col2im(const jit_gemm_conv_conf_t &jcp, const float *col, float *im);

status_t init_conf(jit_gemm_conv_conf_t &jcp,
        memory_tracking::registrar_t &scratchpad, const convolution_desc_t &cd,
        const memory_desc_wrapper &src_d, const memory_desc_wrapper &weights_d,
        const memory_desc_wrapper &dst_d, int max_threads);

void bwd_weights_balance(int ithr, int nthr, int ngroups, int mb,
        int &ithr_g, int &nthr_g, int &ithr_mb, int &nthr_mb);

void bwd_weights_reduction_par(int ithr, int nthr,
        const jit_gemm_conv_conf_t &jcp, const float *weights_reduce_ws,
        float *weights);

}
}
}
}

#endif
35.162162
80
0.709839
tdp2110
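The declarations above are MKL-DNN's im2col/col2im family: im2col lays the input out so that every output position's receptive field becomes one column of a matrix, turning the convolution into a single GEMM, and col2im is the inverse scatter used on backward passes. A naive single-channel, stride-1, no-padding reference version for orientation only; the simplified shape handling here is an assumption, not MKL-DNN's jcp-driven layout:

#include <vector>

// Naive im2col: one channel, stride 1, no padding. Input is H x W; output is
// a (KH*KW) x (OH*OW) matrix with OH = H - KH + 1 and OW = W - KW + 1, so the
// convolution becomes weights(1 x KH*KW) * col = output(1 x OH*OW).
std::vector<float> im2col_naive(const std::vector<float>& im,
                                int H, int W, int KH, int KW) {
    const int OH = H - KH + 1;
    const int OW = W - KW + 1;
    std::vector<float> col(static_cast<size_t>(KH) * KW * OH * OW);
    for (int kh = 0; kh < KH; ++kh)
        for (int kw = 0; kw < KW; ++kw)
            for (int oh = 0; oh < OH; ++oh)
                for (int ow = 0; ow < OW; ++ow)
                    // Row (kh, kw) of col holds the input pixel this kernel
                    // tap sees at output position (oh, ow).
                    col[((kh * KW + kw) * OH + oh) * OW + ow] =
                        im[(oh + kh) * W + (ow + kw)];
    return col;
}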
8b14cdfd6ba1b04fda690b1fc6e400128f3edbd1
7,610
cc
C++
base/helpers.cc
TeamNuclear/external_chromium_org_third_party_webrtc
5bd5c72d7c01872fea80698dac196ff9a01dfcba
[ "DOC", "BSD-3-Clause" ]
1
2019-02-22T05:37:57.000Z
2019-02-22T05:37:57.000Z
base/helpers.cc
TeamNuclear/external_chromium_org_third_party_webrtc
5bd5c72d7c01872fea80698dac196ff9a01dfcba
[ "DOC", "BSD-3-Clause" ]
null
null
null
base/helpers.cc
TeamNuclear/external_chromium_org_third_party_webrtc
5bd5c72d7c01872fea80698dac196ff9a01dfcba
[ "DOC", "BSD-3-Clause" ]
2
2016-04-27T21:12:18.000Z
2016-12-25T05:26:28.000Z
/*
 *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/base/helpers.h"

#include <limits>

#if defined(FEATURE_ENABLE_SSL)
#include "webrtc/base/sslconfig.h"
#if defined(SSL_USE_OPENSSL)
#include <openssl/rand.h>
#elif defined(SSL_USE_NSS_RNG)
#include "pk11func.h"
#else
#if defined(WEBRTC_WIN)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <ntsecapi.h>
#endif  // WEBRTC_WIN
#endif  // else
#endif  // FEATURE_ENABLE_SSL

#include "webrtc/base/base64.h"
#include "webrtc/base/basictypes.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/timeutils.h"

// Protect against max macro inclusion.
#undef max

namespace rtc {

// Base class for RNG implementations.
class RandomGenerator {
 public:
  virtual ~RandomGenerator() {}
  virtual bool Init(const void* seed, size_t len) = 0;
  virtual bool Generate(void* buf, size_t len) = 0;
};

#if defined(SSL_USE_OPENSSL)
// The OpenSSL RNG. Need to make sure it doesn't run out of entropy.
class SecureRandomGenerator : public RandomGenerator {
 public:
  SecureRandomGenerator() : inited_(false) {
  }
  ~SecureRandomGenerator() {
  }
  virtual bool Init(const void* seed, size_t len) {
    // By default, seed from the system state.
    if (!inited_) {
      if (RAND_poll() <= 0) {
        return false;
      }
      inited_ = true;
    }
    // Allow app data to be mixed in, if provided.
    if (seed) {
      RAND_seed(seed, len);
    }
    return true;
  }
  virtual bool Generate(void* buf, size_t len) {
    if (!inited_ && !Init(NULL, 0)) {
      return false;
    }
    return (RAND_bytes(reinterpret_cast<unsigned char*>(buf), len) > 0);
  }

 private:
  bool inited_;
};

#elif defined(SSL_USE_NSS_RNG)
// The NSS RNG.
class SecureRandomGenerator : public RandomGenerator {
 public:
  SecureRandomGenerator() {}
  ~SecureRandomGenerator() {}
  virtual bool Init(const void* seed, size_t len) {
    return true;
  }
  virtual bool Generate(void* buf, size_t len) {
    return (PK11_GenerateRandom(reinterpret_cast<unsigned char*>(buf),
                                static_cast<int>(len)) == SECSuccess);
  }
};

#else
#if defined(WEBRTC_WIN)
class SecureRandomGenerator : public RandomGenerator {
 public:
  SecureRandomGenerator() : advapi32_(NULL), rtl_gen_random_(NULL) {}
  ~SecureRandomGenerator() {
    FreeLibrary(advapi32_);
  }

  virtual bool Init(const void* seed, size_t seed_len) {
    // We don't do any additional seeding on Win32, we just use the CryptoAPI
    // RNG (which is exposed as a hidden function off of ADVAPI32 so that we
    // don't need to drag in all of CryptoAPI)
    if (rtl_gen_random_) {
      return true;
    }

    advapi32_ = LoadLibrary(L"advapi32.dll");
    if (!advapi32_) {
      return false;
    }

    rtl_gen_random_ = reinterpret_cast<RtlGenRandomProc>(
        GetProcAddress(advapi32_, "SystemFunction036"));
    if (!rtl_gen_random_) {
      FreeLibrary(advapi32_);
      return false;
    }

    return true;
  }
  virtual bool Generate(void* buf, size_t len) {
    if (!rtl_gen_random_ && !Init(NULL, 0)) {
      return false;
    }
    return (rtl_gen_random_(buf, static_cast<int>(len)) != FALSE);
  }

 private:
  typedef BOOL (WINAPI *RtlGenRandomProc)(PVOID, ULONG);
  HINSTANCE advapi32_;
  RtlGenRandomProc rtl_gen_random_;
};

#elif !defined(FEATURE_ENABLE_SSL)
// No SSL implementation -- use rand()
class SecureRandomGenerator : public RandomGenerator {
 public:
  virtual bool Init(const void* seed, size_t len) {
    if (len >= 4) {
      srand(*reinterpret_cast<const int*>(seed));
    } else {
      srand(*reinterpret_cast<const char*>(seed));
    }
    return true;
  }
  virtual bool Generate(void* buf, size_t len) {
    char* bytes = reinterpret_cast<char*>(buf);
    for (size_t i = 0; i < len; ++i) {
      bytes[i] = static_cast<char>(rand());
    }
    return true;
  }
};

#else
#error No SSL implementation has been selected!

#endif  // WEBRTC_WIN
#endif

// A test random generator, for predictable output.
class TestRandomGenerator : public RandomGenerator {
 public:
  TestRandomGenerator() : seed_(7) {
  }
  ~TestRandomGenerator() {
  }
  virtual bool Init(const void* seed, size_t len) {
    return true;
  }
  virtual bool Generate(void* buf, size_t len) {
    for (size_t i = 0; i < len; ++i) {
      static_cast<uint8*>(buf)[i] = static_cast<uint8>(GetRandom());
    }
    return true;
  }

 private:
  int GetRandom() {
    return ((seed_ = seed_ * 214013L + 2531011L) >> 16) & 0x7fff;
  }
  int seed_;
};

// TODO: Use Base64::Base64Table instead.
static const char BASE64[64] = {
  'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
  'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
  'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
  'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
  'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
  'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
  'w', 'x', 'y', 'z', '0', '1', '2', '3',
  '4', '5', '6', '7', '8', '9', '+', '/'
};

namespace {

// This roundabout way of creating a global RNG is to safeguard against
// indeterminate static initialization order.
scoped_ptr<RandomGenerator>& GetGlobalRng() {
  LIBJINGLE_DEFINE_STATIC_LOCAL(scoped_ptr<RandomGenerator>, global_rng,
                                (new SecureRandomGenerator()));
  return global_rng;
}

RandomGenerator& Rng() {
  return *GetGlobalRng();
}

}  // namespace

void SetRandomTestMode(bool test) {
  if (!test) {
    GetGlobalRng().reset(new SecureRandomGenerator());
  } else {
    GetGlobalRng().reset(new TestRandomGenerator());
  }
}

bool InitRandom(int seed) {
  return InitRandom(reinterpret_cast<const char*>(&seed), sizeof(seed));
}

bool InitRandom(const char* seed, size_t len) {
  if (!Rng().Init(seed, len)) {
    LOG(LS_ERROR) << "Failed to init random generator!";
    return false;
  }
  return true;
}

std::string CreateRandomString(size_t len) {
  std::string str;
  CreateRandomString(len, &str);
  return str;
}

bool CreateRandomString(size_t len,
                        const char* table, int table_size,
                        std::string* str) {
  str->clear();
  scoped_ptr<uint8[]> bytes(new uint8[len]);
  if (!Rng().Generate(bytes.get(), len)) {
    LOG(LS_ERROR) << "Failed to generate random string!";
    return false;
  }
  str->reserve(len);
  for (size_t i = 0; i < len; ++i) {
    str->push_back(table[bytes[i] % table_size]);
  }
  return true;
}

bool CreateRandomString(size_t len, std::string* str) {
  return CreateRandomString(len, BASE64, 64, str);
}

bool CreateRandomString(size_t len, const std::string& table,
                        std::string* str) {
  return CreateRandomString(len, table.c_str(),
                            static_cast<int>(table.size()), str);
}

uint32 CreateRandomId() {
  uint32 id;
  if (!Rng().Generate(&id, sizeof(id))) {
    LOG(LS_ERROR) << "Failed to generate random id!";
  }
  return id;
}

uint64 CreateRandomId64() {
  return static_cast<uint64>(CreateRandomId()) << 32 | CreateRandomId();
}

uint32 CreateRandomNonZeroId() {
  uint32 id;
  do {
    id = CreateRandomId();
  } while (id == 0);
  return id;
}

double CreateRandomDouble() {
  return CreateRandomId() / (std::numeric_limits<uint32>::max() +
      std::numeric_limits<double>::epsilon());
}

}  // namespace rtc
25.622896
77
0.64297
TeamNuclear
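Callers only see the free functions at the bottom of the file; the generator selection happens once behind GetGlobalRng(). Note that the modulo into the 64-entry BASE64 table is unbiased because 64 divides 256, though a custom table whose size doesn't divide 256 would skew slightly. A usage sketch against this API (uint32 is the rtc typedef from basictypes.h):

#include <iostream>
#include <string>

#include "webrtc/base/helpers.h"

int main() {
  rtc::InitRandom(12345);  // optional: mix caller-provided entropy in

  std::string token = rtc::CreateRandomString(24);   // BASE64 alphabet
  uint32 session_id = rtc::CreateRandomNonZeroId();  // guaranteed != 0

  std::cout << token << " " << session_id << std::endl;
  return 0;
}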
8b14fb85d5de2f5499b548bd5fba6b0883c8e558
2,897
cc
C++
chromeos/memory/pagemap.cc
mghgroup/Glide-Browser
6a4c1eaa6632ec55014fee87781c6bbbb92a2af5
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
chromeos/memory/pagemap.cc
mghgroup/Glide-Browser
6a4c1eaa6632ec55014fee87781c6bbbb92a2af5
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
chromeos/memory/pagemap.cc
mghgroup/Glide-Browser
6a4c1eaa6632ec55014fee87781c6bbbb92a2af5
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
2
2021-01-05T23:43:46.000Z
2021-01-07T23:36:34.000Z
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chromeos/memory/pagemap.h"

#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <string>

#include "base/memory/aligned_memory.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/threading/scoped_blocking_call.h"

namespace chromeos {
namespace memory {

namespace {
constexpr char kPagemapFileFormat[] = "/proc/%d/pagemap";
}

Pagemap::~Pagemap() = default;

Pagemap::Pagemap(pid_t pid) {
  if (pid) {
    std::string pagemap_file = base::StringPrintf(kPagemapFileFormat, pid);
    fd_.reset(HANDLE_EINTR(open(pagemap_file.c_str(), O_RDONLY)));
  }
}

bool Pagemap::IsValid() const {
  return fd_.is_valid();
}

bool Pagemap::GetEntries(uint64_t address,
                         uint64_t length,
                         std::vector<PagemapEntry>* entries) const {
  base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
                                                base::BlockingType::WILL_BLOCK);
  DCHECK(IsValid());
  DCHECK(entries);

  const size_t kPageSize = base::GetPageSize();
  DCHECK(base::IsPageAligned(address));
  DCHECK(base::IsPageAligned(length));

  // The size of each pagemap entry to calculate our offset in the file.
  uint64_t num_pages = length / kPageSize;
  if (entries->size() != num_pages) {
    // Shrink or grow entries to the correct length if it was not already.
    entries->resize(num_pages);
    entries->shrink_to_fit();  // If we made it smaller shrink capacity.
  }

  uint64_t pagemap_offset = (address / kPageSize) * sizeof(PagemapEntry);
  uint64_t pagemap_len = num_pages * sizeof(PagemapEntry);
  memset(entries->data(), 0, pagemap_len);

  // The caller was expected to provide a buffer large enough for the number of
  // pages in the region.
  uint64_t total_read = 0;
  while (total_read < pagemap_len) {
    ssize_t bytes_read = HANDLE_EINTR(
        pread(fd_.get(), reinterpret_cast<char*>(entries->data()) + total_read,
              pagemap_len - total_read, pagemap_offset + total_read));
    if (bytes_read <= 0) {
      return false;
    }
    total_read += bytes_read;
  }

  return true;
}

bool Pagemap::GetNumberOfPagesInCore(uint64_t address,
                                     uint64_t length,
                                     uint64_t* pages_in_core) const {
  DCHECK(pages_in_core);
  *pages_in_core = 0;

  std::vector<Pagemap::PagemapEntry> entries(length / base::GetPageSize());
  if (!GetEntries(address, length, &entries)) {
    return false;
  }

  for (const Pagemap::PagemapEntry& entry : entries) {
    if (entry.page_present)
      (*pages_in_core)++;
  }

  return true;
}

}  // namespace memory
}  // namespace chromeos
28.401961
80
0.669313
mghgroup
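Each record in /proc/<pid>/pagemap is one 64-bit word per virtual page: bits 0-54 carry the PFN (or swap info), bit 62 marks a swapped page, bit 63 marks a present page; that is what the PagemapEntry bitfield declared in pagemap.h unpacks, hence the sizeof(PagemapEntry)-based offsets above. A hedged stand-alone decoder; the field layout follows the kernel's pagemap documentation, and since bitfield ordering is compiler-dependent the real header pins it down for the target toolchain:

#include <cstdint>
#include <cstring>

// Stand-in mirroring the documented /proc/<pid>/pagemap record layout.
struct RawPagemapEntry {
  uint64_t pfn : 55;        // bits 0-54: page frame number when present
  uint64_t soft_dirty : 1;  // bit 55: pte is soft-dirty
  uint64_t exclusive : 1;   // bit 56: page exclusively mapped
  uint64_t unused : 4;      // bits 57-60: zero
  uint64_t file_page : 1;   // bit 61: file-page or shared-anon
  uint64_t swapped : 1;     // bit 62: page is swapped
  uint64_t present : 1;     // bit 63: page is present in RAM
};
static_assert(sizeof(RawPagemapEntry) == 8, "one record per page, 64 bits");

bool PageInCore(uint64_t raw_record) {
  RawPagemapEntry entry;
  std::memcpy(&entry, &raw_record, sizeof(entry));  // reinterpret the record
  return entry.present != 0;
}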
8b16464ad45baf650cbef0b4316b226b640b5558
741,483
cpp
C++
makepad/ChakraCore/lib/Backend/GlobOpt.cpp
makepaddev/makepad
25d2f18c8a7c190fd1b199762817b6514118e045
[ "MIT" ]
null
null
null
makepad/ChakraCore/lib/Backend/GlobOpt.cpp
makepaddev/makepad
25d2f18c8a7c190fd1b199762817b6514118e045
[ "MIT" ]
null
null
null
makepad/ChakraCore/lib/Backend/GlobOpt.cpp
makepaddev/makepad
25d2f18c8a7c190fd1b199762817b6514118e045
[ "MIT" ]
null
null
null
//------------------------------------------------------------------------------------------------------- // Copyright (C) Microsoft Corporation and contributors. All rights reserved. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. //------------------------------------------------------------------------------------------------------- #include "Backend.h" #if ENABLE_DEBUG_CONFIG_OPTIONS #define TESTTRACE_PHASE_INSTR(phase, instr, ...) \ if(PHASE_TESTTRACE(phase, this->func)) \ { \ char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; \ Output::Print( \ _u("Testtrace: %s function %s (%s): "), \ Js::PhaseNames[phase], \ instr->m_func->GetJITFunctionBody()->GetDisplayName(), \ instr->m_func->GetDebugNumberSet(debugStringBuffer)); \ Output::Print(__VA_ARGS__); \ Output::Flush(); \ } #else // ENABLE_DEBUG_CONFIG_OPTIONS #define TESTTRACE_PHASE_INSTR(phase, instr, ...) #endif // ENABLE_DEBUG_CONFIG_OPTIONS #if DBG_DUMP #define DO_MEMOP_TRACE() (PHASE_TRACE(Js::MemOpPhase, this->func) ||\ PHASE_TRACE(Js::MemSetPhase, this->func) ||\ PHASE_TRACE(Js::MemCopyPhase, this->func)) #define DO_MEMOP_TRACE_PHASE(phase) (PHASE_TRACE(Js::MemOpPhase, this->func) || PHASE_TRACE(Js::phase ## Phase, this->func)) #define OUTPUT_MEMOP_TRACE(loop, instr, ...) {\ char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];\ Output::Print(15, _u("Function: %s%s, Loop: %u: "), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), loop->GetLoopNumber());\ Output::Print(__VA_ARGS__);\ IR::Instr* __instr__ = instr;\ if(__instr__) __instr__->DumpByteCodeOffset();\ if(__instr__) Output::Print(_u(" (%s)"), Js::OpCodeUtil::GetOpCodeName(__instr__->m_opcode));\ Output::Print(_u("\n"));\ Output::Flush(); \ } #define TRACE_MEMOP(loop, instr, ...) \ if (DO_MEMOP_TRACE()) {\ Output::Print(_u("TRACE MemOp:"));\ OUTPUT_MEMOP_TRACE(loop, instr, __VA_ARGS__)\ } #define TRACE_MEMOP_VERBOSE(loop, instr, ...) if(CONFIG_FLAG(Verbose)) {TRACE_MEMOP(loop, instr, __VA_ARGS__)} #define TRACE_MEMOP_PHASE(phase, loop, instr, ...) \ if (DO_MEMOP_TRACE_PHASE(phase))\ {\ Output::Print(_u("TRACE ") _u(#phase) _u(":"));\ OUTPUT_MEMOP_TRACE(loop, instr, __VA_ARGS__)\ } #define TRACE_MEMOP_PHASE_VERBOSE(phase, loop, instr, ...) if(CONFIG_FLAG(Verbose)) {TRACE_MEMOP_PHASE(phase, loop, instr, __VA_ARGS__)} #else #define DO_MEMOP_TRACE() #define DO_MEMOP_TRACE_PHASE(phase) #define OUTPUT_MEMOP_TRACE(loop, instr, ...) #define TRACE_MEMOP(loop, instr, ...) #define TRACE_MEMOP_VERBOSE(loop, instr, ...) #define TRACE_MEMOP_PHASE(phase, loop, instr, ...) #define TRACE_MEMOP_PHASE_VERBOSE(phase, loop, instr, ...) 
#endif class AutoRestoreVal { private: Value *const originalValue; Value *const tempValue; Value * *const valueRef; public: AutoRestoreVal(Value *const originalValue, Value * *const tempValueRef) : originalValue(originalValue), tempValue(*tempValueRef), valueRef(tempValueRef) { } ~AutoRestoreVal() { if(*valueRef == tempValue) { *valueRef = originalValue; } } PREVENT_COPY(AutoRestoreVal); }; GlobOpt::GlobOpt(Func * func) : func(func), intConstantToStackSymMap(nullptr), intConstantToValueMap(nullptr), currentValue(FirstNewValueNumber), prePassLoop(nullptr), alloc(nullptr), isCallHelper(false), inInlinedBuiltIn(false), rootLoopPrePass(nullptr), noImplicitCallUsesToInsert(nullptr), valuesCreatedForClone(nullptr), valuesCreatedForMerge(nullptr), instrCountSinceLastCleanUp(0), isRecursiveCallOnLandingPad(false), updateInductionVariableValueNumber(false), isPerformingLoopBackEdgeCompensation(false), currentRegion(nullptr), changedSymsAfterIncBailoutCandidate(nullptr), doTypeSpec( !IsTypeSpecPhaseOff(func)), doAggressiveIntTypeSpec( doTypeSpec && DoAggressiveIntTypeSpec(func)), doAggressiveMulIntTypeSpec( doTypeSpec && !PHASE_OFF(Js::AggressiveMulIntTypeSpecPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsAggressiveMulIntTypeSpecDisabled(func->IsLoopBody()))), doDivIntTypeSpec( doAggressiveIntTypeSpec && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsDivIntTypeSpecDisabled(func->IsLoopBody()))), doLossyIntTypeSpec( doTypeSpec && DoLossyIntTypeSpec(func)), doFloatTypeSpec( doTypeSpec && DoFloatTypeSpec(func)), doArrayCheckHoist( DoArrayCheckHoist(func)), doArrayMissingValueCheckHoist( doArrayCheckHoist && DoArrayMissingValueCheckHoist(func)), doArraySegmentHoist( doArrayCheckHoist && DoArraySegmentHoist(ValueType::GetObject(ObjectType::Int32Array), func)), doJsArraySegmentHoist( doArraySegmentHoist && DoArraySegmentHoist(ValueType::GetObject(ObjectType::Array), func)), doArrayLengthHoist( doArrayCheckHoist && DoArrayLengthHoist(func)), doEliminateArrayAccessHelperCall( doArrayCheckHoist && !PHASE_OFF(Js::EliminateArrayAccessHelperCallPhase, func)), doTrackRelativeIntBounds( doAggressiveIntTypeSpec && DoPathDependentValues() && !PHASE_OFF(Js::Phase::TrackRelativeIntBoundsPhase, func)), doBoundCheckElimination( doTrackRelativeIntBounds && !PHASE_OFF(Js::Phase::BoundCheckEliminationPhase, func)), doBoundCheckHoist( doEliminateArrayAccessHelperCall && doBoundCheckElimination && DoConstFold() && !PHASE_OFF(Js::Phase::BoundCheckHoistPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsBoundCheckHoistDisabled(func->IsLoopBody()))), doLoopCountBasedBoundCheckHoist( doBoundCheckHoist && !PHASE_OFF(Js::Phase::LoopCountBasedBoundCheckHoistPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsLoopCountBasedBoundCheckHoistDisabled(func->IsLoopBody()))), doPowIntIntTypeSpec( doAggressiveIntTypeSpec && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsPowIntIntTypeSpecDisabled())), doTagChecks( (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsTagCheckDisabled())), isAsmJSFunc(func->GetJITFunctionBody()->IsAsmJsMode()) { } void GlobOpt::BackwardPass(Js::Phase tag) { BEGIN_CODEGEN_PHASE(this->func, tag); ::BackwardPass backwardPass(this->func, this, tag); backwardPass.Optimize(); END_CODEGEN_PHASE(this->func, tag); } void GlobOpt::Optimize() { this->objectTypeSyms = nullptr; this->func->argInsCount = this->func->GetInParamsCount() - 1; //Don't include "this" pointer in the count. 
if (!func->DoGlobOpt()) { this->lengthEquivBv = nullptr; this->argumentsEquivBv = nullptr; this->callerEquivBv = nullptr; // Still need to run the dead store phase to calculate the live reg on back edge this->BackwardPass(Js::DeadStorePhase); CannotAllocateArgumentsObjectOnStack(); return; } { this->lengthEquivBv = this->func->m_symTable->m_propertyEquivBvMap->Lookup(Js::PropertyIds::length, nullptr); // Used to kill live "length" properties this->argumentsEquivBv = func->m_symTable->m_propertyEquivBvMap->Lookup(Js::PropertyIds::arguments, nullptr); // Used to kill live "arguments" properties this->callerEquivBv = func->m_symTable->m_propertyEquivBvMap->Lookup(Js::PropertyIds::caller, nullptr); // Used to kill live "caller" properties // The backward phase needs the glob opt's allocator to allocate the propertyTypeValueMap // in GlobOpt::EnsurePropertyTypeValue and ranges of instructions where int overflow may be ignored. // (see BackwardPass::TrackIntUsage) PageAllocator * pageAllocator = this->func->m_alloc->GetPageAllocator(); NoRecoverMemoryJitArenaAllocator localAlloc(_u("BE-GlobOpt"), pageAllocator, Js::Throw::OutOfMemory); this->alloc = &localAlloc; NoRecoverMemoryJitArenaAllocator localTempAlloc(_u("BE-GlobOpt temp"), pageAllocator, Js::Throw::OutOfMemory); this->tempAlloc = &localTempAlloc; // The forward passes use info (upwardExposedUses) from the backward pass. This info // isn't available for some of the symbols created during the backward pass, or the forward pass. // Keep track of the last symbol for which we're guaranteed to have data. this->maxInitialSymID = this->func->m_symTable->GetMaxSymID(); this->BackwardPass(Js::BackwardPhase); this->ForwardPass(); } this->BackwardPass(Js::DeadStorePhase); this->TailDupPass(); } bool GlobOpt::ShouldExpectConventionalArrayIndexValue(IR::IndirOpnd *const indirOpnd) { Assert(indirOpnd); if(!indirOpnd->GetIndexOpnd()) { return indirOpnd->GetOffset() >= 0; } IR::RegOpnd *const indexOpnd = indirOpnd->GetIndexOpnd(); if(indexOpnd->m_sym->m_isNotInt) { // Typically, single-def or any sym-specific information for type-specialized syms should not be used because all of // their defs will not have been accounted for until after the forward pass. But m_isNotInt is only ever changed from // false to true, so it's okay in this case. return false; } StackSym *indexVarSym = indexOpnd->m_sym; if(indexVarSym->IsTypeSpec()) { indexVarSym = indexVarSym->GetVarEquivSym(nullptr); Assert(indexVarSym); } else if(!IsLoopPrePass()) { // Don't use single-def info or const flags for type-specialized syms, as all of their defs will not have been accounted // for until after the forward pass. Also, don't use the const flags in a loop prepass because the const flags may not // be up-to-date. 
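        // Illustrative shape of the hazard (hypothetical source): in "i = 0; while (...) { a[i]; i = f(); }",
        // the prepass reaches a[i] before the loop-body redefinition of i has been processed, so i's const
        // flag would still claim the value 0; that is why the IsIntConst fast path below is bypassed on the
        // prepass.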
StackSym *const indexSym = indexOpnd->m_sym; if(indexSym->IsIntConst()) { return indexSym->GetIntConstValue() >= 0; } } Value *const indexValue = CurrentBlockData()->FindValue(indexVarSym); if(!indexValue) { // Treat it as Uninitialized, assume it's going to be valid return true; } ValueInfo *const indexValueInfo = indexValue->GetValueInfo(); int32 indexConstantValue; if(indexValueInfo->TryGetIntConstantValue(&indexConstantValue)) { return indexConstantValue >= 0; } if(indexValueInfo->IsUninitialized()) { // Assume it's going to be valid return true; } return indexValueInfo->HasBeenNumber() && !indexValueInfo->HasBeenFloat(); } // // Either result is float or 1/x or cst1/cst2 where cst1%cst2 != 0 // ValueType GlobOpt::GetDivValueType(IR::Instr* instr, Value* src1Val, Value* src2Val, bool specialize) { ValueInfo *src1ValueInfo = (src1Val ? src1Val->GetValueInfo() : nullptr); ValueInfo *src2ValueInfo = (src2Val ? src2Val->GetValueInfo() : nullptr); if (instr->IsProfiledInstr() && instr->m_func->HasProfileInfo()) { ValueType resultType = instr->m_func->GetReadOnlyProfileInfo()->GetDivProfileInfo(static_cast<Js::ProfileId>(instr->AsProfiledInstr()->u.profileId)); if (resultType.IsLikelyInt()) { if (specialize && src1ValueInfo && src2ValueInfo && ((src1ValueInfo->IsInt() && src2ValueInfo->IsInt()) || (this->DoDivIntTypeSpec() && src1ValueInfo->IsLikelyInt() && src2ValueInfo->IsLikelyInt()))) { return ValueType::GetInt(true); } return resultType; } // Consider: Checking that the sources are numbers. if (resultType.IsLikelyFloat()) { return ValueType::Float; } return resultType; } int32 src1IntConstantValue; if(!src1ValueInfo || !src1ValueInfo->TryGetIntConstantValue(&src1IntConstantValue)) { return ValueType::Number; } if (src1IntConstantValue == 1) { return ValueType::Float; } int32 src2IntConstantValue; if(!src2Val || !src2ValueInfo->TryGetIntConstantValue(&src2IntConstantValue)) { return ValueType::Number; } if (src2IntConstantValue // Avoid divide by zero && !(src1IntConstantValue == 0x80000000 && src2IntConstantValue == -1) // Avoid integer overflow && (src1IntConstantValue % src2IntConstantValue) != 0) { return ValueType::Float; } return ValueType::Number; } void GlobOpt::ForwardPass() { BEGIN_CODEGEN_PHASE(this->func, Js::ForwardPhase); #if DBG_DUMP if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::GlobOptPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId())) { this->func->DumpHeader(); } if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::GlobOptPhase)) { this->TraceSettings(); } #endif // GetConstantCount() gives us the right size to pick for the SparseArray, but we may need more if we've inlined // functions with constants. There will be a gap in the symbol numbering between the main constants and // the inlined ones, so we'll most likely need a new array chunk. Make the min size of the array chunks be 64 // in case we have a main function with very few constants and a bunch of constants from inlined functions. 
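    // E.g. (hypothetical numbers): a main function with constants at sym ids 1..5 that inlines a callee
    // whose constants start at sym id 200 needs a second chunk for the higher numbering range anyway; the
    // 64 minimum below just keeps each range within a single, reasonably sized chunk.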
this->byteCodeConstantValueArray = SparseArray<Value>::New(this->alloc, max(this->func->GetJITFunctionBody()->GetConstCount(), 64U)); this->byteCodeConstantValueNumbersBv = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); this->tempBv = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); this->prePassCopyPropSym = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); this->slotSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); this->byteCodeUses = nullptr; this->propertySymUse = nullptr; // changedSymsAfterIncBailoutCandidate helps track building incremental bailout in ForwardPass this->changedSymsAfterIncBailoutCandidate = JitAnew(alloc, BVSparse<JitArenaAllocator>, alloc); #if DBG this->byteCodeUsesBeforeOpt = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldCopyPropPhase) && this->DoFunctionFieldCopyProp()) { Output::Print(_u("TRACE: CanDoFieldCopyProp Func: ")); this->func->DumpFullFunctionName(); Output::Print(_u("\n")); } #endif OpndList localNoImplicitCallUsesToInsert(alloc); this->noImplicitCallUsesToInsert = &localNoImplicitCallUsesToInsert; IntConstantToStackSymMap localIntConstantToStackSymMap(alloc); this->intConstantToStackSymMap = &localIntConstantToStackSymMap; IntConstantToValueMap localIntConstantToValueMap(alloc); this->intConstantToValueMap = &localIntConstantToValueMap; Int64ConstantToValueMap localInt64ConstantToValueMap(alloc); this->int64ConstantToValueMap = &localInt64ConstantToValueMap; AddrConstantToValueMap localAddrConstantToValueMap(alloc); this->addrConstantToValueMap = &localAddrConstantToValueMap; StringConstantToValueMap localStringConstantToValueMap(alloc); this->stringConstantToValueMap = &localStringConstantToValueMap; SymIdToInstrMap localPrePassInstrMap(alloc); this->prePassInstrMap = &localPrePassInstrMap; ValueSetByValueNumber localValuesCreatedForClone(alloc, 64); this->valuesCreatedForClone = &localValuesCreatedForClone; ValueNumberPairToValueMap localValuesCreatedForMerge(alloc, 64); this->valuesCreatedForMerge = &localValuesCreatedForMerge; #if DBG BVSparse<JitArenaAllocator> localFinishedStackLiteralInitFld(alloc); this->finishedStackLiteralInitFld = &localFinishedStackLiteralInitFld; #endif FOREACH_BLOCK_IN_FUNC_EDITING(block, this->func) { this->OptBlock(block); } NEXT_BLOCK_IN_FUNC_EDITING; if (!PHASE_OFF(Js::MemOpPhase, this->func)) { ProcessMemOp(); } this->noImplicitCallUsesToInsert = nullptr; this->intConstantToStackSymMap = nullptr; this->intConstantToValueMap = nullptr; this->int64ConstantToValueMap = nullptr; this->addrConstantToValueMap = nullptr; this->stringConstantToValueMap = nullptr; #if DBG this->finishedStackLiteralInitFld = nullptr; uint freedCount = 0; uint spilledCount = 0; #endif FOREACH_BLOCK_IN_FUNC(block, this->func) { #if DBG if (block->GetDataUseCount() == 0) { freedCount++; } else { spilledCount++; } #endif block->SetDataUseCount(0); if (block->cloneStrCandidates) { JitAdelete(this->alloc, block->cloneStrCandidates); block->cloneStrCandidates = nullptr; } } NEXT_BLOCK_IN_FUNC; // Make sure we free most of them. 
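    // (freedCount counts blocks whose GlobOptData use count reached zero as their successors consumed it;
    // spilledCount counts blocks still holding data here. The assert below checks that the common case held,
    // namely that most block data was released as soon as its last successor was processed.)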
Assert(freedCount >= spilledCount); // this->alloc will be freed right after return, no need to free it here this->changedSymsAfterIncBailoutCandidate = nullptr; END_CODEGEN_PHASE(this->func, Js::ForwardPhase); } void GlobOpt::OptBlock(BasicBlock *block) { if (this->func->m_fg->RemoveUnreachableBlock(block, this)) { GOPT_TRACE(_u("Removing unreachable block #%d\n"), block->GetBlockNum()); return; } Loop * loop = block->loop; if (loop && block->isLoopHeader) { if (loop != this->prePassLoop) { OptLoops(loop); if (!this->IsLoopPrePass() && DoFieldPRE(loop)) { // Note: !IsLoopPrePass means this was a root loop pre-pass. FieldPre() is called once per loop. this->FieldPRE(loop); // Re-optimize the landing pad BasicBlock *landingPad = loop->landingPad; this->isRecursiveCallOnLandingPad = true; this->OptBlock(landingPad); this->isRecursiveCallOnLandingPad = false; } } } this->currentBlock = block; PrepareLoopArrayCheckHoist(); block->MergePredBlocksValueMaps(this); this->intOverflowCurrentlyMattersInRange = true; this->intOverflowDoesNotMatterRange = this->currentBlock->intOverflowDoesNotMatterRange; if (loop && DoFieldHoisting(loop)) { if (block->isLoopHeader) { if (!this->IsLoopPrePass()) { this->PrepareFieldHoisting(loop); } else if (loop == this->rootLoopPrePass) { this->PreparePrepassFieldHoisting(loop); } } } else { Assert(!TrackHoistableFields() || !HasHoistableFields(CurrentBlockData())); if (!DoFieldCopyProp() && !DoFieldRefOpts()) { this->KillAllFields(CurrentBlockData()->liveFields); } } this->tempAlloc->Reset(); if(loop && block->isLoopHeader) { loop->firstValueNumberInLoop = this->currentValue; } GOPT_TRACE_BLOCK(block, true); FOREACH_INSTR_IN_BLOCK_EDITING(instr, instrNext, block) { GOPT_TRACE_INSTRTRACE(instr); BailOutInfo* oldBailOutInfo = nullptr; bool isCheckAuxBailoutNeeded = this->func->IsJitInDebugMode() && !this->IsLoopPrePass(); if (isCheckAuxBailoutNeeded && instr->HasAuxBailOut() && !instr->HasBailOutInfo()) { oldBailOutInfo = instr->GetBailOutInfo(); Assert(oldBailOutInfo); } bool isInstrRemoved = false; instrNext = this->OptInstr(instr, &isInstrRemoved); // If we still have instrs with only aux bail out, convert aux bail out back to regular bail out and fill it. // During OptInstr some instr can be moved out to a different block, in this case bailout info is going to be replaced // with e.g. loop bailout info which is filled as part of processing that block, thus we don't need to fill it here. if (isCheckAuxBailoutNeeded && !isInstrRemoved && instr->HasAuxBailOut() && !instr->HasBailOutInfo()) { if (instr->GetBailOutInfo() == oldBailOutInfo) { instr->PromoteAuxBailOut(); FillBailOutInfo(block, instr->GetBailOutInfo()); } else { AssertMsg(instr->GetBailOutInfo(), "With aux bailout, the bailout info should not be removed by OptInstr."); } } } NEXT_INSTR_IN_BLOCK_EDITING; GOPT_TRACE_BLOCK(block, false); if (block->loop) { if (IsLoopPrePass()) { if (DoBoundCheckHoist()) { DetectUnknownChangesToInductionVariables(&block->globOptData); } } else { isPerformingLoopBackEdgeCompensation = true; Assert(this->tempBv->IsEmpty()); BVSparse<JitArenaAllocator> tempBv2(this->tempAlloc); // On loop back-edges, we need to restore the state of the type specialized // symbols to that of the loop header. 
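            // Compensation sketch for the loop below (the set algebra mirrors the comments on each step):
            // e.g. a sym that is var-typed on entry to the header (varSymsOnEntry), live on the back-edge,
            // but not var-live in this tail block gets a ToVar conversion here; the int32/float64 steps
            // that follow are the symmetric conversions for the other representations.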
FOREACH_SUCCESSOR_BLOCK(succ, block) { if (succ->isLoopHeader && succ->loop->IsDescendentOrSelf(block->loop)) { BVSparse<JitArenaAllocator> *liveOnBackEdge = block->loop->regAlloc.liveOnBackEdgeSyms; this->tempBv->Minus(block->loop->varSymsOnEntry, block->globOptData.liveVarSyms); this->tempBv->And(liveOnBackEdge); this->ToVar(this->tempBv, block); // Lossy int in the loop header, and no int on the back-edge - need a lossy conversion to int this->tempBv->Minus(block->loop->lossyInt32SymsOnEntry, block->globOptData.liveInt32Syms); this->tempBv->And(liveOnBackEdge); this->ToInt32(this->tempBv, block, true /* lossy */); // Lossless int in the loop header, and no lossless int on the back-edge - need a lossless conversion to int this->tempBv->Minus(block->loop->int32SymsOnEntry, block->loop->lossyInt32SymsOnEntry); tempBv2.Minus(block->globOptData.liveInt32Syms, block->globOptData.liveLossyInt32Syms); this->tempBv->Minus(&tempBv2); this->tempBv->And(liveOnBackEdge); this->ToInt32(this->tempBv, block, false /* lossy */); this->tempBv->Minus(block->loop->float64SymsOnEntry, block->globOptData.liveFloat64Syms); this->tempBv->And(liveOnBackEdge); this->ToFloat64(this->tempBv, block); #ifdef ENABLE_SIMDJS // SIMD_JS // Compensate on backedge if sym is live on loop entry but not on backedge this->tempBv->Minus(block->loop->simd128F4SymsOnEntry, block->globOptData.liveSimd128F4Syms); this->tempBv->And(liveOnBackEdge); this->ToTypeSpec(this->tempBv, block, TySimd128F4, IR::BailOutSimd128F4Only); this->tempBv->Minus(block->loop->simd128I4SymsOnEntry, block->globOptData.liveSimd128I4Syms); this->tempBv->And(liveOnBackEdge); this->ToTypeSpec(this->tempBv, block, TySimd128I4, IR::BailOutSimd128I4Only); #endif // For ints and floats, go aggressive and type specialize in the landing pad any symbol which was specialized on // entry to the loop body (in the loop header), and is still specialized on this tail, but wasn't specialized in // the landing pad. // Lossy int in the loop header and no int in the landing pad - need a lossy conversion to int // (entry.lossyInt32 - landingPad.int32) this->tempBv->Minus(block->loop->lossyInt32SymsOnEntry, block->loop->landingPad->globOptData.liveInt32Syms); this->tempBv->And(liveOnBackEdge); this->ToInt32(this->tempBv, block->loop->landingPad, true /* lossy */); // Lossless int in the loop header, and no lossless int in the landing pad - need a lossless conversion to int // ((entry.int32 - entry.lossyInt32) - (landingPad.int32 - landingPad.lossyInt32)) this->tempBv->Minus(block->loop->int32SymsOnEntry, block->loop->lossyInt32SymsOnEntry); tempBv2.Minus( block->loop->landingPad->globOptData.liveInt32Syms, block->loop->landingPad->globOptData.liveLossyInt32Syms); this->tempBv->Minus(&tempBv2); this->tempBv->And(liveOnBackEdge); this->ToInt32(this->tempBv, block->loop->landingPad, false /* lossy */); // ((entry.float64 - landingPad.float64) & block.float64) this->tempBv->Minus(block->loop->float64SymsOnEntry, block->loop->landingPad->globOptData.liveFloat64Syms); this->tempBv->And(block->globOptData.liveFloat64Syms); this->tempBv->And(liveOnBackEdge); this->ToFloat64(this->tempBv, block->loop->landingPad); #ifdef ENABLE_SIMDJS // SIMD_JS // compensate on landingpad if live on loopEntry and Backedge. 
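                    // ((entry.simd128F4 - landingPad.simd128F4) & block.simd128F4), mirroring the float64 set algebra above.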
this->tempBv->Minus(block->loop->simd128F4SymsOnEntry, block->loop->landingPad->globOptData.liveSimd128F4Syms); this->tempBv->And(block->globOptData.liveSimd128F4Syms); this->tempBv->And(liveOnBackEdge); this->ToTypeSpec(this->tempBv, block->loop->landingPad, TySimd128F4, IR::BailOutSimd128F4Only); this->tempBv->Minus(block->loop->simd128I4SymsOnEntry, block->loop->landingPad->globOptData.liveSimd128I4Syms); this->tempBv->And(block->globOptData.liveSimd128I4Syms); this->tempBv->And(liveOnBackEdge); this->ToTypeSpec(this->tempBv, block->loop->landingPad, TySimd128I4, IR::BailOutSimd128I4Only); #endif // Now that we're done with the liveFields within this loop, trim the set to those syms // that the backward pass told us were live out of the loop. // This assumes we have no further need of the liveFields within the loop. if (block->loop->liveOutFields) { block->globOptData.liveFields->And(block->loop->liveOutFields); } } } NEXT_SUCCESSOR_BLOCK; this->tempBv->ClearAll(); isPerformingLoopBackEdgeCompensation = false; } } #if DBG // The set of live lossy int32 syms should be a subset of all live int32 syms this->tempBv->And(block->globOptData.liveInt32Syms, block->globOptData.liveLossyInt32Syms); Assert(this->tempBv->Count() == block->globOptData.liveLossyInt32Syms->Count()); // The set of live lossy int32 syms should be a subset of live var or float syms (var or float sym containing the lossless // value of the sym should be live) this->tempBv->Or(block->globOptData.liveVarSyms, block->globOptData.liveFloat64Syms); this->tempBv->And(block->globOptData.liveLossyInt32Syms); Assert(this->tempBv->Count() == block->globOptData.liveLossyInt32Syms->Count()); this->tempBv->ClearAll(); Assert(this->currentBlock == block); #endif } void GlobOpt::OptLoops(Loop *loop) { Assert(loop != nullptr); #if DBG if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldCopyPropPhase) && !DoFunctionFieldCopyProp() && DoFieldCopyProp(loop)) { Output::Print(_u("TRACE: CanDoFieldCopyProp Loop: ")); this->func->DumpFullFunctionName(); uint loopNumber = loop->GetLoopNumber(); Assert(loopNumber != Js::LoopHeader::NoLoop); Output::Print(_u(" Loop: %d\n"), loopNumber); } #endif Loop *previousLoop = this->prePassLoop; this->prePassLoop = loop; if (previousLoop == nullptr) { Assert(this->rootLoopPrePass == nullptr); this->rootLoopPrePass = loop; this->prePassInstrMap->Clear(); if (loop->parent == nullptr) { // Outer most loop... 
this->prePassCopyPropSym->ClearAll(); } } if (loop->symsUsedBeforeDefined == nullptr) { loop->symsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->likelyIntSymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->likelyNumberSymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->forceFloat64SymsOnEntry = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); #ifdef ENABLE_SIMDJS loop->likelySimd128F4SymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->likelySimd128I4SymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->forceSimd128F4SymsOnEntry = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->forceSimd128I4SymsOnEntry = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); #endif loop->symsDefInLoop = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->fieldKilled = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->fieldPRESymStore = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->allFieldsKilled = false; } else { loop->symsUsedBeforeDefined->ClearAll(); loop->likelyIntSymsUsedBeforeDefined->ClearAll(); loop->likelyNumberSymsUsedBeforeDefined->ClearAll(); loop->forceFloat64SymsOnEntry->ClearAll(); #ifdef ENABLE_SIMDJS loop->likelySimd128F4SymsUsedBeforeDefined->ClearAll(); loop->likelySimd128I4SymsUsedBeforeDefined->ClearAll(); loop->forceSimd128F4SymsOnEntry->ClearAll(); loop->forceSimd128I4SymsOnEntry->ClearAll(); #endif loop->symsDefInLoop->ClearAll(); loop->fieldKilled->ClearAll(); loop->allFieldsKilled = false; loop->initialValueFieldMap.Reset(); } FOREACH_BLOCK_IN_LOOP(block, loop) { block->SetDataUseCount(block->GetSuccList()->Count()); OptBlock(block); } NEXT_BLOCK_IN_LOOP; if (previousLoop == nullptr) { Assert(this->rootLoopPrePass == loop); this->rootLoopPrePass = nullptr; } this->prePassLoop = previousLoop; } void GlobOpt::TailDupPass() { FOREACH_LOOP_IN_FUNC_EDITING(loop, this->func) { BasicBlock* header = loop->GetHeadBlock(); BasicBlock* loopTail = nullptr; FOREACH_PREDECESSOR_BLOCK(pred, header) { if (loop->IsDescendentOrSelf(pred->loop)) { loopTail = pred; break; } } NEXT_PREDECESSOR_BLOCK; if (loopTail) { AssertMsg(loopTail->GetLastInstr()->IsBranchInstr(), "LastInstr of loop should always be a branch no?"); if (!loopTail->GetPredList()->HasOne()) { TryTailDup(loopTail->GetLastInstr()->AsBranchInstr()); } } } NEXT_LOOP_IN_FUNC_EDITING; } bool GlobOpt::TryTailDup(IR::BranchInstr *tailBranch) { if (PHASE_OFF(Js::TailDupPhase, tailBranch->m_func->GetTopFunc())) { return false; } if (tailBranch->IsConditional()) { return false; } IR::Instr *instr; uint instrCount = 0; for (instr = tailBranch->GetPrevRealInstrOrLabel(); !instr->IsLabelInstr(); instr = instr->GetPrevRealInstrOrLabel()) { if (instr->HasBailOutInfo()) { break; } if (!OpCodeAttr::CanCSE(instr->m_opcode)) { // Consider: We could be more aggressive here break; } instrCount++; if (instrCount > 1) { // Consider: If copy handled single-def tmps renaming, we could do more instrs break; } } if (!instr->IsLabelInstr()) { return false; } IR::LabelInstr *mergeLabel = instr->AsLabelInstr(); IR::Instr *mergeLabelPrev = mergeLabel->m_prev; // Skip unreferenced labels while (mergeLabelPrev->IsLabelInstr() && mergeLabelPrev->AsLabelInstr()->labelRefs.Empty()) { mergeLabelPrev = mergeLabelPrev->m_prev; } BasicBlock* labelBlock = mergeLabel->GetBasicBlock(); uint origPredCount = 
labelBlock->GetPredList()->Count(); uint dupCount = 0; // We are good to go. Let's do the tail duplication. FOREACH_SLISTCOUNTED_ENTRY_EDITING(IR::BranchInstr*, branchEntry, &mergeLabel->labelRefs, iter) { if (branchEntry->IsUnconditional() && !branchEntry->IsMultiBranch() && branchEntry != mergeLabelPrev && branchEntry != tailBranch) { for (instr = mergeLabel->m_next; instr != tailBranch; instr = instr->m_next) { branchEntry->InsertBefore(instr->Copy()); } instr = branchEntry; branchEntry->ReplaceTarget(mergeLabel, tailBranch->GetTarget()); while(!instr->IsLabelInstr()) { instr = instr->m_prev; } BasicBlock* branchBlock = instr->AsLabelInstr()->GetBasicBlock(); labelBlock->RemovePred(branchBlock, func->m_fg); func->m_fg->AddEdge(branchBlock, tailBranch->GetTarget()->GetBasicBlock()); dupCount++; } } NEXT_SLISTCOUNTED_ENTRY_EDITING; // If we've duplicated everywhere, tail block is dead and should be removed. if (dupCount == origPredCount) { AssertMsg(mergeLabel->labelRefs.Empty(), "Should not remove block with referenced label."); func->m_fg->RemoveBlock(labelBlock, nullptr, true); } return true; } void GlobOpt::ToVar(BVSparse<JitArenaAllocator> *bv, BasicBlock *block) { FOREACH_BITSET_IN_SPARSEBV(id, bv) { StackSym *stackSym = this->func->m_symTable->FindStackSym(id); IR::RegOpnd *newOpnd = IR::RegOpnd::New(stackSym, TyVar, this->func); IR::Instr *lastInstr = block->GetLastInstr(); if (lastInstr->IsBranchInstr() || lastInstr->m_opcode == Js::OpCode::BailTarget) { // If branch is using this symbol, hoist the operand as the ToVar load will get // inserted right before the branch. IR::Opnd *src1 = lastInstr->GetSrc1(); if (src1) { if (src1->IsRegOpnd() && src1->AsRegOpnd()->m_sym == stackSym) { lastInstr->HoistSrc1(Js::OpCode::Ld_A); } IR::Opnd *src2 = lastInstr->GetSrc2(); if (src2) { if (src2->IsRegOpnd() && src2->AsRegOpnd()->m_sym == stackSym) { lastInstr->HoistSrc2(Js::OpCode::Ld_A); } } } this->ToVar(lastInstr, newOpnd, block, nullptr, false); } else { IR::Instr *lastNextInstr = lastInstr->m_next; this->ToVar(lastNextInstr, newOpnd, block, nullptr, false); } } NEXT_BITSET_IN_SPARSEBV; } void GlobOpt::ToInt32(BVSparse<JitArenaAllocator> *bv, BasicBlock *block, bool lossy, IR::Instr *insertBeforeInstr) { return this->ToTypeSpec(bv, block, TyInt32, IR::BailOutIntOnly, lossy, insertBeforeInstr); } void GlobOpt::ToFloat64(BVSparse<JitArenaAllocator> *bv, BasicBlock *block) { return this->ToTypeSpec(bv, block, TyFloat64, IR::BailOutNumberOnly); } void GlobOpt::ToTypeSpec(BVSparse<JitArenaAllocator> *bv, BasicBlock *block, IRType toType, IR::BailOutKind bailOutKind, bool lossy, IR::Instr *insertBeforeInstr) { FOREACH_BITSET_IN_SPARSEBV(id, bv) { StackSym *stackSym = this->func->m_symTable->FindStackSym(id); IRType fromType = TyIllegal; // Win8 bug: 757126. If we are trying to type specialize the arguments object, // let's make sure stack args optimization is not enabled. This is a problem, particularly, // if the instruction comes from an unreachable block. In other cases, the pass on the // instruction itself should disable arguments object optimization. 
if(block->globOptData.argObjSyms && block->globOptData.IsArgumentsSymID(id)) { CannotAllocateArgumentsObjectOnStack(); } if (block->globOptData.liveVarSyms->Test(id)) { fromType = TyVar; } else if (block->globOptData.liveInt32Syms->Test(id) && !block->globOptData.liveLossyInt32Syms->Test(id)) { fromType = TyInt32; stackSym = stackSym->GetInt32EquivSym(this->func); } else if (block->globOptData.liveFloat64Syms->Test(id)) { fromType = TyFloat64; stackSym = stackSym->GetFloat64EquivSym(this->func); } else { #ifdef ENABLE_SIMDJS Assert(block->globOptData.IsLiveAsSimd128(stackSym)); if (block->globOptData.IsLiveAsSimd128F4(stackSym)) { fromType = TySimd128F4; stackSym = stackSym->GetSimd128F4EquivSym(this->func); } else { fromType = TySimd128I4; stackSym = stackSym->GetSimd128I4EquivSym(this->func); } #else Assert(UNREACHED); #endif } IR::RegOpnd *newOpnd = IR::RegOpnd::New(stackSym, fromType, this->func); IR::Instr *lastInstr = block->GetLastInstr(); if (!insertBeforeInstr && lastInstr->IsBranchInstr()) { // If branch is using this symbol, hoist the operand as the ToInt32 load will get // inserted right before the branch. IR::Instr *instrPrev = lastInstr->m_prev; IR::Opnd *src1 = lastInstr->GetSrc1(); if (src1) { if (src1->IsRegOpnd() && src1->AsRegOpnd()->m_sym == stackSym) { lastInstr->HoistSrc1(Js::OpCode::Ld_A); } IR::Opnd *src2 = lastInstr->GetSrc2(); if (src2) { if (src2->IsRegOpnd() && src2->AsRegOpnd()->m_sym == stackSym) { lastInstr->HoistSrc2(Js::OpCode::Ld_A); } } // Did we insert anything? if (lastInstr->m_prev != instrPrev) { // If we had ByteCodeUses right before the branch, move them back down. IR::Instr *insertPoint = lastInstr; for (IR::Instr *instrBytecode = instrPrev; instrBytecode->m_opcode == Js::OpCode::ByteCodeUses; instrBytecode = instrBytecode->m_prev) { instrBytecode->Unlink(); insertPoint->InsertBefore(instrBytecode); insertPoint = instrBytecode; } } } } this->ToTypeSpecUse(nullptr, newOpnd, block, nullptr, nullptr, toType, bailOutKind, lossy, insertBeforeInstr); } NEXT_BITSET_IN_SPARSEBV; } PRECandidatesList * GlobOpt::FindPossiblePRECandidates(Loop *loop, JitArenaAllocator *alloc) { // Find the set of PRE candidates BasicBlock *loopHeader = loop->GetHeadBlock(); PRECandidatesList *candidates = nullptr; bool firstBackEdge = true; FOREACH_PREDECESSOR_BLOCK(blockPred, loopHeader) { if (!loop->IsDescendentOrSelf(blockPred->loop)) { // Not a loop back-edge continue; } if (firstBackEdge) { candidates = this->FindBackEdgePRECandidates(blockPred, alloc); } else { blockPred->globOptData.RemoveUnavailableCandidates(candidates); } } NEXT_PREDECESSOR_BLOCK; return candidates; } BOOL GlobOpt::PreloadPRECandidate(Loop *loop, GlobHashBucket* candidate) { // Insert a load for each field PRE candidate. PropertySym *propertySym = candidate->value->AsPropertySym(); StackSym *objPtrSym = propertySym->m_stackSym; // If objPtr isn't live, we'll retry later. // Another PRE candidate may insert a load for it. 
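    // Illustrative (hypothetical) source: "for (...) { t = o.x.y; }". The o.x.y candidate can only be
    // preloaded once the sym holding o.x is live in the landing pad, which the separate o.x candidate's
    // load provides; until then this candidate is left in the list and retried on a later iteration of
    // PreloadPRECandidates.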
    if (!loop->landingPad->globOptData.IsLive(objPtrSym))
    {
        return false;
    }

    BasicBlock *landingPad = loop->landingPad;
    Value *value = candidate->element;
    Sym *symStore = value->GetValueInfo()->GetSymStore();

    // The symStore can't be live into the loop
    // The symStore needs to still have the same value
    Assert(symStore && symStore->IsStackSym());

    if (loop->landingPad->globOptData.IsLive(symStore))
    {
        // May have already been hoisted:
        //  o.x = t1;
        //  o.y = t1;
        return false;
    }

    Value *landingPadValue = landingPad->globOptData.FindValue(propertySym);

    // Value should be added as initial value or already be there.
    Assert(landingPadValue);

    IR::Instr * ldInstr = this->prePassInstrMap->Lookup(propertySym->m_id, nullptr);
    Assert(ldInstr);

    // Create instr to put in landing pad for compensation
    Assert(IsPREInstrCandidateLoad(ldInstr->m_opcode));
    IR::SymOpnd *ldSrc = ldInstr->GetSrc1()->AsSymOpnd();

    if (ldSrc->m_sym != propertySym)
    {
        // It's possible that the propertySyms are different but have equivalent objPtrs. Verify their values.
        Value *val1 = CurrentBlockData()->FindValue(ldSrc->m_sym->AsPropertySym()->m_stackSym);
        Value *val2 = CurrentBlockData()->FindValue(propertySym->m_stackSym);
        if (!val1 || !val2 || val1->GetValueNumber() != val2->GetValueNumber())
        {
            return false;
        }
    }

    ldInstr = ldInstr->Copy();

    // Consider: Shouldn't be necessary once we have copy-prop in prepass...
    ldInstr->GetSrc1()->AsSymOpnd()->m_sym = propertySym;
    ldSrc = ldInstr->GetSrc1()->AsSymOpnd();

    if (ldSrc->IsPropertySymOpnd())
    {
        IR::PropertySymOpnd *propSymOpnd = ldSrc->AsPropertySymOpnd();
        IR::PropertySymOpnd *newPropSymOpnd;

        newPropSymOpnd = propSymOpnd->AsPropertySymOpnd()->CopyWithoutFlowSensitiveInfo(this->func);
        ldInstr->ReplaceSrc1(newPropSymOpnd);
    }

    if (ldInstr->GetDst()->AsRegOpnd()->m_sym != symStore)
    {
        ldInstr->ReplaceDst(IR::RegOpnd::New(symStore->AsStackSym(), TyVar, this->func));
    }

    ldInstr->GetSrc1()->SetIsJITOptimizedReg(true);
    ldInstr->GetDst()->SetIsJITOptimizedReg(true);

    landingPad->globOptData.liveVarSyms->Set(symStore->m_id);
    loop->fieldPRESymStore->Set(symStore->m_id);

    ValueType valueType(ValueType::Uninitialized);
    Value *initialValue = nullptr;

    if (loop->initialValueFieldMap.TryGetValue(propertySym, &initialValue))
    {
        if (ldInstr->IsProfiledInstr())
        {
            if (initialValue->GetValueNumber() == value->GetValueNumber())
            {
                if (value->GetValueInfo()->IsUninitialized())
                {
                    valueType = ldInstr->AsProfiledInstr()->u.FldInfo().valueType;
                }
                else
                {
                    valueType = value->GetValueInfo()->Type();
                }
            }
            else
            {
                valueType = ValueType::Uninitialized;
            }
            ldInstr->AsProfiledInstr()->u.FldInfo().valueType = valueType;
        }
    }
    else
    {
        valueType = landingPadValue->GetValueInfo()->Type();
    }

    loop->symsUsedBeforeDefined->Set(symStore->m_id);

    if (valueType.IsLikelyNumber())
    {
        loop->likelyNumberSymsUsedBeforeDefined->Set(symStore->m_id);
        if (DoAggressiveIntTypeSpec() ?
valueType.IsLikelyInt() : valueType.IsInt()) { // Can only force int conversions in the landing pad based on likely-int values if aggressive int type // specialization is enabled loop->likelyIntSymsUsedBeforeDefined->Set(symStore->m_id); } } // Insert in landing pad if (ldInstr->HasAnyImplicitCalls()) { IR::Instr * bailInstr = EnsureDisableImplicitCallRegion(loop); bailInstr->InsertBefore(ldInstr); } else if (loop->endDisableImplicitCall) { loop->endDisableImplicitCall->InsertBefore(ldInstr); } else { loop->landingPad->InsertAfter(ldInstr); } ldInstr->ClearByteCodeOffset(); ldInstr->SetByteCodeOffset(landingPad->GetFirstInstr()); #if DBG_DUMP if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldPREPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId())) { Output::Print(_u("** TRACE: Field PRE: field pre-loaded in landing pad of loop head #%-3d: "), loop->GetHeadBlock()->GetBlockNum()); ldInstr->Dump(); Output::Print(_u("\n")); } #endif return true; } void GlobOpt::PreloadPRECandidates(Loop *loop, PRECandidatesList *candidates) { // Insert loads in landing pad for field PRE candidates. Iterate while(changed) // for the o.x.y cases. BOOL changed = true; if (!candidates) { return; } Assert(loop->landingPad->GetFirstInstr() == loop->landingPad->GetLastInstr()); while (changed) { changed = false; FOREACH_SLIST_ENTRY_EDITING(GlobHashBucket*, candidate, (SList<GlobHashBucket*>*)candidates, iter) { if (this->PreloadPRECandidate(loop, candidate)) { changed = true; iter.RemoveCurrent(); } } NEXT_SLIST_ENTRY_EDITING; } } void GlobOpt::FieldPRE(Loop *loop) { if (!DoFieldPRE(loop)) { return; } PRECandidatesList *candidates; JitArenaAllocator *alloc = this->tempAlloc; candidates = this->FindPossiblePRECandidates(loop, alloc); this->PreloadPRECandidates(loop, candidates); } void GlobOpt::InsertValueCompensation( BasicBlock *const predecessor, const SymToValueInfoMap &symsRequiringCompensationToMergedValueInfoMap) { Assert(predecessor); Assert(symsRequiringCompensationToMergedValueInfoMap.Count() != 0); IR::Instr *insertBeforeInstr = predecessor->GetLastInstr(); Func *const func = insertBeforeInstr->m_func; bool setLastInstrInPredecessor; if(insertBeforeInstr->IsBranchInstr() || insertBeforeInstr->m_opcode == Js::OpCode::BailTarget) { // Don't insert code between the branch and the corresponding ByteCodeUses instructions while(insertBeforeInstr->m_prev->m_opcode == Js::OpCode::ByteCodeUses) { insertBeforeInstr = insertBeforeInstr->m_prev; } setLastInstrInPredecessor = false; } else { // Insert at the end of the block and set the last instruction Assert(insertBeforeInstr->m_next); insertBeforeInstr = insertBeforeInstr->m_next; // Instruction after the last instruction in the predecessor setLastInstrInPredecessor = true; } GlobOptBlockData &predecessorBlockData = predecessor->globOptData; GlobOptBlockData &successorBlockData = *CurrentBlockData(); struct DelayChangeValueInfo { Value* predecessorValue; ArrayValueInfo* valueInfo; void ChangeValueInfo(BasicBlock* predecessor, GlobOpt* g) { g->ChangeValueInfo( predecessor, predecessorValue, valueInfo, false /*allowIncompatibleType*/, true /*compensated*/); } }; JsUtil::List<DelayChangeValueInfo, ArenaAllocator> delayChangeValueInfo(alloc); for(auto it = symsRequiringCompensationToMergedValueInfoMap.GetIterator(); it.IsValid(); it.MoveNext()) { const auto &entry = it.Current(); Sym *const sym = entry.Key(); Value *const predecessorValue = predecessorBlockData.FindValue(sym); Assert(predecessorValue); ValueInfo *const 
predecessorValueInfo = predecessorValue->GetValueInfo(); // Currently, array value infos are the only ones that require compensation based on values Assert(predecessorValueInfo->IsAnyOptimizedArray()); const ArrayValueInfo *const predecessorArrayValueInfo = predecessorValueInfo->AsArrayValueInfo(); StackSym *const predecessorHeadSegmentSym = predecessorArrayValueInfo->HeadSegmentSym(); StackSym *const predecessorHeadSegmentLengthSym = predecessorArrayValueInfo->HeadSegmentLengthSym(); StackSym *const predecessorLengthSym = predecessorArrayValueInfo->LengthSym(); ValueInfo *const mergedValueInfo = entry.Value(); const ArrayValueInfo *const mergedArrayValueInfo = mergedValueInfo->AsArrayValueInfo(); StackSym *const mergedHeadSegmentSym = mergedArrayValueInfo->HeadSegmentSym(); StackSym *const mergedHeadSegmentLengthSym = mergedArrayValueInfo->HeadSegmentLengthSym(); StackSym *const mergedLengthSym = mergedArrayValueInfo->LengthSym(); Assert(!mergedHeadSegmentSym || predecessorHeadSegmentSym); Assert(!mergedHeadSegmentLengthSym || predecessorHeadSegmentLengthSym); Assert(!mergedLengthSym || predecessorLengthSym); bool compensated = false; if(mergedHeadSegmentSym && predecessorHeadSegmentSym != mergedHeadSegmentSym) { IR::Instr *const newInstr = IR::Instr::New( Js::OpCode::Ld_A, IR::RegOpnd::New(mergedHeadSegmentSym, mergedHeadSegmentSym->GetType(), func), IR::RegOpnd::New(predecessorHeadSegmentSym, predecessorHeadSegmentSym->GetType(), func), func); newInstr->GetDst()->SetIsJITOptimizedReg(true); newInstr->GetSrc1()->SetIsJITOptimizedReg(true); newInstr->SetByteCodeOffset(insertBeforeInstr); insertBeforeInstr->InsertBefore(newInstr); compensated = true; } if(mergedHeadSegmentLengthSym && predecessorHeadSegmentLengthSym != mergedHeadSegmentLengthSym) { IR::Instr *const newInstr = IR::Instr::New( Js::OpCode::Ld_I4, IR::RegOpnd::New(mergedHeadSegmentLengthSym, mergedHeadSegmentLengthSym->GetType(), func), IR::RegOpnd::New(predecessorHeadSegmentLengthSym, predecessorHeadSegmentLengthSym->GetType(), func), func); newInstr->GetDst()->SetIsJITOptimizedReg(true); newInstr->GetSrc1()->SetIsJITOptimizedReg(true); newInstr->SetByteCodeOffset(insertBeforeInstr); insertBeforeInstr->InsertBefore(newInstr); compensated = true; // Merge the head segment length value Assert(predecessorBlockData.liveVarSyms->Test(predecessorHeadSegmentLengthSym->m_id)); predecessorBlockData.liveVarSyms->Set(mergedHeadSegmentLengthSym->m_id); successorBlockData.liveVarSyms->Set(mergedHeadSegmentLengthSym->m_id); Value *const predecessorHeadSegmentLengthValue = predecessorBlockData.FindValue(predecessorHeadSegmentLengthSym); Assert(predecessorHeadSegmentLengthValue); predecessorBlockData.SetValue(predecessorHeadSegmentLengthValue, mergedHeadSegmentLengthSym); Value *const mergedHeadSegmentLengthValue = successorBlockData.FindValue(mergedHeadSegmentLengthSym); if(mergedHeadSegmentLengthValue) { Assert(mergedHeadSegmentLengthValue->GetValueNumber() != predecessorHeadSegmentLengthValue->GetValueNumber()); if(predecessorHeadSegmentLengthValue->GetValueInfo() != mergedHeadSegmentLengthValue->GetValueInfo()) { mergedHeadSegmentLengthValue->SetValueInfo( ValueInfo::MergeLikelyIntValueInfo( this->alloc, mergedHeadSegmentLengthValue, predecessorHeadSegmentLengthValue, mergedHeadSegmentLengthValue->GetValueInfo()->Type() .Merge(predecessorHeadSegmentLengthValue->GetValueInfo()->Type()))); } } else { successorBlockData.SetValue(CopyValue(predecessorHeadSegmentLengthValue), mergedHeadSegmentLengthSym); } } if(mergedLengthSym && 
predecessorLengthSym != mergedLengthSym) { IR::Instr *const newInstr = IR::Instr::New( Js::OpCode::Ld_I4, IR::RegOpnd::New(mergedLengthSym, mergedLengthSym->GetType(), func), IR::RegOpnd::New(predecessorLengthSym, predecessorLengthSym->GetType(), func), func); newInstr->GetDst()->SetIsJITOptimizedReg(true); newInstr->GetSrc1()->SetIsJITOptimizedReg(true); newInstr->SetByteCodeOffset(insertBeforeInstr); insertBeforeInstr->InsertBefore(newInstr); compensated = true; // Merge the length value Assert(predecessorBlockData.liveVarSyms->Test(predecessorLengthSym->m_id)); predecessorBlockData.liveVarSyms->Set(mergedLengthSym->m_id); successorBlockData.liveVarSyms->Set(mergedLengthSym->m_id); Value *const predecessorLengthValue = predecessorBlockData.FindValue(predecessorLengthSym); Assert(predecessorLengthValue); predecessorBlockData.SetValue(predecessorLengthValue, mergedLengthSym); Value *const mergedLengthValue = successorBlockData.FindValue(mergedLengthSym); if(mergedLengthValue) { Assert(mergedLengthValue->GetValueNumber() != predecessorLengthValue->GetValueNumber()); if(predecessorLengthValue->GetValueInfo() != mergedLengthValue->GetValueInfo()) { mergedLengthValue->SetValueInfo( ValueInfo::MergeLikelyIntValueInfo( this->alloc, mergedLengthValue, predecessorLengthValue, mergedLengthValue->GetValueInfo()->Type().Merge(predecessorLengthValue->GetValueInfo()->Type()))); } } else { successorBlockData.SetValue(CopyValue(predecessorLengthValue), mergedLengthSym); } } if(compensated) { // Save the new ValueInfo for later. // We don't want other symbols needing compensation to see this new one delayChangeValueInfo.Add({ predecessorValue, ArrayValueInfo::New( alloc, predecessorValueInfo->Type(), mergedHeadSegmentSym ? mergedHeadSegmentSym : predecessorHeadSegmentSym, mergedHeadSegmentLengthSym ? mergedHeadSegmentLengthSym : predecessorHeadSegmentLengthSym, mergedLengthSym ? mergedLengthSym : predecessorLengthSym, predecessorValueInfo->GetSymStore()) }); } } // Once we've compensated all the symbols, update the new ValueInfo. delayChangeValueInfo.Map([predecessor, this](int, DelayChangeValueInfo d) { d.ChangeValueInfo(predecessor, this); }); if(setLastInstrInPredecessor) { predecessor->SetLastInstr(insertBeforeInstr->m_prev); } } bool GlobOpt::AreFromSameBytecodeFunc(IR::RegOpnd const* src1, IR::RegOpnd const* dst) const { Assert(this->func->m_symTable->FindStackSym(src1->m_sym->m_id) == src1->m_sym); Assert(this->func->m_symTable->FindStackSym(dst->m_sym->m_id) == dst->m_sym); if (dst->m_sym->HasByteCodeRegSlot() && src1->m_sym->HasByteCodeRegSlot()) { return src1->m_sym->GetByteCodeFunc() == dst->m_sym->GetByteCodeFunc(); } return false; } /* * This is for scope object removal along with Heap Arguments optimization. * We track several instructions to facilitate the removal of scope object. * - LdSlotArr - This instr is tracked to keep track of the formals array (the dest) * - InlineeStart - To keep track of the stack syms for the formals of the inlinee. 
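 * Example (assumed source): inlining "function callee(a, b) { ... }" at "callee(1, 2)" hits the
 *   InlineeStart case below, which walks the ArgOut chain so that a and b can be bound to stack syms
 *   instead of scope object slots.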
 */
void GlobOpt::TrackInstrsForScopeObjectRemoval(IR::Instr * instr)
{
    IR::Opnd* dst = instr->GetDst();
    IR::Opnd* src1 = instr->GetSrc1();

    if (instr->m_opcode == Js::OpCode::Ld_A && src1->IsRegOpnd())
    {
        AssertMsg(!instr->m_func->IsStackArgsEnabled() || !src1->IsScopeObjOpnd(instr->m_func), "There can be no aliasing for scope object.");
    }

    // The following is to track the formals array for the Stack Arguments optimization with formals
    if (instr->m_func->IsStackArgsEnabled() && !this->IsLoopPrePass())
    {
        if (instr->m_opcode == Js::OpCode::LdSlotArr)
        {
            if (instr->GetSrc1()->IsScopeObjOpnd(instr->m_func))
            {
                AssertMsg(!instr->m_func->GetJITFunctionBody()->HasImplicitArgIns(), "No mapping is required in this case. So it should already be generating ArgIns.");
                instr->m_func->TrackFormalsArraySym(dst->GetStackSym()->m_id);
            }
        }
        else if (instr->m_opcode == Js::OpCode::InlineeStart)
        {
            Assert(instr->m_func->IsInlined());
            Js::ArgSlot actualsCount = instr->m_func->actualCount - 1;
            Js::ArgSlot formalsCount = instr->m_func->GetJITFunctionBody()->GetInParamsCount() - 1;

            Func * func = instr->m_func;
            Func * inlinerFunc = func->GetParentFunc(); // Inliner's func

            IR::Instr * argOutInstr = instr->GetSrc2()->GetStackSym()->GetInstrDef();

            // The ArgOut immediately before the InlineeStart will be the ArgOut for NewScObject,
            // so we don't want to track the stack sym for that ArgOut; skip it here.
            if (instr->m_func->IsInlinedConstructor())
            {
                // PRE might introduce a second definition for the Src1, so assert on the opcode only when it has a single definition.
                Assert(argOutInstr->GetSrc1()->GetStackSym()->GetInstrDef() == nullptr ||
                    argOutInstr->GetSrc1()->GetStackSym()->GetInstrDef()->m_opcode == Js::OpCode::NewScObjectNoCtor);
                argOutInstr = argOutInstr->GetSrc2()->GetStackSym()->GetInstrDef();
            }
            if (formalsCount < actualsCount)
            {
                Js::ArgSlot extraActuals = actualsCount - formalsCount;

                // Skip the extra actuals passed
                for (Js::ArgSlot i = 0; i < extraActuals; i++)
                {
                    argOutInstr = argOutInstr->GetSrc2()->GetStackSym()->GetInstrDef();
                }
            }

            StackSym * undefinedSym = nullptr;
            for (Js::ArgSlot param = formalsCount; param > 0; param--)
            {
                StackSym * argOutSym = nullptr;

                if (argOutInstr->GetSrc1())
                {
                    if (argOutInstr->GetSrc1()->IsRegOpnd())
                    {
                        argOutSym = argOutInstr->GetSrc1()->GetStackSym();
                    }
                    else
                    {
                        // We will always have an ArgOut instr - so the source operand will not be removed.
                        argOutSym = StackSym::New(inlinerFunc);
                        IR::Opnd * srcOpnd = argOutInstr->GetSrc1();
                        IR::Opnd * dstOpnd = IR::RegOpnd::New(argOutSym, TyVar, inlinerFunc);
                        IR::Instr * assignInstr = IR::Instr::New(Js::OpCode::Ld_A, dstOpnd, srcOpnd, inlinerFunc);
                        instr->InsertBefore(assignInstr);
                    }
                }

                Assert(!func->HasStackSymForFormal(param - 1));

                if (param <= actualsCount)
                {
                    Assert(argOutSym);
                    func->TrackStackSymForFormalIndex(param - 1, argOutSym);
                    argOutInstr = argOutInstr->GetSrc2()->GetStackSym()->GetInstrDef();
                }
                else
                {
                    /* When param is out of range of the actuals count, load undefined. */
                    // TODO: saravind: This will insert undefined for each of the params not having an actual. Clean this up by having a sym for undefined on func?
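                    // E.g. (hypothetical): inlining "function f(a, b, c) { ... }" at the call site "f(1)"
                    // takes this path for the formals without actuals; all of them end up bound to the
                    // single shared undefinedSym created below.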
                    Assert(formalsCount > actualsCount);
                    if (undefinedSym == nullptr)
                    {
                        undefinedSym = StackSym::New(inlinerFunc);
                        IR::Opnd * srcOpnd = IR::AddrOpnd::New(inlinerFunc->GetScriptContextInfo()->GetUndefinedAddr(), IR::AddrOpndKindDynamicMisc, inlinerFunc);
                        IR::Opnd * dstOpnd = IR::RegOpnd::New(undefinedSym, TyVar, inlinerFunc);
                        IR::Instr * assignUndefined = IR::Instr::New(Js::OpCode::Ld_A, dstOpnd, srcOpnd, inlinerFunc);
                        instr->InsertBefore(assignUndefined);
                    }
                    func->TrackStackSymForFormalIndex(param - 1, undefinedSym);
                }
            }
        }
    }
}

void GlobOpt::OptArguments(IR::Instr *instr)
{
    IR::Opnd* dst = instr->GetDst();
    IR::Opnd* src1 = instr->GetSrc1();
    IR::Opnd* src2 = instr->GetSrc2();

    TrackInstrsForScopeObjectRemoval(instr);

    if (!TrackArgumentsObject())
    {
        return;
    }

    if (instr->HasAnyLoadHeapArgsOpCode())
    {
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        if (instr->m_func->IsStackArgsEnabled())
        {
            if (instr->GetSrc1()->IsRegOpnd() && instr->m_func->GetJITFunctionBody()->GetInParamsCount() > 1)
            {
                StackSym * scopeObjSym = instr->GetSrc1()->GetStackSym();
                Assert(scopeObjSym);
                Assert(scopeObjSym->GetInstrDef()->m_opcode == Js::OpCode::InitCachedScope || scopeObjSym->GetInstrDef()->m_opcode == Js::OpCode::NewScopeObject);
                Assert(instr->m_func->GetScopeObjSym() == scopeObjSym);
                if (PHASE_VERBOSE_TRACE1(Js::StackArgFormalsOptPhase))
                {
                    Output::Print(_u("StackArgFormals : %s (%d) :Setting scopeObjSym in forward pass. \n"), instr->m_func->GetJITFunctionBody()->GetDisplayName(), instr->m_func->GetJITFunctionBody()->GetFunctionNumber());
                    Output::Flush();
                }
            }
        }
#endif

        if (instr->m_func->GetJITFunctionBody()->GetInParamsCount() != 1 && !instr->m_func->IsStackArgsEnabled())
        {
            CannotAllocateArgumentsObjectOnStack();
        }
        else
        {
            CurrentBlockData()->TrackArgumentsSym(dst->AsRegOpnd());
        }
        return;
    }

    // Keep track of arguments objects and their aliases
    // LdHeapArguments loads the arguments object, and Ld_A tracks the aliases.
    if ((instr->m_opcode == Js::OpCode::Ld_A || instr->m_opcode == Js::OpCode::BytecodeArgOutCapture) && (src1->IsRegOpnd() && CurrentBlockData()->IsArgumentsOpnd(src1)))
    {
        // In debug mode, we don't want to optimize away the aliases, since we may have to show them during inspection.
        if (((!AreFromSameBytecodeFunc(src1->AsRegOpnd(), dst->AsRegOpnd()) || this->currentBlock->loop) && instr->m_opcode != Js::OpCode::BytecodeArgOutCapture) || this->func->IsJitInDebugMode())
        {
            CannotAllocateArgumentsObjectOnStack();
            return;
        }
        if (!dst->AsRegOpnd()->GetStackSym()->m_nonEscapingArgObjAlias)
        {
            CurrentBlockData()->TrackArgumentsSym(dst->AsRegOpnd());
        }
        return;
    }

    if (!CurrentBlockData()->TestAnyArgumentsSym())
    {
        // There are no syms to track yet, don't start tracking arguments sym.
return; } // Avoid loop prepass if (this->currentBlock->loop && this->IsLoopPrePass()) { return; } SymID id = 0; switch(instr->m_opcode) { case Js::OpCode::LdElemI_A: case Js::OpCode::TypeofElem: { Assert(src1->IsIndirOpnd()); IR::RegOpnd *indexOpnd = src1->AsIndirOpnd()->GetIndexOpnd(); if (indexOpnd && CurrentBlockData()->IsArgumentsSymID(indexOpnd->m_sym->m_id)) { // Pathological test cases such as a[arguments] CannotAllocateArgumentsObjectOnStack(); return; } IR::RegOpnd *baseOpnd = src1->AsIndirOpnd()->GetBaseOpnd(); id = baseOpnd->m_sym->m_id; if (CurrentBlockData()->IsArgumentsSymID(id)) { instr->usesStackArgumentsObject = true; } break; } case Js::OpCode::LdLen_A: { Assert(src1->IsRegOpnd()); if(CurrentBlockData()->IsArgumentsOpnd(src1)) { instr->usesStackArgumentsObject = true; } break; } case Js::OpCode::ArgOut_A_InlineBuiltIn: { if (CurrentBlockData()->IsArgumentsOpnd(src1)) { instr->usesStackArgumentsObject = true; } if (CurrentBlockData()->IsArgumentsOpnd(src1) && src1->AsRegOpnd()->m_sym->GetInstrDef()->m_opcode == Js::OpCode::BytecodeArgOutCapture) { // Apply inlining results in such usage - this is to ignore this sym that is def'd by ByteCodeArgOutCapture // It's needed because we do not have block level merging of arguments object and this def due to inlining can turn off stack args opt. IR::Instr* builtinStart = instr->GetNextRealInstr(); if (builtinStart->m_opcode == Js::OpCode::InlineBuiltInStart) { IR::Opnd* builtinOpnd = builtinStart->GetSrc1(); if (builtinStart->GetSrc1()->IsAddrOpnd()) { Assert(builtinOpnd->AsAddrOpnd()->m_isFunction); Js::BuiltinFunction builtinFunction = Js::JavascriptLibrary::GetBuiltInForFuncInfo(((FixedFieldInfo*)builtinOpnd->AsAddrOpnd()->m_metadata)->GetFuncInfoAddr(), func->GetThreadContextInfo()); if (builtinFunction == Js::BuiltinFunction::JavascriptFunction_Apply) { CurrentBlockData()->ClearArgumentsSym(src1->AsRegOpnd()); } } else if (builtinOpnd->IsRegOpnd()) { if (builtinOpnd->AsRegOpnd()->m_sym->m_builtInIndex == Js::BuiltinFunction::JavascriptFunction_Apply) { CurrentBlockData()->ClearArgumentsSym(src1->AsRegOpnd()); } } } } break; } case Js::OpCode::BailOnNotStackArgs: case Js::OpCode::ArgOut_A_FromStackArgs: case Js::OpCode::BytecodeArgOutUse: { if (src1 && CurrentBlockData()->IsArgumentsOpnd(src1)) { instr->usesStackArgumentsObject = true; } break; } default: { // Super conservative here, if we see the arguments or any of its alias being used in any // other opcode just don't do this optimization. Revisit this to optimize further if we see any common // case is missed. if (src1) { if (src1->IsRegOpnd() || src1->IsSymOpnd() || src1->IsIndirOpnd()) { if (CurrentBlockData()->IsArgumentsOpnd(src1)) { #ifdef PERF_HINT if (PHASE_TRACE1(Js::PerfHintPhase)) { WritePerfHint(PerfHints::HeapArgumentsCreated, instr->m_func, instr->GetByteCodeOffset()); } #endif CannotAllocateArgumentsObjectOnStack(); return; } } } if (src2) { if (src2->IsRegOpnd() || src2->IsSymOpnd() || src2->IsIndirOpnd()) { if (CurrentBlockData()->IsArgumentsOpnd(src2)) { #ifdef PERF_HINT if (PHASE_TRACE1(Js::PerfHintPhase)) { WritePerfHint(PerfHints::HeapArgumentsCreated, instr->m_func, instr->GetByteCodeOffset()); } #endif CannotAllocateArgumentsObjectOnStack(); return; } } } // We should look at dst last to correctly handle cases where it's the same as one of the src operands. 
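            // E.g. (illustrative): for an instr like "x = x & mask" where x aliases the arguments object,
            // clearing the dst's tracking first would hide the src use of x from the checks above; handling
            // dst last keeps those src checks accurate.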
if (dst) { if (dst->IsIndirOpnd() || dst->IsSymOpnd()) { if (CurrentBlockData()->IsArgumentsOpnd(dst)) { #ifdef PERF_HINT if (PHASE_TRACE1(Js::PerfHintPhase)) { WritePerfHint(PerfHints::HeapArgumentsModification, instr->m_func, instr->GetByteCodeOffset()); } #endif CannotAllocateArgumentsObjectOnStack(); return; } } else if (dst->IsRegOpnd()) { if (this->currentBlock->loop && CurrentBlockData()->IsArgumentsOpnd(dst)) { #ifdef PERF_HINT if (PHASE_TRACE1(Js::PerfHintPhase)) { WritePerfHint(PerfHints::HeapArgumentsModification, instr->m_func, instr->GetByteCodeOffset()); } #endif CannotAllocateArgumentsObjectOnStack(); return; } CurrentBlockData()->ClearArgumentsSym(dst->AsRegOpnd()); } } } break; } return; } void GlobOpt::MarkArgumentsUsedForBranch(IR::Instr * instr) { // If it's a conditional branch instruction and the operand used for branching is one of the arguments // to the function, tag the m_argUsedForBranch of the functionBody so that it can be used later for inlining decisions. if (instr->IsBranchInstr() && !instr->AsBranchInstr()->IsUnconditional()) { IR::BranchInstr * bInstr = instr->AsBranchInstr(); IR::Opnd *src1 = bInstr->GetSrc1(); IR::Opnd *src2 = bInstr->GetSrc2(); // These are used because we don't want to rely on src1 or src2 to always be the register/constant IR::RegOpnd *regOpnd = nullptr; if (!src2 && (instr->m_opcode == Js::OpCode::BrFalse_A || instr->m_opcode == Js::OpCode::BrTrue_A) && src1->IsRegOpnd()) { regOpnd = src1->AsRegOpnd(); } // We need to check for (0===arg) and (arg===0); this is especially important since some minifiers // change all instances of one to the other. else if (src2 && src2->IsConstOpnd() && src1->IsRegOpnd()) { regOpnd = src1->AsRegOpnd(); } else if (src2 && src2->IsRegOpnd() && src1->IsConstOpnd()) { regOpnd = src2->AsRegOpnd(); } if (regOpnd != nullptr) { if (regOpnd->m_sym->IsSingleDef()) { IR::Instr * defInst = regOpnd->m_sym->GetInstrDef(); IR::Opnd *defSym = defInst->GetSrc1(); if (defSym && defSym->IsSymOpnd() && defSym->AsSymOpnd()->m_sym->IsStackSym() && defSym->AsSymOpnd()->m_sym->AsStackSym()->IsParamSlotSym()) { uint16 param = defSym->AsSymOpnd()->m_sym->AsStackSym()->GetParamSlotNum(); // We only support functions with 13 arguments to ensure optimal size of callSiteInfo if (param < Js::Constants::MaximumArgumentCountForConstantArgumentInlining) { this->func->GetJITOutput()->SetArgUsedForBranch((uint8)param); } } } } } } const InductionVariable* GlobOpt::GetInductionVariable(SymID sym, Loop *loop) { if (loop->inductionVariables) { for (auto it = loop->inductionVariables->GetIterator(); it.IsValid(); it.MoveNext()) { InductionVariable* iv = &it.CurrentValueReference(); if (!iv->IsChangeDeterminate() || !iv->IsChangeUnidirectional()) { continue; } if (iv->Sym()->m_id == sym) { return iv; } } } return nullptr; } bool GlobOpt::IsSymIDInductionVariable(SymID sym, Loop *loop) { return GetInductionVariable(sym, loop) != nullptr; } SymID GlobOpt::GetVarSymID(StackSym *sym) { if (sym && sym->m_type != TyVar) { sym = sym->GetVarEquivSym(nullptr); } if (!sym) { return Js::Constants::InvalidSymID; } return sym->m_id; } bool GlobOpt::IsAllowedForMemOpt(IR::Instr* instr, bool isMemset, IR::RegOpnd *baseOpnd, IR::Opnd *indexOpnd) { Assert(instr); if (!baseOpnd || !indexOpnd) { return false; } Loop* loop = this->currentBlock->loop; const ValueType baseValueType(baseOpnd->GetValueType()); const ValueType indexValueType(indexOpnd->GetValueType()); // Validate the array and index types if ( !indexValueType.IsInt() || !( 
                baseValueType.IsTypedIntOrFloatArray() ||
                baseValueType.IsArray()
            )
        )
    {
#if DBG_DUMP
        wchar indexValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
        indexValueType.ToString(indexValueTypeStr);
        wchar baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
        baseValueType.ToString(baseValueTypeStr);
        TRACE_MEMOP_VERBOSE(loop, instr, _u("Index[%s] or Array[%s] value type is invalid"), indexValueTypeStr, baseValueTypeStr);
#endif
        return false;
    }

    // The following is conservative and works around a bug in induction variable analysis.
    if (baseOpnd->IsArrayRegOpnd())
    {
        IR::ArrayRegOpnd *baseArrayOp = baseOpnd->AsArrayRegOpnd();
        bool hasBoundChecksRemoved = (
            baseArrayOp->EliminatedLowerBoundCheck() &&
            baseArrayOp->EliminatedUpperBoundCheck() &&
            !instr->extractedUpperBoundCheckWithoutHoisting &&
            !instr->loadedArrayHeadSegment &&
            !instr->loadedArrayHeadSegmentLength
            );
        if (!hasBoundChecksRemoved)
        {
            TRACE_MEMOP_VERBOSE(loop, instr, _u("Missing bounds check optimization"));
            return false;
        }
    }

    if (!baseValueType.IsTypedArray())
    {
        // Check if the instr can kill the value type of the array
        JsArrayKills arrayKills = CheckJsArrayKills(instr);
        if (arrayKills.KillsValueType(baseValueType))
        {
            TRACE_MEMOP_VERBOSE(loop, instr, _u("The array (s%d) can lose its value type"), GetVarSymID(baseOpnd->GetStackSym()));
            return false;
        }
    }

    // The base operand must be invariant in the loop
    if (!this->OptIsInvariant(baseOpnd, this->currentBlock, loop, CurrentBlockData()->FindValue(baseOpnd->m_sym), false, true))
    {
        TRACE_MEMOP_VERBOSE(loop, instr, _u("Base (s%d) is not invariant"), GetVarSymID(baseOpnd->GetStackSym()));
        return false;
    }

    // Validate the index
    Assert(indexOpnd->GetStackSym());
    SymID indexSymID = GetVarSymID(indexOpnd->GetStackSym());
    const InductionVariable* iv = GetInductionVariable(indexSymID, loop);
    if (!iv)
    {
        // If the index is not an induction variable, return
        TRACE_MEMOP_VERBOSE(loop, instr, _u("Index (s%d) is not an induction variable"), indexSymID);
        return false;
    }

    Assert(iv->IsChangeDeterminate() && iv->IsChangeUnidirectional());
    const IntConstantBounds & bounds = iv->ChangeBounds();

    if (loop->memOpInfo)
    {
        // Only accept induction variables that increment by 1
        Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 0, 0 };
        inductionVariableChangeInfo = loop->memOpInfo->inductionVariableChangeInfoMap->Lookup(indexSymID, inductionVariableChangeInfo);

        if (
            (bounds.LowerBound() != 1 && bounds.LowerBound() != -1) ||
            (bounds.UpperBound() != bounds.LowerBound()) ||
            inductionVariableChangeInfo.unroll > 1 // Must be 0 (not seen yet) or 1 (already seen)
        )
        {
            TRACE_MEMOP_VERBOSE(loop, instr, _u("The index does not change by 1: %d><%d, unroll=%d"), bounds.LowerBound(), bounds.UpperBound(), inductionVariableChangeInfo.unroll);
            return false;
        }

        // Check that the index is the same across all MemOp optimizations in this loop
        if (!loop->memOpInfo->candidates->Empty())
        {
            Loop::MemOpCandidate* previousCandidate = loop->memOpInfo->candidates->Head();

            // All MemOp operations within the same loop must use the same index
            if (previousCandidate->index != indexSymID)
            {
                TRACE_MEMOP_VERBOSE(loop, instr, _u("The index is not the same as other MemOp in the loop"));
                return false;
            }
        }
    }

    return true;
}

bool GlobOpt::CollectMemcopyLdElementI(IR::Instr *instr, Loop *loop)
{
    Assert(instr->GetSrc1()->IsIndirOpnd());

    IR::IndirOpnd *src1 = instr->GetSrc1()->AsIndirOpnd();
    IR::Opnd *indexOpnd = src1->GetIndexOpnd();
    IR::RegOpnd *baseOpnd = src1->GetBaseOpnd()->AsRegOpnd();
    SymID baseSymID = GetVarSymID(baseOpnd->GetStackSym());

    if (!IsAllowedForMemOpt(instr, false, baseOpnd,
        indexOpnd))
    {
        return false;
    }

    SymID inductionSymID = GetVarSymID(indexOpnd->GetStackSym());
    Assert(IsSymIDInductionVariable(inductionSymID, loop));

    loop->EnsureMemOpVariablesInitialized();
    bool isIndexPreIncr = loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID);

    IR::Opnd * dst = instr->GetDst();
    if (!dst->IsRegOpnd() || !dst->AsRegOpnd()->GetStackSym()->IsSingleDef())
    {
        return false;
    }

    Loop::MemCopyCandidate* memcopyInfo = JitAnewStruct(this->func->GetTopFunc()->m_fg->alloc, Loop::MemCopyCandidate);
    memcopyInfo->ldBase = baseSymID;
    memcopyInfo->ldCount = 1;
    memcopyInfo->count = 0;
    memcopyInfo->bIndexAlreadyChanged = isIndexPreIncr;
    memcopyInfo->base = Js::Constants::InvalidSymID; // Need to find the StElem first
    memcopyInfo->index = inductionSymID;
    memcopyInfo->transferSym = dst->AsRegOpnd()->GetStackSym();
    loop->memOpInfo->candidates->Prepend(memcopyInfo);
    return true;
}

bool GlobOpt::CollectMemsetStElementI(IR::Instr *instr, Loop *loop)
{
    Assert(instr->GetDst()->IsIndirOpnd());
    IR::IndirOpnd *dst = instr->GetDst()->AsIndirOpnd();
    IR::Opnd *indexOp = dst->GetIndexOpnd();
    IR::RegOpnd *baseOp = dst->GetBaseOpnd()->AsRegOpnd();

    if (!IsAllowedForMemOpt(instr, true, baseOp, indexOp))
    {
        return false;
    }

    SymID baseSymID = GetVarSymID(baseOp->GetStackSym());

    IR::Opnd *srcDef = instr->GetSrc1();
    StackSym *srcSym = nullptr;
    if (srcDef->IsRegOpnd())
    {
        IR::RegOpnd* opnd = srcDef->AsRegOpnd();
        if (this->OptIsInvariant(opnd, this->currentBlock, loop, CurrentBlockData()->FindValue(opnd->m_sym), true, true))
        {
            srcSym = opnd->GetStackSym();
        }
    }

    BailoutConstantValue constant = { TyIllegal, 0 };
    if (srcDef->IsFloatConstOpnd())
    {
        constant.InitFloatConstValue(srcDef->AsFloatConstOpnd()->m_value);
    }
    else if (srcDef->IsIntConstOpnd())
    {
        constant.InitIntConstValue(srcDef->AsIntConstOpnd()->GetValue(), srcDef->AsIntConstOpnd()->GetType());
    }
    else if (srcDef->IsAddrOpnd())
    {
        constant.InitVarConstValue(srcDef->AsAddrOpnd()->m_address);
    }
    else if (!srcSym)
    {
        TRACE_MEMOP_PHASE_VERBOSE(MemSet, loop, instr, _u("Source is not an invariant"));
        return false;
    }

    // Process the Index Operand
    Assert(indexOp->GetStackSym());
    SymID inductionSymID = GetVarSymID(indexOp->GetStackSym());
    Assert(IsSymIDInductionVariable(inductionSymID, loop));

    loop->EnsureMemOpVariablesInitialized();
    bool isIndexPreIncr = loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID);

    Loop::MemSetCandidate* memsetInfo = JitAnewStruct(this->func->GetTopFunc()->m_fg->alloc, Loop::MemSetCandidate);
    memsetInfo->base = baseSymID;
    memsetInfo->index = inductionSymID;
    memsetInfo->constant = constant;
    memsetInfo->srcSym = srcSym;
    memsetInfo->count = 1;
    memsetInfo->bIndexAlreadyChanged = isIndexPreIncr;
    loop->memOpInfo->candidates->Prepend(memsetInfo);
    return true;
}

bool GlobOpt::CollectMemcopyStElementI(IR::Instr *instr, Loop *loop)
{
    if (!loop->memOpInfo || loop->memOpInfo->candidates->Empty())
    {
        // There is no ldElem matching this stElem
        return false;
    }

    Assert(instr->GetDst()->IsIndirOpnd());
    IR::IndirOpnd *dst = instr->GetDst()->AsIndirOpnd();
    IR::Opnd *indexOp = dst->GetIndexOpnd();
    IR::RegOpnd *baseOp = dst->GetBaseOpnd()->AsRegOpnd();
    SymID baseSymID = GetVarSymID(baseOp->GetStackSym());

    if (!instr->GetSrc1()->IsRegOpnd())
    {
        return false;
    }
    IR::RegOpnd* src1 = instr->GetSrc1()->AsRegOpnd();

    if (!src1->GetIsDead())
    {
        // This must be the last use of the register.
        // It will invalidate `var m = a[i]; b[i] = m;` but this is not a very interesting case.
TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Source (s%d) is still alive after StElemI"), baseSymID); return false; } if (!IsAllowedForMemOpt(instr, false, baseOp, indexOp)) { return false; } SymID srcSymID = GetVarSymID(src1->GetStackSym()); // Prepare the memcopyCandidate entry Loop::MemOpCandidate* previousCandidate = loop->memOpInfo->candidates->Head(); if (!previousCandidate->IsMemCopy()) { return false; } Loop::MemCopyCandidate* memcopyInfo = previousCandidate->AsMemCopy(); // The previous candidate has to have been created by the matching ldElem if ( memcopyInfo->base != Js::Constants::InvalidSymID || GetVarSymID(memcopyInfo->transferSym) != srcSymID ) { TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("No matching LdElem found (s%d)"), baseSymID); return false; } Assert(indexOp->GetStackSym()); SymID inductionSymID = GetVarSymID(indexOp->GetStackSym()); Assert(IsSymIDInductionVariable(inductionSymID, loop)); bool isIndexPreIncr = loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID); if (isIndexPreIncr != memcopyInfo->bIndexAlreadyChanged) { // The index changed between the load and the store TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Index value changed between ldElem and stElem")); return false; } // Consider: Can we remove the count field? memcopyInfo->count++; memcopyInfo->base = baseSymID; return true; } bool GlobOpt::CollectMemOpLdElementI(IR::Instr *instr, Loop *loop) { Assert(instr->m_opcode == Js::OpCode::LdElemI_A); return (!PHASE_OFF(Js::MemCopyPhase, this->func) && CollectMemcopyLdElementI(instr, loop)); } bool GlobOpt::CollectMemOpStElementI(IR::Instr *instr, Loop *loop) { Assert(instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict); Assert(instr->GetSrc1()); return (!PHASE_OFF(Js::MemSetPhase, this->func) && CollectMemsetStElementI(instr, loop)) || (!PHASE_OFF(Js::MemCopyPhase, this->func) && CollectMemcopyStElementI(instr, loop)); } bool GlobOpt::CollectMemOpInfo(IR::Instr *instrBegin, IR::Instr *instr, Value *src1Val, Value *src2Val) { Assert(this->currentBlock->loop); Loop *loop = this->currentBlock->loop; if (!loop->blockList.HasTwo()) { // We support memcopy and memset for loops which have only two blocks. 
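        // For illustration, the loop shapes this phase is matching are roughly:
        //     for (i = 0; i < n; i++) { a[i] = b[i]; }   // memcopy candidate
        //     for (i = 0; i < n; i++) { a[i] = c; }      // memset candidate (c constant or loop-invariant)
        // where the body contains essentially nothing besides the element transfer and the
        // induction-variable update.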
        return false;
    }

    if (loop->GetLoopFlags().isInterpreted && !loop->GetLoopFlags().memopMinCountReached)
    {
        TRACE_MEMOP_VERBOSE(loop, instr, _u("minimum loop count not reached"));
        loop->doMemOp = false;
        return false;
    }
    Assert(loop->doMemOp);

    bool isIncr = true, isChangedByOne = false;
    switch (instr->m_opcode)
    {
    case Js::OpCode::StElemI_A:
    case Js::OpCode::StElemI_A_Strict:
        if (!CollectMemOpStElementI(instr, loop))
        {
            loop->doMemOp = false;
            return false;
        }
        break;
    case Js::OpCode::LdElemI_A:
        if (!CollectMemOpLdElementI(instr, loop))
        {
            loop->doMemOp = false;
            return false;
        }
        break;
    case Js::OpCode::Decr_A:
        isIncr = false;
        // Fall through.
    case Js::OpCode::Incr_A:
        isChangedByOne = true;
        goto MemOpCheckInductionVariable;
    case Js::OpCode::Sub_I4:
    case Js::OpCode::Sub_A:
        isIncr = false;
        // Fall through.
    case Js::OpCode::Add_A:
    case Js::OpCode::Add_I4:
    {
    MemOpCheckInductionVariable:
        StackSym *sym = instr->GetSrc1()->GetStackSym();
        if (!sym)
        {
            sym = instr->GetSrc2()->GetStackSym();
        }

        SymID inductionSymID = GetVarSymID(sym);

        if (IsSymIDInductionVariable(inductionSymID, this->currentBlock->loop))
        {
            if (!isChangedByOne)
            {
                IR::Opnd *src1, *src2;
                src1 = instr->GetSrc1();
                src2 = instr->GetSrc2();

                if (src2->IsRegOpnd())
                {
                    Value *val = CurrentBlockData()->FindValue(src2->AsRegOpnd()->m_sym);
                    if (val)
                    {
                        ValueInfo *vi = val->GetValueInfo();
                        int constValue;
                        if (vi && vi->TryGetIntConstantValue(&constValue))
                        {
                            if (constValue == 1)
                            {
                                isChangedByOne = true;
                            }
                        }
                    }
                }
                else if (src2->IsIntConstOpnd())
                {
                    if (src2->AsIntConstOpnd()->GetValue() == 1)
                    {
                        isChangedByOne = true;
                    }
                }
            }

            loop->EnsureMemOpVariablesInitialized();
            if (!isChangedByOne)
            {
                Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { Js::Constants::InvalidLoopUnrollFactor, 0 };

                if (!loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID))
                {
                    loop->memOpInfo->inductionVariableChangeInfoMap->Add(inductionSymID, inductionVariableChangeInfo);
                }
                else
                {
                    loop->memOpInfo->inductionVariableChangeInfoMap->Item(inductionSymID, inductionVariableChangeInfo);
                }
            }
            else
            {
                if (!loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID))
                {
                    Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 1, isIncr };
                    loop->memOpInfo->inductionVariableChangeInfoMap->Add(inductionSymID, inductionVariableChangeInfo);
                }
                else
                {
                    Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 0, 0 };
                    inductionVariableChangeInfo = loop->memOpInfo->inductionVariableChangeInfoMap->Lookup(inductionSymID, inductionVariableChangeInfo);
                    inductionVariableChangeInfo.unroll++;
                    inductionVariableChangeInfo.isIncremental = isIncr;
                    loop->memOpInfo->inductionVariableChangeInfoMap->Item(inductionSymID, inductionVariableChangeInfo);
                }
            }
            break;
        }
        // Fallthrough if not an induction variable
    }
    default:
        FOREACH_INSTR_IN_RANGE(chkInstr, instrBegin->m_next, instr)
        {
            if (IsInstrInvalidForMemOp(chkInstr, loop, src1Val, src2Val))
            {
                loop->doMemOp = false;
                return false;
            }

            // Make sure this instruction doesn't use the memcopy transfer sym before it is checked by StElemI
            if (loop->memOpInfo && !loop->memOpInfo->candidates->Empty())
            {
                Loop::MemOpCandidate* prevCandidate = loop->memOpInfo->candidates->Head();
                if (prevCandidate->IsMemCopy())
                {
                    Loop::MemCopyCandidate* memcopyCandidate = prevCandidate->AsMemCopy();
                    if (memcopyCandidate->base == Js::Constants::InvalidSymID)
                    {
                        if (chkInstr->HasSymUse(memcopyCandidate->transferSym))
                        {
                            loop->doMemOp = false;
                            TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, chkInstr, _u("Found illegal use of LdElemI value(s%d)"), GetVarSymID(memcopyCandidate->transferSym));
                            return false;
                        }
                    }
                }
            }
        }
        NEXT_INSTR_IN_RANGE;
    }

    return true;
}

bool
GlobOpt::IsInstrInvalidForMemOp(IR::Instr *instr, Loop *loop, Value *src1Val, Value *src2Val)
{
    // List of instructions that are valid with memop (i.e., instructions that get removed if memop is emitted)
    if (
        this->currentBlock != loop->GetHeadBlock() &&
        !instr->IsLabelInstr() &&
        instr->IsRealInstr() &&
        instr->m_opcode != Js::OpCode::IncrLoopBodyCount &&
        instr->m_opcode != Js::OpCode::StLoopBodyCount &&
        instr->m_opcode != Js::OpCode::Ld_A &&
        instr->m_opcode != Js::OpCode::Ld_I4 &&
        !(instr->IsBranchInstr() && instr->AsBranchInstr()->IsUnconditional())
    )
    {
        TRACE_MEMOP_VERBOSE(loop, instr, _u("Instruction not accepted for memop"));
        return true;
    }

    // Check prev instr because it could have been added by an optimization and we won't see it here.
    if (OpCodeAttr::FastFldInstr(instr->m_opcode) || (instr->m_prev && OpCodeAttr::FastFldInstr(instr->m_prev->m_opcode)))
    {
        // Refuse any operations interacting with Fields
        TRACE_MEMOP_VERBOSE(loop, instr, _u("Field interaction detected"));
        return true;
    }

    if (Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::ElementSlot)
    {
        // Refuse any operations interacting with slots
        TRACE_MEMOP_VERBOSE(loop, instr, _u("Slot interaction detected"));
        return true;
    }

    if (this->MayNeedBailOnImplicitCall(instr, src1Val, src2Val))
    {
        TRACE_MEMOP_VERBOSE(loop, instr, _u("Implicit call bailout detected"));
        return true;
    }

    return false;
}

void
GlobOpt::TryReplaceLdLen(IR::Instr *& instr)
{
    // Change LdFld on arrays, strings, and 'arguments' to LdLen when we're accessing the .length field
    if ((instr->GetSrc1() && instr->GetSrc1()->IsSymOpnd() && instr->m_opcode == Js::OpCode::ProfiledLdFld) || instr->m_opcode == Js::OpCode::LdFld || instr->m_opcode == Js::OpCode::ScopedLdFld)
    {
        IR::SymOpnd * opnd = instr->GetSrc1()->AsSymOpnd();
        Sym *sym = opnd->m_sym;
        if (sym->IsPropertySym())
        {
            PropertySym *originalPropertySym = sym->AsPropertySym();
            // Only transform accesses to .length
            if (this->lengthEquivBv != nullptr && this->lengthEquivBv->Test(originalPropertySym->m_id))
            {
                IR::RegOpnd* newopnd = IR::RegOpnd::New(originalPropertySym->m_stackSym, IRType::TyVar, instr->m_func);
                ValueInfo *const objectValueInfo = CurrentBlockData()->FindValue(originalPropertySym->m_stackSym)->GetValueInfo();
                // Only for things we'd emit a fast path for
                if (
                    objectValueInfo->IsLikelyAnyArray() ||
                    objectValueInfo->HasHadStringTag() ||
                    objectValueInfo->IsLikelyString() ||
                    newopnd->IsArgumentsObject() ||
                    (CurrentBlockData()->argObjSyms && CurrentBlockData()->IsArgumentsOpnd(newopnd))
                )
                {
                    // We need to properly transfer over the information from the old operand, which is
                    // a SymOpnd, to the new one, which is a RegOpnd. Unfortunately, the types mean the
                    // normal copy methods won't work here, so we're going to directly copy data.
                    newopnd->SetIsJITOptimizedReg(opnd->GetIsJITOptimizedReg());
                    newopnd->SetValueType(objectValueInfo->Type());
                    newopnd->SetIsDead(opnd->GetIsDead());

                    // Now that we have the operand we need, we can go ahead and make the new instr.
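                    // Roughly: "t1 = LdFld o.length" becomes "t1 = LdLen_A o".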
                    IR::Instr *newinstr = IR::Instr::New(Js::OpCode::LdLen_A, instr->m_func);
                    instr->TransferTo(newinstr);
                    newinstr->UnlinkSrc1();
                    newinstr->SetSrc1(newopnd);
                    instr->InsertAfter(newinstr);
                    instr->Remove();
                    instr = newinstr;
                }
            }
        }
    }
}

IR::Instr *
GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved)
{
    Assert(instr->m_func->IsTopFunc() || instr->m_func->isGetterSetter || instr->m_func->callSiteIdInParentFunc != UINT16_MAX);

    IR::Opnd *src1, *src2;
    Value *src1Val = nullptr, *src2Val = nullptr, *dstVal = nullptr;
    Value *src1IndirIndexVal = nullptr, *dstIndirIndexVal = nullptr;
    IR::Instr *instrPrev = instr->m_prev;
    IR::Instr *instrNext = instr->m_next;

    if (instr->IsLabelInstr() && this->func->HasTry() && this->func->DoOptimizeTry())
    {
        this->currentRegion = instr->AsLabelInstr()->GetRegion();
        Assert(this->currentRegion);
    }

    if (PrepareForIgnoringIntOverflow(instr))
    {
        if (!IsLoopPrePass())
        {
            *isInstrRemoved = true;
            currentBlock->RemoveInstr(instr);
        }
        return instrNext;
    }

    if (!instr->IsRealInstr() || instr->IsByteCodeUsesInstr() || instr->m_opcode == Js::OpCode::Conv_Bool)
    {
        return instrNext;
    }

    if (instr->m_opcode == Js::OpCode::Yield)
    {
        // TODO[generators][ianhall]: Can this and the FillBailOutInfo call below be moved to after Src1 and Src2 so that Yield can be optimized right up to the actual yield?
        CurrentBlockData()->KillStateForGeneratorYield();
    }

    // Change LdFld on arrays, strings, and 'arguments' to LdLen when we're accessing the .length field
    this->TryReplaceLdLen(instr);

    // Consider: Do we ever get post-op bailout here, and if so is the FillBailOutInfo call in the right place?
    if (instr->HasBailOutInfo() && !this->IsLoopPrePass())
    {
        this->FillBailOutInfo(this->currentBlock, instr->GetBailOutInfo());
    }

    this->instrCountSinceLastCleanUp++;

    instr = this->PreOptPeep(instr);

    this->OptArguments(instr);

    // StackArguments Optimization - We bail out if the index is out of range of actuals.
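    // For illustration: an "arguments[5]" access in a function that was called with only two
    // actuals falls outside the pushed actuals and takes the bailout inserted below.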
if ((instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::TypeofElem) && instr->DoStackArgsOpt(this->func) && !this->IsLoopPrePass()) { GenerateBailAtOperation(&instr, IR::BailOnStackArgsOutOfActualsRange); } #if DBG PropertySym *propertySymUseBefore = nullptr; Assert(this->byteCodeUses == nullptr); this->byteCodeUsesBeforeOpt->ClearAll(); GlobOpt::TrackByteCodeSymUsed(instr, this->byteCodeUsesBeforeOpt, &propertySymUseBefore); Assert(noImplicitCallUsesToInsert->Count() == 0); #endif this->ignoredIntOverflowForCurrentInstr = false; this->ignoredNegativeZeroForCurrentInstr = false; src1 = instr->GetSrc1(); src2 = instr->GetSrc2(); if (src1) { src1Val = this->OptSrc(src1, &instr, &src1IndirIndexVal); instr = this->SetTypeCheckBailOut(instr->GetSrc1(), instr, nullptr); if (src2) { src2Val = this->OptSrc(src2, &instr); } } if(instr->GetDst() && instr->GetDst()->IsIndirOpnd()) { this->OptSrc(instr->GetDst(), &instr, &dstIndirIndexVal); } MarkArgumentsUsedForBranch(instr); CSEOptimize(this->currentBlock, &instr, &src1Val, &src2Val, &src1IndirIndexVal); OptimizeChecks(instr); OptArraySrc(&instr); OptNewScObject(&instr, src1Val); instr = this->OptPeep(instr, src1Val, src2Val); if (instr->m_opcode == Js::OpCode::Nop || (instr->m_opcode == Js::OpCode::CheckThis && instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isSafeThis)) { instrNext = instr->m_next; InsertNoImplicitCallUses(instr); if (this->byteCodeUses) { this->InsertByteCodeUses(instr); } *isInstrRemoved = true; this->currentBlock->RemoveInstr(instr); return instrNext; } else if (instr->m_opcode == Js::OpCode::GetNewScObject && !this->IsLoopPrePass() && src1Val->GetValueInfo()->IsPrimitive()) { // Constructor returned (src1) a primitive value, so fold this into "dst = Ld_A src2", where src2 is the new object that // was passed into the constructor as its 'this' parameter instr->FreeSrc1(); instr->SetSrc1(instr->UnlinkSrc2()); instr->m_opcode = Js::OpCode::Ld_A; src1Val = src2Val; src2Val = nullptr; } else if ((instr->m_opcode == Js::OpCode::TryCatch && this->func->DoOptimizeTry()) || (instr->m_opcode == Js::OpCode::TryFinally && this->func->DoOptimizeTry())) { ProcessTryHandler(instr); } else if (instr->m_opcode == Js::OpCode::BrOnException || instr->m_opcode == Js::OpCode::BrOnNoException) { if (this->ProcessExceptionHandlingEdges(instr)) { *isInstrRemoved = true; return instrNext; } } bool isAlreadyTypeSpecialized = false; if (!IsLoopPrePass() && instr->HasBailOutInfo()) { if (instr->GetBailOutKind() == IR::BailOutExpectingInteger) { isAlreadyTypeSpecialized = TypeSpecializeBailoutExpectedInteger(instr, src1Val, &dstVal); } else if (instr->GetBailOutKind() == IR::BailOutExpectingString) { if (instr->GetSrc1()->IsRegOpnd()) { if (!src1Val || !src1Val->GetValueInfo()->IsLikelyString()) { // Disable SwitchOpt if the source is definitely not a string - This may be realized only in Globopt Assert(IsSwitchOptEnabled()); throw Js::RejitException(RejitReason::DisableSwitchOptExpectingString); } } } } bool forceInvariantHoisting = false; const bool ignoreIntOverflowInRangeForInstr = instr->ignoreIntOverflowInRange; // Save it since the instr can change if (!isAlreadyTypeSpecialized) { bool redoTypeSpec; instr = this->TypeSpecialization(instr, &src1Val, &src2Val, &dstVal, &redoTypeSpec, &forceInvariantHoisting); if(redoTypeSpec && instr->m_opcode != Js::OpCode::Nop) { forceInvariantHoisting = false; instr = this->TypeSpecialization(instr, &src1Val, &src2Val, &dstVal, &redoTypeSpec, 
&forceInvariantHoisting); Assert(!redoTypeSpec); } if (instr->m_opcode == Js::OpCode::Nop) { InsertNoImplicitCallUses(instr); if (this->byteCodeUses) { this->InsertByteCodeUses(instr); } instrNext = instr->m_next; *isInstrRemoved = true; this->currentBlock->RemoveInstr(instr); return instrNext; } } if (ignoreIntOverflowInRangeForInstr) { VerifyIntSpecForIgnoringIntOverflow(instr); } // Track calls after any pre-op bailouts have been inserted before the call, because they will need to restore out params. this->TrackCalls(instr); if (instr->GetSrc1()) { this->UpdateObjPtrValueType(instr->GetSrc1(), instr); } IR::Opnd *dst = instr->GetDst(); if (dst) { // Copy prop dst uses and mark live/available type syms before tracking kills. CopyPropDstUses(dst, instr, src1Val); } // Track mark temp object before we process the dst so we can generate pre-op bailout instr = this->TrackMarkTempObject(instrPrev->m_next, instr); bool removed = OptTagChecks(instr); if (removed) { *isInstrRemoved = true; return instrNext; } dstVal = this->OptDst(&instr, dstVal, src1Val, src2Val, dstIndirIndexVal, src1IndirIndexVal); dst = instr->GetDst(); instrNext = instr->m_next; if (dst) { if (this->func->HasTry() && this->func->DoOptimizeTry()) { this->InsertToVarAtDefInTryRegion(instr, dst); } instr = this->SetTypeCheckBailOut(dst, instr, nullptr); this->UpdateObjPtrValueType(dst, instr); } BVSparse<JitArenaAllocator> instrByteCodeStackSymUsedAfter(this->alloc); PropertySym *propertySymUseAfter = nullptr; if (this->byteCodeUses != nullptr) { GlobOpt::TrackByteCodeSymUsed(instr, &instrByteCodeStackSymUsedAfter, &propertySymUseAfter); } #if DBG else { GlobOpt::TrackByteCodeSymUsed(instr, &instrByteCodeStackSymUsedAfter, &propertySymUseAfter); instrByteCodeStackSymUsedAfter.Equal(this->byteCodeUsesBeforeOpt); Assert(propertySymUseAfter == propertySymUseBefore); } #endif bool isHoisted = false; if (this->currentBlock->loop && !this->IsLoopPrePass()) { isHoisted = this->TryHoistInvariant(instr, this->currentBlock, dstVal, src1Val, src2Val, true, false, forceInvariantHoisting); } src1 = instr->GetSrc1(); if (!this->IsLoopPrePass() && src1) { // instr const, nonConst => canonicalize by swapping operands // This simplifies lowering. (somewhat machine dependent) // Note that because of Var overflows, src1 may not have been constant prop'd to an IntConst this->PreLowerCanonicalize(instr, &src1Val, &src2Val); } if (!PHASE_OFF(Js::MemOpPhase, this->func) && !isHoisted && !(instr->IsJitProfilingInstr()) && this->currentBlock->loop && !IsLoopPrePass() && !func->IsJitInDebugMode() && (func->HasProfileInfo() && !func->GetReadOnlyProfileInfo()->IsMemOpDisabled()) && this->currentBlock->loop->doMemOp) { CollectMemOpInfo(instrPrev, instr, src1Val, src2Val); } InsertNoImplicitCallUses(instr); if (this->byteCodeUses != nullptr) { // Optimization removed some uses from the instruction. // Need to insert fake uses so we can get the correct live register to restore in bailout. 
this->byteCodeUses->Minus(&instrByteCodeStackSymUsedAfter); if (this->propertySymUse == propertySymUseAfter) { this->propertySymUse = nullptr; } this->InsertByteCodeUses(instr); } if (!this->IsLoopPrePass() && !isHoisted && this->IsImplicitCallBailOutCurrentlyNeeded(instr, src1Val, src2Val)) { IR::BailOutKind kind = IR::BailOutOnImplicitCalls; if(instr->HasBailOutInfo()) { Assert(instr->GetBailOutInfo()->bailOutOffset == instr->GetByteCodeOffset()); const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); if((bailOutKind & ~IR::BailOutKindBits) != IR::BailOutOnImplicitCallsPreOp) { Assert(!(bailOutKind & ~IR::BailOutKindBits)); instr->SetBailOutKind(bailOutKind + IR::BailOutOnImplicitCallsPreOp); } } else if (instr->forcePreOpBailOutIfNeeded || this->isRecursiveCallOnLandingPad) { // We can't have a byte code reg slot as dst to generate a // pre-op implicit call after we have processed the dst. // Consider: This might miss an opportunity to use a copy prop sym to restore // some other byte code reg if the dst is that copy prop that we already killed. Assert(!instr->GetDst() || !instr->GetDst()->IsRegOpnd() || instr->GetDst()->AsRegOpnd()->GetIsJITOptimizedReg() || !instr->GetDst()->AsRegOpnd()->m_sym->HasByteCodeRegSlot()); this->GenerateBailAtOperation(&instr, IR::BailOutOnImplicitCallsPreOp); } else { // Capture value of the bailout after the operation is done. this->GenerateBailAfterOperation(&instr, kind); } } if (CurrentBlockData()->capturedValuesCandidate && !this->IsLoopPrePass()) { this->CommitCapturedValuesCandidate(); } return instrNext; } bool GlobOpt::OptTagChecks(IR::Instr *instr) { if (PHASE_OFF(Js::OptTagChecksPhase, this->func) || !this->DoTagChecks()) { return false; } StackSym *stackSym = nullptr; IR::SymOpnd *symOpnd = nullptr; IR::RegOpnd *regOpnd = nullptr; switch(instr->m_opcode) { case Js::OpCode::LdFld: case Js::OpCode::LdMethodFld: case Js::OpCode::CheckFixedFld: case Js::OpCode::CheckPropertyGuardAndLoadType: symOpnd = instr->GetSrc1()->AsSymOpnd(); stackSym = symOpnd->m_sym->AsPropertySym()->m_stackSym; break; case Js::OpCode::BailOnNotObject: case Js::OpCode::BailOnNotArray: if (instr->GetSrc1()->IsRegOpnd()) { regOpnd = instr->GetSrc1()->AsRegOpnd(); stackSym = regOpnd->m_sym; } break; case Js::OpCode::StFld: symOpnd = instr->GetDst()->AsSymOpnd(); stackSym = symOpnd->m_sym->AsPropertySym()->m_stackSym; break; } if (stackSym) { Value *value = CurrentBlockData()->FindValue(stackSym); if (value) { ValueInfo *valInfo = value->GetValueInfo(); if (valInfo->GetSymStore() && valInfo->GetSymStore()->IsStackSym() && valInfo->GetSymStore()->AsStackSym()->IsFromByteCodeConstantTable()) { return false; } ValueType valueType = value->GetValueInfo()->Type(); if (instr->m_opcode == Js::OpCode::BailOnNotObject) { if (valueType.CanBeTaggedValue()) { // We're not adding new information to the value other than changing the value type. Preserve any existing // information and just change the value type. ChangeValueType(nullptr, value, valueType.SetCanBeTaggedValue(false), true /*preserveSubClassInfo*/); return false; } if (this->byteCodeUses) { this->InsertByteCodeUses(instr); } this->currentBlock->RemoveInstr(instr); return true; } if (valueType.CanBeTaggedValue() && !valueType.HasBeenNumber() && !this->IsLoopPrePass()) { ValueType newValueType = valueType.SetCanBeTaggedValue(false); // Split out the tag check as a separate instruction. 
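                // For illustration, for "t2 = LdFld s1.foo" where s1 might still be a tagged value:
                //     BailOnNotObject s1 (BailOutOnTaggedValue)
                //     t2 = LdFld s1.foo
                // after which s1's value type can drop CanBeTaggedValue.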
                IR::Instr *bailOutInstr = IR::BailOutInstr::New(Js::OpCode::BailOnNotObject, IR::BailOutOnTaggedValue, instr, instr->m_func);
                if (!this->IsLoopPrePass())
                {
                    FillBailOutInfo(this->currentBlock, bailOutInstr->GetBailOutInfo());
                }
                IR::RegOpnd *srcOpnd = regOpnd;
                if (!srcOpnd)
                {
                    srcOpnd = IR::RegOpnd::New(stackSym, stackSym->GetType(), instr->m_func);
                    AnalysisAssert(symOpnd);
                    if (symOpnd->GetIsJITOptimizedReg())
                    {
                        srcOpnd->SetIsJITOptimizedReg(true);
                    }
                }
                bailOutInstr->SetSrc1(srcOpnd);
                bailOutInstr->GetSrc1()->SetValueType(valueType);
                instr->InsertBefore(bailOutInstr);
                if (this->currentBlock->loop)
                {
                    // Try hoisting the BailOnNotObject instr.
                    // But since this isn't the current instr being optimized, we need to play tricks with
                    // the byteCodeUse fields...
                    TrackByteCodeUsesForInstrAddedInOptInstr(bailOutInstr, [&]()
                    {
                        TryHoistInvariant(bailOutInstr, this->currentBlock, nullptr, value, nullptr, true, false, false, IR::BailOutOnTaggedValue);
                    });
                }
                if (symOpnd)
                {
                    symOpnd->SetPropertyOwnerValueType(newValueType);
                }
                else
                {
                    regOpnd->SetValueType(newValueType);
                }
                ChangeValueType(nullptr, value, newValueType, false);
            }
        }
    }

    return false;
}

bool
GlobOpt::TypeSpecializeBailoutExpectedInteger(IR::Instr* instr, Value* src1Val, Value** dstVal)
{
    bool isAlreadyTypeSpecialized = false;

    if (instr->GetSrc1()->IsRegOpnd())
    {
        if (!src1Val || !src1Val->GetValueInfo()->IsLikelyInt() || instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt)
        {
            Assert(IsSwitchOptEnabledForIntTypeSpec());
            throw Js::RejitException(RejitReason::DisableSwitchOptExpectingInteger);
        }

        // Attach the BailOutExpectingInteger to FromVar and remove the bail out info on the Ld_A (Begin Switch) instr.
        this->ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, src1Val, nullptr, TyInt32, IR::BailOutExpectingInteger, false, instr);

        // Type-specialize the dst of Ld_A
        TypeSpecializeIntDst(instr, instr->m_opcode, src1Val, src1Val, nullptr, IR::BailOutInvalid, INT32_MIN, INT32_MAX, dstVal);
        isAlreadyTypeSpecialized = true;
    }

    instr->ClearBailOutInfo();
    return isAlreadyTypeSpecialized;
}

Value*
GlobOpt::OptDst(
    IR::Instr ** pInstr,
    Value *dstVal,
    Value *src1Val,
    Value *src2Val,
    Value *dstIndirIndexVal,
    Value *src1IndirIndexVal)
{
    IR::Instr *&instr = *pInstr;
    IR::Opnd *opnd = instr->GetDst();

    if (opnd)
    {
        if (opnd->IsSymOpnd() && opnd->AsSymOpnd()->IsPropertySymOpnd())
        {
            this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd());
        }
        else if (instr->m_opcode == Js::OpCode::StElemI_A ||
                 instr->m_opcode == Js::OpCode::StElemI_A_Strict ||
                 instr->m_opcode == Js::OpCode::InitComputedProperty)
        {
            this->KillObjectHeaderInlinedTypeSyms(this->currentBlock, false);
        }

        if (opnd->IsIndirOpnd() && !this->IsLoopPrePass())
        {
            IR::RegOpnd *baseOpnd = opnd->AsIndirOpnd()->GetBaseOpnd();
            const ValueType baseValueType(baseOpnd->GetValueType());
            if ((
                    baseValueType.IsLikelyNativeArray() ||
#ifdef _M_IX86
                    (
                        !AutoSystemInfo::Data.SSE2Available() &&
                        baseValueType.IsLikelyObject() &&
                        (
                            baseValueType.GetObjectType() == ObjectType::Float32Array ||
                            baseValueType.GetObjectType() == ObjectType::Float64Array
                        )
                    )
#else
                    false
#endif
                ) && instr->GetSrc1()->IsVar())
            {
                if (instr->m_opcode == Js::OpCode::StElemC)
                {
                    // StElemC has different code that handles native array conversion or missing value stores. Add a bailout
                    // for those cases.
Assert(baseValueType.IsLikelyNativeArray()); Assert(!instr->HasBailOutInfo()); GenerateBailAtOperation(&instr, IR::BailOutConventionalNativeArrayAccessOnly); } else if(instr->HasBailOutInfo()) { // The lowerer is not going to generate a fast path for this case. Remove any bailouts that require the fast // path. Note that the removed bailouts should not be necessary for correctness. Bailout on native array // conversion will be handled automatically as normal. IR::BailOutKind bailOutKind = instr->GetBailOutKind(); if(bailOutKind & IR::BailOutOnArrayAccessHelperCall) { bailOutKind -= IR::BailOutOnArrayAccessHelperCall; } if(bailOutKind == IR::BailOutOnImplicitCallsPreOp) { bailOutKind -= IR::BailOutOnImplicitCallsPreOp; } if(bailOutKind) { instr->SetBailOutKind(bailOutKind); } else { instr->ClearBailOutInfo(); } } } } } this->ProcessKills(instr); if (opnd) { if (dstVal == nullptr) { dstVal = ValueNumberDst(pInstr, src1Val, src2Val); } if (this->IsLoopPrePass()) { // Keep track of symbols defined in the loop. if (opnd->IsRegOpnd()) { StackSym *symDst = opnd->AsRegOpnd()->m_sym; rootLoopPrePass->symsDefInLoop->Set(symDst->m_id); } } else if (dstVal) { opnd->SetValueType(dstVal->GetValueInfo()->Type()); if(currentBlock->loop && !IsLoopPrePass() && (instr->m_opcode == Js::OpCode::Ld_A || instr->m_opcode == Js::OpCode::Ld_I4) && instr->GetSrc1()->IsRegOpnd() && !func->IsJitInDebugMode() && func->DoGlobOptsForGeneratorFunc()) { // Look for the following patterns: // // Pattern 1: // s1[liveOnBackEdge] = s3[dead] // // Pattern 2: // s3 = operation(s1[liveOnBackEdge], s2) // s1[liveOnBackEdge] = s3 // // In both patterns, s1 and s3 have the same value by the end. Prefer to use s1 as the sym store instead of s3 // since s1 is live on back-edge, as otherwise, their lifetimes overlap, requiring two registers to hold the // value instead of one. do { IR::RegOpnd *const src = instr->GetSrc1()->AsRegOpnd(); StackSym *srcVarSym = src->m_sym; if(srcVarSym->IsTypeSpec()) { srcVarSym = srcVarSym->GetVarEquivSym(nullptr); Assert(srcVarSym); } if(dstVal->GetValueInfo()->GetSymStore() != srcVarSym) { break; } IR::RegOpnd *const dst = opnd->AsRegOpnd(); StackSym *dstVarSym = dst->m_sym; if(dstVarSym->IsTypeSpec()) { dstVarSym = dstVarSym->GetVarEquivSym(nullptr); Assert(dstVarSym); } if(!currentBlock->loop->regAlloc.liveOnBackEdgeSyms->Test(dstVarSym->m_id)) { break; } Value *const srcValue = CurrentBlockData()->FindValue(srcVarSym); if(srcValue->GetValueNumber() != dstVal->GetValueNumber()) { break; } if(!src->GetIsDead()) { IR::Instr *const prevInstr = instr->GetPrevRealInstrOrLabel(); IR::Opnd *const prevDst = prevInstr->GetDst(); if(!prevDst || !src->IsEqualInternal(prevDst) || !( (prevInstr->GetSrc1() && dst->IsEqual(prevInstr->GetSrc1())) || (prevInstr->GetSrc2() && dst->IsEqual(prevInstr->GetSrc2())) )) { break; } } this->SetSymStoreDirect(dstVal->GetValueInfo(), dstVarSym); } while(false); } } this->ValueNumberObjectType(opnd, instr); } this->CSEAddInstr(this->currentBlock, *pInstr, dstVal, src1Val, src2Val, dstIndirIndexVal, src1IndirIndexVal); return dstVal; } void GlobOpt::CopyPropDstUses(IR::Opnd *opnd, IR::Instr *instr, Value *src1Val) { if (opnd->IsSymOpnd()) { IR::SymOpnd *symOpnd = opnd->AsSymOpnd(); if (symOpnd->m_sym->IsPropertySym()) { PropertySym * originalPropertySym = symOpnd->m_sym->AsPropertySym(); Value *const objectValue = CurrentBlockData()->FindValue(originalPropertySym->m_stackSym); symOpnd->SetPropertyOwnerValueType(objectValue ? 
            objectValue->GetValueInfo()->Type() : ValueType::Uninitialized);

            this->FieldHoistOptDst(instr, originalPropertySym, src1Val);
            PropertySym * sym = this->CopyPropPropertySymObj(symOpnd, instr);
            if (sym != originalPropertySym && !this->IsLoopPrePass())
            {
                // Consider: This doesn't detect hoistability of a property sym after object pointer copy prop
                // on loop prepass. But if it so happened that the property sym is hoisted, we might as well do so.
                this->FieldHoistOptDst(instr, sym, src1Val);
            }
        }
    }
}

void
GlobOpt::SetLoopFieldInitialValue(Loop *loop, IR::Instr *instr, PropertySym *propertySym, PropertySym *originalPropertySym)
{
    Value *initialValue = nullptr;
    StackSym *symStore;

    if (loop->allFieldsKilled || loop->fieldKilled->Test(originalPropertySym->m_id))
    {
        return;
    }
    Assert(!loop->fieldKilled->Test(propertySym->m_id));

    // Value already exists
    if (CurrentBlockData()->FindValue(propertySym))
    {
        return;
    }

    // If this initial value was already added, we would find it in the current value table.
    Assert(!loop->initialValueFieldMap.TryGetValue(propertySym, &initialValue));

    // If propertySym is live in landingPad, we don't need an initial value.
    if (loop->landingPad->globOptData.liveFields->Test(propertySym->m_id))
    {
        return;
    }

    Value *landingPadObjPtrVal, *currentObjPtrVal;
    landingPadObjPtrVal = loop->landingPad->globOptData.FindValue(propertySym->m_stackSym);
    currentObjPtrVal = CurrentBlockData()->FindValue(propertySym->m_stackSym);
    if (!currentObjPtrVal || !landingPadObjPtrVal || currentObjPtrVal->GetValueNumber() != landingPadObjPtrVal->GetValueNumber())
    {
        // objPtr has a different value in the landing pad.
        return;
    }

    // The opnd's value type has not yet been initialized. Since the property sym doesn't have a value, it effectively has an
    // Uninitialized value type. Use the profiled value type from the instruction.
    const ValueType profiledValueType =
        instr->IsProfiledInstr() ? instr->AsProfiledInstr()->u.FldInfo().valueType : ValueType::Uninitialized;
    Assert(!profiledValueType.IsDefinite()); // Hence the values created here don't need to be tracked for kills
    initialValue = this->NewGenericValue(profiledValueType, propertySym);
    symStore = StackSym::New(this->func);

    initialValue->GetValueInfo()->SetSymStore(symStore);
    loop->initialValueFieldMap.Add(propertySym, initialValue->Copy(this->alloc, initialValue->GetValueNumber()));

    // Copy the initial value into the landing pad, but without a symStore
    Value *landingPadInitialValue = Value::New(this->alloc, initialValue->GetValueNumber(),
        ValueInfo::New(this->alloc, initialValue->GetValueInfo()->Type()));
    loop->landingPad->globOptData.SetValue(landingPadInitialValue, propertySym);
    loop->landingPad->globOptData.liveFields->Set(propertySym->m_id);

#if DBG_DUMP
    if (PHASE_TRACE(Js::FieldPREPhase, this->func))
    {
        Output::Print(_u("** TRACE: Field PRE initial value for loop head #%d. Val:%d symStore:"),
            loop->GetHeadBlock()->GetBlockNum(), initialValue->GetValueNumber());
        symStore->Dump();
        Output::Print(_u("\n Instr: "));
        instr->Dump();
    }
#endif

    // Add initial value to all the previous blocks in the loop.
    FOREACH_BLOCK_BACKWARD_IN_RANGE(block, this->currentBlock->GetPrev(), loop->GetHeadBlock())
    {
        if (block->GetDataUseCount() == 0)
        {
            // All successor blocks have been processed, no point in adding the value.
continue; } Value *newValue = initialValue->Copy(this->alloc, initialValue->GetValueNumber()); block->globOptData.SetValue(newValue, propertySym); block->globOptData.liveFields->Set(propertySym->m_id); block->globOptData.SetValue(newValue, symStore); block->globOptData.liveVarSyms->Set(symStore->m_id); } NEXT_BLOCK_BACKWARD_IN_RANGE; CurrentBlockData()->SetValue(initialValue, symStore); CurrentBlockData()->liveVarSyms->Set(symStore->m_id); CurrentBlockData()->liveFields->Set(propertySym->m_id); } // Examine src, apply copy prop and value number it Value* GlobOpt::OptSrc(IR::Opnd *opnd, IR::Instr * *pInstr, Value **indirIndexValRef, IR::IndirOpnd *parentIndirOpnd) { IR::Instr * &instr = *pInstr; Assert(!indirIndexValRef || !*indirIndexValRef); Assert( parentIndirOpnd ? opnd == parentIndirOpnd->GetBaseOpnd() || opnd == parentIndirOpnd->GetIndexOpnd() : opnd == instr->GetSrc1() || opnd == instr->GetSrc2() || opnd == instr->GetDst() && opnd->IsIndirOpnd()); Sym *sym; Value *val; PropertySym *originalPropertySym = nullptr; switch(opnd->GetKind()) { case IR::OpndKindIntConst: val = this->GetIntConstantValue(opnd->AsIntConstOpnd()->AsInt32(), instr); opnd->SetValueType(val->GetValueInfo()->Type()); return val; case IR::OpndKindInt64Const: val = this->GetIntConstantValue(opnd->AsInt64ConstOpnd()->GetValue(), instr); opnd->SetValueType(val->GetValueInfo()->Type()); return val; case IR::OpndKindFloatConst: { const FloatConstType floatValue = opnd->AsFloatConstOpnd()->m_value; int32 int32Value; if(Js::JavascriptNumber::TryGetInt32Value(floatValue, &int32Value)) { val = GetIntConstantValue(int32Value, instr); } else { val = NewFloatConstantValue(floatValue); } opnd->SetValueType(val->GetValueInfo()->Type()); return val; } case IR::OpndKindAddr: { IR::AddrOpnd *addrOpnd = opnd->AsAddrOpnd(); if (addrOpnd->m_isFunction) { AssertMsg(!PHASE_OFF(Js::FixedMethodsPhase, instr->m_func), "Fixed function address operand with fixed method calls phase disabled?"); val = NewFixedFunctionValue((Js::JavascriptFunction *)addrOpnd->m_address, addrOpnd); opnd->SetValueType(val->GetValueInfo()->Type()); return val; } else if (addrOpnd->IsVar() && Js::TaggedInt::Is(addrOpnd->m_address)) { val = this->GetIntConstantValue(Js::TaggedInt::ToInt32(addrOpnd->m_address), instr); opnd->SetValueType(val->GetValueInfo()->Type()); return val; } val = this->GetVarConstantValue(addrOpnd); return val; } case IR::OpndKindSym: { // Clear the opnd's value type up-front, so that this code cannot accidentally use the value type set from a previous // OptSrc on the same instruction (for instance, from an earlier loop prepass). The value type will be set from the // value if available, before returning from this function. opnd->SetValueType(ValueType::Uninitialized); sym = opnd->AsSymOpnd()->m_sym; // Don't create a new value for ArgSlots and don't copy prop them away. if (sym->IsStackSym() && sym->AsStackSym()->IsArgSlotSym()) { return nullptr; } // Unless we have profile info, don't create a new value for ArgSlots and don't copy prop them away. if (sym->IsStackSym() && sym->AsStackSym()->IsParamSlotSym()) { if (!instr->m_func->IsLoopBody() && instr->m_func->HasProfileInfo()) { // Skip "this" pointer. 
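                // Param slots are 1-based and slot 1 holds the implicit 'this', so subtracting 2
                // yields the 0-based profile index of the first explicit parameter.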
                int paramSlotNum = sym->AsStackSym()->GetParamSlotNum() - 2;
                if (paramSlotNum >= 0)
                {
                    const auto parameterType = instr->m_func->GetReadOnlyProfileInfo()->GetParameterInfo(static_cast<Js::ArgSlot>(paramSlotNum));
                    val = NewGenericValue(parameterType);
                    opnd->SetValueType(val->GetValueInfo()->Type());
                    return val;
                }
            }
            return nullptr;
        }

        if (!sym->IsPropertySym())
        {
            break;
        }
        originalPropertySym = sym->AsPropertySym();

        // Don't give a value to the 'arguments' property sym, to prevent field copy prop of 'arguments'
        if (originalPropertySym->AsPropertySym()->m_propertyId == Js::PropertyIds::arguments &&
            originalPropertySym->AsPropertySym()->m_fieldKind == PropertyKindData)
        {
            return nullptr;
        }

        Value *const objectValue = CurrentBlockData()->FindValue(originalPropertySym->m_stackSym);
        opnd->AsSymOpnd()->SetPropertyOwnerValueType(objectValue ? objectValue->GetValueInfo()->Type() : ValueType::Uninitialized);

        if (!FieldHoistOptSrc(opnd->AsSymOpnd(), instr, originalPropertySym))
        {
            sym = this->CopyPropPropertySymObj(opnd->AsSymOpnd(), instr);

            // Consider: This doesn't detect hoistability of a property sym after object pointer copy prop
            // on loop prepass. But if it so happened that the property sym is hoisted, we might as well do so.
            if (originalPropertySym == sym || this->IsLoopPrePass() ||
                !FieldHoistOptSrc(opnd->AsSymOpnd(), instr, sym->AsPropertySym()))
            {
                if (!DoFieldCopyProp())
                {
                    if (opnd->AsSymOpnd()->IsPropertySymOpnd())
                    {
                        this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd());
                    }
                    return nullptr;
                }
                switch (instr->m_opcode)
                {
                    // These need the symbolic reference to the field, don't copy prop the value of the field
                case Js::OpCode::DeleteFld:
                case Js::OpCode::DeleteRootFld:
                case Js::OpCode::DeleteFldStrict:
                case Js::OpCode::DeleteRootFldStrict:
                case Js::OpCode::ScopedDeleteFld:
                case Js::OpCode::ScopedDeleteFldStrict:
                case Js::OpCode::LdMethodFromFlags:
                case Js::OpCode::BrOnNoProperty:
                case Js::OpCode::BrOnHasProperty:
                case Js::OpCode::LdMethodFldPolyInlineMiss:
                case Js::OpCode::StSlotChkUndecl:
                    return nullptr;
                };

                if (instr->CallsGetter())
                {
                    return nullptr;
                }

                if (this->IsLoopPrePass() && this->DoFieldPRE(this->rootLoopPrePass))
                {
                    if (!this->prePassLoop->allFieldsKilled && !this->prePassLoop->fieldKilled->Test(sym->m_id))
                    {
                        this->SetLoopFieldInitialValue(this->rootLoopPrePass, instr, sym->AsPropertySym(), originalPropertySym);
                    }
                    if (this->IsPREInstrCandidateLoad(instr->m_opcode))
                    {
                        // Foreach property sym, remember the first instruction that loads it.
                        // Can this be done in one call?
                        if (!this->prePassInstrMap->ContainsKey(sym->m_id))
                        {
                            this->prePassInstrMap->AddNew(sym->m_id, instr);
                        }
                    }
                }
                break;
            }
        }

        // We field hoisted, we can continue as a reg; fall through to the reg case.
        opnd = instr->GetSrc1();
    }
    case IR::OpndKindReg:
        // Clear the opnd's value type up-front, so that this code cannot accidentally use the value type set from a previous
        // OptSrc on the same instruction (for instance, from an earlier loop prepass). The value type will be set from the
        // value if available, before returning from this function.
opnd->SetValueType(ValueType::Uninitialized); sym = opnd->AsRegOpnd()->m_sym; CurrentBlockData()->MarkTempLastUse(instr, opnd->AsRegOpnd()); if (sym->AsStackSym()->IsTypeSpec()) { sym = sym->AsStackSym()->GetVarEquivSym(this->func); } break; case IR::OpndKindIndir: this->OptimizeIndirUses(opnd->AsIndirOpnd(), &instr, indirIndexValRef); return nullptr; default: return nullptr; } val = CurrentBlockData()->FindValue(sym); if (val) { Assert(CurrentBlockData()->IsLive(sym) || (sym->IsPropertySym())); if (instr) { opnd = this->CopyProp(opnd, instr, val, parentIndirOpnd); } // Check if we freed the operand. if (opnd == nullptr) { return nullptr; } // In a loop prepass, determine stack syms that are used before they are defined in the root loop for which the prepass // is being done. This information is used to do type specialization conversions in the landing pad where appropriate. if(IsLoopPrePass() && sym->IsStackSym() && !rootLoopPrePass->symsUsedBeforeDefined->Test(sym->m_id) && rootLoopPrePass->landingPad->globOptData.IsLive(sym) && !isAsmJSFunc) // no typespec in asmjs and hence skipping this { Value *const landingPadValue = rootLoopPrePass->landingPad->globOptData.FindValue(sym); if(landingPadValue && val->GetValueNumber() == landingPadValue->GetValueNumber()) { rootLoopPrePass->symsUsedBeforeDefined->Set(sym->m_id); ValueInfo *landingPadValueInfo = landingPadValue->GetValueInfo(); if(landingPadValueInfo->IsLikelyNumber()) { rootLoopPrePass->likelyNumberSymsUsedBeforeDefined->Set(sym->m_id); if(DoAggressiveIntTypeSpec() ? landingPadValueInfo->IsLikelyInt() : landingPadValueInfo->IsInt()) { // Can only force int conversions in the landing pad based on likely-int values if aggressive int type // specialization is enabled. rootLoopPrePass->likelyIntSymsUsedBeforeDefined->Set(sym->m_id); } } #ifdef ENABLE_SIMDJS // SIMD_JS // For uses before defs, we set likelySimd128*SymsUsedBeforeDefined bits for syms that have landing pad value info that allow type-spec to happen in the loop body. // The BV will be added to loop header if the backedge has a live matching type-spec value. We then compensate in the loop header to unbox the value. // This allows type-spec in the landing pad instead of boxing/unboxing on each iteration. if (Js::IsSimd128Opcode(instr->m_opcode)) { // Simd ops are strongly typed. We type-spec only if the type is likely/Definitely the expected type or if we have object which can come from merging different Simd types. // Simd value must be initialized properly on all paths before the loop entry. Cannot be merged with Undefined/Null. ThreadContext::SimdFuncSignature funcSignature; instr->m_func->GetScriptContext()->GetThreadContext()->GetSimdFuncSignatureFromOpcode(instr->m_opcode, funcSignature); Assert(funcSignature.valid); ValueType expectedType = funcSignature.args[opnd == instr->GetSrc1() ? 
0 : 1]; if (expectedType.IsSimd128Float32x4()) { if ( (landingPadValueInfo->IsLikelySimd128Float32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object)) && !landingPadValueInfo->HasBeenUndefined() && !landingPadValueInfo->HasBeenNull() ) { rootLoopPrePass->likelySimd128F4SymsUsedBeforeDefined->Set(sym->m_id); } } else if (expectedType.IsSimd128Int32x4()) { if ( (landingPadValueInfo->IsLikelySimd128Int32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object)) && !landingPadValueInfo->HasBeenUndefined() && !landingPadValueInfo->HasBeenNull() ) { rootLoopPrePass->likelySimd128I4SymsUsedBeforeDefined->Set(sym->m_id); } } } else if (instr->m_opcode == Js::OpCode::ExtendArg_A && opnd == instr->GetSrc1() && instr->GetDst()->GetValueType().IsSimd128()) { // Extended_Args for Simd ops are annotated with the expected type by the inliner. Use this info to find out if type-spec is supposed to happen. ValueType expectedType = instr->GetDst()->GetValueType(); if ((landingPadValueInfo->IsLikelySimd128Float32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object)) && expectedType.IsSimd128Float32x4()) { rootLoopPrePass->likelySimd128F4SymsUsedBeforeDefined->Set(sym->m_id); } else if ((landingPadValueInfo->IsLikelySimd128Int32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object)) && expectedType.IsSimd128Int32x4()) { rootLoopPrePass->likelySimd128I4SymsUsedBeforeDefined->Set(sym->m_id); } } #endif } } } else if ((instr->TransfersSrcValue() || OpCodeAttr::CanCSE(instr->m_opcode)) && (opnd == instr->GetSrc1() || opnd == instr->GetSrc2())) { if (sym->IsPropertySym()) { val = this->CreateFieldSrcValue(sym->AsPropertySym(), originalPropertySym, &opnd, instr); } else { val = this->NewGenericValue(ValueType::Uninitialized, opnd); } } if (opnd->IsSymOpnd() && opnd->AsSymOpnd()->IsPropertySymOpnd()) { TryOptimizeInstrWithFixedDataProperty(&instr); this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd()); } if (val) { ValueType valueType(val->GetValueInfo()->Type()); // This block uses local profiling data to optimize the case of a native array being passed to a function that fills it with other types. When the function is inlined // into different call paths which use different types this can cause a perf hit by performing unnecessary array conversions, so only perform this optimization when // the function is not inlined. 
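        // For illustration: if dataflow says "likely native int array" here but this access's
        // profile saw float elements, the merge below widens the element type up front instead
        // of leaving a native-array conversion to happen at runtime.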
if (valueType.IsLikelyNativeArray() && !valueType.IsObject() && instr->IsProfiledInstr() && !instr->m_func->IsInlined()) { // See if we have profile data for the array type IR::ProfiledInstr *const profiledInstr = instr->AsProfiledInstr(); ValueType profiledArrayType; switch(instr->m_opcode) { case Js::OpCode::LdElemI_A: if(instr->GetSrc1()->IsIndirOpnd() && opnd == instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()) { profiledArrayType = profiledInstr->u.ldElemInfo->GetArrayType(); } break; case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: case Js::OpCode::StElemC: if(instr->GetDst()->IsIndirOpnd() && opnd == instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()) { profiledArrayType = profiledInstr->u.stElemInfo->GetArrayType(); } break; case Js::OpCode::LdLen_A: if(instr->GetSrc1()->IsRegOpnd() && opnd == instr->GetSrc1()) { profiledArrayType = profiledInstr->u.ldElemInfo->GetArrayType(); } break; } if(profiledArrayType.IsLikelyObject() && profiledArrayType.GetObjectType() == valueType.GetObjectType() && (profiledArrayType.HasVarElements() || (valueType.HasIntElements() && profiledArrayType.HasFloatElements()))) { // Merge array type we pulled from profile with type propagated by dataflow. valueType = valueType.Merge(profiledArrayType).SetHasNoMissingValues(valueType.HasNoMissingValues()); ChangeValueType(this->currentBlock, CurrentBlockData()->FindValue(opnd->AsRegOpnd()->m_sym), valueType, false); } } opnd->SetValueType(valueType); if(!IsLoopPrePass() && opnd->IsSymOpnd() && valueType.IsDefinite()) { if (opnd->AsSymOpnd()->m_sym->IsPropertySym()) { // A property sym can only be guaranteed to have a definite value type when implicit calls are disabled from the // point where the sym was defined with the definite value type. Insert an instruction to indicate to the // dead-store pass that implicit calls need to be kept disabled until after this instruction. Assert(DoFieldCopyProp()); CaptureNoImplicitCallUses(opnd, false, instr); } } } else { opnd->SetValueType(ValueType::Uninitialized); } return val; } /* * GlobOpt::TryOptimizeInstrWithFixedDataProperty * Converts Ld[Root]Fld instr to * * CheckFixedFld * * Dst = Ld_A <int Constant value> * This API assumes that the source operand is a Sym/PropertySym kind. */ void GlobOpt::TryOptimizeInstrWithFixedDataProperty(IR::Instr ** const pInstr) { Assert(pInstr); IR::Instr * &instr = *pInstr; IR::Opnd * src1 = instr->GetSrc1(); Assert(src1 && src1->IsSymOpnd() && src1->AsSymOpnd()->IsPropertySymOpnd()); if(PHASE_OFF(Js::UseFixedDataPropsPhase, instr->m_func)) { return; } if (!this->IsLoopPrePass() && !this->isRecursiveCallOnLandingPad && OpCodeAttr::CanLoadFixedFields(instr->m_opcode)) { instr->TryOptimizeInstrWithFixedDataProperty(&instr, this); } } // Constant prop if possible, otherwise if this value already resides in another // symbol, reuse this previous symbol. This should help register allocation. IR::Opnd * GlobOpt::CopyProp(IR::Opnd *opnd, IR::Instr *instr, Value *val, IR::IndirOpnd *parentIndirOpnd) { Assert( parentIndirOpnd ? opnd == parentIndirOpnd->GetBaseOpnd() || opnd == parentIndirOpnd->GetIndexOpnd() : opnd == instr->GetSrc1() || opnd == instr->GetSrc2() || opnd == instr->GetDst() && opnd->IsIndirOpnd()); if (this->IsLoopPrePass()) { // Transformations are not legal in prepass... 
        return opnd;
    }

    if (!this->func->DoGlobOptsForGeneratorFunc())
    {
        // Don't copy prop in generator functions because non-bytecode temps that span a yield
        // cannot be saved and restored by the current bail-out mechanics utilized by generator
        // yield/resume.
        // TODO[generators][ianhall]: Enable copy-prop at least for in between yields.
        return opnd;
    }

    if (instr->m_opcode == Js::OpCode::CheckFixedFld || instr->m_opcode == Js::OpCode::CheckPropertyGuardAndLoadType)
    {
        // Don't copy prop into CheckFixedFld or CheckPropertyGuardAndLoadType
        return opnd;
    }

    // Don't copy-prop link operands of ExtendedArgs
    if (instr->m_opcode == Js::OpCode::ExtendArg_A && opnd == instr->GetSrc2())
    {
        return opnd;
    }

    // Don't copy-prop operand of SIMD instr with ExtendedArg operands. Each instr should have its exclusive EA sequence.
    if (
        Js::IsSimd128Opcode(instr->m_opcode) &&
        instr->GetSrc1() != nullptr &&
        instr->GetSrc1()->IsRegOpnd() &&
        instr->GetSrc2() == nullptr
    )
    {
        StackSym *sym = instr->GetSrc1()->GetStackSym();
        if (sym && sym->IsSingleDef() && sym->GetInstrDef()->m_opcode == Js::OpCode::ExtendArg_A)
        {
            return opnd;
        }
    }

    ValueInfo *valueInfo = val->GetValueInfo();

    if (this->func->HasFinally())
    {
        // "s0 = undefined" is added to functions with early exits inside try-finally; it can get
        // copy-propped and cause incorrect results.
        if (instr->m_opcode == Js::OpCode::ArgOut_A_Inline && valueInfo->GetSymStore() &&
            valueInfo->GetSymStore()->m_id == 0)
        {
            // We don't want to copy-prop s0 (return symbol) into inlinee code
            return opnd;
        }
    }

    // Constant prop?
    int32 intConstantValue;
    int64 int64ConstantValue;
    if (valueInfo->TryGetIntConstantValue(&intConstantValue))
    {
        if (PHASE_OFF(Js::ConstPropPhase, this->func))
        {
            return opnd;
        }

        if ((
                instr->m_opcode == Js::OpCode::StElemI_A ||
                instr->m_opcode == Js::OpCode::StElemI_A_Strict ||
                instr->m_opcode == Js::OpCode::StElemC
            ) && instr->GetSrc1() == opnd)
        {
            // Disabling prop to src of native array store, because we were losing the chance to type specialize.
            // Is it possible to type specialize this src if we allow constants, etc., to be prop'd here?
            if (instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyNativeArray())
            {
                return opnd;
            }
        }

        if (opnd != instr->GetSrc1() && opnd != instr->GetSrc2())
        {
            if (PHASE_OFF(Js::IndirCopyPropPhase, instr->m_func))
            {
                return opnd;
            }

            // Const-prop an indir opnd's constant index into its offset
            IR::Opnd *srcs[] = { instr->GetSrc1(), instr->GetSrc2(), instr->GetDst() };
            for (int i = 0; i < sizeof(srcs) / sizeof(srcs[0]); ++i)
            {
                const auto src = srcs[i];
                if (!src || !src->IsIndirOpnd())
                {
                    continue;
                }

                const auto indir = src->AsIndirOpnd();
                if ((int64)indir->GetOffset() + intConstantValue > INT32_MAX)
                {
                    continue;
                }

                if (opnd == indir->GetIndexOpnd())
                {
                    Assert(indir->GetScale() == 0);
                    GOPT_TRACE_OPND(opnd, _u("Constant prop indir index into offset (value: %d)\n"), intConstantValue);
                    this->CaptureByteCodeSymUses(instr);
                    indir->SetOffset(indir->GetOffset() + intConstantValue);
                    indir->SetIndexOpnd(nullptr);
                }
            }

            return opnd;
        }

        if (Js::TaggedInt::IsOverflow(intConstantValue))
        {
            return opnd;
        }

        IR::Opnd *constOpnd;
        if (opnd->IsVar())
        {
            IR::AddrOpnd *addrOpnd = IR::AddrOpnd::New(Js::TaggedInt::ToVarUnchecked((int)intConstantValue), IR::AddrOpndKindConstantVar, instr->m_func);
            GOPT_TRACE_OPND(opnd, _u("Constant prop %d (value:%d)\n"), addrOpnd->m_address, intConstantValue);
            constOpnd = addrOpnd;
        }
        else
        {
            // Note: Jit loop body generates some i32 operands...
Assert(opnd->IsInt32() || opnd->IsInt64() || opnd->IsUInt32()); IRType opndType; IntConstType constVal; if (opnd->IsUInt32()) { // avoid sign extension constVal = (uint32)intConstantValue; opndType = TyUint32; } else { constVal = intConstantValue; opndType = TyInt32; } IR::IntConstOpnd *intOpnd = IR::IntConstOpnd::New(constVal, opndType, instr->m_func); GOPT_TRACE_OPND(opnd, _u("Constant prop %d (value:%d)\n"), intOpnd->GetImmediateValue(instr->m_func), intConstantValue); constOpnd = intOpnd; } #if ENABLE_DEBUG_CONFIG_OPTIONS //Need to update DumpFieldCopyPropTestTrace for every new opcode that is added for fieldcopyprop if(Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FieldCopyPropPhase)) { instr->DumpFieldCopyPropTestTrace(); } #endif this->CaptureByteCodeSymUses(instr); opnd = instr->ReplaceSrc(opnd, constOpnd); switch (instr->m_opcode) { case Js::OpCode::LdSlot: case Js::OpCode::LdSlotArr: case Js::OpCode::LdFld: case Js::OpCode::LdFldForTypeOf: case Js::OpCode::LdRootFldForTypeOf: case Js::OpCode::LdFldForCallApplyTarget: case Js::OpCode::LdRootFld: case Js::OpCode::LdMethodFld: case Js::OpCode::LdRootMethodFld: case Js::OpCode::LdMethodFromFlags: case Js::OpCode::ScopedLdMethodFld: instr->m_opcode = Js::OpCode::Ld_A; case Js::OpCode::Ld_A: { IR::Opnd * dst = instr->GetDst(); if (dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->IsSingleDef()) { dst->AsRegOpnd()->m_sym->SetIsIntConst((int)intConstantValue); } break; } case Js::OpCode::ArgOut_A: case Js::OpCode::ArgOut_A_Inline: case Js::OpCode::ArgOut_A_FixupForStackArgs: case Js::OpCode::ArgOut_A_InlineBuiltIn: if (instr->GetDst()->IsRegOpnd()) { Assert(instr->GetDst()->AsRegOpnd()->m_sym->m_isSingleDef); instr->GetDst()->AsRegOpnd()->m_sym->AsStackSym()->SetIsIntConst((int)intConstantValue); } else { instr->GetDst()->AsSymOpnd()->m_sym->AsStackSym()->SetIsIntConst((int)intConstantValue); } break; case Js::OpCode::TypeofElem: instr->m_opcode = Js::OpCode::Typeof; break; case Js::OpCode::StSlotChkUndecl: if (instr->GetSrc2() == opnd) { // Src2 here should refer to the same location as the Dst operand, which we need to keep live // due to the implicit read for ChkUndecl. 
instr->m_opcode = Js::OpCode::StSlot; instr->FreeSrc2(); opnd = nullptr; } break; } return opnd; } else if (valueInfo->TryGetIntConstantValue(&int64ConstantValue, false)) { if (PHASE_OFF(Js::ConstPropPhase, this->func) || !PHASE_ON(Js::Int64ConstPropPhase, this->func)) { return opnd; } Assert(this->func->GetJITFunctionBody()->IsWasmFunction()); if (this->func->GetJITFunctionBody()->IsWasmFunction() && opnd->IsInt64()) { IR::Int64ConstOpnd *intOpnd = IR::Int64ConstOpnd::New(int64ConstantValue, opnd->GetType(), instr->m_func); GOPT_TRACE_OPND(opnd, _u("Constant prop %lld (value:%lld)\n"), intOpnd->GetImmediateValue(instr->m_func), int64ConstantValue); this->CaptureByteCodeSymUses(instr); opnd = instr->ReplaceSrc(opnd, intOpnd); } return opnd; } Sym *opndSym = nullptr; if (opnd->IsRegOpnd()) { IR::RegOpnd *regOpnd = opnd->AsRegOpnd(); opndSym = regOpnd->m_sym; } else if (opnd->IsSymOpnd()) { IR::SymOpnd *symOpnd = opnd->AsSymOpnd(); opndSym = symOpnd->m_sym; } if (!opndSym) { return opnd; } if (PHASE_OFF(Js::CopyPropPhase, this->func)) { this->SetSymStoreDirect(valueInfo, opndSym); return opnd; } // We should have dealt with field hoist already Assert(!instr->TransfersSrcValue() || !opndSym->IsPropertySym() || !this->IsHoistedPropertySym(opndSym->AsPropertySym())); StackSym *copySym = CurrentBlockData()->GetCopyPropSym(opndSym, val); if (copySym != nullptr) { // Copy prop. return CopyPropReplaceOpnd(instr, opnd, copySym, parentIndirOpnd); } else { if (valueInfo->GetSymStore() && instr->m_opcode == Js::OpCode::Ld_A && instr->GetDst()->IsRegOpnd() && valueInfo->GetSymStore() == instr->GetDst()->AsRegOpnd()->m_sym) { // Avoid resetting symStore after fieldHoisting: // t1 = LdFld field <- set symStore to fieldHoistSym // fieldHoistSym = Ld_A t1 <- we're looking at t1 now, but want to copy-prop fieldHoistSym forward return opnd; } this->SetSymStoreDirect(valueInfo, opndSym); } return opnd; } IR::Opnd * GlobOpt::CopyPropReplaceOpnd(IR::Instr * instr, IR::Opnd * opnd, StackSym * copySym, IR::IndirOpnd *parentIndirOpnd) { Assert( parentIndirOpnd ? opnd == parentIndirOpnd->GetBaseOpnd() || opnd == parentIndirOpnd->GetIndexOpnd() : opnd == instr->GetSrc1() || opnd == instr->GetSrc2() || opnd == instr->GetDst() && opnd->IsIndirOpnd()); Assert(CurrentBlockData()->IsLive(copySym)); IR::RegOpnd *regOpnd; StackSym *newSym = copySym; GOPT_TRACE_OPND(opnd, _u("Copy prop s%d\n"), newSym->m_id); #if ENABLE_DEBUG_CONFIG_OPTIONS //Need to update DumpFieldCopyPropTestTrace for every new opcode that is added for fieldcopyprop if(Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FieldCopyPropPhase)) { instr->DumpFieldCopyPropTestTrace(); } #endif this->CaptureByteCodeSymUses(instr); if (opnd->IsRegOpnd()) { regOpnd = opnd->AsRegOpnd(); regOpnd->m_sym = newSym; regOpnd->SetIsJITOptimizedReg(true); // The dead bit on the opnd is specific to the sym it is referencing. Since we replaced the sym, the bit is reset. regOpnd->SetIsDead(false); if(parentIndirOpnd) { return regOpnd; } } else { // If this is an object type specialized field load inside a loop, and it produces a type value which wasn't live // before, make sure the type check is left in the loop, because it may be the last type check in the loop protecting // other fields which are not hoistable and are lexically upstream in the loop. If the check is not ultimately // needed, the dead store pass will remove it. 
        if (this->currentBlock->loop != nullptr && opnd->IsSymOpnd() && opnd->AsSymOpnd()->IsPropertySymOpnd())
        {
            IR::PropertySymOpnd* propertySymOpnd = opnd->AsPropertySymOpnd();
            if (CheckIfPropOpEmitsTypeCheck(instr, propertySymOpnd))
            {
                // We only set guarded properties in the dead store pass, so they shouldn't be set here yet. If they were
                // we would need to move them from this operand to the operand which is being copy propagated.
                Assert(propertySymOpnd->GetGuardedPropOps() == nullptr);

                // We're creating a copy of this operand to be reused in the same spot in the flow, so we can copy all
                // flow sensitive fields. However, we will do only a type check here (no property access) and only for
                // the sake of downstream instructions, so the flags pertaining to this property access are irrelevant.
                IR::PropertySymOpnd* checkObjTypeOpnd = CreateOpndForTypeCheckOnly(propertySymOpnd, instr->m_func);
                IR::Instr* checkObjTypeInstr = IR::Instr::New(Js::OpCode::CheckObjType, instr->m_func);
                checkObjTypeInstr->SetSrc1(checkObjTypeOpnd);
                checkObjTypeInstr->SetByteCodeOffset(instr);
                instr->InsertBefore(checkObjTypeInstr);

                // Since we inserted this instruction before the one that is being processed in natural flow, we must process
                // it for object type spec explicitly here.
                FinishOptPropOp(checkObjTypeInstr, checkObjTypeOpnd);
                Assert(!propertySymOpnd->IsTypeChecked());
                checkObjTypeInstr = this->SetTypeCheckBailOut(checkObjTypeOpnd, checkObjTypeInstr, nullptr);
                Assert(checkObjTypeInstr->HasBailOutInfo());

                if (this->currentBlock->loop && !this->IsLoopPrePass())
                {
                    // Try hoisting this checkObjType.
                    // But since this isn't the current instr being optimized, we need to play tricks with
                    // the byteCodeUse fields...
                    TrackByteCodeUsesForInstrAddedInOptInstr(checkObjTypeInstr, [&]()
                    {
                        TryHoistInvariant(checkObjTypeInstr, this->currentBlock, nullptr, CurrentBlockData()->FindValue(copySym), nullptr, true);
                    });
                }
            }
        }

        if (opnd->IsSymOpnd() && opnd->GetIsDead())
        {
            // Take the property sym out of the live fields set
            this->EndFieldLifetime(opnd->AsSymOpnd());
        }
        regOpnd = IR::RegOpnd::New(newSym, opnd->GetType(), instr->m_func);
        regOpnd->SetIsJITOptimizedReg(true);
        instr->ReplaceSrc(opnd, regOpnd);
    }

    switch (instr->m_opcode)
    {
    case Js::OpCode::Ld_A:
        if (instr->GetDst()->IsRegOpnd() && instr->GetSrc1()->IsRegOpnd() &&
            instr->GetDst()->AsRegOpnd()->GetStackSym() == instr->GetSrc1()->AsRegOpnd()->GetStackSym())
        {
            this->InsertByteCodeUses(instr, true);
            instr->m_opcode = Js::OpCode::Nop;
        }
        break;
    case Js::OpCode::LdSlot:
    case Js::OpCode::LdSlotArr:
        if (instr->GetDst()->IsRegOpnd() && instr->GetSrc1()->IsRegOpnd() &&
            instr->GetDst()->AsRegOpnd()->GetStackSym() == instr->GetSrc1()->AsRegOpnd()->GetStackSym())
        {
            this->InsertByteCodeUses(instr, true);
            instr->m_opcode = Js::OpCode::Nop;
        }
        else
        {
            instr->m_opcode = Js::OpCode::Ld_A;
        }
        break;
    case Js::OpCode::StSlotChkUndecl:
        if (instr->GetSrc2()->IsRegOpnd())
        {
            // Src2 here should refer to the same location as the Dst operand, which we need to keep live
            // due to the implicit read for ChkUndecl.
instr->m_opcode = Js::OpCode::StSlot; instr->FreeSrc2(); return nullptr; } break; case Js::OpCode::LdFld: case Js::OpCode::LdFldForTypeOf: case Js::OpCode::LdRootFldForTypeOf: case Js::OpCode::LdFldForCallApplyTarget: case Js::OpCode::LdRootFld: case Js::OpCode::LdMethodFld: case Js::OpCode::LdRootMethodFld: case Js::OpCode::ScopedLdMethodFld: instr->m_opcode = Js::OpCode::Ld_A; break; case Js::OpCode::LdMethodFromFlags: // The bailout is checked on the loop top and we don't need to check bailout again in loop. instr->m_opcode = Js::OpCode::Ld_A; instr->ClearBailOutInfo(); break; case Js::OpCode::TypeofElem: instr->m_opcode = Js::OpCode::Typeof; break; } CurrentBlockData()->MarkTempLastUse(instr, regOpnd); return regOpnd; } ValueNumber GlobOpt::NewValueNumber() { ValueNumber valueNumber = this->currentValue++; if (valueNumber == 0) { Js::Throw::OutOfMemory(); } return valueNumber; } Value *GlobOpt::NewValue(ValueInfo *const valueInfo) { return NewValue(NewValueNumber(), valueInfo); } Value *GlobOpt::NewValue(const ValueNumber valueNumber, ValueInfo *const valueInfo) { Assert(valueInfo); return Value::New(alloc, valueNumber, valueInfo); } Value *GlobOpt::CopyValue(Value const *const value) { return CopyValue(value, NewValueNumber()); } Value *GlobOpt::CopyValue(Value const *const value, const ValueNumber valueNumber) { Assert(value); return value->Copy(alloc, valueNumber); } Value * GlobOpt::NewGenericValue(const ValueType valueType) { return NewGenericValue(valueType, static_cast<IR::Opnd *>(nullptr)); } Value * GlobOpt::NewGenericValue(const ValueType valueType, IR::Opnd *const opnd) { // Shouldn't assign a likely-int value to something that is definitely not an int Assert(!(valueType.IsLikelyInt() && opnd && opnd->IsRegOpnd() && opnd->AsRegOpnd()->m_sym->m_isNotInt)); ValueInfo *valueInfo = ValueInfo::New(this->alloc, valueType); Value *val = NewValue(valueInfo); TrackNewValueForKills(val); CurrentBlockData()->InsertNewValue(val, opnd); return val; } Value * GlobOpt::NewGenericValue(const ValueType valueType, Sym *const sym) { ValueInfo *valueInfo = ValueInfo::New(this->alloc, valueType); Value *val = NewValue(valueInfo); TrackNewValueForKills(val); CurrentBlockData()->SetValue(val, sym); return val; } Value * GlobOpt::GetIntConstantValue(const int32 intConst, IR::Instr * instr, IR::Opnd *const opnd) { Value *value = nullptr; Value *const cachedValue = this->intConstantToValueMap->Lookup(intConst, nullptr); if(cachedValue) { // The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since // values are cloned for each block, we can't use the same value object. We also can't have two values with the same // number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast // way to determine if a value with the same value number exists for this block. So the best we can do with a global // cache is to check the sym-store's value in the current block to see if it has a value with the same number. // Otherwise, we have to create a new value with a new value number. 
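        // Illustrative sketch (exposition only) of the validation below: before reusing the cached Value we need
        //
        //     symStore = cachedValue->GetValueInfo()->GetSymStore();    // 1. the cache names a sym,
        //     CurrentBlockData()->IsLive(symStore)                      // 2. that sym is live in this block, and
        //     FindValue(symStore) has the same value number & constant  // 3. this block agrees with the cache.
        //
        // If any of these fail, a fresh Value with a new value number is created for this block instead.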
Sym *const symStore = cachedValue->GetValueInfo()->GetSymStore(); if (symStore && CurrentBlockData()->IsLive(symStore)) { Value *const symStoreValue = CurrentBlockData()->FindValue(symStore); int32 symStoreIntConstantValue; if (symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber() && symStoreValue->GetValueInfo()->TryGetIntConstantValue(&symStoreIntConstantValue) && symStoreIntConstantValue == intConst) { value = symStoreValue; } } } if (!value) { value = NewIntConstantValue(intConst, instr, !Js::TaggedInt::IsOverflow(intConst)); } return CurrentBlockData()->InsertNewValue(value, opnd); } Value * GlobOpt::GetIntConstantValue(const int64 intConst, IR::Instr * instr, IR::Opnd *const opnd) { Assert(instr->m_func->GetJITFunctionBody()->IsWasmFunction()); Value *value = nullptr; Value *const cachedValue = this->int64ConstantToValueMap->Lookup(intConst, nullptr); if (cachedValue) { // The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since // values are cloned for each block, we can't use the same value object. We also can't have two values with the same // number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast // way to determine if a value with the same value number exists for this block. So the best we can do with a global // cache is to check the sym-store's value in the current block to see if it has a value with the same number. // Otherwise, we have to create a new value with a new value number. Sym *const symStore = cachedValue->GetValueInfo()->GetSymStore(); if (symStore && this->currentBlock->globOptData.IsLive(symStore)) { Value *const symStoreValue = this->currentBlock->globOptData.FindValue(symStore); int64 symStoreIntConstantValue; if (symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber() && symStoreValue->GetValueInfo()->TryGetInt64ConstantValue(&symStoreIntConstantValue, false) && symStoreIntConstantValue == intConst) { value = symStoreValue; } } } if (!value) { value = NewInt64ConstantValue(intConst, instr); } return this->currentBlock->globOptData.InsertNewValue(value, opnd); } Value * GlobOpt::NewInt64ConstantValue(const int64 intConst, IR::Instr* instr) { Value * value = NewValue(Int64ConstantValueInfo::New(this->alloc, intConst)); this->int64ConstantToValueMap->Item(intConst, value); if (!value->GetValueInfo()->GetSymStore() && (instr->m_opcode == Js::OpCode::LdC_A_I4 || instr->m_opcode == Js::OpCode::Ld_I4)) { StackSym * sym = instr->GetDst()->GetStackSym(); Assert(sym && !sym->IsTypeSpec()); this->currentBlock->globOptData.SetValue(value, sym); this->currentBlock->globOptData.liveVarSyms->Set(sym->m_id); } return value; } Value * GlobOpt::NewIntConstantValue(const int32 intConst, IR::Instr * instr, bool isTaggable) { Value * value = NewValue(IntConstantValueInfo::New(this->alloc, intConst)); this->intConstantToValueMap->Item(intConst, value); if (isTaggable && !PHASE_OFF(Js::HoistConstIntPhase, this->func)) { // When creating a new int constant value, make sure it gets a symstore. If the int const doesn't have a symstore, // any downstream instruction using the same int will have to create a new value (object) for the int. // This gets in the way of CSE. 
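        // Illustrative example (exposition only): with a sym store in place, source like
        //
        //     a = x + 5;
        //     b = y + 5;
        //
        // lets the second use of 5 find the Value recorded for the first one (via the sym store's value in the
        // current block), so both uses share a value number and downstream CSE can treat them as equal.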
value = HoistConstantLoadAndPropagateValueBackward(Js::TaggedInt::ToVarUnchecked(intConst), instr, value); if (!value->GetValueInfo()->GetSymStore() && (instr->m_opcode == Js::OpCode::LdC_A_I4 || instr->m_opcode == Js::OpCode::Ld_I4)) { StackSym * sym = instr->GetDst()->GetStackSym(); Assert(sym); if (sym->IsTypeSpec()) { Assert(sym->IsInt32()); StackSym * varSym = sym->GetVarEquivSym(instr->m_func); CurrentBlockData()->SetValue(value, varSym); CurrentBlockData()->liveInt32Syms->Set(varSym->m_id); } else { CurrentBlockData()->SetValue(value, sym); CurrentBlockData()->liveVarSyms->Set(sym->m_id); } } } return value; } ValueInfo * GlobOpt::NewIntRangeValueInfo(const int32 min, const int32 max, const bool wasNegativeZeroPreventedByBailout) { return ValueInfo::NewIntRangeValueInfo(this->alloc, min, max, wasNegativeZeroPreventedByBailout); } ValueInfo *GlobOpt::NewIntRangeValueInfo( const ValueInfo *const originalValueInfo, const int32 min, const int32 max) const { Assert(originalValueInfo); ValueInfo *valueInfo; if(min == max) { // Since int constant values are const-propped, negative zero tracking does not track them, and so it's okay to ignore // 'wasNegativeZeroPreventedByBailout' valueInfo = IntConstantValueInfo::New(alloc, min); } else { valueInfo = IntRangeValueInfo::New( alloc, min, max, min <= 0 && max >= 0 && originalValueInfo->WasNegativeZeroPreventedByBailout()); } valueInfo->SetSymStore(originalValueInfo->GetSymStore()); return valueInfo; } Value * GlobOpt::NewIntRangeValue( const int32 min, const int32 max, const bool wasNegativeZeroPreventedByBailout, IR::Opnd *const opnd) { ValueInfo *valueInfo = this->NewIntRangeValueInfo(min, max, wasNegativeZeroPreventedByBailout); Value *val = NewValue(valueInfo); if (opnd) { GOPT_TRACE_OPND(opnd, _u("Range %d (0x%X) to %d (0x%X)\n"), min, min, max, max); } CurrentBlockData()->InsertNewValue(val, opnd); return val; } IntBoundedValueInfo *GlobOpt::NewIntBoundedValueInfo( const ValueInfo *const originalValueInfo, const IntBounds *const bounds) const { Assert(originalValueInfo); bounds->Verify(); IntBoundedValueInfo *const valueInfo = IntBoundedValueInfo::New( originalValueInfo->Type(), bounds, ( bounds->ConstantLowerBound() <= 0 && bounds->ConstantUpperBound() >= 0 && originalValueInfo->WasNegativeZeroPreventedByBailout() ), alloc); valueInfo->SetSymStore(originalValueInfo->GetSymStore()); return valueInfo; } Value *GlobOpt::NewIntBoundedValue( const ValueType valueType, const IntBounds *const bounds, const bool wasNegativeZeroPreventedByBailout, IR::Opnd *const opnd) { Value *const value = NewValue(IntBoundedValueInfo::New(valueType, bounds, wasNegativeZeroPreventedByBailout, alloc)); CurrentBlockData()->InsertNewValue(value, opnd); return value; } Value * GlobOpt::NewFloatConstantValue(const FloatConstType floatValue, IR::Opnd *const opnd) { FloatConstantValueInfo *valueInfo = FloatConstantValueInfo::New(this->alloc, floatValue); Value *val = NewValue(valueInfo); CurrentBlockData()->InsertNewValue(val, opnd); return val; } Value * GlobOpt::GetVarConstantValue(IR::AddrOpnd *addrOpnd) { bool isVar = addrOpnd->IsVar(); bool isString = isVar && addrOpnd->m_localAddress && JITJavascriptString::Is(addrOpnd->m_localAddress); Value *val = nullptr; Value *cachedValue = nullptr; if(this->addrConstantToValueMap->TryGetValue(addrOpnd->m_address, &cachedValue)) { // The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since // values are cloned for each block, we can't use the same value object. 
        // We also can't have two values with the same number in one block, so we can't simply copy the cached
        // value either. And finally, there is no deterministic and fast way to determine if a value with the same
        // value number exists for this block. So the best we can do with a global cache is to check the sym-store's
        // value in the current block to see if it has a value with the same number. Otherwise, we have to create a
        // new value with a new value number.
        Sym *symStore = cachedValue->GetValueInfo()->GetSymStore();
        if(symStore && CurrentBlockData()->IsLive(symStore))
        {
            Value *const symStoreValue = CurrentBlockData()->FindValue(symStore);
            if(symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber())
            {
                ValueInfo *const symStoreValueInfo = symStoreValue->GetValueInfo();
                if(symStoreValueInfo->IsVarConstant() && symStoreValueInfo->AsVarConstant()->VarValue() == addrOpnd->m_address)
                {
                    val = symStoreValue;
                }
            }
        }
    }
    else if (isString)
    {
        JITJavascriptString* jsString = JITJavascriptString::FromVar(addrOpnd->m_localAddress);
        Js::InternalString internalString(jsString->GetString(), jsString->GetLength());
        if (this->stringConstantToValueMap->TryGetValue(internalString, &cachedValue))
        {
            Sym *symStore = cachedValue->GetValueInfo()->GetSymStore();
            if (symStore && CurrentBlockData()->IsLive(symStore))
            {
                Value *const symStoreValue = CurrentBlockData()->FindValue(symStore);
                if (symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber())
                {
                    ValueInfo *const symStoreValueInfo = symStoreValue->GetValueInfo();
                    if (symStoreValueInfo->IsVarConstant())
                    {
                        JITJavascriptString * cachedString = JITJavascriptString::FromVar(symStoreValue->GetValueInfo()->AsVarConstant()->VarValue(true));
                        Js::InternalString cachedInternalString(cachedString->GetString(), cachedString->GetLength());
                        if (Js::InternalStringComparer::Equals(internalString, cachedInternalString))
                        {
                            val = symStoreValue;
                        }
                    }
                }
            }
        }
    }

    if(!val)
    {
        val = NewVarConstantValue(addrOpnd, isString);
    }

    addrOpnd->SetValueType(val->GetValueInfo()->Type());
    return val;
}

Value *
GlobOpt::NewVarConstantValue(IR::AddrOpnd *addrOpnd, bool isString)
{
    VarConstantValueInfo *valueInfo = VarConstantValueInfo::New(this->alloc, addrOpnd->m_address, addrOpnd->GetValueType(), false, addrOpnd->m_localAddress);
    Value * value = NewValue(valueInfo);
    this->addrConstantToValueMap->Item(addrOpnd->m_address, value);
    if (isString)
    {
        JITJavascriptString* jsString = JITJavascriptString::FromVar(addrOpnd->m_localAddress);
        Js::InternalString internalString(jsString->GetString(), jsString->GetLength());
        this->stringConstantToValueMap->Item(internalString, value);
    }
    return value;
}

Value *
GlobOpt::HoistConstantLoadAndPropagateValueBackward(Js::Var varConst, IR::Instr * origInstr, Value * value)
{
    if (this->IsLoopPrePass() ||
        ((this->currentBlock == this->func->m_fg->blockList) && origInstr->TransfersSrcValue()))
    {
        return value;
    }

    // Only hoisting taggable int const loads for now. Could be extended to other constants (floats, strings,
    // addr opnds) if we see some benefit.
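    // Illustrative sketch (exposition only, register name made up): "taggable" means the constant fits in a
    // tagged int, so the hoisted load needs no heap allocation, e.g.
    //
    //     s10 = Ld_I4 42    // inserted at the top of the function by the code below
    //
    // A float constant such as 0.5 would need a JavascriptNumber var and is not hoisted by this path today.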
Assert(Js::TaggedInt::Is(varConst)); // Insert a load of the constant at the top of the function StackSym * dstSym = StackSym::New(this->func); IR::RegOpnd * constRegOpnd = IR::RegOpnd::New(dstSym, TyVar, this->func); IR::Instr * loadInstr = IR::Instr::NewConstantLoad(constRegOpnd, (intptr_t)varConst, ValueType::GetInt(true), this->func); this->func->m_fg->blockList->GetFirstInstr()->InsertAfter(loadInstr); // Type-spec the load (Support for floats needs to be added when we start hoisting float constants). bool typeSpecedToInt = false; if (Js::TaggedInt::Is(varConst) && !IsTypeSpecPhaseOff(this->func)) { typeSpecedToInt = true; loadInstr->m_opcode = Js::OpCode::Ld_I4; ToInt32Dst(loadInstr, loadInstr->GetDst()->AsRegOpnd(), this->currentBlock); loadInstr->GetDst()->GetStackSym()->SetIsConst(); } else { CurrentBlockData()->liveVarSyms->Set(dstSym->m_id); } // Add the value (object) to the current block's symToValueMap and propagate the value backward to all relevant blocks so it is available on merges. value = CurrentBlockData()->InsertNewValue(value, constRegOpnd); BVSparse<JitArenaAllocator>* GlobOptBlockData::*bv; bv = typeSpecedToInt ? &GlobOptBlockData::liveInt32Syms : &GlobOptBlockData::liveVarSyms; // Will need to be expanded when we start hoisting float constants. if (this->currentBlock != this->func->m_fg->blockList) { for (InvariantBlockBackwardIterator it(this, this->currentBlock, this->func->m_fg->blockList, nullptr); it.IsValid(); it.MoveNext()) { BasicBlock * block = it.Block(); (block->globOptData.*bv)->Set(dstSym->m_id); Assert(!block->globOptData.FindValue(dstSym)); Value *const valueCopy = CopyValue(value, value->GetValueNumber()); block->globOptData.SetValue(valueCopy, dstSym); } } return value; } Value * GlobOpt::NewFixedFunctionValue(Js::JavascriptFunction *function, IR::AddrOpnd *addrOpnd) { Assert(function != nullptr); Value *val = nullptr; Value *cachedValue = nullptr; if(this->addrConstantToValueMap->TryGetValue(addrOpnd->m_address, &cachedValue)) { // The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since // values are cloned for each block, we can't use the same value object. We also can't have two values with the same // number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast // way to determine if a value with the same value number exists for this block. So the best we can do with a global // cache is to check the sym-store's value in the current block to see if it has a value with the same number. // Otherwise, we have to create a new value with a new value number. 
Sym *symStore = cachedValue->GetValueInfo()->GetSymStore(); if(symStore && CurrentBlockData()->IsLive(symStore)) { Value *const symStoreValue = CurrentBlockData()->FindValue(symStore); if(symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber()) { ValueInfo *const symStoreValueInfo = symStoreValue->GetValueInfo(); if(symStoreValueInfo->IsVarConstant()) { VarConstantValueInfo *const symStoreVarConstantValueInfo = symStoreValueInfo->AsVarConstant(); if(symStoreVarConstantValueInfo->VarValue() == addrOpnd->m_address && symStoreVarConstantValueInfo->IsFunction()) { val = symStoreValue; } } } } } if(!val) { VarConstantValueInfo *valueInfo = VarConstantValueInfo::New(this->alloc, function, addrOpnd->GetValueType(), true, addrOpnd->m_localAddress); val = NewValue(valueInfo); this->addrConstantToValueMap->AddNew(addrOpnd->m_address, val); } CurrentBlockData()->InsertNewValue(val, addrOpnd); return val; } StackSym *GlobOpt::GetTaggedIntConstantStackSym(const int32 intConstantValue) const { Assert(!Js::TaggedInt::IsOverflow(intConstantValue)); return intConstantToStackSymMap->Lookup(intConstantValue, nullptr); } StackSym *GlobOpt::GetOrCreateTaggedIntConstantStackSym(const int32 intConstantValue) const { StackSym *stackSym = GetTaggedIntConstantStackSym(intConstantValue); if(stackSym) { return stackSym; } stackSym = StackSym::New(TyVar,func); intConstantToStackSymMap->Add(intConstantValue, stackSym); return stackSym; } Sym * GlobOpt::SetSymStore(ValueInfo *valueInfo, Sym *sym) { if (sym->IsStackSym()) { StackSym *stackSym = sym->AsStackSym(); if (stackSym->IsTypeSpec()) { stackSym = stackSym->GetVarEquivSym(this->func); sym = stackSym; } } if (valueInfo->GetSymStore() == nullptr || valueInfo->GetSymStore()->IsPropertySym()) { SetSymStoreDirect(valueInfo, sym); } return sym; } void GlobOpt::SetSymStoreDirect(ValueInfo * valueInfo, Sym * sym) { Sym * prevSymStore = valueInfo->GetSymStore(); CurrentBlockData()->SetChangedSym(prevSymStore); valueInfo->SetSymStore(sym); } // Figure out the Value of this dst. Value * GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val) { IR::Instr *&instr = *pInstr; IR::Opnd *dst = instr->GetDst(); Value *dstVal = nullptr; Sym *sym; if (instr->CallsSetter()) { return nullptr; } if (dst == nullptr) { return nullptr; } switch (dst->GetKind()) { case IR::OpndKindSym: sym = dst->AsSymOpnd()->m_sym; break; case IR::OpndKindReg: sym = dst->AsRegOpnd()->m_sym; if (OpCodeAttr::TempNumberProducing(instr->m_opcode)) { CurrentBlockData()->isTempSrc->Set(sym->m_id); } else if (OpCodeAttr::TempNumberTransfer(instr->m_opcode)) { IR::Opnd *src1 = instr->GetSrc1(); if (src1->IsRegOpnd() && CurrentBlockData()->isTempSrc->Test(src1->AsRegOpnd()->m_sym->m_id)) { StackSym *src1Sym = src1->AsRegOpnd()->m_sym; // isTempSrc is used for marking isTempLastUse, which is used to generate AddLeftDead() // calls instead of the normal Add helpers. It tells the runtime that concats can use string // builders. // We need to be careful in the case where src1 points to a string builder and is getting aliased. // Clear the bit on src and dst of the transfer instr in this case, unless we can prove src1 // isn't pointing at a string builder, like if it is single def and the def instr is not an Add, // but TempProducing. 
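            // Illustrative example (exposition only): for a transfer in a concat chain like
            //
            //     s = a + b;    // Add_A: the temp may back a string builder
            //     t = s;        // transfer: may t keep the temp-src bit?
            //
            // If s is single-def and its defining instr is TempNumberProducing but is not an Add, s cannot be
            // backed by a string builder, so the bit transfers; otherwise both bits are cleared (see below).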
if (src1Sym->IsSingleDef() && src1Sym->m_instrDef->m_opcode != Js::OpCode::Add_A && OpCodeAttr::TempNumberProducing(src1Sym->m_instrDef->m_opcode)) { CurrentBlockData()->isTempSrc->Set(sym->m_id); } else { CurrentBlockData()->isTempSrc->Clear(src1->AsRegOpnd()->m_sym->m_id); CurrentBlockData()->isTempSrc->Clear(sym->m_id); } } else { CurrentBlockData()->isTempSrc->Clear(sym->m_id); } } else { CurrentBlockData()->isTempSrc->Clear(sym->m_id); } break; case IR::OpndKindIndir: return nullptr; default: return nullptr; } int32 min1, max1, min2, max2, newMin, newMax; ValueInfo *src1ValueInfo = (src1Val ? src1Val->GetValueInfo() : nullptr); ValueInfo *src2ValueInfo = (src2Val ? src2Val->GetValueInfo() : nullptr); switch (instr->m_opcode) { case Js::OpCode::Conv_PrimStr: AssertMsg(instr->GetDst()->GetValueType().IsString(), "Creator of this instruction should have set the type"); if (this->IsLoopPrePass() || src1ValueInfo == nullptr || !src1ValueInfo->IsPrimitive()) { break; } instr->m_opcode = Js::OpCode::Conv_Str; // fall-through case Js::OpCode::Conv_Str: // This opcode is commented out since we don't track regex information in GlobOpt now. //case Js::OpCode::Coerce_Regex: case Js::OpCode::Coerce_Str: AssertMsg(instr->GetDst()->GetValueType().IsString(), "Creator of this instruction should have set the type"); // fall-through case Js::OpCode::Coerce_StrOrRegex: // We don't set the ValueType of src1 for Coerce_StrOrRegex, hence skip the ASSERT if (this->IsLoopPrePass() || src1ValueInfo == nullptr || !src1ValueInfo->IsString()) { break; } instr->m_opcode = Js::OpCode::Ld_A; // fall-through case Js::OpCode::BytecodeArgOutCapture: case Js::OpCode::InitConst: case Js::OpCode::LdAsmJsFunc: case Js::OpCode::Ld_A: case Js::OpCode::Ld_I4: // Propagate sym attributes across the reg copy. if (!this->IsLoopPrePass() && instr->GetSrc1()->IsRegOpnd()) { if (dst->AsRegOpnd()->m_sym->IsSingleDef()) { dst->AsRegOpnd()->m_sym->CopySymAttrs(instr->GetSrc1()->AsRegOpnd()->m_sym); } } if (instr->IsProfiledInstr()) { const ValueType profiledValueType(instr->AsProfiledInstr()->u.FldInfo().valueType); if(!( profiledValueType.IsLikelyInt() && ( (dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt) || (instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt) ) )) { if(!src1ValueInfo) { dstVal = this->NewGenericValue(profiledValueType, dst); } else if(src1ValueInfo->IsUninitialized()) { if(IsLoopPrePass()) { dstVal = this->NewGenericValue(profiledValueType, dst); } else { // Assuming the profile data gives more precise value types based on the path it took at runtime, we // can improve the original value type. src1ValueInfo->Type() = profiledValueType; instr->GetSrc1()->SetValueType(profiledValueType); } } } } if (dstVal == nullptr) { // Ld_A is just transferring the value dstVal = this->ValueNumberTransferDst(instr, src1Val); } break; case Js::OpCode::ExtendArg_A: { // SIMD_JS // We avoid transforming EAs to Lds to keep the IR shape consistent and avoid CSEing of EAs. // CSEOptimize only assigns a Value to the EA dst, and doesn't turn it to a Ld. If this happened, we shouldn't assign a new Value here. 
if (DoCSE()) { IR::Opnd * currDst = instr->GetDst(); Value * currDstVal = CurrentBlockData()->FindValue(currDst->GetStackSym()); if (currDstVal != nullptr) { return currDstVal; } } break; } case Js::OpCode::CheckFixedFld: AssertMsg(false, "CheckFixedFld doesn't have a dst, so we should never get here"); break; case Js::OpCode::LdSlot: case Js::OpCode::LdSlotArr: case Js::OpCode::LdFld: case Js::OpCode::LdFldForTypeOf: case Js::OpCode::LdFldForCallApplyTarget: // Do not transfer value type on ldFldForTypeOf to prevent copy-prop to LdRootFld in case the field doesn't exist since LdRootFldForTypeOf does not throw //case Js::OpCode::LdRootFldForTypeOf: case Js::OpCode::LdRootFld: case Js::OpCode::LdMethodFld: case Js::OpCode::LdRootMethodFld: case Js::OpCode::ScopedLdMethodFld: case Js::OpCode::LdMethodFromFlags: if (instr->IsProfiledInstr()) { ValueType profiledValueType(instr->AsProfiledInstr()->u.FldInfo().valueType); if(!(profiledValueType.IsLikelyInt() && dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt)) { if(!src1ValueInfo) { dstVal = this->NewGenericValue(profiledValueType, dst); } else if(src1ValueInfo->IsUninitialized()) { if(IsLoopPrePass() && (!dst->IsRegOpnd() || !dst->AsRegOpnd()->m_sym->IsSingleDef() || DoFieldHoisting())) { dstVal = this->NewGenericValue(profiledValueType, dst); } else { // Assuming the profile data gives more precise value types based on the path it took at runtime, we // can improve the original value type. src1ValueInfo->Type() = profiledValueType; instr->GetSrc1()->SetValueType(profiledValueType); } } } } if (dstVal == nullptr) { dstVal = this->ValueNumberTransferDst(instr, src1Val); } if(!this->IsLoopPrePass()) { // We cannot transfer value if the field hasn't been copy prop'd because we don't generate // an implicit call bailout between those values if we don't have "live fields" unless, we are hoisting the field. PropertySym *propertySym = instr->GetSrc1()->AsSymOpnd()->m_sym->AsPropertySym(); StackSym * fieldHoistSym; Loop * loop = this->FindFieldHoistStackSym(this->currentBlock->loop, propertySym->m_id, &fieldHoistSym, instr); ValueInfo *dstValueInfo = (dstVal ? dstVal->GetValueInfo() : nullptr); // Update symStore for field hoisting if (loop != nullptr && (dstValueInfo != nullptr)) { this->SetSymStoreDirect(dstValueInfo, fieldHoistSym); } // Update symStore if it isn't a stackSym if (dstVal && (!dstValueInfo->GetSymStore() || !dstValueInfo->GetSymStore()->IsStackSym())) { Assert(dst->IsRegOpnd()); this->SetSymStoreDirect(dstValueInfo, dst->AsRegOpnd()->m_sym); } if (src1Val != dstVal) { CurrentBlockData()->SetValue(dstVal, instr->GetSrc1()); } } break; case Js::OpCode::LdC_A_R8: case Js::OpCode::LdC_A_I4: case Js::OpCode::ArgIn_A: dstVal = src1Val; break; case Js::OpCode::LdStr: if (src1Val == nullptr) { src1Val = NewGenericValue(ValueType::String, dst); } dstVal = src1Val; break; // LdElemUndef only assign undef if the field doesn't exist. // So we don't actually know what the value is, so we can't really copy prop it. //case Js::OpCode::LdElemUndef: case Js::OpCode::StSlot: case Js::OpCode::StSlotChkUndecl: case Js::OpCode::StFld: case Js::OpCode::StRootFld: case Js::OpCode::StFldStrict: case Js::OpCode::StRootFldStrict: case Js::OpCode::InitFld: case Js::OpCode::InitComputedProperty: if (DoFieldCopyProp()) { if (src1Val == nullptr) { // src1 may have no value if it's not a valid var, e.g., NULL for let/const initialization. // Consider creating generic values for such things. 
return nullptr; } AssertMsg(!src2Val, "Bad src Values..."); Assert(sym->IsPropertySym()); SymID symId = sym->m_id; Assert(instr->m_opcode == Js::OpCode::StSlot || instr->m_opcode == Js::OpCode::StSlotChkUndecl || !CurrentBlockData()->liveFields->Test(symId)); if (IsHoistablePropertySym(symId)) { // We have changed the value of a hoistable field, load afterwards shouldn't get hoisted, // but we will still copy prop the pre-assign sym to it if we have a live value. Assert((instr->m_opcode == Js::OpCode::StSlot || instr->m_opcode == Js::OpCode::StSlotChkUndecl) && CurrentBlockData()->liveFields->Test(symId)); CurrentBlockData()->hoistableFields->Clear(symId); } CurrentBlockData()->liveFields->Set(symId); if (!this->IsLoopPrePass() && dst->GetIsDead()) { // Take the property sym out of the live fields set (with special handling for loops). this->EndFieldLifetime(dst->AsSymOpnd()); } dstVal = this->ValueNumberTransferDst(instr, src1Val); } else { return nullptr; } break; case Js::OpCode::Conv_Num: if(src1ValueInfo->IsNumber()) { dstVal = ValueNumberTransferDst(instr, src1Val); } else { return NewGenericValue(src1ValueInfo->Type().ToDefiniteAnyNumber(), dst); } break; case Js::OpCode::Not_A: { if (!src1Val || !src1ValueInfo->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec())) { min1 = INT32_MIN; max1 = INT32_MAX; } this->PropagateIntRangeForNot(min1, max1, &newMin, &newMax); return CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, src2Val); } case Js::OpCode::Xor_A: case Js::OpCode::Or_A: case Js::OpCode::And_A: case Js::OpCode::Shl_A: case Js::OpCode::Shr_A: case Js::OpCode::ShrU_A: { if (!src1Val || !src1ValueInfo->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec())) { min1 = INT32_MIN; max1 = INT32_MAX; } if (!src2Val || !src2ValueInfo->GetIntValMinMax(&min2, &max2, this->DoAggressiveIntTypeSpec())) { min2 = INT32_MIN; max2 = INT32_MAX; } if (instr->m_opcode == Js::OpCode::ShrU_A && min1 < 0 && IntConstantBounds(min2, max2).And_0x1f().Contains(0)) { // Src1 may be too large to represent as a signed int32, and src2 may be zero. // Since the result can therefore be too large to represent as a signed int32, // include Number in the value type. return CreateDstUntransferredValue( ValueType::AnyNumber.SetCanBeTaggedValue(true), instr, src1Val, src2Val); } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); return CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, src2Val); } case Js::OpCode::Incr_A: case Js::OpCode::Decr_A: { ValueType valueType; if(src1Val) { valueType = src1Val->GetValueInfo()->Type().ToDefiniteAnyNumber(); } else { valueType = ValueType::Number; } return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val); } case Js::OpCode::Add_A: { ValueType valueType; if (src1Val && src1ValueInfo->IsLikelyNumber() && src2Val && src2ValueInfo->IsLikelyNumber()) { if(src1ValueInfo->IsLikelyInt() && src2ValueInfo->IsLikelyInt()) { // When doing aggressiveIntType, just assume the result is likely going to be int // if both input is int. const bool isLikelyTagged = src1ValueInfo->IsLikelyTaggedInt() && src2ValueInfo->IsLikelyTaggedInt(); if(src1ValueInfo->IsNumber() && src2ValueInfo->IsNumber()) { // If both of them are numbers then we can definitely say that the result is a number. valueType = ValueType::GetNumberAndLikelyInt(isLikelyTagged); } else { // This is only likely going to be int but can be a string as well. 
                valueType = ValueType::GetInt(isLikelyTagged).ToLikely();
            }
        }
        else
        {
            // We can only be certain of anything if both of them are numbers.
            // Otherwise, the result could be a string.
            if (src1ValueInfo->IsNumber() && src2ValueInfo->IsNumber())
            {
                if (src1ValueInfo->IsFloat() || src2ValueInfo->IsFloat())
                {
                    // If one of them is a float, the result probably is a float instead of just an int,
                    // but should always be a number.
                    valueType = ValueType::Float;
                }
                else
                {
                    // Could be int, could be number
                    valueType = ValueType::Number;
                }
            }
            else if (src1ValueInfo->IsLikelyFloat() || src2ValueInfo->IsLikelyFloat())
            {
                // Result is likely a float (but can be anything)
                valueType = ValueType::Float.ToLikely();
            }
            else
            {
                // Otherwise it is likely an int or float (but can be anything)
                valueType = ValueType::Number.ToLikely();
            }
        }
    }
    else if((src1Val && src1ValueInfo->IsString()) || (src2Val && src2ValueInfo->IsString()))
    {
        // String + anything should always result in a string
        valueType = ValueType::String;
    }
    else if((src1Val && src1ValueInfo->IsNotString() && src1ValueInfo->IsPrimitive()) &&
        (src2Val && src2ValueInfo->IsNotString() && src2ValueInfo->IsPrimitive()))
    {
        // If src1 and src2 are primitives and not strings, add should yield a number.
        valueType = ValueType::Number;
    }
    else if((src1Val && src1ValueInfo->IsLikelyString()) || (src2Val && src2ValueInfo->IsLikelyString()))
    {
        // Likely string + anything should always result in a likely string
        valueType = ValueType::String.ToLikely();
    }
    else
    {
        // Number or string. Could make the value a merge of Number and String, but Uninitialized is more useful
        // at the moment.
        Assert(valueType.IsUninitialized());
    }

    return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val);
}

case Js::OpCode::Div_A:
{
    ValueType divValueType = GetDivValueType(instr, src1Val, src2Val, false);
    if (divValueType.IsLikelyInt() || divValueType.IsFloat())
    {
        return CreateDstUntransferredValue(divValueType, instr, src1Val, src2Val);
    }
}
// fall-through

case Js::OpCode::Sub_A:
case Js::OpCode::Mul_A:
case Js::OpCode::Rem_A:
{
    ValueType valueType;
    if( src1Val && src1ValueInfo->IsLikelyInt() &&
        src2Val && src2ValueInfo->IsLikelyInt() &&
        instr->m_opcode != Js::OpCode::Div_A)
    {
        const bool isLikelyTagged =
            src1ValueInfo->IsLikelyTaggedInt() && (src2ValueInfo->IsLikelyTaggedInt() || instr->m_opcode == Js::OpCode::Rem_A);
        if(src1ValueInfo->IsNumber() && src2ValueInfo->IsNumber())
        {
            valueType = ValueType::GetNumberAndLikelyInt(isLikelyTagged);
        }
        else
        {
            valueType = ValueType::GetInt(isLikelyTagged).ToLikely();
        }
    }
    else if ((src1Val && src1ValueInfo->IsLikelyFloat()) || (src2Val && src2ValueInfo->IsLikelyFloat()))
    {
        // This should ideally be NewNumberAndLikelyFloatValue since we know the result is a number but are not
        // sure if it will be a float value. However, that Number/LikelyFloat value type doesn't exist currently
        // and all the necessary checks are done for float values (tagged int checks, etc.) so it's sufficient to
        // just create a float value here.
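        // Illustrative example (exposition only): in "x * 0.5" one source is likely float, so the result is
        // tracked as Float even though only "definitely a number" is provable; the float checks inserted by
        // type specialization (tagged int checks, etc.) keep that approximation safe.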
valueType = ValueType::Float; } else { valueType = ValueType::Number; } return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val); } case Js::OpCode::CallI: Assert(dst->IsRegOpnd()); return NewGenericValue(dst->AsRegOpnd()->GetValueType(), dst); case Js::OpCode::LdElemI_A: { dstVal = ValueNumberLdElemDst(pInstr, src1Val); const ValueType baseValueType(instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType()); if( ( baseValueType.IsLikelyNativeArray() || #ifdef _M_IX86 ( !AutoSystemInfo::Data.SSE2Available() && baseValueType.IsLikelyObject() && ( baseValueType.GetObjectType() == ObjectType::Float32Array || baseValueType.GetObjectType() == ObjectType::Float64Array ) ) #else false #endif ) && instr->GetDst()->IsVar() && instr->HasBailOutInfo()) { // The lowerer is not going to generate a fast path for this case. Remove any bailouts that require the fast // path. Note that the removed bailouts should not be necessary for correctness. IR::BailOutKind bailOutKind = instr->GetBailOutKind(); if(bailOutKind & IR::BailOutOnArrayAccessHelperCall) { bailOutKind -= IR::BailOutOnArrayAccessHelperCall; } if(bailOutKind == IR::BailOutOnImplicitCallsPreOp) { bailOutKind -= IR::BailOutOnImplicitCallsPreOp; } if(bailOutKind) { instr->SetBailOutKind(bailOutKind); } else { instr->ClearBailOutInfo(); } } return dstVal; } case Js::OpCode::LdMethodElem: // Not worth profiling this, just assume it's likely object (should be likely function but ValueType does not track // functions currently, so using ObjectType::Object instead) dstVal = NewGenericValue(ValueType::GetObject(ObjectType::Object).ToLikely(), dst); if(instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyNativeArray() && instr->HasBailOutInfo()) { // The lowerer is not going to generate a fast path for this case. Remove any bailouts that require the fast // path. Note that the removed bailouts should not be necessary for correctness. 
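        // Illustrative sketch (exposition only): BailOutKind behaves as a bit set, so the pruning below is plain
        // flag arithmetic, mirroring the LdElemI_A case above (BailOutOnImplicitCallsPreOp is stripped the same
        // way):
        //
        //     kind = instr->GetBailOutKind();
        //     if (kind & IR::BailOutOnArrayAccessHelperCall) kind -= IR::BailOutOnArrayAccessHelperCall;
        //     kind != 0 ? instr->SetBailOutKind(kind) : instr->ClearBailOutInfo();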
IR::BailOutKind bailOutKind = instr->GetBailOutKind(); if(bailOutKind & IR::BailOutOnArrayAccessHelperCall) { bailOutKind -= IR::BailOutOnArrayAccessHelperCall; } if(bailOutKind == IR::BailOutOnImplicitCallsPreOp) { bailOutKind -= IR::BailOutOnImplicitCallsPreOp; } if(bailOutKind) { instr->SetBailOutKind(bailOutKind); } else { instr->ClearBailOutInfo(); } } return dstVal; case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: dstVal = this->ValueNumberTransferDst(instr, src1Val); break; case Js::OpCode::LdLen_A: if (instr->IsProfiledInstr()) { const ValueType profiledValueType(instr->AsProfiledInstr()->u.ldElemInfo->GetElementType()); if(!(profiledValueType.IsLikelyInt() && dst->AsRegOpnd()->m_sym->m_isNotInt)) { return this->NewGenericValue(profiledValueType, dst); } } break; case Js::OpCode::BrOnEmpty: case Js::OpCode::BrOnNotEmpty: Assert(dst->IsRegOpnd()); Assert(dst->GetValueType().IsString()); return this->NewGenericValue(ValueType::String, dst); case Js::OpCode::IsInst: case Js::OpCode::LdTrue: case Js::OpCode::LdFalse: return this->NewGenericValue(ValueType::Boolean, dst); case Js::OpCode::LdUndef: return this->NewGenericValue(ValueType::Undefined, dst); case Js::OpCode::LdC_A_Null: return this->NewGenericValue(ValueType::Null, dst); case Js::OpCode::LdThis: if (!PHASE_OFF(Js::OptTagChecksPhase, this->func) && (src1ValueInfo == nullptr || src1ValueInfo->IsUninitialized())) { return this->NewGenericValue(ValueType::GetObject(ObjectType::Object), dst); } break; case Js::OpCode::Typeof: case Js::OpCode::TypeofElem: return this->NewGenericValue(ValueType::String, dst); case Js::OpCode::InitLocalClosure: Assert(instr->GetDst()); Assert(instr->GetDst()->IsRegOpnd()); IR::RegOpnd *regOpnd = instr->GetDst()->AsRegOpnd(); StackSym *opndStackSym = regOpnd->m_sym; Assert(opndStackSym != nullptr); ObjectSymInfo *objectSymInfo = opndStackSym->m_objectInfo; Assert(objectSymInfo != nullptr); for (PropertySym *localVarSlotList = objectSymInfo->m_propertySymList; localVarSlotList; localVarSlotList = localVarSlotList->m_nextInStackSymList) { this->slotSyms->Set(localVarSlotList->m_id); } break; } #ifdef ENABLE_SIMDJS // SIMD_JS if (Js::IsSimd128Opcode(instr->m_opcode) && !func->GetJITFunctionBody()->IsAsmJsMode()) { ThreadContext::SimdFuncSignature simdFuncSignature; instr->m_func->GetScriptContext()->GetThreadContext()->GetSimdFuncSignatureFromOpcode(instr->m_opcode, simdFuncSignature); return this->NewGenericValue(simdFuncSignature.returnType, dst); } #endif if (dstVal == nullptr) { return this->NewGenericValue(dst->GetValueType(), dst); } return CurrentBlockData()->SetValue(dstVal, dst); } Value * GlobOpt::ValueNumberLdElemDst(IR::Instr **pInstr, Value *srcVal) { IR::Instr *&instr = *pInstr; IR::Opnd *dst = instr->GetDst(); Value *dstVal = nullptr; int32 newMin, newMax; ValueInfo *srcValueInfo = (srcVal ? srcVal->GetValueInfo() : nullptr); ValueType profiledElementType; if (instr->IsProfiledInstr()) { profiledElementType = instr->AsProfiledInstr()->u.ldElemInfo->GetElementType(); if(!(profiledElementType.IsLikelyInt() && dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt) && srcVal && srcValueInfo->IsUninitialized()) { if(IsLoopPrePass()) { dstVal = NewGenericValue(profiledElementType, dst); } else { // Assuming the profile data gives more precise value types based on the path it took at runtime, we // can improve the original value type. 
srcValueInfo->Type() = profiledElementType; instr->GetSrc1()->SetValueType(profiledElementType); } } } IR::IndirOpnd *src = instr->GetSrc1()->AsIndirOpnd(); const ValueType baseValueType(src->GetBaseOpnd()->GetValueType()); if (instr->DoStackArgsOpt(this->func) || !( baseValueType.IsLikelyOptimizedTypedArray() || (baseValueType.IsLikelyNativeArray() && instr->IsProfiledInstr()) // Specialized native array lowering for LdElem requires that it is profiled. ) || (!this->DoTypedArrayTypeSpec() && baseValueType.IsLikelyOptimizedTypedArray()) || // Don't do type spec on native array with a history of accessing gaps, as this is a bailout (!this->DoNativeArrayTypeSpec() && baseValueType.IsLikelyNativeArray()) || !ShouldExpectConventionalArrayIndexValue(src)) { if(DoTypedArrayTypeSpec() && !IsLoopPrePass()) { GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not type specialize, because %s.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr, instr->DoStackArgsOpt(this->func) ? _u("instruction uses the arguments object") : baseValueType.IsLikelyOptimizedTypedArray() ? _u("index is negative or likely not int") : _u("of array type")); Output::Flush(); } } if(!dstVal) { if(srcVal) { dstVal = this->ValueNumberTransferDst(instr, srcVal); } else { dstVal = NewGenericValue(profiledElementType, dst); } } return dstVal; } Assert(instr->GetSrc1()->IsIndirOpnd()); IRType toType = TyVar; IR::BailOutKind bailOutKind = IR::BailOutConventionalTypedArrayAccessOnly; switch(baseValueType.GetObjectType()) { case ObjectType::Int8Array: case ObjectType::Int8VirtualArray: case ObjectType::Int8MixedArray: newMin = Int8ConstMin; newMax = Int8ConstMax; goto IntArrayCommon; case ObjectType::Uint8Array: case ObjectType::Uint8VirtualArray: case ObjectType::Uint8MixedArray: case ObjectType::Uint8ClampedArray: case ObjectType::Uint8ClampedVirtualArray: case ObjectType::Uint8ClampedMixedArray: newMin = Uint8ConstMin; newMax = Uint8ConstMax; goto IntArrayCommon; case ObjectType::Int16Array: case ObjectType::Int16VirtualArray: case ObjectType::Int16MixedArray: newMin = Int16ConstMin; newMax = Int16ConstMax; goto IntArrayCommon; case ObjectType::Uint16Array: case ObjectType::Uint16VirtualArray: case ObjectType::Uint16MixedArray: newMin = Uint16ConstMin; newMax = Uint16ConstMax; goto IntArrayCommon; case ObjectType::Int32Array: case ObjectType::Int32VirtualArray: case ObjectType::Int32MixedArray: case ObjectType::Uint32Array: // int-specialized loads from uint32 arrays will bail out on values that don't fit in an int32 case ObjectType::Uint32VirtualArray: case ObjectType::Uint32MixedArray: Int32Array: newMin = Int32ConstMin; newMax = Int32ConstMax; goto IntArrayCommon; IntArrayCommon: Assert(dst->IsRegOpnd()); // If int type spec is disabled, it is ok to load int values as they can help float type spec, and merging int32 with float64 => float64. // But if float type spec is also disabled, we'll have problems because float64 merged with var => float64... 
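        // Illustrative example (exposition only) of the merges referred to above, "v" marking a control-flow join:
        //
        //     int32   v float64 -> float64    // harmless: int loads still help float type spec
        //     float64 v var     -> float64... // the problem case when float type spec is off
        //
        // which is why the load is left unspecialized unless int or float type spec is enabled.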
if (!this->DoAggressiveIntTypeSpec() && !this->DoFloatTypeSpec()) { if (!dstVal) { if (srcVal) { dstVal = this->ValueNumberTransferDst(instr, srcVal); } else { dstVal = NewGenericValue(profiledElementType, dst); } } return dstVal; } if (!this->IsLoopPrePass()) { if (instr->HasBailOutInfo()) { const IR::BailOutKind oldBailOutKind = instr->GetBailOutKind(); Assert( ( !(oldBailOutKind & ~IR::BailOutKindBits) || (oldBailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp ) && !(oldBailOutKind & IR::BailOutKindBits & ~(IR::BailOutOnArrayAccessHelperCall | IR::BailOutMarkTempObject))); if (bailOutKind == IR::BailOutConventionalTypedArrayAccessOnly) { // BailOutConventionalTypedArrayAccessOnly also bails out if the array access is outside the head // segment bounds, and guarantees no implicit calls. Override the bailout kind so that the instruction // bails out for the right reason. instr->SetBailOutKind( bailOutKind | (oldBailOutKind & (IR::BailOutKindBits - IR::BailOutOnArrayAccessHelperCall))); } else { // BailOutConventionalNativeArrayAccessOnly by itself may generate a helper call, and may cause implicit // calls to occur, so it must be merged in to eliminate generating the helper call Assert(bailOutKind == IR::BailOutConventionalNativeArrayAccessOnly); instr->SetBailOutKind(oldBailOutKind | bailOutKind); } } else { GenerateBailAtOperation(&instr, bailOutKind); } } TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, nullptr, nullptr, bailOutKind, newMin, newMax, &dstVal); toType = TyInt32; break; case ObjectType::Float32Array: case ObjectType::Float32VirtualArray: case ObjectType::Float32MixedArray: case ObjectType::Float64Array: case ObjectType::Float64VirtualArray: case ObjectType::Float64MixedArray: Float64Array: Assert(dst->IsRegOpnd()); // If float type spec is disabled, don't load float64 values if (!this->DoFloatTypeSpec()) { if (!dstVal) { if (srcVal) { dstVal = this->ValueNumberTransferDst(instr, srcVal); } else { dstVal = NewGenericValue(profiledElementType, dst); } } return dstVal; } if (!this->IsLoopPrePass()) { if (instr->HasBailOutInfo()) { const IR::BailOutKind oldBailOutKind = instr->GetBailOutKind(); Assert( ( !(oldBailOutKind & ~IR::BailOutKindBits) || (oldBailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp ) && !(oldBailOutKind & IR::BailOutKindBits & ~(IR::BailOutOnArrayAccessHelperCall | IR::BailOutMarkTempObject))); if (bailOutKind == IR::BailOutConventionalTypedArrayAccessOnly) { // BailOutConventionalTypedArrayAccessOnly also bails out if the array access is outside the head // segment bounds, and guarantees no implicit calls. Override the bailout kind so that the instruction // bails out for the right reason. 
instr->SetBailOutKind( bailOutKind | (oldBailOutKind & (IR::BailOutKindBits - IR::BailOutOnArrayAccessHelperCall))); } else { // BailOutConventionalNativeArrayAccessOnly by itself may generate a helper call, and may cause implicit // calls to occur, so it must be merged in to eliminate generating the helper call Assert(bailOutKind == IR::BailOutConventionalNativeArrayAccessOnly); instr->SetBailOutKind(oldBailOutKind | bailOutKind); } } else { GenerateBailAtOperation(&instr, bailOutKind); } } TypeSpecializeFloatDst(instr, nullptr, nullptr, nullptr, &dstVal); toType = TyFloat64; break; default: Assert(baseValueType.IsLikelyNativeArray()); bailOutKind = IR::BailOutConventionalNativeArrayAccessOnly; if(baseValueType.HasIntElements()) { goto Int32Array; } Assert(baseValueType.HasFloatElements()); goto Float64Array; } if(!dstVal) { dstVal = NewGenericValue(profiledElementType, dst); } Assert(toType != TyVar); GOPT_TRACE_INSTR(instr, _u("Type specialized array access.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); char dstValTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; dstVal->GetValueInfo()->Type().ToString(dstValTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, type specialized to %s producing %S"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr, toType == TyInt32 ? _u("int32") : _u("float64"), dstValTypeStr); #if DBG_DUMP Output::Print(_u(" (")); dstVal->Dump(); Output::Print(_u(").\n")); #else Output::Print(_u(".\n")); #endif Output::Flush(); } return dstVal; } ValueType GlobOpt::GetPrepassValueTypeForDst( const ValueType desiredValueType, IR::Instr *const instr, Value *const src1Value, Value *const src2Value, bool *const isValueInfoPreciseRef) const { // Values with definite types can be created in the loop prepass only when it is guaranteed that the value type will be the // same on any iteration of the loop. The heuristics currently used are: // - If the source sym is not live on the back-edge, then it acquires a new value for each iteration of the loop, so // that value type can be definite // - Consider: A better solution for this is to track values that originate in this loop, which can have definite value // types. That catches more cases, should look into that in the future. // - If the source sym has a constant value that doesn't change for the duration of the function // - The operation always results in a definite value type. For instance, signed bitwise operations always result in an // int32, conv_num and ++ always result in a number, etc. // - For operations that always result in an int32, the resulting int range is precise only if the source syms pass // the above heuristics. Otherwise, the range must be expanded to the full int32 range. Assert(IsLoopPrePass()); Assert(instr); if(isValueInfoPreciseRef) { *isValueInfoPreciseRef = false; } if(!desiredValueType.IsDefinite()) { return desiredValueType; } if((instr->GetSrc1() && !IsPrepassSrcValueInfoPrecise(instr->GetSrc1(), src1Value)) || (instr->GetSrc2() && !IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Value))) { // If the desired value type is not precise, the value type of the destination is derived from the value types of the // sources. 
        // Since the value type of a source sym is not definite, the destination value type also cannot be definite.
        if(desiredValueType.IsInt() && OpCodeAttr::IsInt32(instr->m_opcode))
        {
            // The op always produces an int32, but not always a tagged int
            return ValueType::GetInt(desiredValueType.IsLikelyTaggedInt());
        }
        if(desiredValueType.IsNumber() && OpCodeAttr::ProducesNumber(instr->m_opcode))
        {
            // The op always produces a number, but not always an int
            return desiredValueType.ToDefiniteAnyNumber();
        }
        return desiredValueType.ToLikely();
    }

    if(isValueInfoPreciseRef)
    {
        // The produced value info is derived from the sources, which have precise value infos
        *isValueInfoPreciseRef = true;
    }
    return desiredValueType;
}

bool GlobOpt::IsPrepassSrcValueInfoPrecise(IR::Opnd *const src, Value *const srcValue) const
{
    Assert(IsLoopPrePass());
    Assert(src);

    if(!src->IsRegOpnd() || !srcValue)
    {
        return false;
    }

    ValueInfo *const srcValueInfo = srcValue->GetValueInfo();
    if(!srcValueInfo->IsDefinite())
    {
        return false;
    }

    StackSym *srcSym = src->AsRegOpnd()->m_sym;
    Assert(!srcSym->IsTypeSpec());
    int32 intConstantValue;
    return
        srcSym->IsFromByteCodeConstantTable() ||
        (
            srcValueInfo->TryGetIntConstantValue(&intConstantValue) &&
            !Js::TaggedInt::IsOverflow(intConstantValue) &&
            GetTaggedIntConstantStackSym(intConstantValue) == srcSym
        ) ||
        !currentBlock->loop->regAlloc.liveOnBackEdgeSyms->Test(srcSym->m_id);
}

Value *GlobOpt::CreateDstUntransferredIntValue(
    const int32 min,
    const int32 max,
    IR::Instr *const instr,
    Value *const src1Value,
    Value *const src2Value)
{
    Assert(instr);
    Assert(instr->GetDst());

    Assert(OpCodeAttr::ProducesNumber(instr->m_opcode)
        || (instr->m_opcode == Js::OpCode::Add_A
            && src1Value->GetValueInfo()->IsNumber()
            && src2Value->GetValueInfo()->IsNumber()));

    ValueType valueType(ValueType::GetInt(IntConstantBounds(min, max).IsLikelyTaggable()));
    Assert(valueType.IsInt());
    bool isValueInfoPrecise;
    if(IsLoopPrePass())
    {
        valueType = GetPrepassValueTypeForDst(valueType, instr, src1Value, src2Value, &isValueInfoPrecise);
    }
    else
    {
        isValueInfoPrecise = true;
    }

    IR::Opnd *const dst = instr->GetDst();
    if(isValueInfoPrecise)
    {
        Assert(valueType == ValueType::GetInt(IntConstantBounds(min, max).IsLikelyTaggable()));
        Assert(!(dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->IsTypeSpec()));
        return NewIntRangeValue(min, max, false, dst);
    }
    return NewGenericValue(valueType, dst);
}

Value *
GlobOpt::CreateDstUntransferredValue(
    const ValueType desiredValueType,
    IR::Instr *const instr,
    Value *const src1Value,
    Value *const src2Value)
{
    Assert(instr);
    Assert(instr->GetDst());
    Assert(!desiredValueType.IsInt()); // use CreateDstUntransferredIntValue instead

    ValueType valueType(desiredValueType);
    if(IsLoopPrePass())
    {
        valueType = GetPrepassValueTypeForDst(valueType, instr, src1Value, src2Value);
    }
    return NewGenericValue(valueType, instr->GetDst());
}

Value *
GlobOpt::ValueNumberTransferDst(IR::Instr *const instr, Value * src1Val)
{
    Value *dstVal = this->IsLoopPrePass() ? this->ValueNumberTransferDstInPrepass(instr, src1Val) : src1Val;

    // Don't copy-prop a temp over a user symbol. This is likely to extend the temp's lifetime, as the user symbol
    // is more likely to already have later references.
    // REVIEW: Enabling this does cause perf issues...
#if 0 if (dstVal != src1Val) { return dstVal; } Sym *dstSym = dst->GetStackSym(); if (dstVal && dstSym && dstSym->IsStackSym() && !dstSym->AsStackSym()->m_isBytecodeTmp) { Sym *dstValSym = dstVal->GetValueInfo()->GetSymStore(); if (dstValSym && dstValSym->AsStackSym()->m_isBytecodeTmp /* src->GetIsDead()*/) { dstVal->GetValueInfo()->SetSymStore(dstSym); } } #endif return dstVal; } bool GlobOpt::IsSafeToTransferInPrePass(IR::Opnd *src, Value *srcValue) { if (this->DoFieldHoisting()) { return false; } if (src->IsRegOpnd()) { StackSym *srcSym = src->AsRegOpnd()->m_sym; if (srcSym->IsFromByteCodeConstantTable()) { return true; } ValueInfo *srcValueInfo = srcValue->GetValueInfo(); int32 srcIntConstantValue; if (srcValueInfo->TryGetIntConstantValue(&srcIntConstantValue) && !Js::TaggedInt::IsOverflow(srcIntConstantValue) && GetTaggedIntConstantStackSym(srcIntConstantValue) == srcSym) { return true; } } return false; } Value * GlobOpt::ValueNumberTransferDstInPrepass(IR::Instr *const instr, Value *const src1Val) { Value *dstVal = nullptr; if (!src1Val) { return nullptr; } bool isValueInfoPrecise; ValueInfo *const src1ValueInfo = src1Val->GetValueInfo(); // TODO: This conflicts with new values created by the type specialization code // We should re-enable if we change that code to avoid the new values. #if 0 if (this->IsSafeToTransferInPrePass(instr->GetSrc1(), src1Val)) { return src1Val; } if (this->IsPREInstrCandidateLoad(instr->m_opcode) && instr->GetDst()) { StackSym *dstSym = instr->GetDst()->AsRegOpnd()->m_sym; for (Loop *curLoop = this->currentBlock->loop; curLoop; curLoop = curLoop->parent) { if (curLoop->fieldPRESymStore->Test(dstSym->m_id)) { return src1Val; } } } if (!this->DoFieldHoisting()) { if (instr->GetDst()->IsRegOpnd()) { StackSym *stackSym = instr->GetDst()->AsRegOpnd()->m_sym; if (stackSym->IsSingleDef() || this->IsLive(stackSym, this->prePassLoop->landingPad)) { IntConstantBounds src1IntConstantBounds; if (src1ValueInfo->TryGetIntConstantBounds(&src1IntConstantBounds) && !( src1IntConstantBounds.LowerBound() == INT32_MIN && src1IntConstantBounds.UpperBound() == INT32_MAX )) { const ValueType valueType( GetPrepassValueTypeForDst(src1ValueInfo->Type(), instr, src1Val, nullptr, &isValueInfoPrecise)); if (isValueInfoPrecise) { return src1Val; } } else { return src1Val; } } } } #endif // Src1's value could change later in the loop, so the value wouldn't be the same for each // iteration. Since we don't iterate over loops "while (!changed)", go conservative on the // first pass when transferring a value that is live on the back-edge. // In prepass we are going to copy the value but with a different value number // for aggressive int type spec. 
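    // Illustrative example (exposition only): in
    //
    //     for (...) { t = s; s = s + 1; }
    //
    // s is live on the back-edge and changes every iteration, so during the prepass t receives a copy of s's
    // value under a fresh value number; sharing the number would wrongly equate t and s across iterations.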
const ValueType valueType(GetPrepassValueTypeForDst(src1ValueInfo->Type(), instr, src1Val, nullptr, &isValueInfoPrecise)); if(isValueInfoPrecise || (valueType == src1ValueInfo->Type() && src1ValueInfo->IsGeneric())) { Assert(valueType == src1ValueInfo->Type()); dstVal = CopyValue(src1Val); TrackCopiedValueForKills(dstVal); } else { dstVal = NewGenericValue(valueType); dstVal->GetValueInfo()->SetSymStore(src1ValueInfo->GetSymStore()); } return dstVal; } void GlobOpt::PropagateIntRangeForNot(int32 minimum, int32 maximum, int32 *pNewMin, int32* pNewMax) { int32 tmp; Int32Math::Not(minimum, pNewMin); *pNewMax = *pNewMin; Int32Math::Not(maximum, &tmp); *pNewMin = min(*pNewMin, tmp); *pNewMax = max(*pNewMax, tmp); } void GlobOpt::PropagateIntRangeBinary(IR::Instr *instr, int32 min1, int32 max1, int32 min2, int32 max2, int32 *pNewMin, int32* pNewMax) { int32 min, max, tmp, tmp2; min = INT32_MIN; max = INT32_MAX; switch (instr->m_opcode) { case Js::OpCode::Xor_A: case Js::OpCode::Or_A: // Find range with highest high order bit tmp = ::max((uint32)min1, (uint32)max1); tmp2 = ::max((uint32)min2, (uint32)max2); if ((uint32)tmp > (uint32)tmp2) { max = tmp; } else { max = tmp2; } if (max < 0) { min = INT32_MIN; // REVIEW: conservative... max = INT32_MAX; } else { // Turn values like 0x1010 into 0x1111 max = 1 << Math::Log2(max); max = (uint32)(max << 1) - 1; min = 0; } break; case Js::OpCode::And_A: if (min1 == INT32_MIN && min2 == INT32_MIN) { // Shortcut break; } // Find range with lowest higher bit tmp = ::max((uint32)min1, (uint32)max1); tmp2 = ::max((uint32)min2, (uint32)max2); if ((uint32)tmp < (uint32)tmp2) { min = min1; max = max1; } else { min = min2; max = max2; } // To compute max, look if min has higher high bit if ((uint32)min > (uint32)max) { max = min; } // If max is negative, max let's assume it could be -1, so result in MAX_INT if (max < 0) { max = INT32_MAX; } // If min is positive, the resulting min is zero if (min >= 0) { min = 0; } else { min = INT32_MIN; } break; case Js::OpCode::Shl_A: { // Shift count if (min2 != max2 && ((uint32)min2 > 0x1F || (uint32)max2 > 0x1F)) { min2 = 0; max2 = 0x1F; } else { min2 &= 0x1F; max2 &= 0x1F; } int32 min1FreeTopBitCount = min1 ? (sizeof(int32) * 8) - (Math::Log2(min1) + 1) : (sizeof(int32) * 8); int32 max1FreeTopBitCount = max1 ? 
(sizeof(int32) * 8) - (Math::Log2(max1) + 1) : (sizeof(int32) * 8); if (min1FreeTopBitCount <= max2 || max1FreeTopBitCount <= max2) { // If the shift is going to touch the sign bit return the max range min = INT32_MIN; max = INT32_MAX; } else { // Compute max // Turn values like 0x1010 into 0x1111 if (min1) { min1 = 1 << Math::Log2(min1); min1 = (min1 << 1) - 1; } if (max1) { max1 = 1 << Math::Log2(max1); max1 = (uint32)(max1 << 1) - 1; } if (max1 > 0) { int32 nrTopBits = (sizeof(int32) * 8) - Math::Log2(max1); if (nrTopBits < ::min(max2, 30)) max = INT32_MAX; else max = ::max((max1 << ::min(max2, 30)) & ~0x80000000, (min1 << min2) & ~0x80000000); } else { max = (max1 << min2) & ~0x80000000; } // Compute min if (min1 < 0) { min = ::min(min1 << max2, max1 << max2); } else { min = ::min(min1 << min2, max1 << max2); } // Turn values like 0x1110 into 0x1000 if (min) { min = 1 << Math::Log2(min); } } } break; case Js::OpCode::Shr_A: // Shift count if (min2 != max2 && ((uint32)min2 > 0x1F || (uint32)max2 > 0x1F)) { min2 = 0; max2 = 0x1F; } else { min2 &= 0x1F; max2 &= 0x1F; } // Compute max if (max1 < 0) { max = max1 >> max2; } else { max = max1 >> min2; } // Compute min if (min1 < 0) { min = min1 >> min2; } else { min = min1 >> max2; } break; case Js::OpCode::ShrU_A: // shift count is constant zero if ((min2 == max2) && (max2 & 0x1f) == 0) { // We can't encode uint32 result, so it has to be used as int32 only or the original value is positive. Assert(instr->ignoreIntOverflow || min1 >= 0); // We can transfer the signed int32 range. min = min1; max = max1; break; } const IntConstantBounds src2NewBounds = IntConstantBounds(min2, max2).And_0x1f(); // Zero is only allowed if result is always a signed int32 or always used as a signed int32 Assert(min1 >= 0 || instr->ignoreIntOverflow || !src2NewBounds.Contains(0)); min2 = src2NewBounds.LowerBound(); max2 = src2NewBounds.UpperBound(); Assert(min2 <= max2); // zero shift count is only allowed if result is used as int32 and/or value is positive Assert(min2 > 0 || instr->ignoreIntOverflow || min1 >= 0); uint32 umin1 = (uint32)min1; uint32 umax1 = (uint32)max1; if (umin1 > umax1) { uint32 temp = umax1; umax1 = umin1; umin1 = temp; } Assert(min2 >= 0 && max2 < 32); // Compute max if (min1 < 0) { umax1 = UINT32_MAX; } max = umax1 >> min2; // Compute min if (min1 <= 0 && max1 >=0) { min = 0; } else { min = umin1 >> max2; } // We should be able to fit uint32 range as int32 Assert(instr->ignoreIntOverflow || (min >= 0 && max >= 0) ); if (min > max) { // can only happen if shift count can be zero Assert(min2 == 0 && (instr->ignoreIntOverflow || min1 >= 0)); min = Int32ConstMin; max = Int32ConstMax; } break; } *pNewMin = min; *pNewMax = max; } IR::Instr * GlobOpt::TypeSpecialization( IR::Instr *instr, Value **pSrc1Val, Value **pSrc2Val, Value **pDstVal, bool *redoTypeSpecRef, bool *const forceInvariantHoistingRef) { Value *&src1Val = *pSrc1Val; Value *&src2Val = *pSrc2Val; *redoTypeSpecRef = false; Assert(!*forceInvariantHoistingRef); this->ignoredIntOverflowForCurrentInstr = false; this->ignoredNegativeZeroForCurrentInstr = false; // - Int32 values that can't be tagged are created as float constant values instead because a JavascriptNumber var is needed // for that value at runtime. For the purposes of type specialization, recover the int32 values so that they will be // treated as ints. 
// - If int overflow does not matter for the instruction, we can additionally treat uint32 values as int32 values because // the value resulting from the operation will eventually be converted to int32 anyway Value *const src1OriginalVal = src1Val; Value *const src2OriginalVal = src2Val; #ifdef ENABLE_SIMDJS // SIMD_JS if (TypeSpecializeSimd128(instr, pSrc1Val, pSrc2Val, pDstVal)) { return instr; } #endif if(!instr->ShouldCheckForIntOverflow()) { if(src1Val && src1Val->GetValueInfo()->IsFloatConstant()) { int32 int32Value; bool isInt32; if(Js::JavascriptNumber::TryGetInt32OrUInt32Value( src1Val->GetValueInfo()->AsFloatConstant()->FloatValue(), &int32Value, &isInt32)) { src1Val = GetIntConstantValue(int32Value, instr); if(!isInt32) { this->ignoredIntOverflowForCurrentInstr = true; } } } if(src2Val && src2Val->GetValueInfo()->IsFloatConstant()) { int32 int32Value; bool isInt32; if(Js::JavascriptNumber::TryGetInt32OrUInt32Value( src2Val->GetValueInfo()->AsFloatConstant()->FloatValue(), &int32Value, &isInt32)) { src2Val = GetIntConstantValue(int32Value, instr); if(!isInt32) { this->ignoredIntOverflowForCurrentInstr = true; } } } } const AutoRestoreVal autoRestoreSrc1Val(src1OriginalVal, &src1Val); const AutoRestoreVal autoRestoreSrc2Val(src2OriginalVal, &src2Val); if (src1Val && instr->GetSrc2() == nullptr) { // Unary // Note make sure that native array StElemI gets to TypeSpecializeStElem. Do this for typed arrays, too? int32 intConstantValue; if (!this->IsLoopPrePass() && !instr->IsBranchInstr() && src1Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue) && !( // Nothing to fold for element stores. Go into type specialization to see if they can at least be specialized. instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict || instr->m_opcode == Js::OpCode::StElemC || instr->m_opcode == Js::OpCode::MultiBr || instr->m_opcode == Js::OpCode::InlineArrayPop )) { if (OptConstFoldUnary(&instr, intConstantValue, src1Val == src1OriginalVal, pDstVal)) { return instr; } } else if (this->TypeSpecializeUnary( &instr, &src1Val, pDstVal, src1OriginalVal, redoTypeSpecRef, forceInvariantHoistingRef)) { return instr; } else if(*redoTypeSpecRef) { return instr; } } else if (instr->GetSrc2() && !instr->IsBranchInstr()) { // Binary if (!this->IsLoopPrePass()) { if (GetIsAsmJSFunc()) { if (CONFIG_FLAG(WasmFold)) { bool success = instr->GetSrc1()->IsInt64() ? 
this->OptConstFoldBinaryWasm<int64>(&instr, src1Val, src2Val, pDstVal) : this->OptConstFoldBinaryWasm<int>(&instr, src1Val, src2Val, pDstVal); if (success) { return instr; } } } else { // OptConstFoldBinary doesn't do type spec, so only deal with things we are sure are int (IntConstant and IntRange) // and not just likely ints TypeSpecializeBinary will deal with type specializing them and fold them again IntConstantBounds src1IntConstantBounds, src2IntConstantBounds; if (src1Val && src1Val->GetValueInfo()->TryGetIntConstantBounds(&src1IntConstantBounds)) { if (src2Val && src2Val->GetValueInfo()->TryGetIntConstantBounds(&src2IntConstantBounds)) { if (this->OptConstFoldBinary(&instr, src1IntConstantBounds, src2IntConstantBounds, pDstVal)) { return instr; } } } } } } if (instr->GetSrc2() && this->TypeSpecializeBinary(&instr, pSrc1Val, pSrc2Val, pDstVal, src1OriginalVal, src2OriginalVal, redoTypeSpecRef)) { if (!this->IsLoopPrePass() && instr->m_opcode != Js::OpCode::Nop && instr->m_opcode != Js::OpCode::Br && // We may have const fold a branch // Cannot const-peep if the result of the operation is required for a bailout check !(instr->HasBailOutInfo() && instr->GetBailOutKind() & IR::BailOutOnResultConditions)) { if (src1Val && src1Val->GetValueInfo()->HasIntConstantValue()) { if (this->OptConstPeep(instr, instr->GetSrc1(), pDstVal, src1Val->GetValueInfo())) { return instr; } } else if (src2Val && src2Val->GetValueInfo()->HasIntConstantValue()) { if (this->OptConstPeep(instr, instr->GetSrc2(), pDstVal, src2Val->GetValueInfo())) { return instr; } } } return instr; } else if(*redoTypeSpecRef) { return instr; } if (instr->IsBranchInstr() && !this->IsLoopPrePass()) { if (this->OptConstFoldBranch(instr, src1Val, src2Val, pDstVal)) { return instr; } } // We didn't type specialize, make sure the srcs are unspecialized IR::Opnd *src1 = instr->GetSrc1(); if (src1) { instr = this->ToVarUses(instr, src1, false, src1Val); IR::Opnd *src2 = instr->GetSrc2(); if (src2) { instr = this->ToVarUses(instr, src2, false, src2Val); } } IR::Opnd *dst = instr->GetDst(); if (dst) { instr = this->ToVarUses(instr, dst, true, nullptr); // Handling for instructions other than built-ins that may require only dst type specialization // should be added here. if(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode) && !GetIsAsmJSFunc()) // don't need to do typespec for asmjs { this->TypeSpecializeInlineBuiltInDst(&instr, pDstVal); return instr; } // Clear the int specialized bit on the dst. if (dst->IsRegOpnd()) { IR::RegOpnd *dstRegOpnd = dst->AsRegOpnd(); if (!dstRegOpnd->m_sym->IsTypeSpec()) { this->ToVarRegOpnd(dstRegOpnd, this->currentBlock); } else if (dstRegOpnd->m_sym->IsInt32()) { this->ToInt32Dst(instr, dstRegOpnd, this->currentBlock); } else if (dstRegOpnd->m_sym->IsUInt32() && GetIsAsmJSFunc()) { this->ToUInt32Dst(instr, dstRegOpnd, this->currentBlock); } else if (dstRegOpnd->m_sym->IsFloat64()) { this->ToFloat64Dst(instr, dstRegOpnd, this->currentBlock); } } else if (dst->IsSymOpnd() && dst->AsSymOpnd()->m_sym->IsStackSym()) { this->ToVarStackSym(dst->AsSymOpnd()->m_sym->AsStackSym(), this->currentBlock); } } return instr; } bool GlobOpt::OptConstPeep(IR::Instr *instr, IR::Opnd *constSrc, Value **pDstVal, ValueInfo *valuInfo) { int32 value; IR::Opnd *src; IR::Opnd *nonConstSrc = (constSrc == instr->GetSrc1() ? 
        instr->GetSrc2() : instr->GetSrc1());

    // Try to find the value from value info first
    if (valuInfo->TryGetIntConstantValue(&value))
    {
    }
    else if (constSrc->IsAddrOpnd())
    {
        IR::AddrOpnd *addrOpnd = constSrc->AsAddrOpnd();
#ifdef _M_X64
        Assert(addrOpnd->IsVar() || Math::FitsInDWord((size_t)addrOpnd->m_address));
#else
        Assert(sizeof(value) == sizeof(addrOpnd->m_address));
#endif

        if (addrOpnd->IsVar())
        {
            value = Js::TaggedInt::ToInt32(addrOpnd->m_address);
        }
        else
        {
            // We asserted that the address will fit in a DWORD above
            value = ::Math::PointerCastToIntegral<int32>(constSrc->AsAddrOpnd()->m_address);
        }
    }
    else if (constSrc->IsIntConstOpnd())
    {
        value = constSrc->AsIntConstOpnd()->AsInt32();
    }
    else
    {
        return false;
    }

    switch (instr->m_opcode)
    {
        // Can't do all Add_A because of string concats.
        // Sub_A cannot be transformed to a NEG_A because 0 - 0 != -0
    case Js::OpCode::Add_A:
        src = nonConstSrc;

        if (!src->GetValueType().IsInt())
        {
            // 0 + -0 != -0
            // "Foo" + 0 != "Foo"
            return false;
        }
        // fall-through

    case Js::OpCode::Add_I4:
        if (value != 0)
        {
            return false;
        }
        if (constSrc == instr->GetSrc1())
        {
            src = instr->GetSrc2();
        }
        else
        {
            src = instr->GetSrc1();
        }
        break;

    case Js::OpCode::Mul_A:
    case Js::OpCode::Mul_I4:
        if (value == 0)
        {
            // -0 * 0 != 0
            return false;
        }
        else if (value == 1)
        {
            src = nonConstSrc;
        }
        else
        {
            return false;
        }
        break;

    case Js::OpCode::Div_A:
        if (value == 1 && constSrc == instr->GetSrc2())
        {
            src = instr->GetSrc1();
        }
        else
        {
            return false;
        }
        break;

    case Js::OpCode::Or_I4:
        if (value == -1)
        {
            src = constSrc;
        }
        else if (value == 0)
        {
            src = nonConstSrc;
        }
        else
        {
            return false;
        }
        break;

    case Js::OpCode::And_I4:
        if (value == -1)
        {
            src = nonConstSrc;
        }
        else if (value == 0)
        {
            src = constSrc;
        }
        else
        {
            return false;
        }
        break;

    case Js::OpCode::Shl_I4:
    case Js::OpCode::ShrU_I4:
    case Js::OpCode::Shr_I4:
        if (value != 0 || constSrc != instr->GetSrc2())
        {
            return false;
        }
        src = instr->GetSrc1();
        break;

    default:
        return false;
    }

    this->CaptureByteCodeSymUses(instr);

    if (src == instr->GetSrc1())
    {
        instr->FreeSrc2();
    }
    else
    {
        Assert(src == instr->GetSrc2());
        instr->ReplaceSrc1(instr->UnlinkSrc2());
    }

    instr->m_opcode = Js::OpCode::Ld_A;

    return true;
}

Js::Var // TODO: michhol OOP JIT, shouldn't play with Vars
GlobOpt::GetConstantVar(IR::Opnd *opnd, Value *val)
{
    ValueInfo *valueInfo = val->GetValueInfo();

    if (valueInfo->IsVarConstant() && valueInfo->IsPrimitive())
    {
        return valueInfo->AsVarConstant()->VarValue();
    }
    if (opnd->IsAddrOpnd())
    {
        IR::AddrOpnd *addrOpnd = opnd->AsAddrOpnd();
        if (addrOpnd->IsVar())
        {
            return addrOpnd->m_address;
        }
    }
    else if (opnd->IsIntConstOpnd())
    {
        if (!Js::TaggedInt::IsOverflow(opnd->AsIntConstOpnd()->AsInt32()))
        {
            return Js::TaggedInt::ToVarUnchecked(opnd->AsIntConstOpnd()->AsInt32());
        }
    }
    else if (opnd->IsRegOpnd() && opnd->AsRegOpnd()->m_sym->IsSingleDef())
    {
        if (valueInfo->IsBoolean())
        {
            IR::Instr * defInstr = opnd->AsRegOpnd()->m_sym->GetInstrDef();

            if (defInstr->m_opcode != Js::OpCode::Ld_A || !defInstr->GetSrc1()->IsAddrOpnd())
            {
                return nullptr;
            }
            Assert(defInstr->GetSrc1()->AsAddrOpnd()->IsVar());
            return defInstr->GetSrc1()->AsAddrOpnd()->m_address;
        }
        else if (valueInfo->IsUndefined())
        {
            return (Js::Var)this->func->GetScriptContextInfo()->GetUndefinedAddr();
        }
        else if (valueInfo->IsNull())
        {
            return (Js::Var)this->func->GetScriptContextInfo()->GetNullAddr();
        }
    }

    return nullptr;
}

bool BoolAndIntStaticAndTypeMismatch(Value* src1Val, Value* src2Val, Js::Var src1Var, Js::Var src2Var)
{
    ValueInfo *src1ValInfo = src1Val->GetValueInfo();
    ValueInfo *src2ValInfo = src2Val->GetValueInfo();
    return
(src1ValInfo->IsNumber() && src1Var && src2ValInfo->IsBoolean() && src1Var != Js::TaggedInt::ToVarUnchecked(0) && src1Var != Js::TaggedInt::ToVarUnchecked(1)) || (src2ValInfo->IsNumber() && src2Var && src1ValInfo->IsBoolean() && src2Var != Js::TaggedInt::ToVarUnchecked(0) && src2Var != Js::TaggedInt::ToVarUnchecked(1)); } bool GlobOpt::OptConstFoldBranch(IR::Instr *instr, Value *src1Val, Value*src2Val, Value **pDstVal) { if (!src1Val) { return false; } int64 left64, right64; Js::Var src1Var = this->GetConstantVar(instr->GetSrc1(), src1Val); Js::Var src2Var = nullptr; if (instr->GetSrc2()) { if (!src2Val) { return false; } src2Var = this->GetConstantVar(instr->GetSrc2(), src2Val); } auto AreSourcesEqual = [&](Value * val1, Value * val2) -> bool { // NaN !== NaN, and objects can have valueOf/toString return val1->IsEqualTo(val2) && val1->GetValueInfo()->IsPrimitive() && val1->GetValueInfo()->IsNotFloat(); }; // Make sure GetConstantVar only returns primitives. // TODO: OOP JIT, enabled these asserts //Assert(!src1Var || !Js::JavascriptOperators::IsObject(src1Var)); //Assert(!src2Var || !Js::JavascriptOperators::IsObject(src2Var)); BOOL result; int32 constVal; switch (instr->m_opcode) { #define BRANCH(OPCODE,CMP,TYPE,UNSIGNEDNESS) \ case Js::OpCode::##OPCODE: \ if (src1Val->GetValueInfo()->TryGetInt64ConstantValue(&left64, UNSIGNEDNESS) && \ src2Val->GetValueInfo()->TryGetInt64ConstantValue(&right64, UNSIGNEDNESS)) \ { \ result = (TYPE)left64 CMP (TYPE)right64; \ } \ else if (AreSourcesEqual(src1Val, src2Val)) \ { \ result = 0 CMP 0; \ } \ else \ { \ return false; \ } \ break; BRANCH(BrEq_I4, == , int64, false) BRANCH(BrGe_I4, >= , int64, false) BRANCH(BrGt_I4, >, int64, false) BRANCH(BrLt_I4, <, int64, false) BRANCH(BrLe_I4, <= , int64, false) BRANCH(BrNeq_I4, != , int64, false) BRANCH(BrUnGe_I4, >= , uint64, true) BRANCH(BrUnGt_I4, >, uint64, true) BRANCH(BrUnLt_I4, <, uint64, true) BRANCH(BrUnLe_I4, <= , uint64, true) case Js::OpCode::BrEq_A: case Js::OpCode::BrNotNeq_A: if (!src1Var || !src2Var) { if (BoolAndIntStaticAndTypeMismatch(src1Val, src2Val, src1Var, src2Var)) { result = false; } else if (AreSourcesEqual(src1Val, src2Val)) { result = true; } else { return false; } } else { if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts)) { // TODO: OOP JIT, const folding return false; } result = Js::JavascriptOperators::Equal(src1Var, src2Var, this->func->GetScriptContext()); } break; case Js::OpCode::BrNeq_A: case Js::OpCode::BrNotEq_A: if (!src1Var || !src2Var) { if (BoolAndIntStaticAndTypeMismatch(src1Val, src2Val, src1Var, src2Var)) { result = true; } else if (AreSourcesEqual(src1Val, src2Val)) { result = false; } else { return false; } } else { if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts)) { // TODO: OOP JIT, const folding return false; } result = Js::JavascriptOperators::NotEqual(src1Var, src2Var, this->func->GetScriptContext()); } break; case Js::OpCode::BrSrEq_A: case Js::OpCode::BrSrNotNeq_A: if (!src1Var || !src2Var) { ValueInfo *src1ValInfo = src1Val->GetValueInfo(); ValueInfo *src2ValInfo = src2Val->GetValueInfo(); if ( (src1ValInfo->IsUndefined() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenUndefined()) || (src1ValInfo->IsNull() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNull()) || (src1ValInfo->IsBoolean() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenBoolean()) || (src1ValInfo->IsNumber() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNumber()) || (src1ValInfo->IsString() && src2ValInfo->IsDefinite() && 
!src2ValInfo->HasBeenString()) || (src2ValInfo->IsUndefined() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenUndefined()) || (src2ValInfo->IsNull() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNull()) || (src2ValInfo->IsBoolean() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenBoolean()) || (src2ValInfo->IsNumber() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNumber()) || (src2ValInfo->IsString() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenString()) ) { result = false; } else if (AreSourcesEqual(src1Val, src2Val)) { result = true; } else { return false; } } else { if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts)) { // TODO: OOP JIT, const folding return false; } result = Js::JavascriptOperators::StrictEqual(src1Var, src2Var, this->func->GetScriptContext()); } break; case Js::OpCode::BrSrNeq_A: case Js::OpCode::BrSrNotEq_A: if (!src1Var || !src2Var) { ValueInfo *src1ValInfo = src1Val->GetValueInfo(); ValueInfo *src2ValInfo = src2Val->GetValueInfo(); if ( (src1ValInfo->IsUndefined() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenUndefined()) || (src1ValInfo->IsNull() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNull()) || (src1ValInfo->IsBoolean() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenBoolean()) || (src1ValInfo->IsNumber() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNumber()) || (src1ValInfo->IsString() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenString()) || (src2ValInfo->IsUndefined() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenUndefined()) || (src2ValInfo->IsNull() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNull()) || (src2ValInfo->IsBoolean() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenBoolean()) || (src2ValInfo->IsNumber() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNumber()) || (src2ValInfo->IsString() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenString()) ) { result = true; } else if (AreSourcesEqual(src1Val, src2Val)) { result = false; } else { return false; } } else { if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts)) { // TODO: OOP JIT, const folding return false; } result = Js::JavascriptOperators::NotStrictEqual(src1Var, src2Var, this->func->GetScriptContext()); } break; case Js::OpCode::BrFalse_A: case Js::OpCode::BrTrue_A: { ValueInfo *const src1ValueInfo = src1Val->GetValueInfo(); if(src1ValueInfo->IsNull() || src1ValueInfo->IsUndefined()) { result = instr->m_opcode == Js::OpCode::BrFalse_A; break; } if(src1ValueInfo->IsObject() && src1ValueInfo->GetObjectType() > ObjectType::Object) { // Specific object types that are tracked are equivalent to 'true' result = instr->m_opcode == Js::OpCode::BrTrue_A; break; } if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts)) { // TODO: OOP JIT, const folding return false; } if (!src1Var) { return false; } result = Js::JavascriptConversion::ToBoolean(src1Var, this->func->GetScriptContext()); if(instr->m_opcode == Js::OpCode::BrFalse_A) { result = !result; } break; } case Js::OpCode::BrFalse_I4: // this path would probably work outside of asm.js, but we should verify that if we ever hit this scenario Assert(GetIsAsmJSFunc()); constVal = 0; if (!src1Val->GetValueInfo()->TryGetIntConstantValue(&constVal)) { return false; } result = constVal == 0; break; default: return false; #undef BRANCH } this->OptConstFoldBr(!!result, instr); return true; } bool GlobOpt::OptConstFoldUnary( IR::Instr * *pInstr, const int32 intConstantValue, const bool isUsingOriginalSrc1Value, Value **pDstVal) { IR::Instr * &instr = 
*pInstr; int32 value = 0; IR::Opnd *constOpnd; bool isInt = true; bool doSetDstVal = true; FloatConstType fValue = 0.0; if (!DoConstFold()) { return false; } if (instr->GetDst() && !instr->GetDst()->IsRegOpnd()) { return false; } switch(instr->m_opcode) { case Js::OpCode::Neg_A: if (intConstantValue == 0) { // Could fold to -0.0 return false; } if (Int32Math::Neg(intConstantValue, &value)) { return false; } break; case Js::OpCode::Not_A: Int32Math::Not(intConstantValue, &value); break; case Js::OpCode::Ld_A: if (instr->HasBailOutInfo()) { //The profile data for switch expr can be string and in GlobOpt we realize it is an int. if(instr->GetBailOutKind() == IR::BailOutExpectingString) { throw Js::RejitException(RejitReason::DisableSwitchOptExpectingString); } Assert(instr->GetBailOutKind() == IR::BailOutExpectingInteger); instr->ClearBailOutInfo(); } value = intConstantValue; if(isUsingOriginalSrc1Value) { doSetDstVal = false; // Let OptDst do it by copying src1Val } break; case Js::OpCode::Conv_Num: case Js::OpCode::LdC_A_I4: value = intConstantValue; if(isUsingOriginalSrc1Value) { doSetDstVal = false; // Let OptDst do it by copying src1Val } break; case Js::OpCode::Incr_A: if (Int32Math::Inc(intConstantValue, &value)) { return false; } break; case Js::OpCode::Decr_A: if (Int32Math::Dec(intConstantValue, &value)) { return false; } break; case Js::OpCode::InlineMathAcos: fValue = Js::Math::Acos((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathAsin: fValue = Js::Math::Asin((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathAtan: fValue = Js::Math::Atan((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathCos: fValue = Js::Math::Cos((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathExp: fValue = Js::Math::Exp((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathLog: fValue = Js::Math::Log((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathSin: fValue = Js::Math::Sin((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathSqrt: fValue = ::sqrt((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathTan: fValue = ::tan((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathFround: fValue = (double) (float) intConstantValue; isInt = false; break; case Js::OpCode::InlineMathAbs: if (intConstantValue == INT32_MIN) { if (instr->GetDst()->IsInt32()) { // if dst is an int (e.g. 
in asm.js), we should coerce it, not convert to float value = static_cast<int32>(2147483648U); } else { // Rejit with AggressiveIntTypeSpecDisabled for Math.abs(INT32_MIN) because it causes dst // to be float type which could be different with previous type spec result in LoopPrePass throw Js::RejitException(RejitReason::AggressiveIntTypeSpecDisabled); } } else { value = ::abs(intConstantValue); } break; case Js::OpCode::InlineMathClz: DWORD clz; if (_BitScanReverse(&clz, intConstantValue)) { value = 31 - clz; } else { value = 32; } instr->ClearBailOutInfo(); break; case Js::OpCode::Ctz: Assert(func->GetJITFunctionBody()->IsWasmFunction()); Assert(!instr->HasBailOutInfo()); DWORD ctz; if (_BitScanForward(&ctz, intConstantValue)) { value = ctz; } else { value = 32; } break; case Js::OpCode::InlineMathFloor: value = intConstantValue; instr->ClearBailOutInfo(); break; case Js::OpCode::InlineMathCeil: value = intConstantValue; instr->ClearBailOutInfo(); break; case Js::OpCode::InlineMathRound: value = intConstantValue; instr->ClearBailOutInfo(); break; case Js::OpCode::ToVar: if (Js::TaggedInt::IsOverflow(intConstantValue)) { return false; } else { value = intConstantValue; instr->ClearBailOutInfo(); break; } default: return false; } this->CaptureByteCodeSymUses(instr); Assert(!instr->HasBailOutInfo()); // If we are, in fact, successful in constant folding the instruction, there is no point in having the bailoutinfo around anymore. // Make sure that it is cleared if it was initially present. if (!isInt) { value = (int32)fValue; if (fValue == (double)value) { isInt = true; } } if (isInt) { constOpnd = IR::IntConstOpnd::New(value, TyInt32, instr->m_func); GOPT_TRACE(_u("Constant folding to %d\n"), value); } else { constOpnd = IR::FloatConstOpnd::New(fValue, TyFloat64, instr->m_func); GOPT_TRACE(_u("Constant folding to %f\n"), fValue); } instr->ReplaceSrc1(constOpnd); this->OptSrc(constOpnd, &instr); IR::Opnd *dst = instr->GetDst(); Assert(dst->IsRegOpnd()); StackSym *dstSym = dst->AsRegOpnd()->m_sym; if (isInt) { if (dstSym->IsSingleDef()) { dstSym->SetIsIntConst(value); } if (doSetDstVal) { *pDstVal = GetIntConstantValue(value, instr, dst); } if (IsTypeSpecPhaseOff(this->func)) { instr->m_opcode = Js::OpCode::LdC_A_I4; this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock); } else { instr->m_opcode = Js::OpCode::Ld_I4; this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock); StackSym * currDstSym = instr->GetDst()->AsRegOpnd()->m_sym; if (currDstSym->IsSingleDef()) { currDstSym->SetIsIntConst(value); } } } else { *pDstVal = NewFloatConstantValue(fValue, dst); if (IsTypeSpecPhaseOff(this->func)) { instr->m_opcode = Js::OpCode::LdC_A_R8; this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock); } else { instr->m_opcode = Js::OpCode::LdC_F8_R8; this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock); } } // If this is an induction variable, then treat it the way the prepass would have if it had seen // the assignment and the resulting change to the value number, and mark it as indeterminate. 
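    // (e.g. if the folded dst is a loop counter that elsewhere follows a simple i++
    // pattern, its per-iteration change can no longer be assumed by bound-based loop
    // optimizations.)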
for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent) { InductionVariable *iv = nullptr; if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv)) { iv->SetChangeIsIndeterminate(); } } return true; } //------------------------------------------------------------------------------------------------------ // Type specialization //------------------------------------------------------------------------------------------------------ bool GlobOpt::IsWorthSpecializingToInt32DueToSrc(IR::Opnd *const src, Value *const val) { Assert(src); Assert(val); ValueInfo *valueInfo = val->GetValueInfo(); Assert(valueInfo->IsLikelyInt()); // If it is not known that the operand is definitely an int, the operand is not already type-specialized, and it's not live // in the loop landing pad (if we're in a loop), it's probably not worth type-specializing this instruction. The common case // where type-specializing this would be bad is where the operations are entirely on properties or array elements, where the // ratio of FromVars and ToVars to the number of actual operations is high, and the conversions would dominate the time // spent. On the other hand, if we're using a function formal parameter more than once, it would probably be worth // type-specializing it, hence the IsDead check on the operands. return valueInfo->IsInt() || valueInfo->HasIntConstantValue(true) || !src->GetIsDead() || !src->IsRegOpnd() || CurrentBlockData()->IsInt32TypeSpecialized(src->AsRegOpnd()->m_sym) || (this->currentBlock->loop && this->currentBlock->loop->landingPad->globOptData.IsLive(src->AsRegOpnd()->m_sym)); } bool GlobOpt::IsWorthSpecializingToInt32DueToDst(IR::Opnd *const dst) { Assert(dst); const auto sym = dst->AsRegOpnd()->m_sym; return CurrentBlockData()->IsInt32TypeSpecialized(sym) || (this->currentBlock->loop && this->currentBlock->loop->landingPad->globOptData.IsLive(sym)); } bool GlobOpt::IsWorthSpecializingToInt32(IR::Instr *const instr, Value *const src1Val, Value *const src2Val) { Assert(instr); const auto src1 = instr->GetSrc1(); const auto src2 = instr->GetSrc2(); // In addition to checking each operand and the destination, if for any reason we only have to do a maximum of two // conversions instead of the worst-case 3 conversions, it's probably worth specializing. if (IsWorthSpecializingToInt32DueToSrc(src1, src1Val) || (src2Val && IsWorthSpecializingToInt32DueToSrc(src2, src2Val))) { return true; } IR::Opnd *dst = instr->GetDst(); if (!dst || IsWorthSpecializingToInt32DueToDst(dst)) { return true; } if (dst->IsEqual(src1) || (src2Val && (dst->IsEqual(src2) || src1->IsEqual(src2)))) { return true; } IR::Instr *instrNext = instr->GetNextRealInstrOrLabel(); // Skip useless Ld_A's do { switch (instrNext->m_opcode) { case Js::OpCode::Ld_A: if (!dst->IsEqual(instrNext->GetSrc1())) { goto done; } dst = instrNext->GetDst(); break; case Js::OpCode::LdFld: case Js::OpCode::LdRootFld: case Js::OpCode::LdRootFldForTypeOf: case Js::OpCode::LdFldForTypeOf: case Js::OpCode::LdElemI_A: case Js::OpCode::ByteCodeUses: break; default: goto done; } instrNext = instrNext->GetNextRealInstrOrLabel(); } while (true); done: // If the next instr could also be type specialized, then it is probably worth it. 
if ((instrNext->GetSrc1() && dst->IsEqual(instrNext->GetSrc1())) || (instrNext->GetSrc2() && dst->IsEqual(instrNext->GetSrc2()))) { switch (instrNext->m_opcode) { case Js::OpCode::Add_A: case Js::OpCode::Sub_A: case Js::OpCode::Mul_A: case Js::OpCode::Div_A: case Js::OpCode::Rem_A: case Js::OpCode::Xor_A: case Js::OpCode::And_A: case Js::OpCode::Or_A: case Js::OpCode::Shl_A: case Js::OpCode::Shr_A: case Js::OpCode::Incr_A: case Js::OpCode::Decr_A: case Js::OpCode::Neg_A: case Js::OpCode::Not_A: case Js::OpCode::Conv_Num: case Js::OpCode::BrEq_I4: case Js::OpCode::BrTrue_I4: case Js::OpCode::BrFalse_I4: case Js::OpCode::BrGe_I4: case Js::OpCode::BrGt_I4: case Js::OpCode::BrLt_I4: case Js::OpCode::BrLe_I4: case Js::OpCode::BrNeq_I4: return true; } } return false; } bool GlobOpt::TypeSpecializeNumberUnary(IR::Instr *instr, Value *src1Val, Value **pDstVal) { Assert(src1Val->GetValueInfo()->IsNumber()); if (this->IsLoopPrePass()) { return false; } switch (instr->m_opcode) { case Js::OpCode::Conv_Num: // Optimize Conv_Num away since we know this is a number instr->m_opcode = Js::OpCode::Ld_A; return false; } return false; } bool GlobOpt::TypeSpecializeUnary( IR::Instr **pInstr, Value **pSrc1Val, Value **pDstVal, Value *const src1OriginalVal, bool *redoTypeSpecRef, bool *const forceInvariantHoistingRef) { Assert(pSrc1Val); Value *&src1Val = *pSrc1Val; Assert(src1Val); // We don't need to do typespec for asmjs if (IsTypeSpecPhaseOff(this->func) || GetIsAsmJSFunc()) { return false; } IR::Instr *&instr = *pInstr; int32 min, max; // Inline built-ins explicitly specify how srcs/dst must be specialized. if (OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { TypeSpecializeInlineBuiltInUnary(pInstr, &src1Val, pDstVal, src1OriginalVal, redoTypeSpecRef); return true; } // Consider: If type spec wasn't completely done, make sure that we don't type-spec the dst 2nd time. if(instr->m_opcode == Js::OpCode::LdLen_A && TypeSpecializeLdLen(&instr, &src1Val, pDstVal, forceInvariantHoistingRef)) { return true; } if (!src1Val->GetValueInfo()->GetIntValMinMax(&min, &max, this->DoAggressiveIntTypeSpec())) { src1Val = src1OriginalVal; if (src1Val->GetValueInfo()->IsLikelyFloat()) { // Try to type specialize to float return this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal); } else if (src1Val->GetValueInfo()->IsNumber()) { return TypeSpecializeNumberUnary(instr, src1Val, pDstVal); } return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } return this->TypeSpecializeIntUnary(pInstr, &src1Val, pDstVal, min, max, src1OriginalVal, redoTypeSpecRef); } // Returns true if the built-in requested type specialization, and no further action needed, // otherwise returns false. void GlobOpt::TypeSpecializeInlineBuiltInUnary(IR::Instr **pInstr, Value **pSrc1Val, Value **pDstVal, Value *const src1OriginalVal, bool *redoTypeSpecRef) { IR::Instr *&instr = *pInstr; Assert(pSrc1Val); Value *&src1Val = *pSrc1Val; Assert(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)); Js::BuiltinFunction builtInId = Js::JavascriptLibrary::GetBuiltInInlineCandidateId(instr->m_opcode); // From actual instr, not profile based. Assert(builtInId != Js::BuiltinFunction::None); // Consider using different bailout for float/int FromVars, so that when the arg cannot be converted to number we don't disable // type spec for other parts of the big function but rather just don't inline that built-in instr. // E.g. could do that if the value is not likelyInt/likelyFloat. 
Js::BuiltInFlags builtInFlags = Js::JavascriptLibrary::GetFlagsForBuiltIn(builtInId); bool areAllArgsAlwaysFloat = (builtInFlags & Js::BuiltInFlags::BIF_Args) == Js::BuiltInFlags::BIF_TypeSpecUnaryToFloat; if (areAllArgsAlwaysFloat) { // InlineMathAcos, InlineMathAsin, InlineMathAtan, InlineMathCos, InlineMathExp, InlineMathLog, InlineMathSin, InlineMathSqrt, InlineMathTan. Assert(this->DoFloatTypeSpec()); // Type-spec the src. src1Val = src1OriginalVal; bool retVal = this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal, /* skipDst = */ true); AssertMsg(retVal, "For inline built-ins the args have to be type-specialized to float, but something failed during the process."); // Type-spec the dst. this->TypeSpecializeFloatDst(instr, nullptr, src1Val, nullptr, pDstVal); } else if (instr->m_opcode == Js::OpCode::InlineMathAbs) { // Consider the case when the value is unknown - because of bailout in abs we may disable type spec for the whole function which is too much. // First, try int. int minVal, maxVal; bool shouldTypeSpecToInt = src1Val->GetValueInfo()->GetIntValMinMax(&minVal, &maxVal, /* doAggressiveIntTypeSpec = */ true); if (shouldTypeSpecToInt) { Assert(this->DoAggressiveIntTypeSpec()); bool retVal = this->TypeSpecializeIntUnary(pInstr, &src1Val, pDstVal, minVal, maxVal, src1OriginalVal, redoTypeSpecRef, true); AssertMsg(retVal, "For inline built-ins the args have to be type-specialized (int), but something failed during the process."); if (!this->IsLoopPrePass()) { // Create bailout for INT_MIN which does not have corresponding int value on the positive side. // Check int range: if we know the range is out of overflow, we do not need the bail out at all. if (minVal == INT32_MIN) { GenerateBailAtOperation(&instr, IR::BailOnIntMin); } } // Account for ::abs(INT_MIN) == INT_MIN (which is less than 0). maxVal = ::max( ::abs(Int32Math::NearestInRangeTo(minVal, INT_MIN + 1, INT_MAX)), ::abs(Int32Math::NearestInRangeTo(maxVal, INT_MIN + 1, INT_MAX))); minVal = minVal >= 0 ? minVal : 0; this->TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, src1Val, nullptr, IR::BailOutInvalid, minVal, maxVal, pDstVal); } else { // If we couldn't do int, do float. 
Assert(this->DoFloatTypeSpec()); src1Val = src1OriginalVal; bool retVal = this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal, true); AssertMsg(retVal, "For inline built-ins the args have to be type-specialized (float), but something failed during the process."); this->TypeSpecializeFloatDst(instr, nullptr, src1Val, nullptr, pDstVal); } } else if (instr->m_opcode == Js::OpCode::InlineMathFloor || instr->m_opcode == Js::OpCode::InlineMathCeil || instr->m_opcode == Js::OpCode::InlineMathRound) { // Type specialize src to float src1Val = src1OriginalVal; bool retVal = this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal, /* skipDst = */ true); AssertMsg(retVal, "For inline Math.floor and Math.ceil the src has to be type-specialized to float, but something failed during the process."); // Type specialize dst to int this->TypeSpecializeIntDst( instr, instr->m_opcode, nullptr, src1Val, nullptr, IR::BailOutInvalid, INT32_MIN, INT32_MAX, pDstVal); } else if(instr->m_opcode == Js::OpCode::InlineArrayPop) { IR::Opnd *const thisOpnd = instr->GetSrc1(); Assert(thisOpnd); // Ensure src1 (Array) is a var this->ToVarUses(instr, thisOpnd, false, src1Val); if(!this->IsLoopPrePass() && thisOpnd->GetValueType().IsLikelyNativeArray()) { // We bail out, if there is illegal access or a mismatch in the Native array type that is optimized for, during the run time. GenerateBailAtOperation(&instr, IR::BailOutConventionalNativeArrayAccessOnly); } if(!instr->GetDst()) { return; } // Try Type Specializing the element (return item from Pop) based on the array's profile data. if(thisOpnd->GetValueType().IsLikelyNativeIntArray()) { this->TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, nullptr, nullptr, IR::BailOutInvalid, INT32_MIN, INT32_MAX, pDstVal); } else if(thisOpnd->GetValueType().IsLikelyNativeFloatArray()) { this->TypeSpecializeFloatDst(instr, nullptr, nullptr, nullptr, pDstVal); } else { // We reached here so the Element is not yet type specialized. Ensure element is a var if(instr->GetDst()->IsRegOpnd()) { this->ToVarRegOpnd(instr->GetDst()->AsRegOpnd(), currentBlock); } } } else if (instr->m_opcode == Js::OpCode::InlineMathClz) { Assert(this->DoAggressiveIntTypeSpec()); Assert(this->DoLossyIntTypeSpec()); //Type specialize to int bool retVal = this->TypeSpecializeIntUnary(pInstr, &src1Val, pDstVal, INT32_MIN, INT32_MAX, src1OriginalVal, redoTypeSpecRef); AssertMsg(retVal, "For clz32, the arg has to be type-specialized to int."); } else { AssertMsg(FALSE, "Unsupported built-in!"); } } void GlobOpt::TypeSpecializeInlineBuiltInBinary(IR::Instr **pInstr, Value *src1Val, Value* src2Val, Value **pDstVal, Value *const src1OriginalVal, Value *const src2OriginalVal) { IR::Instr *&instr = *pInstr; Assert(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)); switch(instr->m_opcode) { case Js::OpCode::InlineMathAtan2: { Js::BuiltinFunction builtInId = Js::JavascriptLibrary::GetBuiltInInlineCandidateId(instr->m_opcode); // From actual instr, not profile based. Js::BuiltInFlags builtInFlags = Js::JavascriptLibrary::GetFlagsForBuiltIn(builtInId); bool areAllArgsAlwaysFloat = (builtInFlags & Js::BuiltInFlags::BIF_TypeSpecAllToFloat) != 0; Assert(areAllArgsAlwaysFloat); Assert(this->DoFloatTypeSpec()); // Type-spec the src1, src2 and dst. 
        src1Val = src1OriginalVal;
        src2Val = src2OriginalVal;
        bool retVal = this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
        AssertMsg(retVal, "For pow and atan2 the args have to be type-specialized to float, but something failed during the process.");

        break;
    }

    case Js::OpCode::InlineMathPow:
    {
#ifndef _M_ARM32_OR_ARM64
        if (src2Val->GetValueInfo()->IsLikelyInt())
        {
            bool lossy = false;

            this->ToInt32(instr, instr->GetSrc2(), this->currentBlock, src2Val, nullptr, lossy);

            IR::Opnd* src1 = instr->GetSrc1();
            int32 valueMin, valueMax;
            if (src1Val->GetValueInfo()->IsLikelyInt() &&
                this->DoPowIntIntTypeSpec() &&
                src2Val->GetValueInfo()->GetIntValMinMax(&valueMin, &valueMax, this->DoAggressiveIntTypeSpec()) &&
                valueMin >= 0)
            {
                this->ToInt32(instr, src1, this->currentBlock, src1Val, nullptr, lossy);
                this->TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, src1Val, src2Val, IR::BailOutInvalid, INT32_MIN, INT32_MAX, pDstVal);

                if(!this->IsLoopPrePass())
                {
                    GenerateBailAtOperation(&instr, IR::BailOutOnPowIntIntOverflow);
                }
            }
            else
            {
                this->ToFloat64(instr, src1, this->currentBlock, src1Val, nullptr, IR::BailOutPrimitiveButString);
                TypeSpecializeFloatDst(instr, nullptr, src1Val, src2Val, pDstVal);
            }
        }
        else
        {
#endif
            this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
#ifndef _M_ARM32_OR_ARM64
        }
#endif
        break;
    }

    case Js::OpCode::InlineMathImul:
    {
        Assert(this->DoAggressiveIntTypeSpec());
        Assert(this->DoLossyIntTypeSpec());

        // Type specialize to int
        bool retVal = this->TypeSpecializeIntBinary(pInstr, src1Val, src2Val, pDstVal, INT32_MIN, INT32_MAX, false /* skipDst */);

        AssertMsg(retVal, "For imul, the args have to be type-specialized to int but something failed during the process.");
        break;
    }

    case Js::OpCode::InlineMathMin:
    case Js::OpCode::InlineMathMax:
    {
        if(src1Val->GetValueInfo()->IsLikelyInt() && src2Val->GetValueInfo()->IsLikelyInt())
        {
            // Compute resulting range info
            int32 min1 = INT32_MIN;
            int32 max1 = INT32_MAX;
            int32 min2 = INT32_MIN;
            int32 max2 = INT32_MAX;
            int32 newMin, newMax;

            Assert(this->DoAggressiveIntTypeSpec());
            src1Val->GetValueInfo()->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec());
            src2Val->GetValueInfo()->GetIntValMinMax(&min2, &max2, this->DoAggressiveIntTypeSpec());

            if (instr->m_opcode == Js::OpCode::InlineMathMin)
            {
                newMin = min(min1, min2);
                newMax = min(max1, max2);
            }
            else
            {
                Assert(instr->m_opcode == Js::OpCode::InlineMathMax);
                newMin = max(min1, min2);
                newMax = max(max1, max2);
            }

            // Type specialize to int
            bool retVal = this->TypeSpecializeIntBinary(pInstr, src1Val, src2Val, pDstVal, newMin, newMax, false /* skipDst */);
            AssertMsg(retVal, "For min and max, the args have to be type-specialized to int if any one of the sources is an int, but something failed during the process.");
        }
        // Couldn't type specialize to int, type specialize to float
        else
        {
            Assert(this->DoFloatTypeSpec());
            src1Val = src1OriginalVal;
            src2Val = src2OriginalVal;

            bool retVal = this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
            AssertMsg(retVal, "For min and max, the args have to be type-specialized to float if any one of the sources is a float, but something failed during the process.");
        }
        break;
    }

    case Js::OpCode::InlineArrayPush:
    {
        IR::Opnd *const thisOpnd = instr->GetSrc1();

        Assert(thisOpnd);

        if(instr->GetDst() && instr->GetDst()->IsRegOpnd())
        {
            // Set the dst as live here, as the built-ins return early from the TypeSpecialization functions - before the dst is marked as live.
// Also, we are not specializing the dst separately and we are skipping the dst to be handled when we specialize the instruction above. this->ToVarRegOpnd(instr->GetDst()->AsRegOpnd(), currentBlock); } // Ensure src1 (Array) is a var this->ToVarUses(instr, thisOpnd, false, src1Val); if(!this->IsLoopPrePass()) { if(thisOpnd->GetValueType().IsLikelyNativeArray()) { // We bail out, if there is illegal access or a mismatch in the Native array type that is optimized for, during run time. GenerateBailAtOperation(&instr, IR::BailOutConventionalNativeArrayAccessOnly); } else { GenerateBailAtOperation(&instr, IR::BailOutOnImplicitCallsPreOp); } } // Try Type Specializing the element based on the array's profile data. if(thisOpnd->GetValueType().IsLikelyNativeFloatArray()) { src1Val = src1OriginalVal; src2Val = src2OriginalVal; } if((thisOpnd->GetValueType().IsLikelyNativeIntArray() && this->TypeSpecializeIntBinary(pInstr, src1Val, src2Val, pDstVal, INT32_MIN, INT32_MAX, true)) || (thisOpnd->GetValueType().IsLikelyNativeFloatArray() && this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal))) { break; } // The Element is not yet type specialized. Ensure element is a var this->ToVarUses(instr, instr->GetSrc2(), false, src2Val); break; } } } void GlobOpt::TypeSpecializeInlineBuiltInDst(IR::Instr **pInstr, Value **pDstVal) { IR::Instr *&instr = *pInstr; Assert(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)); if (instr->m_opcode == Js::OpCode::InlineMathRandom) { Assert(this->DoFloatTypeSpec()); // Type specialize dst to float this->TypeSpecializeFloatDst(instr, nullptr, nullptr, nullptr, pDstVal); } } bool GlobOpt::TryTypeSpecializeUnaryToFloatHelper(IR::Instr** pInstr, Value** pSrc1Val, Value* const src1OriginalVal, Value **pDstVal) { // It has been determined that this instruction cannot be int-specialized. We need to determine whether to attempt to // float-specialize the instruction, or leave it unspecialized. #if !INT32VAR Value*& src1Val = *pSrc1Val; if(src1Val->GetValueInfo()->IsLikelyUntaggedInt()) { // An input range is completely outside the range of an int31. Even if the operation may overflow, it is // unlikely to overflow on these operations, so we leave it unspecialized on 64-bit platforms. However, on // 32-bit platforms, the value is untaggable and will be a JavascriptNumber, which is significantly slower to // use in an unspecialized operation compared to a tagged int. So, try to float-specialize the instruction. src1Val = src1OriginalVal; return this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal); } #endif return false; } bool GlobOpt::TypeSpecializeIntBinary(IR::Instr **pInstr, Value *src1Val, Value *src2Val, Value **pDstVal, int32 min, int32 max, bool skipDst /* = false */) { // Consider moving the code for int type spec-ing binary functions here. IR::Instr *&instr = *pInstr; bool lossy = false; if(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { if(instr->m_opcode == Js::OpCode::InlineArrayPush) { int32 intConstantValue; bool isIntConstMissingItem = src2Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue); if(isIntConstMissingItem) { isIntConstMissingItem = Js::SparseArraySegment<int>::IsMissingItem(&intConstantValue); } // Don't specialize if the element is not likelyInt or an IntConst which is a missing item value. if(!(src2Val->GetValueInfo()->IsLikelyInt()) || isIntConstMissingItem) { return false; } // We don't want to specialize both the source operands, though it is a binary instr. 
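            // For a.push(x), src1 is the array receiver and is kept as a var (the caller
            // already ran ToVarUses on it); only the pushed element in src2 is converted
            // to int32 here.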
IR::Opnd * elementOpnd = instr->GetSrc2(); this->ToInt32(instr, elementOpnd, this->currentBlock, src2Val, nullptr, lossy); } else { IR::Opnd *src1 = instr->GetSrc1(); this->ToInt32(instr, src1, this->currentBlock, src1Val, nullptr, lossy); IR::Opnd *src2 = instr->GetSrc2(); this->ToInt32(instr, src2, this->currentBlock, src2Val, nullptr, lossy); } if(!skipDst) { IR::Opnd *dst = instr->GetDst(); if (dst) { TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, src1Val, src2Val, IR::BailOutInvalid, min, max, pDstVal); } } return true; } else { AssertMsg(false, "Yet to move code for other binary functions here"); return false; } } bool GlobOpt::TypeSpecializeIntUnary( IR::Instr **pInstr, Value **pSrc1Val, Value **pDstVal, int32 min, int32 max, Value *const src1OriginalVal, bool *redoTypeSpecRef, bool skipDst /* = false */) { IR::Instr *&instr = *pInstr; Assert(pSrc1Val); Value *&src1Val = *pSrc1Val; bool isTransfer = false; Js::OpCode opcode; int32 newMin, newMax; bool lossy = false; IR::BailOutKind bailOutKind = IR::BailOutInvalid; bool ignoredIntOverflow = this->ignoredIntOverflowForCurrentInstr; bool ignoredNegativeZero = false; bool checkTypeSpecWorth = false; if(instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt) { return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } AddSubConstantInfo addSubConstantInfo; switch(instr->m_opcode) { case Js::OpCode::Ld_A: if (instr->GetSrc1()->IsRegOpnd()) { StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsInt32TypeSpecialized(sym) == false) { // Type specializing an Ld_A isn't worth it, unless the src // is already type specialized. return false; } } newMin = min; newMax = max; opcode = Js::OpCode::Ld_I4; isTransfer = true; break; case Js::OpCode::Conv_Num: newMin = min; newMax = max; opcode = Js::OpCode::Ld_I4; isTransfer = true; break; case Js::OpCode::LdC_A_I4: newMin = newMax = instr->GetSrc1()->AsIntConstOpnd()->AsInt32(); opcode = Js::OpCode::Ld_I4; break; case Js::OpCode::Neg_A: if (min <= 0 && max >= 0) { if(instr->ShouldCheckForNegativeZero()) { // -0 matters since the sym is not a local, or is used in a way in which -0 would differ from +0 if(!DoAggressiveIntTypeSpec()) { // May result in -0 // Consider adding a dynamic check for src1 == 0 return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } if(min == 0 && max == 0) { // Always results in -0 return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } bailOutKind |= IR::BailOutOnNegativeZero; } else { ignoredNegativeZero = true; } } if (Int32Math::Neg(min, &newMax)) { if(instr->ShouldCheckForIntOverflow()) { if(!DoAggressiveIntTypeSpec()) { // May overflow return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } if(min == max) { // Always overflows return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } bailOutKind |= IR::BailOutOnOverflow; newMax = INT32_MAX; } else { ignoredIntOverflow = true; } } if (Int32Math::Neg(max, &newMin)) { if(instr->ShouldCheckForIntOverflow()) { if(!DoAggressiveIntTypeSpec()) { // May overflow return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } bailOutKind |= IR::BailOutOnOverflow; newMin = INT32_MAX; } else { ignoredIntOverflow = true; } } if(!instr->ShouldCheckForIntOverflow() && newMin > newMax) { // When ignoring overflow, the range needs to account for overflow. 
Since MIN_INT is the only int32 value that // overflows on Neg, and the value resulting from overflow is also MIN_INT, if calculating only the new min or new // max overflowed but not both, then the new min will be greater than the new max. In that case we need to consider // the full range of int32s as possible resulting values. newMin = INT32_MIN; newMax = INT32_MAX; } opcode = Js::OpCode::Neg_I4; checkTypeSpecWorth = true; break; case Js::OpCode::Not_A: if(!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeForNot(min, max, &newMin, &newMax); opcode = Js::OpCode::Not_I4; lossy = true; break; case Js::OpCode::Incr_A: do // while(false) { const auto CannotOverflowBasedOnRelativeBounds = [&]() { const ValueInfo *const src1ValueInfo = src1Val->GetValueInfo(); return (src1ValueInfo->IsInt() || DoAggressiveIntTypeSpec()) && src1ValueInfo->IsIntBounded() && src1ValueInfo->AsIntBounded()->Bounds()->AddCannotOverflowBasedOnRelativeBounds(1); }; if (Int32Math::Inc(min, &newMin)) { if(CannotOverflowBasedOnRelativeBounds()) { newMin = INT32_MAX; } else if(instr->ShouldCheckForIntOverflow()) { // Always overflows return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } else { // When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since overflow // causes the value to wrap around, and we don't have a way to specify a lower and upper range of ints, // we use the full range of int32s. ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if (Int32Math::Inc(max, &newMax)) { if(CannotOverflowBasedOnRelativeBounds()) { newMax = INT32_MAX; } else if(instr->ShouldCheckForIntOverflow()) { if(!DoAggressiveIntTypeSpec()) { // May overflow return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } bailOutKind |= IR::BailOutOnOverflow; newMax = INT32_MAX; } else { // See comment about ignoring overflow above ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } } while(false); if(!ignoredIntOverflow && instr->GetSrc1()->IsRegOpnd()) { addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min == max, 1); } opcode = Js::OpCode::Add_I4; if (!this->IsLoopPrePass()) { instr->SetSrc2(IR::IntConstOpnd::New(1, TyInt32, instr->m_func)); } checkTypeSpecWorth = true; break; case Js::OpCode::Decr_A: do // while(false) { const auto CannotOverflowBasedOnRelativeBounds = [&]() { const ValueInfo *const src1ValueInfo = src1Val->GetValueInfo(); return (src1ValueInfo->IsInt() || DoAggressiveIntTypeSpec()) && src1ValueInfo->IsIntBounded() && src1ValueInfo->AsIntBounded()->Bounds()->SubCannotOverflowBasedOnRelativeBounds(1); }; if (Int32Math::Dec(max, &newMax)) { if(CannotOverflowBasedOnRelativeBounds()) { newMax = INT32_MIN; } else if(instr->ShouldCheckForIntOverflow()) { // Always overflows return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } else { // When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since overflow // causes the value to wrap around, and we don't have a way to specify a lower and upper range of ints, we // use the full range of int32s. 
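                    // Worked example: decrementing the range [INT32_MIN, 5]. 5 - 1 == 4, but
                    // INT32_MIN - 1 wraps to INT32_MAX, so the exact result set is
                    // [INT32_MIN, 4] plus { INT32_MAX }, which no single contiguous range
                    // describes except [INT32_MIN, INT32_MAX].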
ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if (Int32Math::Dec(min, &newMin)) { if(CannotOverflowBasedOnRelativeBounds()) { newMin = INT32_MIN; } else if(instr->ShouldCheckForIntOverflow()) { if(!DoAggressiveIntTypeSpec()) { // May overflow return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } bailOutKind |= IR::BailOutOnOverflow; newMin = INT32_MIN; } else { // See comment about ignoring overflow above ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } } while(false); if(!ignoredIntOverflow && instr->GetSrc1()->IsRegOpnd()) { addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min == max, -1); } opcode = Js::OpCode::Sub_I4; if (!this->IsLoopPrePass()) { instr->SetSrc2(IR::IntConstOpnd::New(1, TyInt32, instr->m_func)); } checkTypeSpecWorth = true; break; case Js::OpCode::BrFalse_A: case Js::OpCode::BrTrue_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrFalse(instr, src1Val, min, max)) { return true; } bool specialize = true; if (!src1Val->GetValueInfo()->HasIntConstantValue() && instr->GetSrc1()->IsRegOpnd()) { StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsInt32TypeSpecialized(sym) == false) { // Type specializing a BrTrue_A/BrFalse_A isn't worth it, unless the src // is already type specialized specialize = false; } } if(instr->m_opcode == Js::OpCode::BrTrue_A) { UpdateIntBoundsForNotEqualBranch(src1Val, nullptr, 0); opcode = Js::OpCode::BrTrue_I4; } else { UpdateIntBoundsForEqualBranch(src1Val, nullptr, 0); opcode = Js::OpCode::BrFalse_I4; } if(!specialize) { return false; } newMin = 2; newMax = 1; // We'll assert if we make a range where min > max break; } case Js::OpCode::MultiBr: newMin = min; newMax = max; opcode = instr->m_opcode; break; case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: case Js::OpCode::StElemC: if(instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyAnyArrayWithNativeFloatValues()) { src1Val = src1OriginalVal; } return TypeSpecializeStElem(pInstr, src1Val, pDstVal); case Js::OpCode::NewScArray: case Js::OpCode::NewScArrayWithMissingValues: case Js::OpCode::InitFld: case Js::OpCode::InitRootFld: case Js::OpCode::StSlot: case Js::OpCode::StSlotChkUndecl: #if !FLOATVAR case Js::OpCode::StSlotBoxTemp: #endif case Js::OpCode::StFld: case Js::OpCode::StRootFld: case Js::OpCode::StFldStrict: case Js::OpCode::StRootFldStrict: case Js::OpCode::ArgOut_A: case Js::OpCode::ArgOut_A_Inline: case Js::OpCode::ArgOut_A_FixupForStackArgs: case Js::OpCode::ArgOut_A_Dynamic: case Js::OpCode::ArgOut_A_FromStackArgs: case Js::OpCode::ArgOut_A_SpreadArg: // For this one we need to implement type specialization //case Js::OpCode::ArgOut_A_InlineBuiltIn: case Js::OpCode::Ret: case Js::OpCode::LdElemUndef: case Js::OpCode::LdElemUndefScoped: return false; default: if (OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { newMin = min; newMax = max; opcode = instr->m_opcode; break; // Note: we must keep checkTypeSpecWorth = false to make sure we never return false from this function. 
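            // (The opcode itself is left unchanged here; the src is still converted to
            // int32 below, and the incoming [min, max] flows through as the dst range.)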
} return false; } // If this instruction is in a range of instructions where int overflow does not matter, we will still specialize it (won't // leave it unspecialized based on heuristics), since it is most likely worth specializing, and the dst value needs to be // guaranteed to be an int if(checkTypeSpecWorth && !ignoredIntOverflow && !ignoredNegativeZero && instr->ShouldCheckForIntOverflow() && !IsWorthSpecializingToInt32(instr, src1Val)) { // Even though type specialization is being skipped since it may not be worth it, the proper value should still be // maintained so that the result may be type specialized later. An int value is not created for the dst in any of // the following cases. // - A bailout check is necessary to specialize this instruction. The bailout check is what guarantees the result to be // an int, but since we're not going to specialize this instruction, there won't be a bailout check. // - Aggressive int type specialization is disabled and we're in a loop prepass. We're conservative on dst values in // that case, especially if the dst sym is live on the back-edge. if(bailOutKind == IR::BailOutInvalid && instr->GetDst() && (DoAggressiveIntTypeSpec() || !this->IsLoopPrePass())) { *pDstVal = CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, nullptr); } if(instr->GetSrc2()) { instr->FreeSrc2(); } return false; } this->ignoredIntOverflowForCurrentInstr = ignoredIntOverflow; this->ignoredNegativeZeroForCurrentInstr = ignoredNegativeZero; { // Try CSE again before modifying the IR, in case some attributes are required for successful CSE Value *src1IndirIndexVal = nullptr; Value *src2Val = nullptr; if(CSEOptimize(currentBlock, &instr, &src1Val, &src2Val, &src1IndirIndexVal, true /* intMathExprOnly */)) { *redoTypeSpecRef = true; return false; } } const Js::OpCode originalOpCode = instr->m_opcode; if (!this->IsLoopPrePass()) { // No re-write on prepass instr->m_opcode = opcode; } Value *src1ValueToSpecialize = src1Val; if(lossy) { // Lossy conversions to int32 must be done based on the original source values. For instance, if one of the values is a // float constant with a value that fits in a uint32 but not an int32, and the instruction can ignore int overflow, the // source value for the purposes of int specialization would have been changed to an int constant value by ignoring // overflow. If we were to specialize the sym using the int constant value, it would be treated as a lossless // conversion, but since there may be subsequent uses of the same float constant value that may not ignore overflow, // this must be treated as a lossy conversion by specializing the sym using the original float constant value. src1ValueToSpecialize = src1OriginalVal; } // Make sure the srcs are specialized IR::Opnd *src1 = instr->GetSrc1(); this->ToInt32(instr, src1, this->currentBlock, src1ValueToSpecialize, nullptr, lossy); if(bailOutKind != IR::BailOutInvalid && !this->IsLoopPrePass()) { GenerateBailAtOperation(&instr, bailOutKind); } if (!skipDst) { IR::Opnd *dst = instr->GetDst(); if (dst) { AssertMsg(!(isTransfer && !this->IsLoopPrePass()) || min == newMin && max == newMax, "If this is just a copy, old/new min/max should be the same"); TypeSpecializeIntDst( instr, originalOpCode, isTransfer ? src1Val : nullptr, src1Val, nullptr, bailOutKind, newMin, newMax, pDstVal, addSubConstantInfo.HasInfo() ? 
&addSubConstantInfo : nullptr); } } if(bailOutKind == IR::BailOutInvalid) { GOPT_TRACE(_u("Type specialized to INT\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT: ")); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } else { GOPT_TRACE(_u("Type specialized to INT with bailout on:\n")); if(bailOutKind & IR::BailOutOnOverflow) { GOPT_TRACE(_u(" Overflow\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT with bailout (%S): "), "Overflow"); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } if(bailOutKind & IR::BailOutOnNegativeZero) { GOPT_TRACE(_u(" Zero\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT with bailout (%S): "), "Zero"); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } } return true; } void GlobOpt::TypeSpecializeIntDst(IR::Instr* instr, Js::OpCode originalOpCode, Value* valToTransfer, Value *const src1Value, Value *const src2Value, const IR::BailOutKind bailOutKind, int32 newMin, int32 newMax, Value** pDstVal, const AddSubConstantInfo *const addSubConstantInfo) { this->TypeSpecializeIntDst(instr, originalOpCode, valToTransfer, src1Value, src2Value, bailOutKind, ValueType::GetInt(IntConstantBounds(newMin, newMax).IsLikelyTaggable()), newMin, newMax, pDstVal, addSubConstantInfo); } void GlobOpt::TypeSpecializeIntDst(IR::Instr* instr, Js::OpCode originalOpCode, Value* valToTransfer, Value *const src1Value, Value *const src2Value, const IR::BailOutKind bailOutKind, ValueType valueType, Value** pDstVal, const AddSubConstantInfo *const addSubConstantInfo) { this->TypeSpecializeIntDst(instr, originalOpCode, valToTransfer, src1Value, src2Value, bailOutKind, valueType, 0, 0, pDstVal, addSubConstantInfo); } void GlobOpt::TypeSpecializeIntDst(IR::Instr* instr, Js::OpCode originalOpCode, Value* valToTransfer, Value *const src1Value, Value *const src2Value, const IR::BailOutKind bailOutKind, ValueType valueType, int32 newMin, int32 newMax, Value** pDstVal, const AddSubConstantInfo *const addSubConstantInfo) { Assert(valueType.IsInt() || (valueType.IsNumber() && valueType.IsLikelyInt() && newMin == 0 && newMax == 0)); Assert(!valToTransfer || valToTransfer == src1Value); Assert(!addSubConstantInfo || addSubConstantInfo->HasInfo()); IR::Opnd *dst = instr->GetDst(); Assert(dst); bool isValueInfoPrecise; if(IsLoopPrePass()) { valueType = GetPrepassValueTypeForDst(valueType, instr, src1Value, src2Value, &isValueInfoPrecise); } else { isValueInfoPrecise = true; } // If dst has a circular reference in a loop, it probably won't get specialized. Don't mark the dst as type-specialized on // the pre-pass. With aggressive int spec though, it will take care of bailing out if necessary so there's no need to assume // that the dst will be a var even if it's live on the back-edge. Also if the op always produces an int32, then there's no // ambiguity in the dst's value type even in the prepass. 
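    // For example, during the prepass of a loop containing x = x + 1 with aggressive int
    // spec disabled, x may still be a var on the back edge; in that case we only mark the
    // var dst live below instead of committing it to an int32 sym.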
if (!DoAggressiveIntTypeSpec() && this->IsLoopPrePass() && !valueType.IsInt()) { if (dst->IsRegOpnd()) { this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock); } return; } const IntBounds *dstBounds = nullptr; if(addSubConstantInfo && !addSubConstantInfo->SrcValueIsLikelyConstant() && DoTrackRelativeIntBounds()) { Assert(!ignoredIntOverflowForCurrentInstr); // Track bounds for add or sub with a constant. For instance, consider (b = a + 2). The value of 'b' should track that // it is equal to (the value of 'a') + 2. Additionally, the value of 'b' should inherit the bounds of 'a', offset by // the constant value. if(!valueType.IsInt() || !isValueInfoPrecise) { newMin = INT32_MIN; newMax = INT32_MAX; } dstBounds = IntBounds::Add( addSubConstantInfo->SrcValue(), addSubConstantInfo->Offset(), isValueInfoPrecise, IntConstantBounds(newMin, newMax), alloc); } // Src1's value could change later in the loop, so the value wouldn't be the same for each // iteration. Since we don't iterate over loops "while (!changed)", go conservative on the // pre-pass. if (valToTransfer) { // If this is just a copy, no need for creating a new value. Assert(!addSubConstantInfo); *pDstVal = this->ValueNumberTransferDst(instr, valToTransfer); CurrentBlockData()->InsertNewValue(*pDstVal, dst); } else if (valueType.IsInt() && isValueInfoPrecise) { bool wasNegativeZeroPreventedByBailout = false; if(newMin <= 0 && newMax >= 0) { switch(originalOpCode) { case Js::OpCode::Add_A: // -0 + -0 == -0 Assert(src1Value); Assert(src2Value); wasNegativeZeroPreventedByBailout = src1Value->GetValueInfo()->WasNegativeZeroPreventedByBailout() && src2Value->GetValueInfo()->WasNegativeZeroPreventedByBailout(); break; case Js::OpCode::Sub_A: // -0 - 0 == -0 Assert(src1Value); wasNegativeZeroPreventedByBailout = src1Value->GetValueInfo()->WasNegativeZeroPreventedByBailout(); break; case Js::OpCode::Neg_A: case Js::OpCode::Mul_A: case Js::OpCode::Div_A: case Js::OpCode::Rem_A: wasNegativeZeroPreventedByBailout = !!(bailOutKind & IR::BailOutOnNegativeZero); break; } } *pDstVal = dstBounds ? NewIntBoundedValue(valueType, dstBounds, wasNegativeZeroPreventedByBailout, nullptr) : NewIntRangeValue(newMin, newMax, wasNegativeZeroPreventedByBailout, nullptr); } else { *pDstVal = dstBounds ? 
NewIntBoundedValue(valueType, dstBounds, false, nullptr) : NewGenericValue(valueType); } if(addSubConstantInfo || updateInductionVariableValueNumber) { TrackIntSpecializedAddSubConstant(instr, addSubConstantInfo, *pDstVal, !!dstBounds); } CurrentBlockData()->SetValue(*pDstVal, dst); AssertMsg(dst->IsRegOpnd(), "What else?"); this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock); } bool GlobOpt::TypeSpecializeBinary(IR::Instr **pInstr, Value **pSrc1Val, Value **pSrc2Val, Value **pDstVal, Value *const src1OriginalVal, Value *const src2OriginalVal, bool *redoTypeSpecRef) { IR::Instr *&instr = *pInstr; int32 min1 = INT32_MIN, max1 = INT32_MAX, min2 = INT32_MIN, max2 = INT32_MAX, newMin, newMax, tmp; Js::OpCode opcode; Value *&src1Val = *pSrc1Val; Value *&src2Val = *pSrc2Val; // We don't need to do typespec for asmjs if (IsTypeSpecPhaseOff(this->func) || GetIsAsmJSFunc()) { return false; } if (OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { this->TypeSpecializeInlineBuiltInBinary(pInstr, src1Val, src2Val, pDstVal, src1OriginalVal, src2OriginalVal); return true; } if (src1Val) { src1Val->GetValueInfo()->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec()); } if (src2Val) { src2Val->GetValueInfo()->GetIntValMinMax(&min2, &max2, this->DoAggressiveIntTypeSpec()); } // Type specialize binary operators to int32 bool src1Lossy = true; bool src2Lossy = true; IR::BailOutKind bailOutKind = IR::BailOutInvalid; bool ignoredIntOverflow = this->ignoredIntOverflowForCurrentInstr; bool ignoredNegativeZero = false; bool skipSrc2 = false; bool skipDst = false; bool needsBoolConv = false; AddSubConstantInfo addSubConstantInfo; switch (instr->m_opcode) { case Js::OpCode::Or_A: if (!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::Or_I4; break; case Js::OpCode::And_A: if (!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::And_I4; break; case Js::OpCode::Xor_A: if (!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::Xor_I4; break; case Js::OpCode::Shl_A: if (!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::Shl_I4; break; case Js::OpCode::Shr_A: if (!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::Shr_I4; break; case Js::OpCode::ShrU_A: if (!DoLossyIntTypeSpec()) { return false; } if (min1 < 0 && IntConstantBounds(min2, max2).And_0x1f().Contains(0)) { // Src1 may be too large to represent as a signed int32, and src2 may be zero. Unless the resulting value is only // used as a signed int32 (hence allowing us to ignore the result's sign), don't specialize the instruction. if (!instr->ignoreIntOverflow) return false; ignoredIntOverflow = true; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::ShrU_I4; break; case Js::OpCode::BrUnLe_A: // Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely // int already, so require that both sources are likely int for folding. 
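        // For instance (illustrative numbers): a signed range maps to an unsigned range
        // as follows -- [5, 10] stays [5, 10], [-2, -1] becomes [0xFFFFFFFE, 0xFFFFFFFF],
        // and a sign-crossing range like [-1, 1] widens to [0, 0xFFFFFFFF]. When the two
        // unsigned ranges are disjoint (say src1 in [5, 10] and src2 in [20, 30]), the
        // fold below resolves the branch without specializing anything.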
if (DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrUnsignedGreaterThan(instr, false, src1Val, min1, max1, src2Val, min2, max2)) { return true; } if (min1 >= 0 && min2 >= 0) { // Only handle positive values since this is unsigned... // Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults // (INT32_MIN, INT32_MAX), so we're good. Assert(src1Val); Assert(src1Val->GetValueInfo()->IsLikelyInt()); Assert(src2Val); Assert(src2Val->GetValueInfo()->IsLikelyInt()); UpdateIntBoundsForLessThanOrEqualBranch(src1Val, src2Val); } if (!DoLossyIntTypeSpec()) { return false; } newMin = newMax = 0; opcode = Js::OpCode::BrUnLe_I4; break; case Js::OpCode::BrUnLt_A: // Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely // int already, so require that both sources are likely int for folding. if (DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrUnsignedLessThan(instr, true, src1Val, min1, max1, src2Val, min2, max2)) { return true; } if (min1 >= 0 && min2 >= 0) { // Only handle positive values since this is unsigned... // Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults // (INT32_MIN, INT32_MAX), so we're good. Assert(src1Val); Assert(src1Val->GetValueInfo()->IsLikelyInt()); Assert(src2Val); Assert(src2Val->GetValueInfo()->IsLikelyInt()); UpdateIntBoundsForLessThanBranch(src1Val, src2Val); } if (!DoLossyIntTypeSpec()) { return false; } newMin = newMax = 0; opcode = Js::OpCode::BrUnLt_I4; break; case Js::OpCode::BrUnGe_A: // Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely // int already, so require that both sources are likely int for folding. if (DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrUnsignedLessThan(instr, false, src1Val, min1, max1, src2Val, min2, max2)) { return true; } if (min1 >= 0 && min2 >= 0) { // Only handle positive values since this is unsigned... // Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults // (INT32_MIN, INT32_MAX), so we're good. Assert(src1Val); Assert(src1Val->GetValueInfo()->IsLikelyInt()); Assert(src2Val); Assert(src2Val->GetValueInfo()->IsLikelyInt()); UpdateIntBoundsForGreaterThanOrEqualBranch(src1Val, src2Val); } if (!DoLossyIntTypeSpec()) { return false; } newMin = newMax = 0; opcode = Js::OpCode::BrUnGe_I4; break; case Js::OpCode::BrUnGt_A: // Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely // int already, so require that both sources are likely int for folding. if (DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrUnsignedGreaterThan(instr, true, src1Val, min1, max1, src2Val, min2, max2)) { return true; } if (min1 >= 0 && min2 >= 0) { // Only handle positive values since this is unsigned... // Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults // (INT32_MIN, INT32_MAX), so we're good. 
Assert(src1Val); Assert(src1Val->GetValueInfo()->IsLikelyInt()); Assert(src2Val); Assert(src2Val->GetValueInfo()->IsLikelyInt()); UpdateIntBoundsForGreaterThanBranch(src1Val, src2Val); } if (!DoLossyIntTypeSpec()) { return false; } newMin = newMax = 0; opcode = Js::OpCode::BrUnGt_I4; break; case Js::OpCode::CmUnLe_A: if (!DoLossyIntTypeSpec()) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmUnLe_I4; needsBoolConv = true; break; case Js::OpCode::CmUnLt_A: if (!DoLossyIntTypeSpec()) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmUnLt_I4; needsBoolConv = true; break; case Js::OpCode::CmUnGe_A: if (!DoLossyIntTypeSpec()) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmUnGe_I4; needsBoolConv = true; break; case Js::OpCode::CmUnGt_A: if (!DoLossyIntTypeSpec()) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmUnGt_I4; needsBoolConv = true; break; case Js::OpCode::Expo_A: { src1Val = src1OriginalVal; src2Val = src2OriginalVal; return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); } case Js::OpCode::Div_A: { ValueType specializedValueType = GetDivValueType(instr, src1Val, src2Val, true); if (specializedValueType.IsFloat()) { // Either result is float or 1/x or cst1/cst2 where cst1%cst2 != 0 // Note: We should really constant fold cst1%cst2... src1Val = src1OriginalVal; src2Val = src2OriginalVal; return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); } #ifdef _M_ARM if (!AutoSystemInfo::Data.ArmDivAvailable()) { return false; } #endif if (specializedValueType.IsInt()) { if (max2 == 0x80000000 || (min2 == 0 && max2 == 00)) { return false; } if (min1 == 0x80000000 && min2 <= -1 && max2 >= -1) { // Prevent integer overflow, as div by zero or MIN_INT / -1 will throw an exception // Or we know we are dividing by zero (which is weird to have because the profile data // say we got an int) bailOutKind = IR::BailOutOnDivOfMinInt; } src1Lossy = false; // Detect -0 on the sources src2Lossy = false; opcode = Js::OpCode::Div_I4; Assert(!instr->GetSrc1()->IsUnsigned()); bailOutKind |= IR::BailOnDivResultNotInt; if (max2 >= 0 && min2 <= 0) { // Need to check for divide by zero if the denominator range includes 0 bailOutKind |= IR::BailOutOnDivByZero; } if (max1 >= 0 && min1 <= 0) { // Numerator contains 0 so the result contains 0 newMin = 0; newMax = 0; if (min2 < 0) { // Denominator may be negative, so the result could be negative 0 if (instr->ShouldCheckForNegativeZero()) { bailOutKind |= IR::BailOutOnNegativeZero; } else { ignoredNegativeZero = true; } } } else { // Initialize to invalid value, one of the condition below will update it correctly newMin = INT_MAX; newMax = INT_MIN; } // Deal with the positive and negative range separately for both the numerator and the denominator, // and integrate to the overall min and max. 
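            // For instance (illustrative): with a numerator in [-6, 100] and a denominator
            // in [2, 5], the positive/positive quadrant contributes [1, 100/2] = [1, 50],
            // the negative/positive quadrant contributes [-6/2, -1] = [-3, -1], and since
            // the numerator range contains 0 the result also includes 0, for a final dst
            // range of [-3, 50].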
            // If the result is positive (positive/positive or negative/negative):
            //     The min should be:  the smallest magnitude numerator   (positive_Min1 | negative_Max1)
            //                         ---------------------------------------------------------------
            //                         the largest magnitude denominator  (positive_Max2 | negative_Min2)
            //
            //     The max should be:  the largest magnitude numerator    (positive_Max1 | negative_Min1)
            //                         ---------------------------------------------------------------
            //                         the smallest magnitude denominator (positive_Min2 | negative_Max2)
            //
            // If the result is negative (positive/negative or negative/positive):
            //     The min should be:  the largest magnitude numerator    (positive_Max1 | negative_Min1)
            //                         ---------------------------------------------------------------
            //                         the smallest magnitude denominator (negative_Max2 | positive_Min2)
            //
            //     The max should be:  the smallest magnitude numerator   (positive_Min1 | negative_Max1)
            //                         ---------------------------------------------------------------
            //                         the largest magnitude denominator  (negative_Min2 | positive_Max2)
            //
            // Consider: The range can be slightly more precise if we take care of the rounding
            if (max1 > 0)
            {
                // Take only the positive numerator range
                int32 positive_Min1 = max(1, min1);
                int32 positive_Max1 = max1;
                if (max2 > 0)
                {
                    // Take only the positive denominator range
                    int32 positive_Min2 = max(1, min2);
                    int32 positive_Max2 = max2;

                    // Positive / Positive
                    int32 quadrant1_Min = positive_Min1 <= positive_Max2 ? 1 : positive_Min1 / positive_Max2;
                    int32 quadrant1_Max = positive_Max1 <= positive_Min2 ? 1 : positive_Max1 / positive_Min2;

                    Assert(1 <= quadrant1_Min && quadrant1_Min <= quadrant1_Max);

                    // The result should be positive
                    newMin = min(newMin, quadrant1_Min);
                    newMax = max(newMax, quadrant1_Max);
                }
                if (min2 < 0)
                {
                    // Take only the negative denominator range
                    int32 negative_Min2 = min2;
                    int32 negative_Max2 = min(-1, max2);

                    // Positive / Negative
                    int32 quadrant2_Min = -positive_Max1 >= negative_Max2 ? -1 : positive_Max1 / negative_Max2;
                    int32 quadrant2_Max = -positive_Min1 >= negative_Min2 ? -1 : positive_Min1 / negative_Min2;

                    // The result should be negative
                    Assert(quadrant2_Min <= quadrant2_Max && quadrant2_Max <= -1);

                    newMin = min(newMin, quadrant2_Min);
                    newMax = max(newMax, quadrant2_Max);
                }
            }
            if (min1 < 0)
            {
                // Take only the negative numerator range
                int32 negative_Min1 = min1;
                int32 negative_Max1 = min(-1, max1);
                if (max2 > 0)
                {
                    // Take only the positive denominator range
                    int32 positive_Min2 = max(1, min2);
                    int32 positive_Max2 = max2;

                    // Negative / Positive
                    int32 quadrant4_Min = negative_Min1 >= -positive_Min2 ? -1 : negative_Min1 / positive_Min2;
                    int32 quadrant4_Max = negative_Max1 >= -positive_Max2 ? -1 : negative_Max1 / positive_Max2;

                    // The result should be negative
                    Assert(quadrant4_Min <= quadrant4_Max && quadrant4_Max <= -1);

                    newMin = min(newMin, quadrant4_Min);
                    newMax = max(newMax, quadrant4_Max);
                }
                if (min2 < 0)
                {
                    // Take only the negative denominator range
                    int32 negative_Min2 = min2;
                    int32 negative_Max2 = min(-1, max2);

                    int32 quadrant3_Min;
                    int32 quadrant3_Max;

                    // Negative / Negative
                    if (negative_Max1 == 0x80000000 && negative_Min2 == -1)
                    {
                        quadrant3_Min = negative_Max1 >= negative_Min2 ? 1 : (negative_Max1 + 1) / negative_Min2;
                    }
                    else
                    {
                        quadrant3_Min = negative_Max1 >= negative_Min2 ? 1 : negative_Max1 / negative_Min2;
                    }
                    if (negative_Min1 == 0x80000000 && negative_Max2 == -1)
                    {
                        quadrant3_Max = negative_Min1 >= negative_Max2 ? 1 : (negative_Min1 + 1) / negative_Max2;
                    }
                    else
                    {
                        quadrant3_Max = negative_Min1 >= negative_Max2 ?
1 : negative_Min1 / negative_Max2; } // The result should positive Assert(1 <= quadrant3_Min && quadrant3_Min <= quadrant3_Max); newMin = min(newMin, quadrant3_Min); newMax = max(newMax, quadrant3_Max); } } Assert(newMin <= newMax); // Continue to int type spec break; } } // fall-through default: { const bool involesLargeInt32 = (src1Val && src1Val->GetValueInfo()->IsLikelyUntaggedInt()) || (src2Val && src2Val->GetValueInfo()->IsLikelyUntaggedInt()); const auto trySpecializeToFloat = [&](const bool mayOverflow) -> bool { // It has been determined that this instruction cannot be int-specialized. Need to determine whether to attempt // to float-specialize the instruction, or leave it unspecialized. if((involesLargeInt32 #if INT32VAR && mayOverflow #endif ) || (instr->m_opcode == Js::OpCode::Mul_A && !this->DoAggressiveMulIntTypeSpec()) ) { // An input range is completely outside the range of an int31 and the operation is likely to overflow. // Additionally, on 32-bit platforms, the value is untaggable and will be a JavascriptNumber, which is // significantly slower to use in an unspecialized operation compared to a tagged int. So, try to // float-specialize the instruction. src1Val = src1OriginalVal; src2Val = src2OriginalVal; return TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); } return false; }; if (instr->m_opcode != Js::OpCode::ArgOut_A_InlineBuiltIn) { if ((src1Val && src1Val->GetValueInfo()->IsLikelyFloat()) || (src2Val && src2Val->GetValueInfo()->IsLikelyFloat())) { // Try to type specialize to float src1Val = src1OriginalVal; src2Val = src2OriginalVal; return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); } if (src1Val == nullptr || src2Val == nullptr || !src1Val->GetValueInfo()->IsLikelyInt() || !src2Val->GetValueInfo()->IsLikelyInt() || ( !DoAggressiveIntTypeSpec() && ( !(src1Val->GetValueInfo()->IsInt() || CurrentBlockData()->IsSwitchInt32TypeSpecialized(instr)) || !src2Val->GetValueInfo()->IsInt() ) ) || (instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt) || (instr->GetSrc2()->IsRegOpnd() && instr->GetSrc2()->AsRegOpnd()->m_sym->m_isNotInt)) { return trySpecializeToFloat(true); } } // Try to type specialize to int32 // If one of the values is a float constant with a value that fits in a uint32 but not an int32, // and the instruction can ignore int overflow, the source value for the purposes of int specialization // would have been changed to an int constant value by ignoring overflow. But, the conversion is still lossy. if (!(src1OriginalVal && src1OriginalVal->GetValueInfo()->IsFloatConstant() && src1Val && src1Val->GetValueInfo()->HasIntConstantValue())) { src1Lossy = false; } if (!(src2OriginalVal && src2OriginalVal->GetValueInfo()->IsFloatConstant() && src2Val && src2Val->GetValueInfo()->HasIntConstantValue())) { src2Lossy = false; } switch(instr->m_opcode) { case Js::OpCode::ArgOut_A_InlineBuiltIn: // If the src is already type-specialized, if we don't type-specialize ArgOut_A_InlineBuiltIn instr, we'll get additional ToVar. // So, to avoid that, type-specialize the ArgOut_A_InlineBuiltIn instr. // Else we don't need to type-specialize the instr, we are fine with src being Var. if (instr->GetSrc1()->IsRegOpnd()) { StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsInt32TypeSpecialized(sym)) { opcode = instr->m_opcode; skipDst = true; // We should keep dst as is, otherwise the link opnd for next ArgOut/InlineBuiltInStart would be broken. skipSrc2 = true; // src2 is linkOpnd. 
We don't need to type-specialize it. newMin = min1; newMax = max1; // Values don't matter, these are unused. goto LOutsideSwitch; // Continue to int-type-specialize. } else if (CurrentBlockData()->IsFloat64TypeSpecialized(sym)) { src1Val = src1OriginalVal; src2Val = src2OriginalVal; return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); } #ifdef ENABLE_SIMDJS else if (CurrentBlockData()->IsSimd128F4TypeSpecialized(sym)) { // SIMD_JS // We should be already using the SIMD type-spec sym. See TypeSpecializeSimd128. Assert(IRType_IsSimd128(instr->GetSrc1()->GetType())); } #endif } return false; case Js::OpCode::Add_A: do // while(false) { const auto CannotOverflowBasedOnRelativeBounds = [&](int32 *const constantValueRef) { Assert(constantValueRef); if(min2 == max2 && src1Val->GetValueInfo()->IsIntBounded() && src1Val->GetValueInfo()->AsIntBounded()->Bounds()->AddCannotOverflowBasedOnRelativeBounds(min2)) { *constantValueRef = min2; return true; } else if( min1 == max1 && src2Val->GetValueInfo()->IsIntBounded() && src2Val->GetValueInfo()->AsIntBounded()->Bounds()->AddCannotOverflowBasedOnRelativeBounds(min1)) { *constantValueRef = min1; return true; } return false; }; if (Int32Math::Add(min1, min2, &newMin)) { int32 constantSrcValue; if(CannotOverflowBasedOnRelativeBounds(&constantSrcValue)) { newMin = constantSrcValue >= 0 ? INT32_MAX : INT32_MIN; } else if(instr->ShouldCheckForIntOverflow()) { if(involesLargeInt32 || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnOverflow; newMin = min1 < 0 ? INT32_MIN : INT32_MAX; } else { // When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since // overflow causes the value to wrap around, and we don't have a way to specify a lower and upper // range of ints, we use the full range of int32s. ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if (Int32Math::Add(max1, max2, &newMax)) { int32 constantSrcValue; if(CannotOverflowBasedOnRelativeBounds(&constantSrcValue)) { newMax = constantSrcValue >= 0 ? INT32_MAX : INT32_MIN; } else if(instr->ShouldCheckForIntOverflow()) { if(involesLargeInt32 || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnOverflow; newMax = max1 < 0 ? INT32_MIN : INT32_MAX; } else { // See comment about ignoring overflow above ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if(bailOutKind & IR::BailOutOnOverflow) { Assert(bailOutKind == IR::BailOutOnOverflow); Assert(instr->ShouldCheckForIntOverflow()); int32 temp; if(Int32Math::Add( Int32Math::NearestInRangeTo(0, min1, max1), Int32Math::NearestInRangeTo(0, min2, max2), &temp)) { // Always overflows return trySpecializeToFloat(true); } } } while(false); if (!this->IsLoopPrePass() && newMin == newMax && bailOutKind == IR::BailOutInvalid) { // Take care of Add with zero here, since we know we're dealing with 2 numbers. 
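                // For instance (illustrative): if src2Val is the int constant 0,
                // "d = Add_A a, 0" is rewritten below into the transfer "d = Ld_A a" and
                // the original Add becomes a Nop; the symmetric case where src1 is 0
                // keeps src2 instead.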
this->CaptureByteCodeSymUses(instr); IR::Opnd *src; bool isAddZero = true; int32 intConstantValue; if (src1Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue) && intConstantValue == 0) { src = instr->UnlinkSrc2(); instr->FreeSrc1(); } else if (src2Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue) && intConstantValue == 0) { src = instr->UnlinkSrc1(); instr->FreeSrc2(); } else { // This should have been handled by const folding, unless: // - A source's value was substituted with a different value here, which is after const folding happened // - A value is not definitely int, but once converted to definite int, it would be zero due to a // condition in the source code such as if(a === 0). Ideally, we would specialize the sources and // remove the add, but doesn't seem too important for now. Assert( !DoConstFold() || src1Val != src1OriginalVal || src2Val != src2OriginalVal || !src1Val->GetValueInfo()->IsInt() || !src2Val->GetValueInfo()->IsInt()); isAddZero = false; src = nullptr; } if (isAddZero) { IR::Instr *newInstr = IR::Instr::New(Js::OpCode::Ld_A, instr->UnlinkDst(), src, instr->m_func); newInstr->SetByteCodeOffset(instr); instr->m_opcode = Js::OpCode::Nop; this->currentBlock->InsertInstrAfter(newInstr, instr); return true; } } if(!ignoredIntOverflow) { if(min2 == max2 && (!IsLoopPrePass() || IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Val)) && instr->GetSrc1()->IsRegOpnd()) { addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min1 == max1, min2); } else if( min1 == max1 && (!IsLoopPrePass() || IsPrepassSrcValueInfoPrecise(instr->GetSrc1(), src1Val)) && instr->GetSrc2()->IsRegOpnd()) { addSubConstantInfo.Set(instr->GetSrc2()->AsRegOpnd()->m_sym, src2Val, min2 == max2, min1); } } opcode = Js::OpCode::Add_I4; break; case Js::OpCode::Sub_A: do // while(false) { const auto CannotOverflowBasedOnRelativeBounds = [&]() { return min2 == max2 && src1Val->GetValueInfo()->IsIntBounded() && src1Val->GetValueInfo()->AsIntBounded()->Bounds()->SubCannotOverflowBasedOnRelativeBounds(min2); }; if (Int32Math::Sub(min1, max2, &newMin)) { if(CannotOverflowBasedOnRelativeBounds()) { Assert(min2 == max2); newMin = min2 >= 0 ? INT32_MIN : INT32_MAX; } else if(instr->ShouldCheckForIntOverflow()) { if(involesLargeInt32 || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnOverflow; newMin = min1 < 0 ? INT32_MIN : INT32_MAX; } else { // When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since overflow // causes the value to wrap around, and we don't have a way to specify a lower and upper range of ints, // we use the full range of int32s. ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if (Int32Math::Sub(max1, min2, &newMax)) { if(CannotOverflowBasedOnRelativeBounds()) { Assert(min2 == max2); newMax = min2 >= 0 ? INT32_MIN: INT32_MAX; } else if(instr->ShouldCheckForIntOverflow()) { if(involesLargeInt32 || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnOverflow; newMax = max1 < 0 ? 
INT32_MIN : INT32_MAX; } else { // See comment about ignoring overflow above ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if(bailOutKind & IR::BailOutOnOverflow) { Assert(bailOutKind == IR::BailOutOnOverflow); Assert(instr->ShouldCheckForIntOverflow()); int32 temp; if(Int32Math::Sub( Int32Math::NearestInRangeTo(-1, min1, max1), Int32Math::NearestInRangeTo(0, min2, max2), &temp)) { // Always overflows return trySpecializeToFloat(true); } } } while(false); if(!ignoredIntOverflow && min2 == max2 && min2 != INT32_MIN && (!IsLoopPrePass() || IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Val)) && instr->GetSrc1()->IsRegOpnd()) { addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min1 == max1, -min2); } opcode = Js::OpCode::Sub_I4; break; case Js::OpCode::Mul_A: { if (Int32Math::Mul(min1, min2, &newMin)) { if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnMulOverflow; newMin = (min1 < 0) ^ (min2 < 0) ? INT32_MIN : INT32_MAX; } newMax = newMin; if (Int32Math::Mul(max1, max2, &tmp)) { if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnMulOverflow; tmp = (max1 < 0) ^ (max2 < 0) ? INT32_MIN : INT32_MAX; } newMin = min(newMin, tmp); newMax = max(newMax, tmp); if (Int32Math::Mul(min1, max2, &tmp)) { if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnMulOverflow; tmp = (min1 < 0) ^ (max2 < 0) ? INT32_MIN : INT32_MAX; } newMin = min(newMin, tmp); newMax = max(newMax, tmp); if (Int32Math::Mul(max1, min2, &tmp)) { if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnMulOverflow; tmp = (max1 < 0) ^ (min2 < 0) ? INT32_MIN : INT32_MAX; } newMin = min(newMin, tmp); newMax = max(newMax, tmp); if (bailOutKind & IR::BailOutOnMulOverflow) { // CSE only if two MULs have the same overflow check behavior. // Currently this is set to be ignore int32 overflow, but not 53-bit, or int32 overflow matters. 
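                // For instance (illustrative): with instr->ignoreOverflowBitCount == 53
                // (the width of a double's significand), a corner product that still fits
                // in 53 bits passes the shift-based high-bit check below even though it
                // overflows int32, while one that cannot fit in 53 bits is treated as
                // always overflowing and the instruction is float-specialized instead.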
if (!instr->ShouldCheckFor32BitOverflow() && instr->ShouldCheckForNon32BitOverflow()) { // If we allow int to overflow then there can be anything in the resulting int newMin = IntConstMin; newMax = IntConstMax; ignoredIntOverflow = true; } int32 temp, overflowValue; if (Int32Math::Mul( Int32Math::NearestInRangeTo(0, min1, max1), Int32Math::NearestInRangeTo(0, min2, max2), &temp, &overflowValue)) { Assert(instr->ignoreOverflowBitCount >= 32); int overflowMatters = 64 - instr->ignoreOverflowBitCount; if (!ignoredIntOverflow || // Use shift to check high bits in case its negative ((overflowValue << overflowMatters) >> overflowMatters) != overflowValue ) { // Always overflows return trySpecializeToFloat(true); } } } if (newMin <= 0 && newMax >= 0 && // New range crosses zero (min1 < 0 || min2 < 0) && // An operand's range contains a negative integer !(min1 > 0 || min2 > 0) && // Neither operand's range contains only positive integers !instr->GetSrc1()->IsEqual(instr->GetSrc2())) // The operands don't have the same value { if (instr->ShouldCheckForNegativeZero()) { // -0 matters since the sym is not a local, or is used in a way in which -0 would differ from +0 if (!DoAggressiveIntTypeSpec()) { // May result in -0 return trySpecializeToFloat(false); } if (((min1 == 0 && max1 == 0) || (min2 == 0 && max2 == 0)) && (max1 < 0 || max2 < 0)) { // Always results in -0 return trySpecializeToFloat(false); } bailOutKind |= IR::BailOutOnNegativeZero; } else { ignoredNegativeZero = true; } } opcode = Js::OpCode::Mul_I4; break; } case Js::OpCode::Rem_A: { IR::Opnd* src2 = instr->GetSrc2(); if (!this->IsLoopPrePass() && min2 == max2 && min1 >= 0) { int32 value = min2; if (value == (1 << Math::Log2(value)) && src2->IsAddrOpnd()) { Assert(src2->AsAddrOpnd()->IsVar()); instr->m_opcode = Js::OpCode::And_A; src2->AsAddrOpnd()->SetAddress(Js::TaggedInt::ToVarUnchecked(value - 1), IR::AddrOpndKindConstantVar); *pSrc2Val = GetIntConstantValue(value - 1, instr); src2Val = *pSrc2Val; return this->TypeSpecializeBinary(&instr, pSrc1Val, pSrc2Val, pDstVal, src1OriginalVal, src2Val, redoTypeSpecRef); } } #ifdef _M_ARM if (!AutoSystemInfo::Data.ArmDivAvailable()) { return false; } #endif if (min1 < 0) { // The most negative it can be is min1, unless limited by min2/max2 int32 negMaxAbs2; if (min2 == INT32_MIN) { negMaxAbs2 = INT32_MIN; } else { negMaxAbs2 = -max(abs(min2), abs(max2)) + 1; } newMin = max(min1, negMaxAbs2); } else { newMin = 0; } bool isModByPowerOf2 = (instr->IsProfiledInstr() && instr->m_func->HasProfileInfo() && instr->m_func->GetReadOnlyProfileInfo()->IsModulusOpByPowerOf2(static_cast<Js::ProfileId>(instr->AsProfiledInstr()->u.profileId))); if(isModByPowerOf2) { Assert(bailOutKind == IR::BailOutInvalid); bailOutKind = IR::BailOnModByPowerOf2; newMin = 0; } else { if (min2 <= 0 && max2 >= 0) { // Consider: We could handle the zero case with a check and bailout... 
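                    // For instance (illustrative): if src2's known range is [-1, 1], the
                    // denominator may be 0 at runtime; unlike the Div_I4 path above, which
                    // can attach IR::BailOutOnDivByZero, no such bailout is wired up for
                    // Rem here, so we decline to int-specialize.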
return false; } if (min1 == 0x80000000 && (min2 <= -1 && max2 >= -1)) { // Prevent integer overflow, as div by zero or MIN_INT / -1 will throw an exception return false; } if (min1 < 0) { if(instr->ShouldCheckForNegativeZero()) { if (!DoAggressiveIntTypeSpec()) { return false; } bailOutKind |= IR::BailOutOnNegativeZero; } else { ignoredNegativeZero = true; } } } { int32 absMax2; if (min2 == INT32_MIN) { // abs(INT32_MIN) == INT32_MAX because of overflow absMax2 = INT32_MAX; } else { absMax2 = max(abs(min2), abs(max2)) - 1; } newMax = min(absMax2, max(max1, 0)); newMax = max(newMin, newMax); } opcode = Js::OpCode::Rem_I4; Assert(!instr->GetSrc1()->IsUnsigned()); break; } case Js::OpCode::CmEq_A: case Js::OpCode::CmSrEq_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmEq_I4; needsBoolConv = true; break; case Js::OpCode::CmNeq_A: case Js::OpCode::CmSrNeq_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmNeq_I4; needsBoolConv = true; break; case Js::OpCode::CmLe_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmLe_I4; needsBoolConv = true; break; case Js::OpCode::CmLt_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmLt_I4; needsBoolConv = true; break; case Js::OpCode::CmGe_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmGe_I4; needsBoolConv = true; break; case Js::OpCode::CmGt_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmGt_I4; needsBoolConv = true; break; case Js::OpCode::BrSrEq_A: case Js::OpCode::BrEq_A: case Js::OpCode::BrNotNeq_A: case Js::OpCode::BrSrNotNeq_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrEqual(instr, true, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForEqualBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrEq_I4; // We'll get a warning if we don't assign a value to these... // We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } case Js::OpCode::BrSrNeq_A: case Js::OpCode::BrNeq_A: case Js::OpCode::BrSrNotEq_A: case Js::OpCode::BrNotEq_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrEqual(instr, false, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForNotEqualBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrNeq_I4; // We'll get a warning if we don't assign a value to these... // We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } case Js::OpCode::BrGt_A: case Js::OpCode::BrNotLe_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrGreaterThan(instr, true, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForGreaterThanBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrGt_I4; // We'll get a warning if we don't assign a value to these... 
// We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } case Js::OpCode::BrGe_A: case Js::OpCode::BrNotLt_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrGreaterThanOrEqual(instr, true, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForGreaterThanOrEqualBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrGe_I4; // We'll get a warning if we don't assign a value to these... // We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } case Js::OpCode::BrLt_A: case Js::OpCode::BrNotGe_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrGreaterThanOrEqual(instr, false, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForLessThanBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrLt_I4; // We'll get a warning if we don't assign a value to these... // We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } case Js::OpCode::BrLe_A: case Js::OpCode::BrNotGt_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrGreaterThan(instr, false, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForLessThanOrEqualBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrLe_I4; // We'll get a warning if we don't assign a value to these... // We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } default: return false; } // If this instruction is in a range of instructions where int overflow does not matter, we will still specialize it // (won't leave it unspecialized based on heuristics), since it is most likely worth specializing, and the dst value // needs to be guaranteed to be an int if(!ignoredIntOverflow && !ignoredNegativeZero && !needsBoolConv && instr->ShouldCheckForIntOverflow() && !IsWorthSpecializingToInt32(instr, src1Val, src2Val)) { // Even though type specialization is being skipped since it may not be worth it, the proper value should still be // maintained so that the result may be type specialized later. An int value is not created for the dst in any of // the following cases. // - A bailout check is necessary to specialize this instruction. The bailout check is what guarantees the result to // be an int, but since we're not going to specialize this instruction, there won't be a bailout check. // - Aggressive int type specialization is disabled and we're in a loop prepass. We're conservative on dst values in // that case, especially if the dst sym is live on the back-edge. 
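            // For instance (illustrative): for "c = Add_A a, b" with 'a' in [0, 10] and
            // 'b' in [0, 5], even when specialization is skipped as not worthwhile,
            // recording the untransferred dst range [0, 15] below lets a later use of 'c'
            // specialize without needing a bailout to re-establish that 'c' is an int.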
if(bailOutKind == IR::BailOutInvalid && instr->GetDst() && src1Val->GetValueInfo()->IsInt() && src2Val->GetValueInfo()->IsInt() && (DoAggressiveIntTypeSpec() || !this->IsLoopPrePass())) { *pDstVal = CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, src2Val); } return false; } } // case default } // switch LOutsideSwitch: this->ignoredIntOverflowForCurrentInstr = ignoredIntOverflow; this->ignoredNegativeZeroForCurrentInstr = ignoredNegativeZero; { // Try CSE again before modifying the IR, in case some attributes are required for successful CSE Value *src1IndirIndexVal = nullptr; if(CSEOptimize(currentBlock, &instr, &src1Val, &src2Val, &src1IndirIndexVal, true /* intMathExprOnly */)) { *redoTypeSpecRef = true; return false; } } const Js::OpCode originalOpCode = instr->m_opcode; if (!this->IsLoopPrePass()) { // No re-write on prepass instr->m_opcode = opcode; } Value *src1ValueToSpecialize = src1Val, *src2ValueToSpecialize = src2Val; // Lossy conversions to int32 must be done based on the original source values. For instance, if one of the values is a // float constant with a value that fits in a uint32 but not an int32, and the instruction can ignore int overflow, the // source value for the purposes of int specialization would have been changed to an int constant value by ignoring // overflow. If we were to specialize the sym using the int constant value, it would be treated as a lossless // conversion, but since there may be subsequent uses of the same float constant value that may not ignore overflow, // this must be treated as a lossy conversion by specializing the sym using the original float constant value. if(src1Lossy) { src1ValueToSpecialize = src1OriginalVal; } if (src2Lossy) { src2ValueToSpecialize = src2OriginalVal; } // Make sure the srcs are specialized IR::Opnd* src1 = instr->GetSrc1(); this->ToInt32(instr, src1, this->currentBlock, src1ValueToSpecialize, nullptr, src1Lossy); if (!skipSrc2) { IR::Opnd* src2 = instr->GetSrc2(); this->ToInt32(instr, src2, this->currentBlock, src2ValueToSpecialize, nullptr, src2Lossy); } if(bailOutKind != IR::BailOutInvalid && !this->IsLoopPrePass()) { GenerateBailAtOperation(&instr, bailOutKind); } if (!skipDst && instr->GetDst()) { if (needsBoolConv) { IR::RegOpnd *varDst; if (this->IsLoopPrePass()) { varDst = instr->GetDst()->AsRegOpnd(); this->ToVarRegOpnd(varDst, this->currentBlock); } else { // Generate: // t1.i = CmCC t2.i, t3.i // t1.v = Conv_bool t1.i // // If the only uses of t1 are ints, the conv_bool will get dead-stored TypeSpecializeIntDst(instr, originalOpCode, nullptr, src1Val, src2Val, bailOutKind, newMin, newMax, pDstVal); IR::RegOpnd *intDst = instr->GetDst()->AsRegOpnd(); intDst->SetIsJITOptimizedReg(true); varDst = IR::RegOpnd::New(intDst->m_sym->GetVarEquivSym(this->func), TyVar, this->func); IR::Instr *convBoolInstr = IR::Instr::New(Js::OpCode::Conv_Bool, varDst, intDst, this->func); // In some cases (e.g. unsigned compare peep code), a comparison will use variables // other than the ones initially intended for it, if we can determine that we would // arrive at the same result. This means that we get a ByteCodeUses operation after // the actual comparison. Since Inserting the Conv_bool just after the compare, and // just before the ByteCodeUses, would cause issues later on with register lifetime // calculation, we want to insert the Conv_bool after the whole compare instruction // block. 
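            // Sketch of the final layout (syms t1..t3 are illustrative):
            //     t1.i32 = CmLt_I4  t2.i32, t3.i32
            //     ByteCodeUses ...                  // zero or more, from the compare peeps
            //     t1.var = Conv_Bool t1.i32         // placed after the last ByteCodeUses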
IR::Instr *putAfter = instr; while (putAfter->m_next && putAfter->m_next->m_opcode == Js::OpCode::ByteCodeUses) { putAfter = putAfter->m_next; } putAfter->InsertAfter(convBoolInstr); convBoolInstr->SetByteCodeOffset(instr); this->ToVarRegOpnd(varDst, this->currentBlock); CurrentBlockData()->liveInt32Syms->Set(varDst->m_sym->m_id); CurrentBlockData()->liveLossyInt32Syms->Set(varDst->m_sym->m_id); } *pDstVal = this->NewGenericValue(ValueType::Boolean, varDst); } else { TypeSpecializeIntDst( instr, originalOpCode, nullptr, src1Val, src2Val, bailOutKind, newMin, newMax, pDstVal, addSubConstantInfo.HasInfo() ? &addSubConstantInfo : nullptr); } } if(bailOutKind == IR::BailOutInvalid) { GOPT_TRACE(_u("Type specialized to INT\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT: ")); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } else { GOPT_TRACE(_u("Type specialized to INT with bailout on:\n")); if(bailOutKind & (IR::BailOutOnOverflow | IR::BailOutOnMulOverflow) ) { GOPT_TRACE(_u(" Overflow\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT with bailout (%S): "), "Overflow"); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } if(bailOutKind & IR::BailOutOnNegativeZero) { GOPT_TRACE(_u(" Zero\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT with bailout (%S): "), "Zero"); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } } return true; } bool GlobOpt::IsWorthSpecializingToInt32Branch(IR::Instr const * instr, Value const * src1Val, Value const * src2Val) const { if (!src1Val->GetValueInfo()->HasIntConstantValue() && instr->GetSrc1()->IsRegOpnd()) { StackSym const *sym1 = instr->GetSrc1()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsInt32TypeSpecialized(sym1) == false) { if (!src2Val->GetValueInfo()->HasIntConstantValue() && instr->GetSrc2()->IsRegOpnd()) { StackSym const *sym2 = instr->GetSrc2()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsInt32TypeSpecialized(sym2) == false) { // Type specializing a Br itself isn't worth it, unless one src // is already type specialized return false; } } } } return true; } bool GlobOpt::TryOptConstFoldBrFalse( IR::Instr *const instr, Value *const srcValue, const int32 min, const int32 max) { Assert(instr); Assert(instr->m_opcode == Js::OpCode::BrFalse_A || instr->m_opcode == Js::OpCode::BrTrue_A); Assert(srcValue); if(!(DoAggressiveIntTypeSpec() ? srcValue->GetValueInfo()->IsLikelyInt() : srcValue->GetValueInfo()->IsInt())) { return false; } if(ValueInfo::IsEqualTo(srcValue, min, max, nullptr, 0, 0)) { OptConstFoldBr(instr->m_opcode == Js::OpCode::BrFalse_A, instr, srcValue); return true; } if(ValueInfo::IsNotEqualTo(srcValue, min, max, nullptr, 0, 0)) { OptConstFoldBr(instr->m_opcode == Js::OpCode::BrTrue_A, instr, srcValue); return true; } return false; } bool GlobOpt::TryOptConstFoldBrEqual( IR::Instr *const instr, const bool branchOnEqual, Value *const src1Value, const int32 min1, const int32 max1, Value *const src2Value, const int32 min2, const int32 max2) { Assert(instr); Assert(src1Value); Assert(DoAggressiveIntTypeSpec() ? 
src1Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt()); Assert(src2Value); Assert(DoAggressiveIntTypeSpec() ? src2Value->GetValueInfo()->IsLikelyInt() : src2Value->GetValueInfo()->IsInt()); if(ValueInfo::IsEqualTo(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(branchOnEqual, instr, src1Value, src2Value); return true; } if(ValueInfo::IsNotEqualTo(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(!branchOnEqual, instr, src1Value, src2Value); return true; } return false; } bool GlobOpt::TryOptConstFoldBrGreaterThan( IR::Instr *const instr, const bool branchOnGreaterThan, Value *const src1Value, const int32 min1, const int32 max1, Value *const src2Value, const int32 min2, const int32 max2) { Assert(instr); Assert(src1Value); Assert(DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt()); Assert(src2Value); Assert(DoAggressiveIntTypeSpec() ? src2Value->GetValueInfo()->IsLikelyInt() : src2Value->GetValueInfo()->IsInt()); if(ValueInfo::IsGreaterThan(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(branchOnGreaterThan, instr, src1Value, src2Value); return true; } if(ValueInfo::IsLessThanOrEqualTo(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(!branchOnGreaterThan, instr, src1Value, src2Value); return true; } return false; } bool GlobOpt::TryOptConstFoldBrGreaterThanOrEqual( IR::Instr *const instr, const bool branchOnGreaterThanOrEqual, Value *const src1Value, const int32 min1, const int32 max1, Value *const src2Value, const int32 min2, const int32 max2) { Assert(instr); Assert(src1Value); Assert(DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt()); Assert(src2Value); Assert(DoAggressiveIntTypeSpec() ? src2Value->GetValueInfo()->IsLikelyInt() : src2Value->GetValueInfo()->IsInt()); if(ValueInfo::IsGreaterThanOrEqualTo(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(branchOnGreaterThanOrEqual, instr, src1Value, src2Value); return true; } if(ValueInfo::IsLessThan(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(!branchOnGreaterThanOrEqual, instr, src1Value, src2Value); return true; } return false; } bool GlobOpt::TryOptConstFoldBrUnsignedLessThan( IR::Instr *const instr, const bool branchOnLessThan, Value *const src1Value, const int32 min1, const int32 max1, Value *const src2Value, const int32 min2, const int32 max2) { Assert(DoConstFold()); Assert(!IsLoopPrePass()); if(!src1Value || !src2Value || !( DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() && src2Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt() && src2Value->GetValueInfo()->IsInt() )) { return false; } uint uMin1 = (min1 < 0 ? (max1 < 0 ? min((uint)min1, (uint)max1) : 0) : min1); uint uMax1 = max((uint)min1, (uint)max1); uint uMin2 = (min2 < 0 ? (max2 < 0 ? 
min((uint)min2, (uint)max2) : 0) : min2); uint uMax2 = max((uint)min2, (uint)max2); if (uMax1 < uMin2) { // Range 1 is always lesser than Range 2 OptConstFoldBr(branchOnLessThan, instr, src1Value, src2Value); return true; } if (uMin1 >= uMax2) { // Range 2 is always lesser than Range 1 OptConstFoldBr(!branchOnLessThan, instr, src1Value, src2Value); return true; } return false; } bool GlobOpt::TryOptConstFoldBrUnsignedGreaterThan( IR::Instr *const instr, const bool branchOnGreaterThan, Value *const src1Value, const int32 min1, const int32 max1, Value *const src2Value, const int32 min2, const int32 max2) { Assert(DoConstFold()); Assert(!IsLoopPrePass()); if(!src1Value || !src2Value || !( DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() && src2Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt() && src2Value->GetValueInfo()->IsInt() )) { return false; } uint uMin1 = (min1 < 0 ? (max1 < 0 ? min((uint)min1, (uint)max1) : 0) : min1); uint uMax1 = max((uint)min1, (uint)max1); uint uMin2 = (min2 < 0 ? (max2 < 0 ? min((uint)min2, (uint)max2) : 0) : min2); uint uMax2 = max((uint)min2, (uint)max2); if (uMin1 > uMax2) { // Range 1 is always greater than Range 2 OptConstFoldBr(branchOnGreaterThan, instr, src1Value, src2Value); return true; } if (uMax1 <= uMin2) { // Range 2 is always greater than Range 1 OptConstFoldBr(!branchOnGreaterThan, instr, src1Value, src2Value); return true; } return false; } void GlobOpt::SetPathDependentInfo(const bool conditionToBranch, const PathDependentInfo &info) { Assert(this->currentBlock->GetSuccList()->Count() == 2); IR::Instr * fallthrough = this->currentBlock->GetNext()->GetFirstInstr(); FOREACH_SLISTBASECOUNTED_ENTRY(FlowEdge*, edge, this->currentBlock->GetSuccList()) { if (conditionToBranch == (edge->GetSucc()->GetFirstInstr() != fallthrough)) { edge->SetPathDependentInfo(info, alloc); return; } } NEXT_SLISTBASECOUNTED_ENTRY; Assert(false); } PathDependentInfoToRestore GlobOpt::UpdatePathDependentInfo(PathDependentInfo *const info) { Assert(info); if(!info->HasInfo()) { return PathDependentInfoToRestore(); } decltype(&GlobOpt::UpdateIntBoundsForEqual) UpdateIntBoundsForLeftValue, UpdateIntBoundsForRightValue; switch(info->Relationship()) { case PathDependentRelationship::Equal: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForEqual; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForEqual; break; case PathDependentRelationship::NotEqual: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForNotEqual; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForNotEqual; break; case PathDependentRelationship::GreaterThanOrEqual: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForGreaterThanOrEqual; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForLessThanOrEqual; break; case PathDependentRelationship::GreaterThan: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForGreaterThan; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForLessThan; break; case PathDependentRelationship::LessThanOrEqual: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForLessThanOrEqual; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForGreaterThanOrEqual; break; case PathDependentRelationship::LessThan: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForLessThan; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForGreaterThan; break; default: Assert(false); __assume(false); } ValueInfo *leftValueInfo = info->LeftValue()->GetValueInfo(); IntConstantBounds 
leftConstantBounds; AssertVerify(leftValueInfo->TryGetIntConstantBounds(&leftConstantBounds, true)); ValueInfo *rightValueInfo; IntConstantBounds rightConstantBounds; if(info->RightValue()) { rightValueInfo = info->RightValue()->GetValueInfo(); AssertVerify(rightValueInfo->TryGetIntConstantBounds(&rightConstantBounds, true)); } else { rightValueInfo = nullptr; rightConstantBounds = IntConstantBounds(info->RightConstantValue(), info->RightConstantValue()); } ValueInfo *const newLeftValueInfo = (this->*UpdateIntBoundsForLeftValue)( info->LeftValue(), leftConstantBounds, info->RightValue(), rightConstantBounds, true); if(newLeftValueInfo) { ChangeValueInfo(nullptr, info->LeftValue(), newLeftValueInfo); AssertVerify(newLeftValueInfo->TryGetIntConstantBounds(&leftConstantBounds, true)); } else { leftValueInfo = nullptr; } ValueInfo *const newRightValueInfo = (this->*UpdateIntBoundsForRightValue)( info->RightValue(), rightConstantBounds, info->LeftValue(), leftConstantBounds, true); if(newRightValueInfo) { ChangeValueInfo(nullptr, info->RightValue(), newRightValueInfo); } else { rightValueInfo = nullptr; } return PathDependentInfoToRestore(leftValueInfo, rightValueInfo); } void GlobOpt::RestorePathDependentInfo(PathDependentInfo *const info, const PathDependentInfoToRestore infoToRestore) { Assert(info); if(infoToRestore.LeftValueInfo()) { Assert(info->LeftValue()); ChangeValueInfo(nullptr, info->LeftValue(), infoToRestore.LeftValueInfo()); } if(infoToRestore.RightValueInfo()) { Assert(info->RightValue()); ChangeValueInfo(nullptr, info->RightValue(), infoToRestore.RightValueInfo()); } } bool GlobOpt::TypeSpecializeFloatUnary(IR::Instr **pInstr, Value *src1Val, Value **pDstVal, bool skipDst /* = false */) { IR::Instr *&instr = *pInstr; IR::Opnd *src1; IR::Opnd *dst; Js::OpCode opcode = instr->m_opcode; Value *valueToTransfer = nullptr; Assert(src1Val && src1Val->GetValueInfo()->IsLikelyNumber() || OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)); if (!this->DoFloatTypeSpec()) { return false; } // For inline built-ins we need to do type specialization. Check upfront to avoid duplicating same case labels. if (!OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { switch (opcode) { case Js::OpCode::ArgOut_A_InlineBuiltIn: skipDst = true; // fall-through case Js::OpCode::Ld_A: case Js::OpCode::BrTrue_A: case Js::OpCode::BrFalse_A: if (instr->GetSrc1()->IsRegOpnd()) { StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsFloat64TypeSpecialized(sym) == false) { // Type specializing an Ld_A isn't worth it, unless the src // is already type specialized return false; } } if (instr->m_opcode == Js::OpCode::Ld_A) { valueToTransfer = src1Val; } break; case Js::OpCode::Neg_A: break; case Js::OpCode::Conv_Num: Assert(src1Val); opcode = Js::OpCode::Ld_A; valueToTransfer = src1Val; if (!src1Val->GetValueInfo()->IsNumber()) { StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym; valueToTransfer = NewGenericValue(ValueType::Float, instr->GetDst()->GetStackSym()); if (CurrentBlockData()->IsFloat64TypeSpecialized(sym) == false) { // Set the dst as a nonDeadStore. We want to keep the Ld_A to prevent the FromVar from // being dead-stored, as it could cause implicit calls. 
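                // For instance (illustrative): for "d = Conv_Num s" where 's' is only
                // likely a number, the FromVar emitted for 's' may trigger implicit calls
                // (e.g. valueOf on an object), so the transfer is pinned via
                // m_dontDeadStore below instead of being dead-stored away.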
dst = instr->GetDst(); dst->AsRegOpnd()->m_dontDeadStore = true; } } break; case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: case Js::OpCode::StElemC: return TypeSpecializeStElem(pInstr, src1Val, pDstVal); default: return false; } } // Make sure the srcs are specialized src1 = instr->GetSrc1(); // Use original val when calling toFloat64 as this is what we'll use to try hoisting the fromVar if we're in a loop. this->ToFloat64(instr, src1, this->currentBlock, src1Val, nullptr, IR::BailOutPrimitiveButString); if (!skipDst) { dst = instr->GetDst(); if (dst) { this->TypeSpecializeFloatDst(instr, valueToTransfer, src1Val, nullptr, pDstVal); if (!this->IsLoopPrePass()) { instr->m_opcode = opcode; } } } GOPT_TRACE_INSTR(instr, _u("Type specialized to FLOAT: ")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FloatTypeSpecPhase)) { Output::Print(_u("Type specialized to FLOAT: ")); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif return true; } // Unconditionally type-spec dst to float. void GlobOpt::TypeSpecializeFloatDst(IR::Instr *instr, Value *valToTransfer, Value *const src1Value, Value *const src2Value, Value **pDstVal) { IR::Opnd* dst = instr->GetDst(); Assert(dst); AssertMsg(dst->IsRegOpnd(), "What else?"); this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock); if(valToTransfer) { *pDstVal = this->ValueNumberTransferDst(instr, valToTransfer); CurrentBlockData()->InsertNewValue(*pDstVal, dst); } else { *pDstVal = CreateDstUntransferredValue(ValueType::Float, instr, src1Value, src2Value); } } #ifdef ENABLE_SIMDJS void GlobOpt::TypeSpecializeSimd128Dst(IRType type, IR::Instr *instr, Value *valToTransfer, Value *const src1Value, Value **pDstVal) { IR::Opnd* dst = instr->GetDst(); Assert(dst); AssertMsg(dst->IsRegOpnd(), "What else?"); this->ToSimd128Dst(type, instr, dst->AsRegOpnd(), this->currentBlock); if (valToTransfer) { *pDstVal = this->ValueNumberTransferDst(instr, valToTransfer); CurrentBlockData()->InsertNewValue(*pDstVal, dst); } else { *pDstVal = NewGenericValue(GetValueTypeFromIRType(type), instr->GetDst()); } } #endif bool GlobOpt::TypeSpecializeLdLen( IR::Instr * *const instrRef, Value * *const src1ValueRef, Value * *const dstValueRef, bool *const forceInvariantHoistingRef) { Assert(instrRef); IR::Instr *&instr = *instrRef; Assert(instr); Assert(instr->m_opcode == Js::OpCode::LdLen_A); Assert(src1ValueRef); Value *&src1Value = *src1ValueRef; Assert(dstValueRef); Value *&dstValue = *dstValueRef; Assert(forceInvariantHoistingRef); bool &forceInvariantHoisting = *forceInvariantHoistingRef; if(!DoLdLenIntSpec(instr, instr->GetSrc1()->GetValueType())) { return false; } IR::BailOutKind bailOutKind = IR::BailOutOnIrregularLength; if(!IsLoopPrePass()) { IR::RegOpnd *const baseOpnd = instr->GetSrc1()->AsRegOpnd(); if(baseOpnd->IsArrayRegOpnd()) { StackSym *const lengthSym = baseOpnd->AsArrayRegOpnd()->LengthSym(); if(lengthSym) { CaptureByteCodeSymUses(instr); instr->m_opcode = Js::OpCode::Ld_I4; instr->ReplaceSrc1(IR::RegOpnd::New(lengthSym, lengthSym->GetType(), func)); instr->ClearBailOutInfo(); // Find the hoisted length value Value *const lengthValue = CurrentBlockData()->FindValue(lengthSym); Assert(lengthValue); src1Value = lengthValue; ValueInfo *const lengthValueInfo = lengthValue->GetValueInfo(); Assert(lengthValueInfo->GetSymStore() != lengthSym); IntConstantBounds lengthConstantBounds; AssertVerify(lengthValueInfo->TryGetIntConstantBounds(&lengthConstantBounds)); 
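                // For instance (illustrative): if the hoisted length sym is known to be
                // in [0, 100], the LdLen_A has just been rewritten to "dst = Ld_I4 length"
                // and the dst below inherits the precise [0, 100] range, rather than the
                // generic [0, INT32_MAX] used on the bailout path at the end of this
                // function.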
Assert(lengthConstantBounds.LowerBound() >= 0); // Int-specialize, and transfer the value to the dst TypeSpecializeIntDst( instr, Js::OpCode::LdLen_A, src1Value, src1Value, nullptr, bailOutKind, lengthConstantBounds.LowerBound(), lengthConstantBounds.UpperBound(), &dstValue); // Try to force hoisting the Ld_I4 so that the length will have an invariant sym store that can be // copy-propped. Invariant hoisting does not automatically hoist Ld_I4. forceInvariantHoisting = true; return true; } } if (instr->HasBailOutInfo()) { Assert(instr->GetBailOutKind() == IR::BailOutMarkTempObject); bailOutKind = IR::BailOutOnIrregularLength | IR::BailOutMarkTempObject; instr->SetBailOutKind(bailOutKind); } else { Assert(bailOutKind == IR::BailOutOnIrregularLength); GenerateBailAtOperation(&instr, bailOutKind); } } TypeSpecializeIntDst( instr, Js::OpCode::LdLen_A, nullptr, nullptr, nullptr, bailOutKind, 0, INT32_MAX, &dstValue); return true; } bool GlobOpt::TypeSpecializeFloatBinary(IR::Instr *instr, Value *src1Val, Value *src2Val, Value **pDstVal) { IR::Opnd *src1; IR::Opnd *src2; IR::Opnd *dst; bool allowUndefinedOrNullSrc1 = true; bool allowUndefinedOrNullSrc2 = true; bool skipSrc1 = false; bool skipSrc2 = false; bool skipDst = false; if (!this->DoFloatTypeSpec()) { return false; } // For inline built-ins we need to do type specialization. Check upfront to avoid duplicating same case labels. if (!OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { switch (instr->m_opcode) { case Js::OpCode::Sub_A: case Js::OpCode::Mul_A: case Js::OpCode::Div_A: case Js::OpCode::Expo_A: // Avoid if one source is known not to be a number. if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber()) { return false; } break; case Js::OpCode::BrSrEq_A: case Js::OpCode::BrSrNeq_A: case Js::OpCode::BrEq_A: case Js::OpCode::BrNeq_A: case Js::OpCode::BrSrNotEq_A: case Js::OpCode::BrNotEq_A: case Js::OpCode::BrSrNotNeq_A: case Js::OpCode::BrNotNeq_A: // Avoid if one source is known not to be a number. if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber()) { return false; } // Undef == Undef, but +Undef != +Undef // 0.0 != null, but 0.0 == +null // // So Bailout on anything but numbers for both src1 and src2 allowUndefinedOrNullSrc1 = false; allowUndefinedOrNullSrc2 = false; break; case Js::OpCode::BrGt_A: case Js::OpCode::BrGe_A: case Js::OpCode::BrLt_A: case Js::OpCode::BrLe_A: case Js::OpCode::BrNotGt_A: case Js::OpCode::BrNotGe_A: case Js::OpCode::BrNotLt_A: case Js::OpCode::BrNotLe_A: // Avoid if one source is known not to be a number. 
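            // (E.g., illustrative: a src whose profile says it is definitely a String or
            // an Object -- float-specializing a Sub/Mul/Div on it could not succeed and
            // would only add bailouts.)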
if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber()) { return false; } break; case Js::OpCode::Add_A: // For Add, we need both sources to be Numbers, otherwise it could be a string concat if (!src1Val || !src2Val || !(src1Val->GetValueInfo()->IsLikelyNumber() && src2Val->GetValueInfo()->IsLikelyNumber())) { return false; } break; case Js::OpCode::ArgOut_A_InlineBuiltIn: skipSrc2 = true; skipDst = true; break; default: return false; } } else { switch (instr->m_opcode) { case Js::OpCode::InlineArrayPush: bool isFloatConstMissingItem = src2Val->GetValueInfo()->IsFloatConstant(); if(isFloatConstMissingItem) { FloatConstType floatValue = src2Val->GetValueInfo()->AsFloatConstant()->FloatValue(); isFloatConstMissingItem = Js::SparseArraySegment<double>::IsMissingItem(&floatValue); } // Don't specialize if the element is not likelyNumber - we will surely bailout if(!(src2Val->GetValueInfo()->IsLikelyNumber()) || isFloatConstMissingItem) { return false; } // Only specialize the Second source - element skipSrc1 = true; skipDst = true; allowUndefinedOrNullSrc2 = false; break; } } // Make sure the srcs are specialized if(!skipSrc1) { src1 = instr->GetSrc1(); this->ToFloat64(instr, src1, this->currentBlock, src1Val, nullptr, (allowUndefinedOrNullSrc1 ? IR::BailOutPrimitiveButString : IR::BailOutNumberOnly)); } if (!skipSrc2) { src2 = instr->GetSrc2(); this->ToFloat64(instr, src2, this->currentBlock, src2Val, nullptr, (allowUndefinedOrNullSrc2 ? IR::BailOutPrimitiveButString : IR::BailOutNumberOnly)); } if (!skipDst) { dst = instr->GetDst(); if (dst) { *pDstVal = CreateDstUntransferredValue(ValueType::Float, instr, src1Val, src2Val); AssertMsg(dst->IsRegOpnd(), "What else?"); this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock); } } GOPT_TRACE_INSTR(instr, _u("Type specialized to FLOAT: ")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FloatTypeSpecPhase)) { Output::Print(_u("Type specialized to FLOAT: ")); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif return true; } bool GlobOpt::TypeSpecializeStElem(IR::Instr ** pInstr, Value *src1Val, Value **pDstVal) { IR::Instr *&instr = *pInstr; IR::RegOpnd *baseOpnd = instr->GetDst()->AsIndirOpnd()->GetBaseOpnd(); ValueType baseValueType(baseOpnd->GetValueType()); if (instr->DoStackArgsOpt(this->func) || (!this->DoTypedArrayTypeSpec() && baseValueType.IsLikelyOptimizedTypedArray()) || (!this->DoNativeArrayTypeSpec() && baseValueType.IsLikelyNativeArray()) || !(baseValueType.IsLikelyOptimizedTypedArray() || baseValueType.IsLikelyNativeArray())) { GOPT_TRACE_INSTR(instr, _u("Didn't type specialize array access, because typed array type specialization is disabled, or base is not an optimized typed array.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not specialize because %s.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr, instr->DoStackArgsOpt(this->func) ? 
_u("instruction uses the arguments object") : _u("typed array type specialization is disabled, or base is not an optimized typed array")); Output::Flush(); } return false; } Assert(instr->GetSrc1()->IsRegOpnd() || (src1Val && src1Val->GetValueInfo()->HasIntConstantValue())); StackSym *sym = instr->GetSrc1()->IsRegOpnd() ? instr->GetSrc1()->AsRegOpnd()->m_sym : nullptr; // Only type specialize the source of store element if the source symbol is already type specialized to int or float. if (sym) { if (baseValueType.IsLikelyNativeArray()) { // Gently coerce these src's into native if it seems likely to work. // Otherwise we can't use the fast path to store. // But don't try to put a float-specialized number into an int array this way. if (!( CurrentBlockData()->IsInt32TypeSpecialized(sym) || ( src1Val && ( DoAggressiveIntTypeSpec() ? src1Val->GetValueInfo()->IsLikelyInt() : src1Val->GetValueInfo()->IsInt() ) ) )) { if (!( CurrentBlockData()->IsFloat64TypeSpecialized(sym) || (src1Val && src1Val->GetValueInfo()->IsLikelyNumber()) ) || baseValueType.HasIntElements()) { return false; } } } else if (!CurrentBlockData()->IsInt32TypeSpecialized(sym) && !CurrentBlockData()->IsFloat64TypeSpecialized(sym)) { GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access, because src is not type specialized.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not specialize because src is not specialized.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr); Output::Flush(); } return false; } } int32 src1IntConstantValue; if(baseValueType.IsLikelyNativeIntArray() && src1Val && src1Val->GetValueInfo()->TryGetIntConstantValue(&src1IntConstantValue)) { if(Js::SparseArraySegment<int32>::IsMissingItem(&src1IntConstantValue)) { return false; } } // Note: doing ToVarUses to make sure we do get the int32 version of the index before trying to access its value in // ShouldExpectConventionalArrayIndexValue. Not sure why that never gave us a problem before. Assert(instr->GetDst()->IsIndirOpnd()); IR::IndirOpnd *dst = instr->GetDst()->AsIndirOpnd(); // Make sure we use the int32 version of the index operand symbol, if available. Otherwise, ensure the var symbol is live (by // potentially inserting a ToVar). 
this->ToVarUses(instr, dst, /* isDst = */ true, nullptr); if (!ShouldExpectConventionalArrayIndexValue(dst)) { GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access, because index is negative or likely not int.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not specialize because index is negative or likely not int.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr); Output::Flush(); } return false; } IRType toType = TyVar; bool isLossyAllowed = true; IR::BailOutKind arrayBailOutKind = IR::BailOutConventionalTypedArrayAccessOnly; switch(baseValueType.GetObjectType()) { case ObjectType::Int8Array: case ObjectType::Uint8Array: case ObjectType::Int16Array: case ObjectType::Uint16Array: case ObjectType::Int32Array: case ObjectType::Int8VirtualArray: case ObjectType::Uint8VirtualArray: case ObjectType::Int16VirtualArray: case ObjectType::Uint16VirtualArray: case ObjectType::Int32VirtualArray: case ObjectType::Int8MixedArray: case ObjectType::Uint8MixedArray: case ObjectType::Int16MixedArray: case ObjectType::Uint16MixedArray: case ObjectType::Int32MixedArray: Int32Array: if (this->DoAggressiveIntTypeSpec() || this->DoFloatTypeSpec()) { toType = TyInt32; } break; case ObjectType::Uint32Array: case ObjectType::Uint32VirtualArray: case ObjectType::Uint32MixedArray: // Uint32Arrays may store values that overflow int32. If the value being stored comes from a symbol that's // already losslessly type specialized to int32, we'll use it. Otherwise, if we only have a float64 specialized // value, we don't want to force bailout if it doesn't fit in int32. Instead, we'll emit conversion in the // lowerer, and handle overflow, if necessary. if (!sym || CurrentBlockData()->IsInt32TypeSpecialized(sym)) { toType = TyInt32; } else if (CurrentBlockData()->IsFloat64TypeSpecialized(sym)) { toType = TyFloat64; } break; case ObjectType::Float32Array: case ObjectType::Float64Array: case ObjectType::Float32VirtualArray: case ObjectType::Float32MixedArray: case ObjectType::Float64VirtualArray: case ObjectType::Float64MixedArray: Float64Array: if (this->DoFloatTypeSpec()) { toType = TyFloat64; } break; case ObjectType::Uint8ClampedArray: case ObjectType::Uint8ClampedVirtualArray: case ObjectType::Uint8ClampedMixedArray: // Uint8ClampedArray requires rounding (as opposed to truncation) of floating point values. If source symbol is // float type specialized, type specialize this instruction to float as well, and handle rounding in the // lowerer. 
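// e.g. (illustrative) storing 2.5 into a Uint8ClampedArray stores 2 and storing 3.5 stores 4
// (round half to even), while (2.5 | 0) and (3.5 | 0) truncate to 2 and 3; a lossy
// (truncating) int32 therefore cannot be reused here, which is why isLossyAllowed is cleared.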
if (!sym || CurrentBlockData()->IsInt32TypeSpecialized(sym)) { toType = TyInt32; isLossyAllowed = false; } else if (CurrentBlockData()->IsFloat64TypeSpecialized(sym)) { toType = TyFloat64; } break; default: Assert(baseValueType.IsLikelyNativeArray()); isLossyAllowed = false; arrayBailOutKind = IR::BailOutConventionalNativeArrayAccessOnly; if(baseValueType.HasIntElements()) { goto Int32Array; } Assert(baseValueType.HasFloatElements()); goto Float64Array; } if (toType != TyVar) { GOPT_TRACE_INSTR(instr, _u("Type specialized array access.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, type specialized to %s.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr, toType == TyInt32 ? _u("int32") : _u("float64")); Output::Flush(); } IR::BailOutKind bailOutKind = ((toType == TyInt32) ? IR::BailOutIntOnly : IR::BailOutNumberOnly); this->ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, src1Val, nullptr, toType, bailOutKind, /* lossy = */ isLossyAllowed); if (!this->IsLoopPrePass()) { bool bConvertToBailoutInstr = true; // Definite StElemC doesn't need bailout, because it can't fail or cause conversion. if (instr->m_opcode == Js::OpCode::StElemC && baseValueType.IsObject()) { if (baseValueType.HasIntElements()) { //Native int array requires a missing element check & bailout int32 min = INT32_MIN; int32 max = INT32_MAX; if (src1Val->GetValueInfo()->GetIntValMinMax(&min, &max, false)) { bConvertToBailoutInstr = ((min <= Js::JavascriptNativeIntArray::MissingItem) && (max >= Js::JavascriptNativeIntArray::MissingItem)); } } else { bConvertToBailoutInstr = false; } } if (bConvertToBailoutInstr) { if(instr->HasBailOutInfo()) { const IR::BailOutKind oldBailOutKind = instr->GetBailOutKind(); Assert( ( !(oldBailOutKind & ~IR::BailOutKindBits) || (oldBailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp ) && !(oldBailOutKind & IR::BailOutKindBits & ~(IR::BailOutOnArrayAccessHelperCall | IR::BailOutMarkTempObject))); if(arrayBailOutKind == IR::BailOutConventionalTypedArrayAccessOnly) { // BailOutConventionalTypedArrayAccessOnly also bails out if the array access is outside the head // segment bounds, and guarantees no implicit calls. Override the bailout kind so that the instruction // bails out for the right reason. instr->SetBailOutKind( arrayBailOutKind | (oldBailOutKind & (IR::BailOutKindBits - IR::BailOutOnArrayAccessHelperCall))); } else { // BailOutConventionalNativeArrayAccessOnly by itself may generate a helper call, and may cause implicit // calls to occur, so it must be merged in to eliminate generating the helper call. 
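// e.g. (illustrative) merging BailOutOnImplicitCallsPreOp with
// BailOutConventionalNativeArrayAccessOnly below keeps both guards on the one instruction
// rather than emitting a separate helper call for the native-array check.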
Assert(arrayBailOutKind == IR::BailOutConventionalNativeArrayAccessOnly); instr->SetBailOutKind(oldBailOutKind | arrayBailOutKind); } } else { GenerateBailAtOperation(&instr, arrayBailOutKind); } } } } else { GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access, because the source was not already specialized.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not type specialize, because of array type.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr); Output::Flush(); } } return toType != TyVar; } IR::Instr * GlobOpt::ToVarUses(IR::Instr *instr, IR::Opnd *opnd, bool isDst, Value *val) { Sym *sym; switch (opnd->GetKind()) { case IR::OpndKindReg: if (!isDst && !CurrentBlockData()->liveVarSyms->Test(opnd->AsRegOpnd()->m_sym->m_id)) { instr = this->ToVar(instr, opnd->AsRegOpnd(), this->currentBlock, val, true); } break; case IR::OpndKindSym: sym = opnd->AsSymOpnd()->m_sym; if (sym->IsPropertySym() && !CurrentBlockData()->liveVarSyms->Test(sym->AsPropertySym()->m_stackSym->m_id) && sym->AsPropertySym()->m_stackSym->IsVar()) { StackSym *propertyBase = sym->AsPropertySym()->m_stackSym; IR::RegOpnd *newOpnd = IR::RegOpnd::New(propertyBase, TyVar, instr->m_func); instr = this->ToVar(instr, newOpnd, this->currentBlock, CurrentBlockData()->FindValue(propertyBase), true); } break; case IR::OpndKindIndir: IR::RegOpnd *baseOpnd = opnd->AsIndirOpnd()->GetBaseOpnd(); if (!CurrentBlockData()->liveVarSyms->Test(baseOpnd->m_sym->m_id)) { instr = this->ToVar(instr, baseOpnd, this->currentBlock, CurrentBlockData()->FindValue(baseOpnd->m_sym), true); } IR::RegOpnd *indexOpnd = opnd->AsIndirOpnd()->GetIndexOpnd(); if (indexOpnd && !indexOpnd->m_sym->IsTypeSpec()) { if((indexOpnd->GetValueType().IsInt() ? 
!IsTypeSpecPhaseOff(func) : indexOpnd->GetValueType().IsLikelyInt() && DoAggressiveIntTypeSpec()) && !GetIsAsmJSFunc()) // typespec is disabled for asmjs { StackSym *const indexVarSym = indexOpnd->m_sym; Value *const indexValue = CurrentBlockData()->FindValue(indexVarSym); Assert(indexValue); Assert(indexValue->GetValueInfo()->IsLikelyInt()); ToInt32(instr, indexOpnd, currentBlock, indexValue, opnd->AsIndirOpnd(), false); Assert(indexValue->GetValueInfo()->IsInt()); if(!IsLoopPrePass()) { indexOpnd = opnd->AsIndirOpnd()->GetIndexOpnd(); if(indexOpnd) { Assert(indexOpnd->m_sym->IsTypeSpec()); IntConstantBounds indexConstantBounds; AssertVerify(indexValue->GetValueInfo()->TryGetIntConstantBounds(&indexConstantBounds)); if(ValueInfo::IsGreaterThanOrEqualTo( indexValue, indexConstantBounds.LowerBound(), indexConstantBounds.UpperBound(), nullptr, 0, 0)) { indexOpnd->SetType(TyUint32); } } } } else if (!CurrentBlockData()->liveVarSyms->Test(indexOpnd->m_sym->m_id)) { instr = this->ToVar(instr, indexOpnd, this->currentBlock, CurrentBlockData()->FindValue(indexOpnd->m_sym), true); } } break; } return instr; } IR::Instr * GlobOpt::ToVar(IR::Instr *instr, IR::RegOpnd *regOpnd, BasicBlock *block, Value *value, bool needsUpdate) { IR::Instr *newInstr; StackSym *varSym = regOpnd->m_sym; if (IsTypeSpecPhaseOff(this->func)) { return instr; } if (this->IsLoopPrePass()) { block->globOptData.liveVarSyms->Set(varSym->m_id); return instr; } if (block->globOptData.liveVarSyms->Test(varSym->m_id)) { // Already live, nothing to do return instr; } if (!varSym->IsVar()) { Assert(!varSym->IsTypeSpec()); // Leave non-vars alone. return instr; } Assert(block->globOptData.IsTypeSpecialized(varSym)); if (!value) { value = block->globOptData.FindValue(varSym); } ValueInfo *valueInfo = value ? value->GetValueInfo() : nullptr; if(valueInfo && valueInfo->IsInt()) { // If two syms have the same value, one is lossy-int-specialized, and then the other is int-specialized, the value // would have been updated to definitely int. Upon using the lossy-int-specialized sym later, it would be flagged as // lossy while the value is definitely int. Since the bit-vectors are based on the sym and not the value, update the // lossy state. block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); } IRType fromType = TyIllegal; StackSym *typeSpecSym = nullptr; if (block->globOptData.liveInt32Syms->Test(varSym->m_id) && !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id)) { fromType = TyInt32; typeSpecSym = varSym->GetInt32EquivSym(this->func); Assert(valueInfo); Assert(valueInfo->IsInt()); } else if (block->globOptData.liveFloat64Syms->Test(varSym->m_id)) { fromType = TyFloat64; typeSpecSym = varSym->GetFloat64EquivSym(this->func); // Ensure that all bailout FromVars that generate a value for this type-specialized sym will bail out on any non-number // value, even ones that have already been generated before. Float-specialized non-number values cannot be converted // back to Var since they will not go back to the original non-number value. The dead-store pass will update the bailout // kind on already-generated FromVars based on this bit. typeSpecSym->m_requiresBailOnNotNumber = true; // A previous float conversion may have used BailOutPrimitiveButString, which does not change the value type to say // definitely float, since it can also be a non-string primitive. 
The convert back to Var, though, will cause that // bailout kind to be changed to BailOutNumberOnly in the dead-store phase, so from the point of the initial conversion // to float, the value is definitely a number. Since we don't know where the FromVar is, change the value type here. if(valueInfo) { if(!valueInfo->IsNumber()) { valueInfo = valueInfo->SpecializeToFloat64(alloc); ChangeValueInfo(block, value, valueInfo); regOpnd->SetValueType(valueInfo->Type()); } } else { value = NewGenericValue(ValueType::Float); valueInfo = value->GetValueInfo(); block->globOptData.SetValue(value, varSym); regOpnd->SetValueType(valueInfo->Type()); } } else { #ifdef ENABLE_SIMDJS // SIMD_JS Assert(block->globOptData.IsLiveAsSimd128(varSym)); if (block->globOptData.IsLiveAsSimd128F4(varSym)) { fromType = TySimd128F4; } else { Assert(block->globOptData.IsLiveAsSimd128I4(varSym)); fromType = TySimd128I4; } if (valueInfo) { if (fromType == TySimd128F4 && !valueInfo->Type().IsSimd128Float32x4()) { valueInfo = valueInfo->SpecializeToSimd128F4(alloc); ChangeValueInfo(block, value, valueInfo); regOpnd->SetValueType(valueInfo->Type()); } else if (fromType == TySimd128I4 && !valueInfo->Type().IsSimd128Int32x4()) { // The condition above already ensures the value is not Simd128Int32x4, so no inner re-check is needed. valueInfo = valueInfo->SpecializeToSimd128I4(alloc); ChangeValueInfo(block, value, valueInfo); regOpnd->SetValueType(valueInfo->Type()); } } else { ValueType valueType = fromType == TySimd128F4 ? ValueType::GetSimd128(ObjectType::Simd128Float32x4) : ValueType::GetSimd128(ObjectType::Simd128Int32x4); value = NewGenericValue(valueType); valueInfo = value->GetValueInfo(); block->globOptData.SetValue(value, varSym); regOpnd->SetValueType(valueInfo->Type()); } ValueType valueType = valueInfo->Type(); // Should be definite if type-specialized Assert(valueType.IsSimd128()); typeSpecSym = varSym->GetSimd128EquivSym(fromType, this->func); #else Assert(UNREACHED); #endif } AssertOrFailFast(valueInfo); int32 intConstantValue; if (valueInfo->TryGetIntConstantValue(&intConstantValue)) { // Lower will tag or create a number directly newInstr = IR::Instr::New(Js::OpCode::LdC_A_I4, regOpnd, IR::IntConstOpnd::New(intConstantValue, TyInt32, instr->m_func), instr->m_func); } else { IR::RegOpnd * regNew = IR::RegOpnd::New(typeSpecSym, fromType, instr->m_func); Js::OpCode opcode = Js::OpCode::ToVar; regNew->SetIsJITOptimizedReg(true); newInstr = IR::Instr::New(opcode, regOpnd, regNew, instr->m_func); } newInstr->SetByteCodeOffset(instr); newInstr->GetDst()->AsRegOpnd()->SetIsJITOptimizedReg(true); ValueType valueType = valueInfo->Type(); if(fromType == TyInt32) { #if !INT32VAR // All 32-bit ints are taggable on 64-bit architectures IntConstantBounds constantBounds; AssertVerify(valueInfo->TryGetIntConstantBounds(&constantBounds)); if(constantBounds.IsTaggable()) #endif { // The value is within the taggable range, so set the opnd value types to TaggedInt to avoid the overflow check valueType = ValueType::GetTaggedInt(); } } newInstr->GetDst()->SetValueType(valueType); newInstr->GetSrc1()->SetValueType(valueType); IR::Instr *insertAfterInstr = instr->m_prev; if (instr == block->GetLastInstr() && (instr->IsBranchInstr() || instr->m_opcode == Js::OpCode::BailTarget)) { // Don't insert code between the branch and the preceding ByteCodeUses instrs...
while(insertAfterInstr->m_opcode == Js::OpCode::ByteCodeUses) { insertAfterInstr = insertAfterInstr->m_prev; } } block->InsertInstrAfter(newInstr, insertAfterInstr); block->globOptData.liveVarSyms->Set(varSym->m_id); GOPT_TRACE_OPND(regOpnd, _u("Converting to var\n")); if (block->loop) { Assert(!this->IsLoopPrePass()); this->TryHoistInvariant(newInstr, block, value, value, nullptr, false); } if (needsUpdate) { // Make sure that the kill effect of the ToVar instruction is tracked and that the kill of a property // type is reflected in the current instruction. this->ProcessKills(newInstr); this->ValueNumberObjectType(newInstr->GetDst(), newInstr); if (instr->GetSrc1() && instr->GetSrc1()->IsSymOpnd() && instr->GetSrc1()->AsSymOpnd()->IsPropertySymOpnd()) { // Reprocess the load source. We need to reset the PropertySymOpnd fields first. IR::PropertySymOpnd *propertySymOpnd = instr->GetSrc1()->AsPropertySymOpnd(); if (propertySymOpnd->IsTypeCheckSeqCandidate()) { propertySymOpnd->SetTypeChecked(false); propertySymOpnd->SetTypeAvailable(false); propertySymOpnd->SetWriteGuardChecked(false); } this->FinishOptPropOp(instr, propertySymOpnd); instr = this->SetTypeCheckBailOut(instr->GetSrc1(), instr, nullptr); } } return instr; } IR::Instr * GlobOpt::ToInt32(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Value *val, IR::IndirOpnd *indir, bool lossy) { return this->ToTypeSpecUse(instr, opnd, block, val, indir, TyInt32, IR::BailOutIntOnly, lossy); } IR::Instr * GlobOpt::ToFloat64(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Value *val, IR::IndirOpnd *indir, IR::BailOutKind bailOutKind) { return this->ToTypeSpecUse(instr, opnd, block, val, indir, TyFloat64, bailOutKind); } IR::Instr * GlobOpt::ToTypeSpecUse(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Value *val, IR::IndirOpnd *indir, IRType toType, IR::BailOutKind bailOutKind, bool lossy, IR::Instr *insertBeforeInstr) { Assert(bailOutKind != IR::BailOutInvalid); IR::Instr *newInstr; if (!val && opnd->IsRegOpnd()) { val = block->globOptData.FindValue(opnd->AsRegOpnd()->m_sym); } ValueInfo *valueInfo = val ? val->GetValueInfo() : nullptr; bool needReplaceSrc = false; bool updateBlockLastInstr = false; if (instr) { needReplaceSrc = true; if (!insertBeforeInstr) { insertBeforeInstr = instr; } } else if (!insertBeforeInstr) { // Insert it at the end of the block insertBeforeInstr = block->GetLastInstr(); if (insertBeforeInstr->IsBranchInstr() || insertBeforeInstr->m_opcode == Js::OpCode::BailTarget) { // Don't insert code between the branch and the preceding ByteCodeUses instrs... while(insertBeforeInstr->m_prev->m_opcode == Js::OpCode::ByteCodeUses) { insertBeforeInstr = insertBeforeInstr->m_prev; } } else { insertBeforeInstr = insertBeforeInstr->m_next; updateBlockLastInstr = true; } } // Int constant values will be propagated into the instruction. For ArgOut_A_InlineBuiltIn, there's no benefit from // const-propping, so those are excluded. 
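// e.g. (illustrative) a use whose value is the int constant 5 is normally rewritten into an
// IntConstOpnd/FloatConstOpnd near the end of this function; ArgOut_A_InlineBuiltIn keeps the
// register form instead, since const-propping buys nothing there.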
if (opnd->IsRegOpnd() && !( valueInfo && (valueInfo->HasIntConstantValue() || valueInfo->IsFloatConstant()) && (!instr || instr->m_opcode != Js::OpCode::ArgOut_A_InlineBuiltIn) )) { IR::RegOpnd *regSrc = opnd->AsRegOpnd(); StackSym *varSym = regSrc->m_sym; Js::OpCode opcode = Js::OpCode::FromVar; if (varSym->IsTypeSpec() || !block->globOptData.liveVarSyms->Test(varSym->m_id)) { // Conversion between int32 and float64 if (varSym->IsTypeSpec()) { varSym = varSym->GetVarEquivSym(this->func); } opcode = Js::OpCode::Conv_Prim; } Assert(block->globOptData.liveVarSyms->Test(varSym->m_id) || block->globOptData.IsTypeSpecialized(varSym)); StackSym *typeSpecSym = nullptr; BOOL isLive = FALSE; BVSparse<JitArenaAllocator> *livenessBv = nullptr; if(valueInfo && valueInfo->IsInt()) { // If two syms have the same value, one is lossy-int-specialized, and then the other is int-specialized, the value // would have been updated to definitely int. Upon using the lossy-int-specialized sym later, it would be flagged as // lossy while the value is definitely int. Since the bit-vectors are based on the sym and not the value, update the // lossy state. block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); } if (toType == TyInt32) { // Need to determine whether the conversion is actually lossy or lossless. If the value is an int, then it's a // lossless conversion despite the type of conversion requested. The liveness of the converted int32 sym needs to be // set to reflect the actual type of conversion done. Also, a lossless conversion needs the value to determine // whether the conversion may need to bail out. Assert(valueInfo); if(valueInfo->IsInt()) { lossy = false; } else { Assert(IsLoopPrePass() || !block->globOptData.IsInt32TypeSpecialized(varSym)); } livenessBv = block->globOptData.liveInt32Syms; isLive = livenessBv->Test(varSym->m_id) && (lossy || !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id)); if (this->IsLoopPrePass()) { if(!isLive) { livenessBv->Set(varSym->m_id); if(lossy) { block->globOptData.liveLossyInt32Syms->Set(varSym->m_id); } else { block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); } } if(!lossy) { Assert(bailOutKind == IR::BailOutIntOnly || bailOutKind == IR::BailOutExpectingInteger); valueInfo = valueInfo->SpecializeToInt32(alloc); ChangeValueInfo(nullptr, val, valueInfo); if(needReplaceSrc) { opnd->SetValueType(valueInfo->Type()); } } return instr; } typeSpecSym = varSym->GetInt32EquivSym(this->func); if (!isLive) { if (!opnd->IsVar() || !block->globOptData.liveVarSyms->Test(varSym->m_id) || (block->globOptData.liveFloat64Syms->Test(varSym->m_id) && valueInfo && valueInfo->IsLikelyFloat())) { Assert(block->globOptData.liveFloat64Syms->Test(varSym->m_id)); if(!lossy && !valueInfo->IsInt()) { // Shouldn't try to do a lossless conversion from float64 to int32 when the value is not known to be an // int. There are cases where we need more than two passes over loops to flush out all dependencies. // It's possible for the loop prepass to think that a sym s1 remains an int because it acquires the // value of another sym s2 that is an int in the prepass at that time. However, s2 can become a float // later in the loop body, in which case s1 would become a float on the second iteration of the loop. By // that time, we would have already committed to having s1 live as a lossless int on entry into the // loop, and we end up having to compensate by doing a lossless conversion from float to int, which will // need a bailout and will most likely bail out. 
// // If s2 becomes a var instead of a float, then the compensation is legal although not ideal. After // enough bailouts, rejit would be triggered with aggressive int type spec turned off. For the // float-to-int conversion though, there's no point in emitting a bailout because we already know that // the value is a float and has high probability of bailing out (whereas a var has a chance to be a // tagged int), and so currently lossless conversion from float to int with bailout is not supported. // // So, treating this case as a compile-time bailout. The exception will trigger the jit work item to be // restarted with aggressive int type specialization disabled. if(bailOutKind == IR::BailOutExpectingInteger) { Assert(IsSwitchOptEnabledForIntTypeSpec()); throw Js::RejitException(RejitReason::DisableSwitchOptExpectingInteger); } else { Assert(DoAggressiveIntTypeSpec()); if(PHASE_TRACE(Js::BailOutPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Output::Print( _u("BailOut (compile-time): function: %s (%s) varSym: "), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), varSym->m_id); #if DBG_DUMP varSym->Dump(); #else Output::Print(_u("s%u"), varSym->m_id); #endif if(varSym->HasByteCodeRegSlot()) { Output::Print(_u(" byteCodeReg: R%u"), varSym->GetByteCodeRegSlot()); } Output::Print(_u(" (lossless conversion from float64 to int32)\n")); Output::Flush(); } if(!DoAggressiveIntTypeSpec()) { // Aggressive int type specialization is already off for some reason. Prevent trying to rejit again // because it won't help and the same thing will happen again. Just abort jitting this function. if(PHASE_TRACE(Js::BailOutPhase, this->func)) { Output::Print(_u(" Aborting JIT because AggressiveIntTypeSpec is already off\n")); Output::Flush(); } throw Js::OperationAbortedException(); } throw Js::RejitException(RejitReason::AggressiveIntTypeSpecDisabled); } } if(opnd->IsVar()) { regSrc->SetType(TyFloat64); regSrc->m_sym = varSym->GetFloat64EquivSym(this->func); opcode = Js::OpCode::Conv_Prim; } else { Assert(regSrc->IsFloat64()); Assert(regSrc->m_sym->IsFloat64()); Assert(opcode == Js::OpCode::Conv_Prim); } } } GOPT_TRACE_OPND(regSrc, _u("Converting to int32\n")); } else if (toType == TyFloat64) { // float64 typeSpecSym = varSym->GetFloat64EquivSym(this->func); if(!IsLoopPrePass() && typeSpecSym->m_requiresBailOnNotNumber && block->globOptData.IsFloat64TypeSpecialized(varSym)) { // This conversion is already protected by a BailOutNumberOnly bailout (or at least it will be after the // dead-store phase). Since 'requiresBailOnNotNumber' is not flow-based, change the value to definitely float. if(valueInfo) { if(!valueInfo->IsNumber()) { valueInfo = valueInfo->SpecializeToFloat64(alloc); ChangeValueInfo(block, val, valueInfo); opnd->SetValueType(valueInfo->Type()); } } else { val = NewGenericValue(ValueType::Float); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); opnd->SetValueType(valueInfo->Type()); } } if(bailOutKind == IR::BailOutNumberOnly) { if(!IsLoopPrePass()) { // Ensure that all bailout FromVars that generate a value for this type-specialized sym will bail out on any // non-number value, even ones that have already been generated before. The dead-store pass will update the // bailout kind on already-generated FromVars based on this bit. 
typeSpecSym->m_requiresBailOnNotNumber = true; } } else if(typeSpecSym->m_requiresBailOnNotNumber) { Assert(bailOutKind == IR::BailOutPrimitiveButString); bailOutKind = IR::BailOutNumberOnly; } livenessBv = block->globOptData.liveFloat64Syms; isLive = livenessBv->Test(varSym->m_id); if (this->IsLoopPrePass()) { if(!isLive) { livenessBv->Set(varSym->m_id); } if (this->OptIsInvariant(opnd, block, this->prePassLoop, val, false, true)) { this->prePassLoop->forceFloat64SymsOnEntry->Set(varSym->m_id); } else { Sym *symStore = (valueInfo ? valueInfo->GetSymStore() : NULL); if (symStore && symStore != varSym && this->OptIsInvariant(symStore, block, this->prePassLoop, block->globOptData.FindValue(symStore), false, true)) { // If symStore is assigned to sym and we want sym to be type-specialized, force symStore to be specialized // outside the loop. this->prePassLoop->forceFloat64SymsOnEntry->Set(symStore->m_id); } } if(bailOutKind == IR::BailOutNumberOnly) { if(valueInfo) { valueInfo = valueInfo->SpecializeToFloat64(alloc); ChangeValueInfo(block, val, valueInfo); } else { val = NewGenericValue(ValueType::Float); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); } if(needReplaceSrc) { opnd->SetValueType(valueInfo->Type()); } } return instr; } if (!isLive && regSrc->IsVar()) { if (!block->globOptData.liveVarSyms->Test(varSym->m_id) || ( block->globOptData.liveInt32Syms->Test(varSym->m_id) && !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id) && valueInfo && valueInfo->IsLikelyInt() )) { Assert(block->globOptData.liveInt32Syms->Test(varSym->m_id)); Assert(!block->globOptData.liveLossyInt32Syms->Test(varSym->m_id)); // Shouldn't try to convert a lossy int32 to anything regSrc->SetType(TyInt32); regSrc->m_sym = varSym->GetInt32EquivSym(this->func); opcode = Js::OpCode::Conv_Prim; } } GOPT_TRACE_OPND(regSrc, _u("Converting to float64\n")); } #ifdef ENABLE_SIMDJS else { // SIMD_JS Assert(IRType_IsSimd128(toType)); // Get or create type-spec sym typeSpecSym = varSym->GetSimd128EquivSym(toType, this->func); if (!IsLoopPrePass() && block->globOptData.IsSimd128TypeSpecialized(toType, varSym)) { // Consider: Is this needed? Shouldn't this have been done at the previous FromVar, since the simd128 sym is alive? if (valueInfo) { if (!valueInfo->IsSimd128(toType)) { valueInfo = valueInfo->SpecializeToSimd128(toType, alloc); ChangeValueInfo(block, val, valueInfo); opnd->SetValueType(valueInfo->Type()); } } else { val = NewGenericValue(GetValueTypeFromIRType(toType)); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); opnd->SetValueType(valueInfo->Type()); } } livenessBv = block->globOptData.GetSimd128LivenessBV(toType); isLive = livenessBv->Test(varSym->m_id); if (this->IsLoopPrePass()) { // FromVar Hoisting BVSparse<Memory::JitArenaAllocator> * forceSimd128SymsOnEntry; forceSimd128SymsOnEntry = \ toType == TySimd128F4 ? this->prePassLoop->forceSimd128F4SymsOnEntry : this->prePassLoop->forceSimd128I4SymsOnEntry; if (!isLive) { livenessBv->Set(varSym->m_id); } // Be aggressive with hoisting only if value is always initialized to SIMD type before entering loop. // This reduces the chance that the FromVar gets executed while the specialized instruction in the loop is not, leading to unnecessary, excessive bailouts. if (val && !val->GetValueInfo()->HasBeenUndefined() && !val->GetValueInfo()->HasBeenNull() && this->OptIsInvariant(opnd, block, this->prePassLoop, val, false, true)) { forceSimd128SymsOnEntry->Set(varSym->m_id); } else { Sym *symStore = (valueInfo ?
valueInfo->GetSymStore() : NULL); Value * value = symStore ? block->globOptData.FindValue(symStore) : nullptr; if (symStore && symStore != varSym && value && !value->GetValueInfo()->HasBeenUndefined() && !value->GetValueInfo()->HasBeenNull() && this->OptIsInvariant(symStore, block, this->prePassLoop, value, true, true)) { // If symStore is assigned to sym and we want sym to be type-specialized, force symStore to be specialized // outside the loop. forceSimd128SymsOnEntry->Set(symStore->m_id); } } Assert(bailOutKind == IR::BailOutSimd128F4Only || bailOutKind == IR::BailOutSimd128I4Only); // We are in the loop prepass and haven't propagated the value info to the src. Do it now. if (valueInfo) { valueInfo = valueInfo->SpecializeToSimd128(toType, alloc); ChangeValueInfo(block, val, valueInfo); } else { val = NewGenericValue(GetValueTypeFromIRType(toType)); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); } if (needReplaceSrc) { opnd->SetValueType(valueInfo->Type()); } return instr; } GOPT_TRACE_OPND(regSrc, _u("Converting to Simd128\n")); } #endif bool needLoad = false; if (needReplaceSrc) { bool wasDead = regSrc->GetIsDead(); // needReplaceSrc means we are type specializing a use, and need to replace the src on the instr if (!isLive) { needLoad = true; // ReplaceSrc will delete it. regSrc = regSrc->Copy(instr->m_func)->AsRegOpnd(); } IR::RegOpnd * regNew = IR::RegOpnd::New(typeSpecSym, toType, instr->m_func); if(valueInfo) { regNew->SetValueType(valueInfo->Type()); regNew->m_wasNegativeZeroPreventedByBailout = valueInfo->WasNegativeZeroPreventedByBailout(); } regNew->SetIsDead(wasDead); regNew->SetIsJITOptimizedReg(true); this->CaptureByteCodeSymUses(instr); if (indir == nullptr) { instr->ReplaceSrc(opnd, regNew); } else { indir->ReplaceIndexOpnd(regNew); } opnd = regNew; if (!needLoad) { Assert(isLive); return instr; } } else { // We just need to insert a load of a type spec sym if(isLive) { return instr; } // Insert it before the specified instruction instr = insertBeforeInstr; } IR::RegOpnd *regDst = IR::RegOpnd::New(typeSpecSym, toType, instr->m_func); bool isBailout = false; bool isHoisted = false; bool isInLandingPad = (block->next && !block->next->isDeleted && block->next->isLoopHeader); if (isInLandingPad) { Loop *loop = block->next->loop; Assert(loop && loop->landingPad == block); Assert(loop->bailOutInfo); } if (opcode == Js::OpCode::FromVar) { if (toType == TyInt32) { Assert(valueInfo); if (lossy) { if (!valueInfo->IsPrimitive() && !block->globOptData.IsTypeSpecialized(varSym)) { // Lossy conversions to int32 on non-primitive values may have implicit calls to toString or valueOf, which // may be overridden to have a side effect. The side effect needs to happen every time the conversion is // supposed to happen, so the resulting lossy int32 value cannot be reused. Bail out on implicit calls. Assert(DoLossyIntTypeSpec()); bailOutKind = IR::BailOutOnNotPrimitive; isBailout = true; } } else if (!valueInfo->IsInt()) { // The operand is likely an int (hence the request to convert to int), so bail out if it's not an int. Only // bail out if a lossless conversion to int is requested. Lossy conversions to int such as in (a | 0) don't // need to bail out.
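// e.g. (illustrative) for (a | 0) with a == 1.5, ToInt32 semantics simply produce 1, so the
// lossy conversion needs no bailout; a lossless request (say, a used as an array index)
// must bail out when a turns out not to actually be an int.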
if (bailOutKind == IR::BailOutExpectingInteger) { Assert(IsSwitchOptEnabledForIntTypeSpec()); } else { Assert(DoAggressiveIntTypeSpec()); } isBailout = true; } } else if (toType == TyFloat64 && (!valueInfo || !valueInfo->IsNumber())) { // Bailout if converting vars to float if we can't prove they are floats: // x = str + float; -> need to bailout if str is a string // // x = obj * 0.1; // y = obj * 0.2; -> if obj has valueof, we'll only call valueof once on the FromVar conversion... Assert(bailOutKind != IR::BailOutInvalid); isBailout = true; } #ifdef ENABLE_SIMDJS else if (IRType_IsSimd128(toType) && (!valueInfo || !valueInfo->IsSimd128(toType))) { Assert(toType == TySimd128F4 && bailOutKind == IR::BailOutSimd128F4Only || toType == TySimd128I4 && bailOutKind == IR::BailOutSimd128I4Only); isBailout = true; } #endif } if (isBailout) { if (isInLandingPad) { Loop *loop = block->next->loop; this->EnsureBailTarget(loop); instr = loop->bailOutInfo->bailOutInstr; updateBlockLastInstr = false; newInstr = IR::BailOutInstr::New(opcode, bailOutKind, loop->bailOutInfo, instr->m_func); newInstr->SetDst(regDst); newInstr->SetSrc1(regSrc); } else { newInstr = IR::BailOutInstr::New(opcode, regDst, regSrc, bailOutKind, instr, instr->m_func); } } else { newInstr = IR::Instr::New(opcode, regDst, regSrc, instr->m_func); } newInstr->SetByteCodeOffset(instr); instr->InsertBefore(newInstr); if (updateBlockLastInstr) { block->SetLastInstr(newInstr); } regDst->SetIsJITOptimizedReg(true); newInstr->GetSrc1()->AsRegOpnd()->SetIsJITOptimizedReg(true); ValueInfo *const oldValueInfo = valueInfo; if(valueInfo) { newInstr->GetSrc1()->SetValueType(valueInfo->Type()); } if(isBailout) { Assert(opcode == Js::OpCode::FromVar); if(toType == TyInt32) { Assert(valueInfo); if(!lossy) { Assert(bailOutKind == IR::BailOutIntOnly || bailOutKind == IR::BailOutExpectingInteger); valueInfo = valueInfo->SpecializeToInt32(alloc, isPerformingLoopBackEdgeCompensation); ChangeValueInfo(nullptr, val, valueInfo); int32 intConstantValue; if(indir && needReplaceSrc && valueInfo->TryGetIntConstantValue(&intConstantValue)) { // A likely-int value can have constant bounds due to conditional branches narrowing its range. Now that // the sym has been proven to be an int, the likely-int value, after specialization, will be constant. // Replace the index opnd in the indir with an offset. 
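// Illustrative sketch (hypothetical syms): if index sym s3 had constant bounds [7,7], then
// once this FromVar proves it is an int, the indir [s2 + s3] becomes [s2 + 7]: the index
// opnd is unlinked and freed and the constant becomes the indir's offset (scale 0, per the
// asserts below).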
Assert(opnd == indir->GetIndexOpnd()); Assert(indir->GetScale() == 0); indir->UnlinkIndexOpnd()->Free(instr->m_func); opnd = nullptr; indir->SetOffset(intConstantValue); } } } else if (toType == TyFloat64) { if(bailOutKind == IR::BailOutNumberOnly) { if(valueInfo) { valueInfo = valueInfo->SpecializeToFloat64(alloc); ChangeValueInfo(block, val, valueInfo); } else { val = NewGenericValue(ValueType::Float); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); } } } else { Assert(IRType_IsSimd128(toType)); if (valueInfo) { valueInfo = valueInfo->SpecializeToSimd128(toType, alloc); ChangeValueInfo(block, val, valueInfo); } else { val = NewGenericValue(GetValueTypeFromIRType(toType)); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); } } } if(valueInfo) { newInstr->GetDst()->SetValueType(valueInfo->Type()); if(needReplaceSrc && opnd) { opnd->SetValueType(valueInfo->Type()); } } if (block->loop) { Assert(!this->IsLoopPrePass()); isHoisted = this->TryHoistInvariant(newInstr, block, val, val, nullptr, false, lossy, false, bailOutKind); } if (isBailout) { if (!isHoisted && !isInLandingPad) { if(valueInfo) { // Since this is a pre-op bailout, the old value info should be used for the purposes of bailout. For // instance, the value info could be LikelyInt but with a constant range. Once specialized to int, the value // info would be an int constant. However, the int constant is only guaranteed if the value is actually an // int, which this conversion is verifying, so bailout cannot assume the constant value. if(oldValueInfo) { val->SetValueInfo(oldValueInfo); } else { block->globOptData.ClearSymValue(varSym); } } // Fill in bail out info if the FromVar is a bailout instr, and it wasn't hoisted as invariant. // If it was hoisted, the invariant code will fill out the bailout info with the loop landing pad bailout info. this->FillBailOutInfo(block, newInstr->GetBailOutInfo()); if(valueInfo) { // Restore the new value info after filling the bailout info if(oldValueInfo) { val->SetValueInfo(valueInfo); } else { block->globOptData.SetValue(val, varSym); } } } } // Now that we've captured the liveness in the bailout info, we can mark this as live. // This type specialized sym isn't live if the FromVar bails out. livenessBv->Set(varSym->m_id); if(toType == TyInt32) { if(lossy) { block->globOptData.liveLossyInt32Syms->Set(varSym->m_id); } else { block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); } } } else { Assert(valueInfo); if(opnd->IsRegOpnd() && valueInfo->IsInt()) { // If two syms have the same value, one is lossy-int-specialized, and then the other is int-specialized, the value // would have been updated to definitely int. Upon using the lossy-int-specialized sym later, it would be flagged as // lossy while the value is definitely int. Since the bit-vectors are based on the sym and not the value, update the // lossy state. block->globOptData.liveLossyInt32Syms->Clear(opnd->AsRegOpnd()->m_sym->m_id); if(toType == TyInt32) { lossy = false; } } if (this->IsLoopPrePass()) { if(opnd->IsRegOpnd()) { StackSym *const sym = opnd->AsRegOpnd()->m_sym; if(toType == TyInt32) { Assert(!sym->IsTypeSpec()); block->globOptData.liveInt32Syms->Set(sym->m_id); if(lossy) { block->globOptData.liveLossyInt32Syms->Set(sym->m_id); } else { block->globOptData.liveLossyInt32Syms->Clear(sym->m_id); } } else { Assert(toType == TyFloat64); AnalysisAssert(instr); StackSym *const varSym = sym->IsTypeSpec() ? 
sym->GetVarEquivSym(instr->m_func) : sym; block->globOptData.liveFloat64Syms->Set(varSym->m_id); } } return instr; } if (!needReplaceSrc) { instr = insertBeforeInstr; } IR::Opnd *constOpnd; int32 intConstantValue; if(valueInfo->TryGetIntConstantValue(&intConstantValue)) { if(toType == TyInt32) { constOpnd = IR::IntConstOpnd::New(intConstantValue, TyInt32, instr->m_func); } else { Assert(toType == TyFloat64); constOpnd = IR::FloatConstOpnd::New(static_cast<FloatConstType>(intConstantValue), TyFloat64, instr->m_func); } } else if(valueInfo->IsFloatConstant()) { const FloatConstType floatValue = valueInfo->AsFloatConstant()->FloatValue(); if(toType == TyInt32) { Assert(lossy); constOpnd = IR::IntConstOpnd::New( Js::JavascriptMath::ToInt32(floatValue), TyInt32, instr->m_func); } else { Assert(toType == TyFloat64); constOpnd = IR::FloatConstOpnd::New(floatValue, TyFloat64, instr->m_func); } } else { Assert(opnd->IsVar()); Assert(opnd->IsAddrOpnd()); AssertMsg(opnd->AsAddrOpnd()->IsVar(), "We only expect to see addr that are var before lower."); // Don't need to capture uses, we are only replacing an addr opnd if(toType == TyInt32) { constOpnd = IR::IntConstOpnd::New(Js::TaggedInt::ToInt32(opnd->AsAddrOpnd()->m_address), TyInt32, instr->m_func); } else { Assert(toType == TyFloat64); constOpnd = IR::FloatConstOpnd::New(Js::TaggedInt::ToDouble(opnd->AsAddrOpnd()->m_address), TyFloat64, instr->m_func); } } if (toType == TyInt32) { if (needReplaceSrc) { CaptureByteCodeSymUses(instr); if(indir) { Assert(opnd == indir->GetIndexOpnd()); Assert(indir->GetScale() == 0); indir->UnlinkIndexOpnd()->Free(instr->m_func); indir->SetOffset(constOpnd->AsIntConstOpnd()->AsInt32()); } else { instr->ReplaceSrc(opnd, constOpnd); } } else { StackSym *varSym = opnd->AsRegOpnd()->m_sym; if(varSym->IsTypeSpec()) { varSym = varSym->GetVarEquivSym(nullptr); Assert(varSym); } if(block->globOptData.liveInt32Syms->TestAndSet(varSym->m_id)) { Assert(!!block->globOptData.liveLossyInt32Syms->Test(varSym->m_id) == lossy); } else { if(lossy) { block->globOptData.liveLossyInt32Syms->Set(varSym->m_id); } StackSym *int32Sym = varSym->GetInt32EquivSym(instr->m_func); IR::RegOpnd *int32Reg = IR::RegOpnd::New(int32Sym, TyInt32, instr->m_func); int32Reg->SetIsJITOptimizedReg(true); newInstr = IR::Instr::New(Js::OpCode::Ld_I4, int32Reg, constOpnd, instr->m_func); newInstr->SetByteCodeOffset(instr); instr->InsertBefore(newInstr); if (updateBlockLastInstr) { block->SetLastInstr(newInstr); } } } } else { StackSym *floatSym; bool newFloatSym = false; StackSym* varSym; if (opnd->IsRegOpnd()) { varSym = opnd->AsRegOpnd()->m_sym; if (varSym->IsTypeSpec()) { varSym = varSym->GetVarEquivSym(nullptr); Assert(varSym); } floatSym = varSym->GetFloat64EquivSym(instr->m_func); } else { varSym = block->globOptData.GetCopyPropSym(nullptr, val); if(!varSym) { // Clear the symstore to ensure it's set below to this new symbol this->SetSymStoreDirect(val->GetValueInfo(), nullptr); varSym = StackSym::New(TyVar, instr->m_func); newFloatSym = true; } floatSym = varSym->GetFloat64EquivSym(instr->m_func); } IR::RegOpnd *floatReg = IR::RegOpnd::New(floatSym, TyFloat64, instr->m_func); floatReg->SetIsJITOptimizedReg(true); // If the value is not live - let's load it. 
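// e.g. (illustrative) the first float64-specialized use of the constant in this block emits
// an LdC_F8_R8 into the float64 equiv sym; the TestAndSet below marks that sym live, so
// later uses in the block reuse it instead of reloading the constant.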
if(!block->globOptData.liveFloat64Syms->TestAndSet(varSym->m_id)) { newInstr = IR::Instr::New(Js::OpCode::LdC_F8_R8, floatReg, constOpnd, instr->m_func); newInstr->SetByteCodeOffset(instr); instr->InsertBefore(newInstr); if (updateBlockLastInstr) { block->SetLastInstr(newInstr); } if(newFloatSym) { block->globOptData.SetValue(val, varSym); } // Src is always invariant, but check if the dst is, and then hoist. if (block->loop && ( (newFloatSym && block->loop->CanHoistInvariants()) || this->OptIsInvariant(floatReg, block, block->loop, val, false, false) )) { Assert(!this->IsLoopPrePass()); this->OptHoistInvariant(newInstr, block, block->loop, val, val, nullptr, false); } } if (needReplaceSrc) { CaptureByteCodeSymUses(instr); instr->ReplaceSrc(opnd, floatReg); } } return instr; } return newInstr; } void GlobOpt::ToVarRegOpnd(IR::RegOpnd *dst, BasicBlock *block) { ToVarStackSym(dst->m_sym, block); } void GlobOpt::ToVarStackSym(StackSym *varSym, BasicBlock *block) { // In asm.js there are mostly no var syms, so check that this is the primary (var) sym and not a type-spec sym. Assert(!varSym->IsTypeSpec()); block->globOptData.liveVarSyms->Set(varSym->m_id); block->globOptData.liveInt32Syms->Clear(varSym->m_id); block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); block->globOptData.liveFloat64Syms->Clear(varSym->m_id); #ifdef ENABLE_SIMDJS // SIMD_JS block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id); block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id); #endif } void GlobOpt::ToInt32Dst(IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block) { StackSym *varSym = dst->m_sym; Assert(!varSym->IsTypeSpec()); if (!this->IsLoopPrePass() && varSym->IsVar()) { StackSym *int32Sym = varSym->GetInt32EquivSym(instr->m_func); // Use UnlinkDst / SetDst to make sure isSingleDef is tracked properly, // since we'll just be hammering the symbol. dst = instr->UnlinkDst()->AsRegOpnd(); dst->m_sym = int32Sym; dst->SetType(TyInt32); instr->SetDst(dst); } block->globOptData.liveInt32Syms->Set(varSym->m_id); block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); // The store makes it lossless block->globOptData.liveVarSyms->Clear(varSym->m_id); block->globOptData.liveFloat64Syms->Clear(varSym->m_id); #ifdef ENABLE_SIMDJS // SIMD_JS block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id); block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id); #endif } void GlobOpt::ToUInt32Dst(IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block) { // We should be calling this only for asm.js functions Assert(GetIsAsmJSFunc()); StackSym *varSym = dst->m_sym; Assert(!varSym->IsTypeSpec()); block->globOptData.liveInt32Syms->Set(varSym->m_id); block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); // The store makes it lossless block->globOptData.liveVarSyms->Clear(varSym->m_id); block->globOptData.liveFloat64Syms->Clear(varSym->m_id); #ifdef ENABLE_SIMDJS // SIMD_JS block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id); block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id); #endif } void GlobOpt::ToFloat64Dst(IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block) { StackSym *varSym = dst->m_sym; Assert(!varSym->IsTypeSpec()); if (!this->IsLoopPrePass() && varSym->IsVar()) { StackSym *float64Sym = varSym->GetFloat64EquivSym(this->func); // Use UnlinkDst / SetDst to make sure isSingleDef is tracked properly, // since we'll just be hammering the symbol.
dst = instr->UnlinkDst()->AsRegOpnd(); dst->m_sym = float64Sym; dst->SetType(TyFloat64); instr->SetDst(dst); } block->globOptData.liveFloat64Syms->Set(varSym->m_id); block->globOptData.liveVarSyms->Clear(varSym->m_id); block->globOptData.liveInt32Syms->Clear(varSym->m_id); block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); #ifdef ENABLE_SIMDJS // SIMD_JS block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id); block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id); #endif } #ifdef ENABLE_SIMDJS // SIMD_JS void GlobOpt::ToSimd128Dst(IRType toType, IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block) { StackSym *varSym = dst->m_sym; Assert(!varSym->IsTypeSpec()); BVSparse<JitArenaAllocator> * livenessBV = block->globOptData.GetSimd128LivenessBV(toType); Assert(livenessBV); if (!this->IsLoopPrePass() && varSym->IsVar()) { StackSym *simd128Sym = varSym->GetSimd128EquivSym(toType, this->func); // Use UnlinkDst / SetDst to make sure isSingleDef is tracked properly, // since we'll just be hammering the symbol. dst = instr->UnlinkDst()->AsRegOpnd(); dst->m_sym = simd128Sym; dst->SetType(toType); instr->SetDst(dst); } block->globOptData.liveFloat64Syms->Clear(varSym->m_id); block->globOptData.liveVarSyms->Clear(varSym->m_id); block->globOptData.liveInt32Syms->Clear(varSym->m_id); block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); // SIMD_JS block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id); block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id); livenessBV->Set(varSym->m_id); } #endif static void SetIsConstFlag(StackSym* dstSym, int64 value) { Assert(dstSym); dstSym->SetIsInt64Const(); } static void SetIsConstFlag(StackSym* dstSym, int value) { Assert(dstSym); dstSym->SetIsIntConst(value); } static IR::Opnd* CreateIntConstOpnd(IR::Instr* instr, int64 value) { return (IR::Opnd*)IR::Int64ConstOpnd::New(value, instr->GetDst()->GetType(), instr->m_func); } static IR::Opnd* CreateIntConstOpnd(IR::Instr* instr, int value) { IntConstType constVal; if (instr->GetDst()->IsUnsigned()) { // we should zero extend in case of uint constVal = (uint32)value; } else { constVal = value; } return (IR::Opnd*)IR::IntConstOpnd::New(constVal, instr->GetDst()->GetType(), instr->m_func); } template <typename T> IR::Opnd* GlobOpt::ReplaceWConst(IR::Instr **pInstr, T value, Value **pDstVal) { IR::Instr * &instr = *pInstr; IR::Opnd * constOpnd = CreateIntConstOpnd(instr, value); instr->ReplaceSrc1(constOpnd); instr->FreeSrc2(); this->OptSrc(constOpnd, &instr); IR::Opnd *dst = instr->GetDst(); StackSym *dstSym = dst->AsRegOpnd()->m_sym; if (dstSym->IsSingleDef()) { SetIsConstFlag(dstSym, value); } GOPT_TRACE_INSTR(instr, _u("Constant folding to %d: \n"), value); *pDstVal = GetIntConstantValue(value, instr, dst); return dst; } template <typename T> bool GlobOpt::OptConstFoldBinaryWasm( IR::Instr** pInstr, const Value* src1, const Value* src2, Value **pDstVal) { IR::Instr* &instr = *pInstr; if (!DoConstFold()) { return false; } T src1IntConstantValue, src2IntConstantValue; if (!src1 || !src1->GetValueInfo()->TryGetIntConstantValue(&src1IntConstantValue, false) || //a bit sketchy: false for int32 means likelyInt = false !src2 || !src2->GetValueInfo()->TryGetIntConstantValue(&src2IntConstantValue, false) //and unsigned = false for int64 ) { return false; } int64 tmpValueOut; if (!instr->BinaryCalculatorT<T>(src1IntConstantValue, src2IntConstantValue, &tmpValueOut, func->GetJITFunctionBody()->IsWasmFunction())) { return false; } this->CaptureByteCodeSymUses(instr); IR::Opnd *dst = 
(instr->GetDst()->IsInt64()) ? //dst can be int32 for int64 comparison operators ReplaceWConst(pInstr, tmpValueOut, pDstVal) : ReplaceWConst(pInstr, (int)tmpValueOut, pDstVal); instr->m_opcode = Js::OpCode::Ld_I4; this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock); return true; } bool GlobOpt::OptConstFoldBinary( IR::Instr * *pInstr, const IntConstantBounds &src1IntConstantBounds, const IntConstantBounds &src2IntConstantBounds, Value **pDstVal) { IR::Instr * &instr = *pInstr; int32 value; IR::IntConstOpnd *constOpnd; if (!DoConstFold()) { return false; } int32 src1IntConstantValue = -1; int32 src2IntConstantValue = -1; int32 src1MaxIntConstantValue = -1; int32 src2MaxIntConstantValue = -1; int32 src1MinIntConstantValue = -1; int32 src2MinIntConstantValue = -1; if (instr->IsBranchInstr()) { src1MinIntConstantValue = src1IntConstantBounds.LowerBound(); src1MaxIntConstantValue = src1IntConstantBounds.UpperBound(); src2MinIntConstantValue = src2IntConstantBounds.LowerBound(); src2MaxIntConstantValue = src2IntConstantBounds.UpperBound(); } else if (src1IntConstantBounds.IsConstant() && src2IntConstantBounds.IsConstant()) { src1IntConstantValue = src1IntConstantBounds.LowerBound(); src2IntConstantValue = src2IntConstantBounds.LowerBound(); } else { return false; } IntConstType tmpValueOut; if (!instr->BinaryCalculator(src1IntConstantValue, src2IntConstantValue, &tmpValueOut, TyInt32) || !Math::FitsInDWord(tmpValueOut)) { return false; } value = (int32)tmpValueOut; this->CaptureByteCodeSymUses(instr); constOpnd = IR::IntConstOpnd::New(value, TyInt32, instr->m_func); instr->ReplaceSrc1(constOpnd); instr->FreeSrc2(); this->OptSrc(constOpnd, &instr); IR::Opnd *dst = instr->GetDst(); Assert(dst->IsRegOpnd()); StackSym *dstSym = dst->AsRegOpnd()->m_sym; if (dstSym->IsSingleDef()) { dstSym->SetIsIntConst(value); } GOPT_TRACE_INSTR(instr, _u("Constant folding to %d: \n"), value); *pDstVal = GetIntConstantValue(value, instr, dst); if (IsTypeSpecPhaseOff(this->func)) { instr->m_opcode = Js::OpCode::LdC_A_I4; this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock); } else { instr->m_opcode = Js::OpCode::Ld_I4; this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock); } // If this is an induction variable, then treat it the way the prepass would have if it had seen // the assignment and the resulting change to the value number, and mark it as indeterminate. 
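// e.g. (illustrative) if dstSym is the counter i of an enclosing loop and i = 2 + 3 was just
// folded to a constant, the prepass's model of how i changes per iteration no longer holds,
// so the induction-variable change is marked indeterminate for every enclosing loop below.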
for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent) { InductionVariable *iv = nullptr; if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv)) { iv->SetChangeIsIndeterminate(); } } return true; } void GlobOpt::OptConstFoldBr(bool test, IR::Instr *instr, Value * src1Val, Value * src2Val) { GOPT_TRACE_INSTR(instr, _u("Constant folding to branch: ")); BasicBlock *deadBlock; if (src1Val) { this->ToInt32(instr, instr->GetSrc1(), this->currentBlock, src1Val, nullptr, false); } if (src2Val) { this->ToInt32(instr, instr->GetSrc2(), this->currentBlock, src2Val, nullptr, false); } this->CaptureByteCodeSymUses(instr); if (test) { instr->m_opcode = Js::OpCode::Br; instr->FreeSrc1(); if(instr->GetSrc2()) { instr->FreeSrc2(); } deadBlock = instr->m_next->AsLabelInstr()->GetBasicBlock(); } else { AssertMsg(instr->m_next->IsLabelInstr(), "Next instr of branch should be a label..."); if(instr->AsBranchInstr()->IsMultiBranch()) { return; } deadBlock = instr->AsBranchInstr()->GetTarget()->GetBasicBlock(); instr->FreeSrc1(); if(instr->GetSrc2()) { instr->FreeSrc2(); } instr->m_opcode = Js::OpCode::Nop; } // Loop back edge: we would have already decremented data use count for the tail block when we processed the loop header. if (!(this->currentBlock->loop && this->currentBlock->loop->GetHeadBlock() == deadBlock)) { this->currentBlock->DecrementDataUseCount(); } this->currentBlock->RemoveDeadSucc(deadBlock, this->func->m_fg); if (deadBlock->GetPredList()->Count() == 0) { deadBlock->SetDataUseCount(0); } } void GlobOpt::ChangeValueType( BasicBlock *const block, Value *const value, const ValueType newValueType, const bool preserveSubclassInfo, const bool allowIncompatibleType) const { Assert(value); // Why are we trying to change the value type of the type sym value? Asserting here to make sure we don't deep copy the type sym's value info. Assert(!value->GetValueInfo()->IsJsType()); ValueInfo *const valueInfo = value->GetValueInfo(); const ValueType valueType(valueInfo->Type()); if(valueType == newValueType && (preserveSubclassInfo || valueInfo->IsGeneric())) { return; } // ArrayValueInfo has information specific to the array type, so make sure that doesn't change Assert( !preserveSubclassInfo || !valueInfo->IsArrayValueInfo() || newValueType.IsObject() && newValueType.GetObjectType() == valueInfo->GetObjectType()); Assert(!valueInfo->GetSymStore() || !valueInfo->GetSymStore()->IsStackSym() || !valueInfo->GetSymStore()->AsStackSym()->IsFromByteCodeConstantTable()); ValueInfo *const newValueInfo = preserveSubclassInfo ? valueInfo->Copy(alloc) : valueInfo->CopyWithGenericStructureKind(alloc); newValueInfo->Type() = newValueType; ChangeValueInfo(block, value, newValueInfo, allowIncompatibleType); } void GlobOpt::ChangeValueInfo(BasicBlock *const block, Value *const value, ValueInfo *const newValueInfo, const bool allowIncompatibleType, const bool compensated) const { Assert(value); Assert(newValueInfo); // The value type must be changed to something more specific or something more generic. For instance, it would be changed to // something more specific if the current value type is LikelyArray and checks have been done to ensure that it's an array, // and it would be changed to something more generic if a call kills the Array value type and it must be treated as // LikelyArray going forward. // There are cases where we change the type because of different profile information, and because of rejit, these profile information // may conflict. 
    // Need to allow an incompatible type in those cases. However, the old type should be indefinite.
    Assert(
        (allowIncompatibleType && !value->GetValueInfo()->IsDefinite()) ||
        AreValueInfosCompatible(newValueInfo, value->GetValueInfo()));

    // ArrayValueInfo has information specific to the array type, so make sure that doesn't change
    Assert(
        !value->GetValueInfo()->IsArrayValueInfo() ||
        !newValueInfo->IsArrayValueInfo() ||
        newValueInfo->GetObjectType() == value->GetValueInfo()->GetObjectType());

    if (block)
    {
        TrackValueInfoChangeForKills(block, value, newValueInfo, compensated);
    }
    value->SetValueInfo(newValueInfo);
}

bool
GlobOpt::AreValueInfosCompatible(const ValueInfo *const v0, const ValueInfo *const v1) const
{
    Assert(v0);
    Assert(v1);

    if (v0->IsUninitialized() || v1->IsUninitialized())
    {
        return true;
    }

    const bool doAggressiveIntTypeSpec = DoAggressiveIntTypeSpec();
    if (doAggressiveIntTypeSpec && (v0->IsInt() || v1->IsInt()))
    {
        // Int specialization in some uncommon loop cases involving dependencies needs to allow specializing values of
        // arbitrary types, even values that are definitely not int, to compensate for aggressive assumptions made by a
        // loop prepass
        return true;
    }

    if ((v0->Type()).IsMixedTypedArrayPair(v1->Type()) || (v1->Type()).IsMixedTypedArrayPair(v0->Type()))
    {
        return true;
    }

    const bool doFloatTypeSpec = DoFloatTypeSpec();
    if (doFloatTypeSpec && (v0->IsFloat() || v1->IsFloat()))
    {
        // Float specialization allows specializing values of arbitrary types, even values that are definitely not float
        return true;
    }

#ifdef ENABLE_SIMDJS
    // SIMD_JS
    if (SIMD128_TYPE_SPEC_FLAG && v0->Type().IsSimd128())
    {
        // We only type-spec Undefined values, Objects (possibly merged SIMD values), or actual SIMD values.
        if (v1->Type().IsLikelyUndefined() || v1->Type().IsLikelyNull())
        {
            return true;
        }
        if (v1->Type().IsLikelyObject() && v1->Type().GetObjectType() == ObjectType::Object)
        {
            return true;
        }
        if (v1->Type().IsSimd128())
        {
            return v0->Type().GetObjectType() == v1->Type().GetObjectType();
        }
    }
#endif

    const bool doArrayMissingValueCheckHoist = DoArrayMissingValueCheckHoist();
    const bool doNativeArrayTypeSpec = DoNativeArrayTypeSpec();
    const auto AreValueTypesCompatible = [=](const ValueType t0, const ValueType t1)
    {
        return
            t0.IsSubsetOf(t1, doAggressiveIntTypeSpec, doFloatTypeSpec, doArrayMissingValueCheckHoist, doNativeArrayTypeSpec) ||
            t1.IsSubsetOf(t0, doAggressiveIntTypeSpec, doFloatTypeSpec, doArrayMissingValueCheckHoist, doNativeArrayTypeSpec);
    };

    const ValueType t0(v0->Type().ToDefinite()), t1(v1->Type().ToDefinite());
    if (t0.IsLikelyObject() && t1.IsLikelyObject())
    {
        // Check compatibility for the primitive portions and the object portions of the value types separately
        if (AreValueTypesCompatible(t0.ToDefiniteObject(), t1.ToDefiniteObject()) &&
            (
                !t0.HasBeenPrimitive() ||
                !t1.HasBeenPrimitive() ||
                AreValueTypesCompatible(t0.ToDefinitePrimitiveSubset(), t1.ToDefinitePrimitiveSubset())
            ))
        {
            return true;
        }
    }
    else if (AreValueTypesCompatible(t0, t1))
    {
        return true;
    }

    const FloatConstantValueInfo *floatConstantValueInfo;
    const ValueInfo *likelyIntValueinfo;
    if (v0->IsFloatConstant() && v1->IsLikelyInt())
    {
        floatConstantValueInfo = v0->AsFloatConstant();
        likelyIntValueinfo = v1;
    }
    else if (v0->IsLikelyInt() && v1->IsFloatConstant())
    {
        floatConstantValueInfo = v1->AsFloatConstant();
        likelyIntValueinfo = v0;
    }
    else
    {
        return false;
    }

    // A float constant value with a value that is actually an int is a subset of a likely-int value.
    // Ideally, we should create an int constant value for this up front, such that IsInt() also returns true. There
    // were other issues with that; we should see whether that can be done.
    int32 int32Value;
    return
        Js::JavascriptNumber::TryGetInt32Value(floatConstantValueInfo->FloatValue(), &int32Value) &&
        (!likelyIntValueinfo->IsLikelyTaggedInt() || !Js::TaggedInt::IsOverflow(int32Value));
}

#if DBG

void
GlobOpt::VerifyArrayValueInfoForTracking(
    const ValueInfo *const valueInfo,
    const bool isJsArray,
    const BasicBlock *const block,
    const bool ignoreKnownImplicitCalls) const
{
    Assert(valueInfo);
    Assert(valueInfo->IsAnyOptimizedArray());
    Assert(isJsArray == valueInfo->IsArrayOrObjectWithArray());
    Assert(!isJsArray == valueInfo->IsOptimizedTypedArray());
    Assert(block);

    Loop *implicitCallsLoop;
    if (block->next && !block->next->isDeleted && block->next->isLoopHeader)
    {
        // Since a loop's landing pad does not have user code, determine whether disabling implicit calls is allowed in the
        // landing pad based on the loop for which this block is the landing pad.
        implicitCallsLoop = block->next->loop;
        Assert(implicitCallsLoop);
        Assert(implicitCallsLoop->landingPad == block);
    }
    else
    {
        implicitCallsLoop = block->loop;
    }

    Assert(
        !isJsArray ||
        DoArrayCheckHoist(valueInfo->Type(), implicitCallsLoop) ||
        (
            ignoreKnownImplicitCalls &&
            !(implicitCallsLoop ? ImplicitCallFlagsAllowOpts(implicitCallsLoop) : ImplicitCallFlagsAllowOpts(func))
        ));
    Assert(!(isJsArray && valueInfo->HasNoMissingValues() && !DoArrayMissingValueCheckHoist()));
    Assert(
        !(
            valueInfo->IsArrayValueInfo() &&
            (
                valueInfo->AsArrayValueInfo()->HeadSegmentSym() ||
                valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()
            ) &&
            !DoArraySegmentHoist(valueInfo->Type())
        ));
#if 0
    // We can't assert here that there is only a head segment length sym if hoisting is allowed in the current block,
    // because we may have propagated the sym forward out of a loop, and hoisting may be allowed inside but not
    // outside the loop.
    Assert(
        isJsArray ||
        !valueInfo->IsArrayValueInfo() ||
        !valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym() ||
        DoTypedArraySegmentLengthHoist(implicitCallsLoop) ||
        ignoreKnownImplicitCalls ||
        (implicitCallsLoop ? ImplicitCallFlagsAllowOpts(implicitCallsLoop) : ImplicitCallFlagsAllowOpts(func)));
#endif
    Assert(
        !(
            isJsArray &&
            valueInfo->IsArrayValueInfo() &&
            valueInfo->AsArrayValueInfo()->LengthSym() &&
            !DoArrayLengthHoist()
        ));
}

#endif

void
GlobOpt::TrackNewValueForKills(Value *const value)
{
    Assert(value);

    if (!value->GetValueInfo()->IsAnyOptimizedArray())
    {
        return;
    }

    DoTrackNewValueForKills(value);
}

void
GlobOpt::DoTrackNewValueForKills(Value *const value)
{
    Assert(value);

    ValueInfo *const valueInfo = value->GetValueInfo();
    Assert(valueInfo->IsAnyOptimizedArray());
    Assert(!valueInfo->IsArrayValueInfo());

    // The value and value info here are new, so it's okay to modify the value info in-place
    Assert(!valueInfo->GetSymStore());

    const bool isJsArray = valueInfo->IsArrayOrObjectWithArray();
    Assert(!isJsArray == valueInfo->IsOptimizedTypedArray());

    Loop *implicitCallsLoop;
    if (currentBlock->next && !currentBlock->next->isDeleted && currentBlock->next->isLoopHeader)
    {
        // Since a loop's landing pad does not have user code, determine whether disabling implicit calls is allowed in the
        // landing pad based on the loop for which this block is the landing pad.
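        // Note on the block layout assumed here: a landing pad immediately precedes its loop's
        // header, so "currentBlock->next->isLoopHeader" identifies this block as the pad, and
        // the implicit-call policy is read from that loop rather than from the pad's own
        // enclosing loop.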
implicitCallsLoop = currentBlock->next->loop; Assert(implicitCallsLoop); Assert(implicitCallsLoop->landingPad == currentBlock); } else { implicitCallsLoop = currentBlock->loop; } if(isJsArray) { if(!DoArrayCheckHoist(valueInfo->Type(), implicitCallsLoop)) { // Array opts are disabled for this value type, so treat it as an indefinite value type going forward valueInfo->Type() = valueInfo->Type().ToLikely(); return; } if(valueInfo->HasNoMissingValues() && !DoArrayMissingValueCheckHoist()) { valueInfo->Type() = valueInfo->Type().SetHasNoMissingValues(false); } } #if DBG VerifyArrayValueInfoForTracking(valueInfo, isJsArray, currentBlock); #endif if(!isJsArray) { return; } // Can't assume going forward that it will definitely be an array without disabling implicit calls, because the // array may be transformed into an ES5 array. Since array opts are enabled, implicit calls can be disabled, and we can // treat it as a definite value type going forward, but the value needs to be tracked so that something like a call can // revert the value type to a likely version. CurrentBlockData()->valuesToKillOnCalls->Add(value); } void GlobOpt::TrackCopiedValueForKills(Value *const value) { Assert(value); if(!value->GetValueInfo()->IsAnyOptimizedArray()) { return; } DoTrackCopiedValueForKills(value); } void GlobOpt::DoTrackCopiedValueForKills(Value *const value) { Assert(value); ValueInfo *const valueInfo = value->GetValueInfo(); Assert(valueInfo->IsAnyOptimizedArray()); const bool isJsArray = valueInfo->IsArrayOrObjectWithArray(); Assert(!isJsArray == valueInfo->IsOptimizedTypedArray()); #if DBG VerifyArrayValueInfoForTracking(valueInfo, isJsArray, currentBlock); #endif if(!isJsArray && !(valueInfo->IsArrayValueInfo() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym())) { return; } // Can't assume going forward that it will definitely be an array without disabling implicit calls, because the // array may be transformed into an ES5 array. Since array opts are enabled, implicit calls can be disabled, and we can // treat it as a definite value type going forward, but the value needs to be tracked so that something like a call can // revert the value type to a likely version. CurrentBlockData()->valuesToKillOnCalls->Add(value); } void GlobOpt::TrackMergedValueForKills( Value *const value, GlobOptBlockData *const blockData, BVSparse<JitArenaAllocator> *const mergedValueTypesTrackedForKills) const { Assert(value); if(!value->GetValueInfo()->IsAnyOptimizedArray()) { return; } DoTrackMergedValueForKills(value, blockData, mergedValueTypesTrackedForKills); } void GlobOpt::DoTrackMergedValueForKills( Value *const value, GlobOptBlockData *const blockData, BVSparse<JitArenaAllocator> *const mergedValueTypesTrackedForKills) const { Assert(value); Assert(blockData); ValueInfo *valueInfo = value->GetValueInfo(); Assert(valueInfo->IsAnyOptimizedArray()); const bool isJsArray = valueInfo->IsArrayOrObjectWithArray(); Assert(!isJsArray == valueInfo->IsOptimizedTypedArray()); #if DBG VerifyArrayValueInfoForTracking(valueInfo, isJsArray, currentBlock, true); #endif if(!isJsArray && !(valueInfo->IsArrayValueInfo() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym())) { return; } // Can't assume going forward that it will definitely be an array without disabling implicit calls, because the // array may be transformed into an ES5 array. 
    // Since array opts are enabled, implicit calls can be disabled, and we can
    // treat it as a definite value type going forward, but the value needs to be tracked so that something like a call can
    // revert the value type to a likely version.
    if (!mergedValueTypesTrackedForKills || !mergedValueTypesTrackedForKills->TestAndSet(value->GetValueNumber()))
    {
        blockData->valuesToKillOnCalls->Add(value);
    }
}

void
GlobOpt::TrackValueInfoChangeForKills(BasicBlock *const block, Value *const value, ValueInfo *const newValueInfo, const bool compensated) const
{
    Assert(block);
    Assert(value);
    Assert(newValueInfo);

    ValueInfo *const oldValueInfo = value->GetValueInfo();
#if DBG
    if (oldValueInfo->IsAnyOptimizedArray())
    {
        VerifyArrayValueInfoForTracking(oldValueInfo, oldValueInfo->IsArrayOrObjectWithArray(), block, compensated);
    }
#endif
    const bool trackOldValueInfo =
        oldValueInfo->IsArrayOrObjectWithArray() ||
        (
            oldValueInfo->IsOptimizedTypedArray() &&
            oldValueInfo->IsArrayValueInfo() &&
            oldValueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()
        );
    Assert(trackOldValueInfo == block->globOptData.valuesToKillOnCalls->ContainsKey(value));

#if DBG
    if (newValueInfo->IsAnyOptimizedArray())
    {
        VerifyArrayValueInfoForTracking(newValueInfo, newValueInfo->IsArrayOrObjectWithArray(), block, compensated);
    }
#endif
    const bool trackNewValueInfo =
        newValueInfo->IsArrayOrObjectWithArray() ||
        (
            newValueInfo->IsOptimizedTypedArray() &&
            newValueInfo->IsArrayValueInfo() &&
            newValueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()
        );

    if (trackOldValueInfo == trackNewValueInfo)
    {
        return;
    }

    if (trackNewValueInfo)
    {
        block->globOptData.valuesToKillOnCalls->Add(value);
    }
    else
    {
        block->globOptData.valuesToKillOnCalls->Remove(value);
    }
}

void
GlobOpt::ProcessValueKills(IR::Instr *const instr)
{
    Assert(instr);

    ValueSet *const valuesToKillOnCalls = CurrentBlockData()->valuesToKillOnCalls;
    if (!IsLoopPrePass() && valuesToKillOnCalls->Count() == 0)
    {
        return;
    }

    const JsArrayKills kills = CheckJsArrayKills(instr);
    Assert(!kills.KillsArrayHeadSegments() || kills.KillsArrayHeadSegmentLengths());

    if (IsLoopPrePass())
    {
        rootLoopPrePass->jsArrayKills = rootLoopPrePass->jsArrayKills.Merge(kills);
        Assert(
            !rootLoopPrePass->parent ||
            rootLoopPrePass->jsArrayKills.AreSubsetOf(rootLoopPrePass->parent->jsArrayKills));
        if (kills.KillsAllArrays())
        {
            rootLoopPrePass->needImplicitCallBailoutChecksForJsArrayCheckHoist = false;
        }

        if (valuesToKillOnCalls->Count() == 0)
        {
            return;
        }
    }

    if (kills.KillsAllArrays())
    {
        Assert(kills.KillsTypedArrayHeadSegmentLengths());

        // - Calls need to kill the value types of values in the following list. For instance, calls can transform a JS array
        //   into an ES5 array, so any definitely-array value types need to be killed. Also, VirtualTypeArrays do not have
        //   bounds checks; this can be problematic if the array is detached, so check to ensure that it is a virtual array.
        //   Update the value types to likely versions to ensure that a bailout asserting the Array type is generated.
        // - Calls also need to kill typed array head segment lengths. A typed array's array buffer may be transferred to a web
        //   worker, in which case the typed array's length is set to zero.
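        // Illustrative JS for the buffer-transfer case (an assumption, not from this file):
        //     worker.postMessage(view.buffer, [view.buffer]); // detaches view's buffer
        // after which view.length is 0, so any hoisted head segment length sym would be stale;
        // the loop below resets the tracked value infos accordingly.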
for(auto it = valuesToKillOnCalls->GetIterator(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *const valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); if (valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedVirtualTypedArray()) { ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false); continue; } ChangeValueInfo( nullptr, value, valueInfo->AsArrayValueInfo()->Copy(alloc, true, false /* copyHeadSegmentLength */, true)); } valuesToKillOnCalls->Clear(); return; } if(kills.KillsArraysWithNoMissingValues()) { // Some operations may kill arrays with no missing values in unlikely circumstances. Convert their value types to likely // versions so that the checks have to be redone. for(auto it = valuesToKillOnCalls->GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *const valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); if(!valueInfo->IsArrayOrObjectWithArray() || !valueInfo->HasNoMissingValues()) { continue; } ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false); it.RemoveCurrent(); } } if(kills.KillsNativeArrays()) { // Some operations may kill native arrays in (what should be) unlikely circumstances. Convert their value types to // likely versions so that the checks have to be redone. for(auto it = valuesToKillOnCalls->GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *const valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); if(!valueInfo->IsArrayOrObjectWithArray() || valueInfo->HasVarElements()) { continue; } ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false); it.RemoveCurrent(); } } const bool likelyKillsJsArraysWithNoMissingValues = IsOperationThatLikelyKillsJsArraysWithNoMissingValues(instr); if(!kills.KillsArrayHeadSegmentLengths()) { Assert(!kills.KillsArrayHeadSegments()); if(!likelyKillsJsArraysWithNoMissingValues && !kills.KillsArrayLengths()) { return; } } for(auto it = valuesToKillOnCalls->GetIterator(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); if(!valueInfo->IsArrayOrObjectWithArray()) { continue; } if(likelyKillsJsArraysWithNoMissingValues && valueInfo->HasNoMissingValues()) { ChangeValueType(nullptr, value, valueInfo->Type().SetHasNoMissingValues(false), true); valueInfo = value->GetValueInfo(); } if(!valueInfo->IsArrayValueInfo()) { continue; } ArrayValueInfo *const arrayValueInfo = valueInfo->AsArrayValueInfo(); const bool removeHeadSegment = kills.KillsArrayHeadSegments() && arrayValueInfo->HeadSegmentSym(); const bool removeHeadSegmentLength = kills.KillsArrayHeadSegmentLengths() && arrayValueInfo->HeadSegmentLengthSym(); const bool removeLength = kills.KillsArrayLengths() && arrayValueInfo->LengthSym(); if(removeHeadSegment || removeHeadSegmentLength || removeLength) { ChangeValueInfo( nullptr, value, arrayValueInfo->Copy(alloc, !removeHeadSegment, 
!removeHeadSegmentLength, !removeLength)); valueInfo = value->GetValueInfo(); } } } void GlobOpt::ProcessValueKills(BasicBlock *const block, GlobOptBlockData *const blockData) { Assert(block); Assert(blockData); ValueSet *const valuesToKillOnCalls = blockData->valuesToKillOnCalls; if(!IsLoopPrePass() && valuesToKillOnCalls->Count() == 0) { return; } // If the current block or loop has implicit calls, kill all definitely-array value types, as using that info will cause // implicit calls to be disabled, resulting in unnecessary bailouts const bool killValuesOnImplicitCalls = (block->loop ? !this->ImplicitCallFlagsAllowOpts(block->loop) : !this->ImplicitCallFlagsAllowOpts(func)); if (!killValuesOnImplicitCalls) { return; } if(IsLoopPrePass() && block->loop == rootLoopPrePass) { AnalysisAssert(rootLoopPrePass); for (Loop * loop = rootLoopPrePass; loop != nullptr; loop = loop->parent) { loop->jsArrayKills.SetKillsAllArrays(); } Assert(!rootLoopPrePass->parent || rootLoopPrePass->jsArrayKills.AreSubsetOf(rootLoopPrePass->parent->jsArrayKills)); if(valuesToKillOnCalls->Count() == 0) { return; } } for(auto it = valuesToKillOnCalls->GetIterator(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *const valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); if(valueInfo->IsArrayOrObjectWithArray()) { ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false); continue; } ChangeValueInfo( nullptr, value, valueInfo->AsArrayValueInfo()->Copy(alloc, true, false /* copyHeadSegmentLength */, true)); } valuesToKillOnCalls->Clear(); } void GlobOpt::ProcessValueKillsForLoopHeaderAfterBackEdgeMerge(BasicBlock *const block, GlobOptBlockData *const blockData) { Assert(block); Assert(block->isLoopHeader); Assert(blockData); ValueSet *const valuesToKillOnCalls = blockData->valuesToKillOnCalls; if(valuesToKillOnCalls->Count() == 0) { return; } const JsArrayKills loopKills(block->loop->jsArrayKills); for(auto it = valuesToKillOnCalls->GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); const bool isJsArray = valueInfo->IsArrayOrObjectWithArray(); Assert(!isJsArray == valueInfo->IsOptimizedTypedArray()); if(isJsArray ? loopKills.KillsValueType(valueInfo->Type()) : loopKills.KillsTypedArrayHeadSegmentLengths()) { // Hoisting array checks and other related things for this type is disabled for the loop due to the kill, as // compensation code is currently not added on back-edges. When merging values from a back-edge, the array value // type cannot be definite, as that may require adding compensation code on the back-edge if the optimization pass // chooses to not optimize the array. 
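            // For example (illustrative): if an operation in the loop body can convert a definite
            // JS array into an ES5 array, keeping the definite type across the back edge would
            // require compensation code on that edge, so the merge settles for the likely type.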
if(isJsArray) { ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false); } else { ChangeValueInfo( nullptr, value, valueInfo->AsArrayValueInfo()->Copy(alloc, true, false /* copyHeadSegmentLength */, true)); } it.RemoveCurrent(); continue; } if(!isJsArray || !valueInfo->IsArrayValueInfo()) { continue; } // Similarly, if the loop contains an operation that kills JS array segments, don't make the segment or other related // syms available initially inside the loop ArrayValueInfo *const arrayValueInfo = valueInfo->AsArrayValueInfo(); const bool removeHeadSegment = loopKills.KillsArrayHeadSegments() && arrayValueInfo->HeadSegmentSym(); const bool removeHeadSegmentLength = loopKills.KillsArrayHeadSegmentLengths() && arrayValueInfo->HeadSegmentLengthSym(); const bool removeLength = loopKills.KillsArrayLengths() && arrayValueInfo->LengthSym(); if(removeHeadSegment || removeHeadSegmentLength || removeLength) { ChangeValueInfo( nullptr, value, arrayValueInfo->Copy(alloc, !removeHeadSegment, !removeHeadSegmentLength, !removeLength)); valueInfo = value->GetValueInfo(); } } } bool GlobOpt::NeedBailOnImplicitCallForLiveValues(BasicBlock const * const block, const bool isForwardPass) const { if(isForwardPass) { return block->globOptData.valuesToKillOnCalls->Count() != 0; } if(block->noImplicitCallUses->IsEmpty()) { Assert(block->noImplicitCallNoMissingValuesUses->IsEmpty()); Assert(block->noImplicitCallNativeArrayUses->IsEmpty()); Assert(block->noImplicitCallJsArrayHeadSegmentSymUses->IsEmpty()); Assert(block->noImplicitCallArrayLengthSymUses->IsEmpty()); return false; } return true; } IR::Instr* GlobOpt::CreateBoundsCheckInstr(IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset, Func* func) { IR::Instr* instr = IR::Instr::New(Js::OpCode::BoundCheck, func); return AttachBoundsCheckData(instr, lowerBound, upperBound, offset); } IR::Instr* GlobOpt::CreateBoundsCheckInstr(IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset, IR::BailOutKind bailoutkind, BailOutInfo* bailoutInfo, Func * func) { IR::Instr* instr = IR::BailOutInstr::New(Js::OpCode::BoundCheck, bailoutkind, bailoutInfo, func); return AttachBoundsCheckData(instr, lowerBound, upperBound, offset); } IR::Instr* GlobOpt::AttachBoundsCheckData(IR::Instr* instr, IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset) { instr->SetSrc1(lowerBound); instr->SetSrc2(upperBound); if (offset != 0) { instr->SetDst(IR::IntConstOpnd::New(offset, TyInt32, instr->m_func)); } return instr; } void GlobOpt::OptArraySrc(IR::Instr * *const instrRef) { Assert(instrRef); IR::Instr *&instr = *instrRef; Assert(instr); IR::Instr *baseOwnerInstr; IR::IndirOpnd *baseOwnerIndir; IR::RegOpnd *baseOpnd; bool isProfilableLdElem, isProfilableStElem; bool isLoad, isStore; bool needsHeadSegment, needsHeadSegmentLength, needsLength, needsBoundChecks; switch(instr->m_opcode) { // SIMD_JS case Js::OpCode::Simd128_LdArr_F4: case Js::OpCode::Simd128_LdArr_I4: // no type-spec for Asm.js if (this->GetIsAsmJSFunc()) { return; } // fall through case Js::OpCode::LdElemI_A: case Js::OpCode::LdMethodElem: if(!instr->GetSrc1()->IsIndirOpnd()) { return; } baseOwnerInstr = nullptr; baseOwnerIndir = instr->GetSrc1()->AsIndirOpnd(); baseOpnd = baseOwnerIndir->GetBaseOpnd(); isProfilableLdElem = instr->m_opcode == Js::OpCode::LdElemI_A; // LdMethodElem is currently not profiled isProfilableLdElem |= Js::IsSimd128Load(instr->m_opcode); needsBoundChecks = needsHeadSegmentLength = needsHeadSegment = isLoad = true; needsLength = isStore = isProfilableStElem = false; break; // SIMD_JS 
        case Js::OpCode::Simd128_StArr_F4:
        case Js::OpCode::Simd128_StArr_I4:
            if (this->GetIsAsmJSFunc())
            {
                return;
            }
            // fall through

        case Js::OpCode::StElemI_A:
        case Js::OpCode::StElemI_A_Strict:
        case Js::OpCode::StElemC:
            if (!instr->GetDst()->IsIndirOpnd())
            {
                return;
            }
            baseOwnerInstr = nullptr;
            baseOwnerIndir = instr->GetDst()->AsIndirOpnd();
            baseOpnd = baseOwnerIndir->GetBaseOpnd();
            needsBoundChecks = isProfilableStElem = instr->m_opcode != Js::OpCode::StElemC;
            isProfilableStElem |= Js::IsSimd128Store(instr->m_opcode);
            needsHeadSegmentLength = needsHeadSegment = isStore = true;
            needsLength = isLoad = isProfilableLdElem = false;
            break;

        case Js::OpCode::InlineArrayPush:
        case Js::OpCode::InlineArrayPop:
        {
            baseOwnerInstr = instr;
            baseOwnerIndir = nullptr;
            IR::Opnd * thisOpnd = instr->GetSrc1();

            // Return if it is not a LikelyArray or Object with Array - no point in doing array check elimination.
            if (!thisOpnd->IsRegOpnd() || !thisOpnd->GetValueType().IsLikelyArrayOrObjectWithArray())
            {
                return;
            }
            baseOpnd = thisOpnd->AsRegOpnd();

            isLoad = instr->m_opcode == Js::OpCode::InlineArrayPop;
            isStore = instr->m_opcode == Js::OpCode::InlineArrayPush;
            needsLength = needsHeadSegmentLength = needsHeadSegment = true;
            needsBoundChecks = isProfilableLdElem = isProfilableStElem = false;
            break;
        }

        case Js::OpCode::LdLen_A:
            if (!instr->GetSrc1()->IsRegOpnd())
            {
                return;
            }
            baseOwnerInstr = instr;
            baseOwnerIndir = nullptr;
            baseOpnd = instr->GetSrc1()->AsRegOpnd();
            if (baseOpnd->GetValueType().IsLikelyObject() &&
                baseOpnd->GetValueType().GetObjectType() == ObjectType::ObjectWithArray)
            {
                return;
            }
            needsLength = true;
            needsBoundChecks =
                needsHeadSegmentLength =
                needsHeadSegment =
                isStore =
                isLoad =
                isProfilableStElem =
                isProfilableLdElem = false;
            break;

        default:
            return;
    }
    Assert(!(baseOwnerInstr && baseOwnerIndir));
    Assert(!needsHeadSegmentLength || needsHeadSegment);

    if (baseOwnerIndir && !IsLoopPrePass())
    {
        // Since this happens before type specialization, make sure that any necessary conversions are done, and that the index
        // is int-specialized if possible such that the const flags are correct.
        ToVarUses(instr, baseOwnerIndir, baseOwnerIndir == instr->GetDst(), nullptr);
    }

    if (isProfilableStElem && !IsLoopPrePass())
    {
        // If the dead-store pass decides to add the bailout kind IR::BailOutInvalidatedArrayHeadSegment, and the fast path is
        // generated, it may bail out before the operation is done, so this would need to be a pre-op bailout.
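        // Put differently (an inference from the comment above): the bailout must restore the state
        // from before the StElem so that the interpreter re-executes the store after a mid-operation
        // bailout; a post-op bailout point would skip the store entirely.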
if(instr->HasBailOutInfo()) { Assert( instr->GetByteCodeOffset() != Js::Constants::NoByteCodeOffset && instr->GetBailOutInfo()->bailOutOffset <= instr->GetByteCodeOffset()); const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); Assert( !(bailOutKind & ~IR::BailOutKindBits) || (bailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp); if(!(bailOutKind & ~IR::BailOutKindBits)) { instr->SetBailOutKind(bailOutKind + IR::BailOutOnImplicitCallsPreOp); } } else { GenerateBailAtOperation(&instr, IR::BailOutOnImplicitCallsPreOp); } } Value *const baseValue = CurrentBlockData()->FindValue(baseOpnd->m_sym); if(!baseValue) { return; } ValueInfo *baseValueInfo = baseValue->GetValueInfo(); ValueType baseValueType(baseValueInfo->Type()); baseOpnd->SetValueType(baseValueType); if(!baseValueType.IsLikelyAnyOptimizedArray() || !DoArrayCheckHoist(baseValueType, currentBlock->loop, instr) || (baseOwnerIndir && !ShouldExpectConventionalArrayIndexValue(baseOwnerIndir))) { return; } const bool isLikelyJsArray = !baseValueType.IsLikelyTypedArray(); Assert(isLikelyJsArray == baseValueType.IsLikelyArrayOrObjectWithArray()); Assert(!isLikelyJsArray == baseValueType.IsLikelyOptimizedTypedArray()); if(!isLikelyJsArray && instr->m_opcode == Js::OpCode::LdMethodElem) { // Fast path is not generated in this case since the subsequent call will throw return; } ValueType newBaseValueType(baseValueType.ToDefiniteObject()); if(isLikelyJsArray && newBaseValueType.HasNoMissingValues() && !DoArrayMissingValueCheckHoist()) { newBaseValueType = newBaseValueType.SetHasNoMissingValues(false); } Assert((newBaseValueType == baseValueType) == baseValueType.IsObject()); ArrayValueInfo *baseArrayValueInfo = nullptr; const auto UpdateValue = [&](StackSym *newHeadSegmentSym, StackSym *newHeadSegmentLengthSym, StackSym *newLengthSym) { Assert(baseValueType.GetObjectType() == newBaseValueType.GetObjectType()); Assert(newBaseValueType.IsObject()); Assert(baseValueType.IsLikelyArray() || !newLengthSym); if(!(newHeadSegmentSym || newHeadSegmentLengthSym || newLengthSym)) { // We're not adding new information to the value other than changing the value type. Preserve any existing // information and just change the value type. 
ChangeValueType(currentBlock, baseValue, newBaseValueType, true); return; } // Merge the new syms into the value while preserving any existing information, and change the value type if(baseArrayValueInfo) { if(!newHeadSegmentSym) { newHeadSegmentSym = baseArrayValueInfo->HeadSegmentSym(); } if(!newHeadSegmentLengthSym) { newHeadSegmentLengthSym = baseArrayValueInfo->HeadSegmentLengthSym(); } if(!newLengthSym) { newLengthSym = baseArrayValueInfo->LengthSym(); } Assert( !baseArrayValueInfo->HeadSegmentSym() || newHeadSegmentSym == baseArrayValueInfo->HeadSegmentSym()); Assert( !baseArrayValueInfo->HeadSegmentLengthSym() || newHeadSegmentLengthSym == baseArrayValueInfo->HeadSegmentLengthSym()); Assert(!baseArrayValueInfo->LengthSym() || newLengthSym == baseArrayValueInfo->LengthSym()); } ArrayValueInfo *const newBaseArrayValueInfo = ArrayValueInfo::New( alloc, newBaseValueType, newHeadSegmentSym, newHeadSegmentLengthSym, newLengthSym, baseValueInfo->GetSymStore()); ChangeValueInfo(currentBlock, baseValue, newBaseArrayValueInfo); }; if(IsLoopPrePass()) { if(newBaseValueType != baseValueType) { UpdateValue(nullptr, nullptr, nullptr); } // For javascript arrays and objects with javascript arrays: // - Implicit calls need to be disabled and calls cannot be allowed in the loop since the array vtable may be changed // into an ES5 array. // For typed arrays: // - A typed array's array buffer may be transferred to a web worker as part of an implicit call, in which case the // typed array's length is set to zero. Implicit calls need to be disabled if the typed array's head segment length // is going to be loaded and used later. // Since we don't know if the loop has kills after this instruction, the kill information may not be complete. If a kill // is found later, this information will be updated to not require disabling implicit calls. if(!( isLikelyJsArray ? rootLoopPrePass->jsArrayKills.KillsValueType(newBaseValueType) : rootLoopPrePass->jsArrayKills.KillsTypedArrayHeadSegmentLengths() )) { rootLoopPrePass->needImplicitCallBailoutChecksForJsArrayCheckHoist = true; } return; } if(baseValueInfo->IsArrayValueInfo()) { baseArrayValueInfo = baseValueInfo->AsArrayValueInfo(); } const bool doArrayChecks = !baseValueType.IsObject(); const bool doArraySegmentHoist = DoArraySegmentHoist(baseValueType) && instr->m_opcode != Js::OpCode::StElemC; const bool headSegmentIsAvailable = baseArrayValueInfo && baseArrayValueInfo->HeadSegmentSym(); const bool doHeadSegmentLoad = doArraySegmentHoist && needsHeadSegment && !headSegmentIsAvailable; const bool doArraySegmentLengthHoist = doArraySegmentHoist && (isLikelyJsArray || DoTypedArraySegmentLengthHoist(currentBlock->loop)); const bool headSegmentLengthIsAvailable = baseArrayValueInfo && baseArrayValueInfo->HeadSegmentLengthSym(); const bool doHeadSegmentLengthLoad = doArraySegmentLengthHoist && (needsHeadSegmentLength || (!isLikelyJsArray && needsLength)) && !headSegmentLengthIsAvailable; const bool lengthIsAvailable = baseArrayValueInfo && baseArrayValueInfo->LengthSym(); const bool doLengthLoad = DoArrayLengthHoist() && needsLength && !lengthIsAvailable && baseValueType.IsLikelyArray() && DoLdLenIntSpec(instr->m_opcode == Js::OpCode::LdLen_A ? instr : nullptr, baseValueType); StackSym *const newHeadSegmentSym = doHeadSegmentLoad ? StackSym::New(TyMachPtr, instr->m_func) : nullptr; StackSym *const newHeadSegmentLengthSym = doHeadSegmentLengthLoad ? StackSym::New(TyUint32, instr->m_func) : nullptr; StackSym *const newLengthSym = doLengthLoad ? 
        StackSym::New(TyUint32, instr->m_func) : nullptr;

    bool canBailOutOnArrayAccessHelperCall;

    if (Js::IsSimd128LoadStore(instr->m_opcode))
    {
        // SIMD_JS
        // simd load/store never call helper
        canBailOutOnArrayAccessHelperCall = true;
    }
    else
    {
        canBailOutOnArrayAccessHelperCall =
            (isProfilableLdElem || isProfilableStElem) &&
            DoEliminateArrayAccessHelperCall() &&
            !(
                instr->IsProfiledInstr() &&
                (
                    isProfilableLdElem
                        ? instr->AsProfiledInstr()->u.ldElemInfo->LikelyNeedsHelperCall()
                        : instr->AsProfiledInstr()->u.stElemInfo->LikelyNeedsHelperCall()
                )
            );
    }

    bool doExtractBoundChecks = false, eliminatedLowerBoundCheck = false, eliminatedUpperBoundCheck = false;
    StackSym *indexVarSym = nullptr;
    Value *indexValue = nullptr;
    IntConstantBounds indexConstantBounds;
    Value *headSegmentLengthValue = nullptr;
    IntConstantBounds headSegmentLengthConstantBounds;

#if ENABLE_FAST_ARRAYBUFFER
    if (baseValueType.IsLikelyOptimizedVirtualTypedArray() &&
        !Js::IsSimd128LoadStore(instr->m_opcode) /* Always extract bounds for SIMD */)
    {
        if (isProfilableStElem ||
            !instr->IsDstNotAlwaysConvertedToInt32() ||
            (
                (baseValueType.GetObjectType() == ObjectType::Float32VirtualArray ||
                 baseValueType.GetObjectType() == ObjectType::Float64VirtualArray) &&
                !instr->IsDstNotAlwaysConvertedToNumber()
            ))
        {
            // Unless we're in asm.js (where it is guaranteed that virtual typed array accesses cannot read/write beyond
            // 4GB), check the range of the index to make sure we won't access beyond the reserved memory before
            // eliminating bounds checks in jitted code.
            if (!GetIsAsmJSFunc() && baseOwnerIndir)
            {
                IR::RegOpnd * idxOpnd = baseOwnerIndir->GetIndexOpnd();
                if (idxOpnd)
                {
                    StackSym * idxSym = idxOpnd->m_sym->IsTypeSpec() ? idxOpnd->m_sym->GetVarEquivSym(nullptr) : idxOpnd->m_sym;
                    Value * idxValue = CurrentBlockData()->FindValue(idxSym);
                    IntConstantBounds idxConstantBounds;
                    if (idxValue && idxValue->GetValueInfo()->TryGetIntConstantBounds(&idxConstantBounds))
                    {
                        BYTE indirScale = Lowerer::GetArrayIndirScale(baseValueType);
                        int32 upperBound = idxConstantBounds.UpperBound();
                        int32 lowerBound = idxConstantBounds.LowerBound();
                        if (lowerBound >= 0 && ((static_cast<uint64>(upperBound) << indirScale) < MAX_ASMJS_ARRAYBUFFER_LENGTH))
                        {
                            eliminatedLowerBoundCheck = true;
                            eliminatedUpperBoundCheck = true;
                            canBailOutOnArrayAccessHelperCall = false;
                        }
                    }
                }
            }
            else
            {
                if (!baseOwnerIndir)
                {
                    Assert(instr->m_opcode == Js::OpCode::InlineArrayPush ||
                        instr->m_opcode == Js::OpCode::InlineArrayPop ||
                        instr->m_opcode == Js::OpCode::LdLen_A);
                }
                eliminatedLowerBoundCheck = true;
                eliminatedUpperBoundCheck = true;
                canBailOutOnArrayAccessHelperCall = false;
            }
        }
    }
#endif

    if (needsBoundChecks && DoBoundCheckElimination())
    {
        AnalysisAssert(baseOwnerIndir);
        Assert(needsHeadSegmentLength);

        // Bound checks can be separated from the instruction only if it can bail out instead of making a helper call when a
        // bound check fails. And only if it would bail out, can we use a bound check to eliminate redundant bound checks later
        // on that path.
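        // The flag computed below encodes exactly that: bound checks are extracted only when the
        // head segment length is (or is about to be made) available and a failed check can bail out
        // (canBailOutOnArrayAccessHelperCall) rather than fall back to the helper call.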
doExtractBoundChecks = (headSegmentLengthIsAvailable || doHeadSegmentLengthLoad) && canBailOutOnArrayAccessHelperCall; do { // Get the index value IR::RegOpnd *const indexOpnd = baseOwnerIndir->GetIndexOpnd(); if(indexOpnd) { StackSym *const indexSym = indexOpnd->m_sym; if(indexSym->IsTypeSpec()) { Assert(indexSym->IsInt32()); indexVarSym = indexSym->GetVarEquivSym(nullptr); Assert(indexVarSym); indexValue = CurrentBlockData()->FindValue(indexVarSym); Assert(indexValue); AssertVerify(indexValue->GetValueInfo()->TryGetIntConstantBounds(&indexConstantBounds)); Assert(indexOpnd->GetType() == TyInt32 || indexOpnd->GetType() == TyUint32); Assert( (indexOpnd->GetType() == TyUint32) == ValueInfo::IsGreaterThanOrEqualTo( indexValue, indexConstantBounds.LowerBound(), indexConstantBounds.UpperBound(), nullptr, 0, 0)); if(indexOpnd->GetType() == TyUint32) { eliminatedLowerBoundCheck = true; } } else { doExtractBoundChecks = false; // Bound check instruction operates only on int-specialized operands indexValue = CurrentBlockData()->FindValue(indexSym); if(!indexValue || !indexValue->GetValueInfo()->TryGetIntConstantBounds(&indexConstantBounds)) { break; } if(ValueInfo::IsGreaterThanOrEqualTo( indexValue, indexConstantBounds.LowerBound(), indexConstantBounds.UpperBound(), nullptr, 0, 0)) { eliminatedLowerBoundCheck = true; } } if(!eliminatedLowerBoundCheck && ValueInfo::IsLessThan( indexValue, indexConstantBounds.LowerBound(), indexConstantBounds.UpperBound(), nullptr, 0, 0)) { eliminatedUpperBoundCheck = true; doExtractBoundChecks = false; break; } } else { const int32 indexConstantValue = baseOwnerIndir->GetOffset(); if(indexConstantValue < 0) { eliminatedUpperBoundCheck = true; doExtractBoundChecks = false; break; } if(indexConstantValue == INT32_MAX) { eliminatedLowerBoundCheck = true; doExtractBoundChecks = false; break; } indexConstantBounds = IntConstantBounds(indexConstantValue, indexConstantValue); eliminatedLowerBoundCheck = true; } if(!headSegmentLengthIsAvailable) { break; } headSegmentLengthValue = CurrentBlockData()->FindValue(baseArrayValueInfo->HeadSegmentLengthSym()); if(!headSegmentLengthValue) { if(doExtractBoundChecks) { headSegmentLengthConstantBounds = IntConstantBounds(0, Js::SparseArraySegmentBase::MaxLength); } break; } AssertVerify(headSegmentLengthValue->GetValueInfo()->TryGetIntConstantBounds(&headSegmentLengthConstantBounds)); if (ValueInfo::IsLessThanOrEqualTo( indexValue, indexConstantBounds.LowerBound(), indexConstantBounds.UpperBound(), headSegmentLengthValue, headSegmentLengthConstantBounds.LowerBound(), headSegmentLengthConstantBounds.UpperBound(), GetBoundCheckOffsetForSimd(newBaseValueType, instr, -1) )) { eliminatedUpperBoundCheck = true; if(eliminatedLowerBoundCheck) { doExtractBoundChecks = false; } } } while(false); } if(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad || doLengthLoad || doExtractBoundChecks) { // Find the loops out of which array checks and head segment loads need to be hoisted Loop *hoistChecksOutOfLoop = nullptr; Loop *hoistHeadSegmentLoadOutOfLoop = nullptr; Loop *hoistHeadSegmentLengthLoadOutOfLoop = nullptr; Loop *hoistLengthLoadOutOfLoop = nullptr; if(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad || doLengthLoad) { for(Loop *loop = currentBlock->loop; loop; loop = loop->parent) { const JsArrayKills loopKills(loop->jsArrayKills); Value *baseValueInLoopLandingPad = nullptr; if((isLikelyJsArray && loopKills.KillsValueType(newBaseValueType)) || !OptIsInvariant(baseOpnd->m_sym, currentBlock, loop, baseValue, true, 
true, &baseValueInLoopLandingPad) || !(doArrayChecks || baseValueInLoopLandingPad->GetValueInfo()->IsObject())) { break; } // The value types should be the same, except: // - The value type in the landing pad is a type that can merge to a specific object type. Typically, these // cases will use BailOnNoProfile, but that can be disabled due to excessive bailouts. Those value types // merge aggressively to the other side's object type, so the value type may have started off as // Uninitialized, [Likely]Undefined|Null, [Likely]UninitializedObject, etc., and changed in the loop to an // array type during a prepass. // - StElems in the loop can kill the no-missing-values info. // - The native array type may be made more conservative based on profile data by an instruction in the loop. #if DBG if (!baseValueInLoopLandingPad->GetValueInfo()->CanMergeToSpecificObjectType()) { ValueType landingPadValueType = baseValueInLoopLandingPad->GetValueInfo()->Type(); Assert(landingPadValueType.IsSimilar(baseValueType) || ( landingPadValueType.IsLikelyNativeArray() && landingPadValueType.Merge(baseValueType).IsSimilar(baseValueType) ) ); } #endif if(doArrayChecks) { hoistChecksOutOfLoop = loop; } if(isLikelyJsArray && loopKills.KillsArrayHeadSegments()) { Assert(loopKills.KillsArrayHeadSegmentLengths()); if(!(doArrayChecks || doLengthLoad)) { break; } } else { if(doHeadSegmentLoad || headSegmentIsAvailable) { // If the head segment is already available, we may need to rehoist the value including other // information. So, need to track the loop out of which the head segment length can be hoisted even if // the head segment length is not being loaded here. hoistHeadSegmentLoadOutOfLoop = loop; } if(isLikelyJsArray ? loopKills.KillsArrayHeadSegmentLengths() : loopKills.KillsTypedArrayHeadSegmentLengths()) { if(!(doArrayChecks || doHeadSegmentLoad || doLengthLoad)) { break; } } else if(doHeadSegmentLengthLoad || headSegmentLengthIsAvailable) { // If the head segment length is already available, we may need to rehoist the value including other // information. So, need to track the loop out of which the head segment length can be hoisted even if // the head segment length is not being loaded here. hoistHeadSegmentLengthLoadOutOfLoop = loop; } } if(isLikelyJsArray && loopKills.KillsArrayLengths()) { if(!(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad)) { break; } } else if(doLengthLoad || lengthIsAvailable) { // If the length is already available, we may need to rehoist the value including other information. So, // need to track the loop out of which the head segment length can be hoisted even if the length is not // being loaded here. 
hoistLengthLoadOutOfLoop = loop; } } } IR::Instr *insertBeforeInstr = instr->GetInsertBeforeByteCodeUsesInstr(); const auto InsertInstrInLandingPad = [&](IR::Instr *const instr, Loop *const hoistOutOfLoop) { if(hoistOutOfLoop->bailOutInfo->bailOutInstr) { instr->SetByteCodeOffset(hoistOutOfLoop->bailOutInfo->bailOutInstr); hoistOutOfLoop->bailOutInfo->bailOutInstr->InsertBefore(instr); } else { instr->SetByteCodeOffset(hoistOutOfLoop->landingPad->GetLastInstr()); hoistOutOfLoop->landingPad->InsertAfter(instr); } }; BailOutInfo *shareableBailOutInfo = nullptr; IR::Instr *shareableBailOutInfoOriginalOwner = nullptr; const auto ShareBailOut = [&]() { Assert(shareableBailOutInfo); if(shareableBailOutInfo->bailOutInstr != shareableBailOutInfoOriginalOwner) { return; } Assert(shareableBailOutInfoOriginalOwner->GetBailOutInfo() == shareableBailOutInfo); IR::Instr *const sharedBailOut = shareableBailOutInfoOriginalOwner->ShareBailOut(); Assert(sharedBailOut->GetBailOutInfo() == shareableBailOutInfo); shareableBailOutInfoOriginalOwner = nullptr; sharedBailOut->Unlink(); insertBeforeInstr->InsertBefore(sharedBailOut); insertBeforeInstr = sharedBailOut; }; if(doArrayChecks) { TRACE_TESTTRACE_PHASE_INSTR(Js::ArrayCheckHoistPhase, instr, _u("Separating array checks with bailout\n")); IR::Instr *bailOnNotArray = IR::Instr::New(Js::OpCode::BailOnNotArray, instr->m_func); bailOnNotArray->SetSrc1(baseOpnd); bailOnNotArray->GetSrc1()->SetIsJITOptimizedReg(true); const IR::BailOutKind bailOutKind = newBaseValueType.IsLikelyNativeArray() ? IR::BailOutOnNotNativeArray : IR::BailOutOnNotArray; if(hoistChecksOutOfLoop) { Assert(!(isLikelyJsArray && hoistChecksOutOfLoop->jsArrayKills.KillsValueType(newBaseValueType))); TRACE_PHASE_INSTR( Js::ArrayCheckHoistPhase, instr, _u("Hoisting array checks with bailout out of loop %u to landing pad block %u\n"), hoistChecksOutOfLoop->GetLoopNumber(), hoistChecksOutOfLoop->landingPad->GetBlockNum()); TESTTRACE_PHASE_INSTR(Js::ArrayCheckHoistPhase, instr, _u("Hoisting array checks with bailout out of loop\n")); Assert(hoistChecksOutOfLoop->bailOutInfo); EnsureBailTarget(hoistChecksOutOfLoop); InsertInstrInLandingPad(bailOnNotArray, hoistChecksOutOfLoop); bailOnNotArray = bailOnNotArray->ConvertToBailOutInstr(hoistChecksOutOfLoop->bailOutInfo, bailOutKind); } else { bailOnNotArray->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(bailOnNotArray); GenerateBailAtOperation(&bailOnNotArray, bailOutKind); shareableBailOutInfo = bailOnNotArray->GetBailOutInfo(); shareableBailOutInfoOriginalOwner = bailOnNotArray; } baseValueType = newBaseValueType; baseOpnd->SetValueType(newBaseValueType); } if(doLengthLoad) { Assert(baseValueType.IsArray()); Assert(newLengthSym); TRACE_TESTTRACE_PHASE_INSTR(Js::Phase::ArrayLengthHoistPhase, instr, _u("Separating array length load\n")); // Create an initial value for the length CurrentBlockData()->liveVarSyms->Set(newLengthSym->m_id); Value *const lengthValue = NewIntRangeValue(0, INT32_MAX, false); CurrentBlockData()->SetValue(lengthValue, newLengthSym); // SetValue above would have set the sym store to newLengthSym. This sym won't be used for copy-prop though, so // remove it as the sym store. 
this->SetSymStoreDirect(lengthValue->GetValueInfo(), nullptr); // length = [array + offsetOf(length)] IR::Instr *const loadLength = IR::Instr::New( Js::OpCode::LdIndir, IR::RegOpnd::New(newLengthSym, newLengthSym->GetType(), instr->m_func), IR::IndirOpnd::New( baseOpnd, Js::JavascriptArray::GetOffsetOfLength(), newLengthSym->GetType(), instr->m_func), instr->m_func); loadLength->GetDst()->SetIsJITOptimizedReg(true); loadLength->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->SetIsJITOptimizedReg(true); // BailOnNegative length (BailOutOnIrregularLength) IR::Instr *bailOnIrregularLength = IR::Instr::New(Js::OpCode::BailOnNegative, instr->m_func); bailOnIrregularLength->SetSrc1(loadLength->GetDst()); const IR::BailOutKind bailOutKind = IR::BailOutOnIrregularLength; if(hoistLengthLoadOutOfLoop) { Assert(!hoistLengthLoadOutOfLoop->jsArrayKills.KillsArrayLengths()); TRACE_PHASE_INSTR( Js::Phase::ArrayLengthHoistPhase, instr, _u("Hoisting array length load out of loop %u to landing pad block %u\n"), hoistLengthLoadOutOfLoop->GetLoopNumber(), hoistLengthLoadOutOfLoop->landingPad->GetBlockNum()); TESTTRACE_PHASE_INSTR(Js::Phase::ArrayLengthHoistPhase, instr, _u("Hoisting array length load out of loop\n")); Assert(hoistLengthLoadOutOfLoop->bailOutInfo); EnsureBailTarget(hoistLengthLoadOutOfLoop); InsertInstrInLandingPad(loadLength, hoistLengthLoadOutOfLoop); InsertInstrInLandingPad(bailOnIrregularLength, hoistLengthLoadOutOfLoop); bailOnIrregularLength = bailOnIrregularLength->ConvertToBailOutInstr(hoistLengthLoadOutOfLoop->bailOutInfo, bailOutKind); // Hoist the length value for(InvariantBlockBackwardIterator it( this, currentBlock, hoistLengthLoadOutOfLoop->landingPad, baseOpnd->m_sym, baseValue->GetValueNumber()); it.IsValid(); it.MoveNext()) { BasicBlock *const block = it.Block(); block->globOptData.liveVarSyms->Set(newLengthSym->m_id); Assert(!block->globOptData.FindValue(newLengthSym)); Value *const lengthValueCopy = CopyValue(lengthValue, lengthValue->GetValueNumber()); block->globOptData.SetValue(lengthValueCopy, newLengthSym); this->SetSymStoreDirect(lengthValueCopy->GetValueInfo(), nullptr); } } else { loadLength->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(loadLength); bailOnIrregularLength->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(bailOnIrregularLength); if(shareableBailOutInfo) { ShareBailOut(); bailOnIrregularLength = bailOnIrregularLength->ConvertToBailOutInstr(shareableBailOutInfo, bailOutKind); } else { GenerateBailAtOperation(&bailOnIrregularLength, bailOutKind); shareableBailOutInfo = bailOnIrregularLength->GetBailOutInfo(); shareableBailOutInfoOriginalOwner = bailOnIrregularLength; } } } const auto InsertHeadSegmentLoad = [&]() { TRACE_TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Separating array segment load\n")); Assert(newHeadSegmentSym); IR::RegOpnd *const headSegmentOpnd = IR::RegOpnd::New(newHeadSegmentSym, newHeadSegmentSym->GetType(), instr->m_func); headSegmentOpnd->SetIsJITOptimizedReg(true); IR::RegOpnd *const jitOptimizedBaseOpnd = baseOpnd->Copy(instr->m_func)->AsRegOpnd(); jitOptimizedBaseOpnd->SetIsJITOptimizedReg(true); IR::Instr *loadObjectArray; if(baseValueType.GetObjectType() == ObjectType::ObjectWithArray) { loadObjectArray = IR::Instr::New( Js::OpCode::LdIndir, headSegmentOpnd, IR::IndirOpnd::New( jitOptimizedBaseOpnd, Js::DynamicObject::GetOffsetOfObjectArray(), jitOptimizedBaseOpnd->GetType(), instr->m_func), instr->m_func); } else { loadObjectArray = nullptr; } IR::Instr *const loadHeadSegment = IR::Instr::New( 
Js::OpCode::LdIndir, headSegmentOpnd, IR::IndirOpnd::New( loadObjectArray ? headSegmentOpnd : jitOptimizedBaseOpnd, Lowerer::GetArrayOffsetOfHeadSegment(baseValueType), headSegmentOpnd->GetType(), instr->m_func), instr->m_func); if(hoistHeadSegmentLoadOutOfLoop) { Assert(!(isLikelyJsArray && hoistHeadSegmentLoadOutOfLoop->jsArrayKills.KillsArrayHeadSegments())); TRACE_PHASE_INSTR( Js::ArraySegmentHoistPhase, instr, _u("Hoisting array segment load out of loop %u to landing pad block %u\n"), hoistHeadSegmentLoadOutOfLoop->GetLoopNumber(), hoistHeadSegmentLoadOutOfLoop->landingPad->GetBlockNum()); TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Hoisting array segment load out of loop\n")); if(loadObjectArray) { InsertInstrInLandingPad(loadObjectArray, hoistHeadSegmentLoadOutOfLoop); } InsertInstrInLandingPad(loadHeadSegment, hoistHeadSegmentLoadOutOfLoop); } else { if(loadObjectArray) { loadObjectArray->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(loadObjectArray); } loadHeadSegment->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(loadHeadSegment); instr->loadedArrayHeadSegment = true; } }; if(doHeadSegmentLoad && isLikelyJsArray) { // For javascript arrays, the head segment is required to load the head segment length InsertHeadSegmentLoad(); } if(doHeadSegmentLengthLoad) { Assert(!isLikelyJsArray || newHeadSegmentSym || baseArrayValueInfo && baseArrayValueInfo->HeadSegmentSym()); Assert(newHeadSegmentLengthSym); Assert(!headSegmentLengthValue); TRACE_TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Separating array segment length load\n")); // Create an initial value for the head segment length CurrentBlockData()->liveVarSyms->Set(newHeadSegmentLengthSym->m_id); headSegmentLengthValue = NewIntRangeValue(0, Js::SparseArraySegmentBase::MaxLength, false); headSegmentLengthConstantBounds = IntConstantBounds(0, Js::SparseArraySegmentBase::MaxLength); CurrentBlockData()->SetValue(headSegmentLengthValue, newHeadSegmentLengthSym); // SetValue above would have set the sym store to newHeadSegmentLengthSym. This sym won't be used for copy-prop // though, so remove it as the sym store. this->SetSymStoreDirect(headSegmentLengthValue->GetValueInfo(), nullptr); StackSym *const headSegmentSym = isLikelyJsArray ? newHeadSegmentSym ? newHeadSegmentSym : baseArrayValueInfo->HeadSegmentSym() : nullptr; IR::Instr *const loadHeadSegmentLength = IR::Instr::New( Js::OpCode::LdIndir, IR::RegOpnd::New(newHeadSegmentLengthSym, newHeadSegmentLengthSym->GetType(), instr->m_func), IR::IndirOpnd::New( isLikelyJsArray ? IR::RegOpnd::New(headSegmentSym, headSegmentSym->GetType(), instr->m_func) : baseOpnd, isLikelyJsArray ? Js::SparseArraySegmentBase::GetOffsetOfLength() : Lowerer::GetArrayOffsetOfLength(baseValueType), newHeadSegmentLengthSym->GetType(), instr->m_func), instr->m_func); loadHeadSegmentLength->GetDst()->SetIsJITOptimizedReg(true); loadHeadSegmentLength->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->SetIsJITOptimizedReg(true); // We don't check the head segment length for negative (very large uint32) values. For JS arrays, the bound checks // cover that. For typed arrays, we currently don't allocate array buffers with more than 1 GB elements. if(hoistHeadSegmentLengthLoadOutOfLoop) { Assert( !( isLikelyJsArray ? 
hoistHeadSegmentLengthLoadOutOfLoop->jsArrayKills.KillsArrayHeadSegmentLengths() : hoistHeadSegmentLengthLoadOutOfLoop->jsArrayKills.KillsTypedArrayHeadSegmentLengths() )); TRACE_PHASE_INSTR( Js::ArraySegmentHoistPhase, instr, _u("Hoisting array segment length load out of loop %u to landing pad block %u\n"), hoistHeadSegmentLengthLoadOutOfLoop->GetLoopNumber(), hoistHeadSegmentLengthLoadOutOfLoop->landingPad->GetBlockNum()); TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Hoisting array segment length load out of loop\n")); InsertInstrInLandingPad(loadHeadSegmentLength, hoistHeadSegmentLengthLoadOutOfLoop); // Hoist the head segment length value for(InvariantBlockBackwardIterator it( this, currentBlock, hoistHeadSegmentLengthLoadOutOfLoop->landingPad, baseOpnd->m_sym, baseValue->GetValueNumber()); it.IsValid(); it.MoveNext()) { BasicBlock *const block = it.Block(); block->globOptData.liveVarSyms->Set(newHeadSegmentLengthSym->m_id); Assert(!block->globOptData.FindValue(newHeadSegmentLengthSym)); Value *const headSegmentLengthValueCopy = CopyValue(headSegmentLengthValue, headSegmentLengthValue->GetValueNumber()); block->globOptData.SetValue(headSegmentLengthValueCopy, newHeadSegmentLengthSym); this->SetSymStoreDirect(headSegmentLengthValueCopy->GetValueInfo(), nullptr); } } else { loadHeadSegmentLength->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(loadHeadSegmentLength); instr->loadedArrayHeadSegmentLength = true; } } if(doExtractBoundChecks) { Assert(!(eliminatedLowerBoundCheck && eliminatedUpperBoundCheck)); Assert(baseOwnerIndir); Assert(!baseOwnerIndir->GetIndexOpnd() || baseOwnerIndir->GetIndexOpnd()->m_sym->IsTypeSpec()); Assert(doHeadSegmentLengthLoad || headSegmentLengthIsAvailable); Assert(canBailOutOnArrayAccessHelperCall); Assert(!isStore || instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict || Js::IsSimd128LoadStore(instr->m_opcode)); StackSym *const headSegmentLengthSym = headSegmentLengthIsAvailable ? 
baseArrayValueInfo->HeadSegmentLengthSym() : newHeadSegmentLengthSym; Assert(headSegmentLengthSym); Assert(headSegmentLengthValue); ArrayLowerBoundCheckHoistInfo lowerBoundCheckHoistInfo; ArrayUpperBoundCheckHoistInfo upperBoundCheckHoistInfo; bool failedToUpdateCompatibleLowerBoundCheck = false, failedToUpdateCompatibleUpperBoundCheck = false; if(DoBoundCheckHoist()) { if(indexVarSym) { TRACE_PHASE_INSTR_VERBOSE( Js::Phase::BoundCheckHoistPhase, instr, _u("Determining array bound check hoistability for index s%u\n"), indexVarSym->m_id); } else { TRACE_PHASE_INSTR_VERBOSE( Js::Phase::BoundCheckHoistPhase, instr, _u("Determining array bound check hoistability for index %d\n"), indexConstantBounds.LowerBound()); } DetermineArrayBoundCheckHoistability( !eliminatedLowerBoundCheck, !eliminatedUpperBoundCheck, lowerBoundCheckHoistInfo, upperBoundCheckHoistInfo, isLikelyJsArray, indexVarSym, indexValue, indexConstantBounds, headSegmentLengthSym, headSegmentLengthValue, headSegmentLengthConstantBounds, hoistHeadSegmentLengthLoadOutOfLoop, failedToUpdateCompatibleLowerBoundCheck, failedToUpdateCompatibleUpperBoundCheck); #ifdef ENABLE_SIMDJS // SIMD_JS UpdateBoundCheckHoistInfoForSimd(upperBoundCheckHoistInfo, newBaseValueType, instr); #endif } if(!eliminatedLowerBoundCheck) { eliminatedLowerBoundCheck = true; Assert(indexVarSym); Assert(baseOwnerIndir->GetIndexOpnd()); Assert(indexValue); ArrayLowerBoundCheckHoistInfo &hoistInfo = lowerBoundCheckHoistInfo; if(hoistInfo.HasAnyInfo()) { BasicBlock *hoistBlock; if(hoistInfo.CompatibleBoundCheckBlock()) { hoistBlock = hoistInfo.CompatibleBoundCheckBlock(); TRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array lower bound check into existing bound check instruction in block %u\n"), hoistBlock->GetBlockNum()); TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array lower bound check into existing bound check instruction\n")); } else { Assert(hoistInfo.Loop()); BasicBlock *const landingPad = hoistInfo.Loop()->landingPad; hoistBlock = landingPad; StackSym *indexIntSym; if(hoistInfo.IndexSym() && hoistInfo.IndexSym()->IsVar()) { if(!landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym())) { // Int-specialize the index sym, as the BoundCheck instruction requires int operands. Specialize // it in this block if it is invariant, as the conversion will be hoisted along with value // updates. BasicBlock *specializationBlock = hoistInfo.Loop()->landingPad; IR::Instr *specializeBeforeInstr = nullptr; if(!CurrentBlockData()->IsInt32TypeSpecialized(hoistInfo.IndexSym()) && OptIsInvariant( hoistInfo.IndexSym(), currentBlock, hoistInfo.Loop(), CurrentBlockData()->FindValue(hoistInfo.IndexSym()), false, true)) { specializationBlock = currentBlock; specializeBeforeInstr = insertBeforeInstr; } Assert(tempBv->IsEmpty()); tempBv->Set(hoistInfo.IndexSym()->m_id); ToInt32(tempBv, specializationBlock, false, specializeBeforeInstr); tempBv->ClearAll(); Assert(landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym())); } indexIntSym = hoistInfo.IndexSym()->GetInt32EquivSym(nullptr); Assert(indexIntSym); } else { indexIntSym = hoistInfo.IndexSym(); Assert(!indexIntSym || indexIntSym->GetType() == TyInt32 || indexIntSym->GetType() == TyUint32); } // The info in the landing pad may be better than the info in the current block due to changes made to // the index sym inside the loop. Check if the bound check we intend to hoist is unnecessary in the // landing pad. 
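                    // Reminder of the BoundCheck encoding (see AttachBoundsCheckData above):
                    // src1 <= src2 + dst, so the lower bound check hoisted here has the form
                    // 0 <= indexSym + offset; it is skipped when the landing pad's value info
                    // already proves that inequality.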
if(!ValueInfo::IsLessThanOrEqualTo( nullptr, 0, 0, hoistInfo.IndexValue(), hoistInfo.IndexConstantBounds().LowerBound(), hoistInfo.IndexConstantBounds().UpperBound(), hoistInfo.Offset())) { Assert(hoistInfo.IndexSym()); Assert(hoistInfo.Loop()->bailOutInfo); EnsureBailTarget(hoistInfo.Loop()); if(hoistInfo.LoopCount()) { // Generate the loop count and loop count based bound that will be used for the bound check if(!hoistInfo.LoopCount()->HasBeenGenerated()) { GenerateLoopCount(hoistInfo.Loop(), hoistInfo.LoopCount()); } GenerateSecondaryInductionVariableBound( hoistInfo.Loop(), indexVarSym->GetInt32EquivSym(nullptr), hoistInfo.LoopCount(), hoistInfo.MaxMagnitudeChange(), hoistInfo.IndexSym()); } IR::Opnd* lowerBound = IR::IntConstOpnd::New(0, TyInt32, instr->m_func, true); IR::Opnd* upperBound = IR::RegOpnd::New(indexIntSym, TyInt32, instr->m_func); upperBound->SetIsJITOptimizedReg(true); // 0 <= indexSym + offset (src1 <= src2 + dst) IR::Instr *const boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, hoistInfo.Offset(), hoistInfo.IsLoopCountBasedBound() ? IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck : IR::BailOutOnFailedHoistedBoundCheck, hoistInfo.Loop()->bailOutInfo, hoistInfo.Loop()->bailOutInfo->bailOutFunc); InsertInstrInLandingPad(boundCheck, hoistInfo.Loop()); TRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array lower bound check out of loop %u to landing pad block %u, as (0 <= s%u + %d)\n"), hoistInfo.Loop()->GetLoopNumber(), landingPad->GetBlockNum(), hoistInfo.IndexSym()->m_id, hoistInfo.Offset()); TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array lower bound check out of loop\n")); // Record the bound check instruction as available const IntBoundCheck boundCheckInfo( ZeroValueNumber, hoistInfo.IndexValueNumber(), boundCheck, landingPad); { const bool added = CurrentBlockData()->availableIntBoundChecks->AddNew(boundCheckInfo) >= 0; Assert(added || failedToUpdateCompatibleLowerBoundCheck); } for(InvariantBlockBackwardIterator it(this, currentBlock, landingPad, nullptr); it.IsValid(); it.MoveNext()) { const bool added = it.Block()->globOptData.availableIntBoundChecks->AddNew(boundCheckInfo) >= 0; Assert(added || failedToUpdateCompatibleLowerBoundCheck); } } } // Update values of the syms involved in the bound check to reflect the bound check if(hoistBlock != currentBlock && hoistInfo.IndexSym() && hoistInfo.Offset() != INT32_MIN) { for(InvariantBlockBackwardIterator it( this, currentBlock->next, hoistBlock, hoistInfo.IndexSym(), hoistInfo.IndexValueNumber()); it.IsValid(); it.MoveNext()) { Value *const value = it.InvariantSymValue(); IntConstantBounds constantBounds; AssertVerify(value->GetValueInfo()->TryGetIntConstantBounds(&constantBounds, true)); ValueInfo *const newValueInfo = UpdateIntBoundsForGreaterThanOrEqual( value, constantBounds, nullptr, IntConstantBounds(-hoistInfo.Offset(), -hoistInfo.Offset()), false); if(newValueInfo) { ChangeValueInfo(nullptr, value, newValueInfo); if(it.Block() == currentBlock && value == indexValue) { AssertVerify(newValueInfo->TryGetIntConstantBounds(&indexConstantBounds)); } } } } } else { IR::Opnd* lowerBound = IR::IntConstOpnd::New(0, TyInt32, instr->m_func, true); IR::Opnd* upperBound = baseOwnerIndir->GetIndexOpnd(); upperBound->SetIsJITOptimizedReg(true); const int offset = 0; IR::Instr *boundCheck; if(shareableBailOutInfo) { ShareBailOut(); boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, offset, IR::BailOutOnArrayAccessHelperCall, shareableBailOutInfo, 
shareableBailOutInfo->bailOutFunc); } else { boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, offset, instr->m_func); } boundCheck->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(boundCheck); if(!shareableBailOutInfo) { GenerateBailAtOperation(&boundCheck, IR::BailOutOnArrayAccessHelperCall); shareableBailOutInfo = boundCheck->GetBailOutInfo(); shareableBailOutInfoOriginalOwner = boundCheck; } TRACE_PHASE_INSTR( Js::Phase::BoundCheckEliminationPhase, instr, _u("Separating array lower bound check, as (0 <= s%u)\n"), indexVarSym->m_id); TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckEliminationPhase, instr, _u("Separating array lower bound check\n")); if(DoBoundCheckHoist()) { // Record the bound check instruction as available const bool added = CurrentBlockData()->availableIntBoundChecks->AddNew( IntBoundCheck(ZeroValueNumber, indexValue->GetValueNumber(), boundCheck, currentBlock)) >= 0; Assert(added || failedToUpdateCompatibleLowerBoundCheck); } } // Update the index value to reflect the bound check ValueInfo *const newValueInfo = UpdateIntBoundsForGreaterThanOrEqual( indexValue, indexConstantBounds, nullptr, IntConstantBounds(0, 0), false); if(newValueInfo) { ChangeValueInfo(nullptr, indexValue, newValueInfo); AssertVerify(newValueInfo->TryGetIntConstantBounds(&indexConstantBounds)); } } if(!eliminatedUpperBoundCheck) { eliminatedUpperBoundCheck = true; ArrayUpperBoundCheckHoistInfo &hoistInfo = upperBoundCheckHoistInfo; if(hoistInfo.HasAnyInfo()) { BasicBlock *hoistBlock; if(hoistInfo.CompatibleBoundCheckBlock()) { hoistBlock = hoistInfo.CompatibleBoundCheckBlock(); TRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array upper bound check into existing bound check instruction in block %u\n"), hoistBlock->GetBlockNum()); TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array upper bound check into existing bound check instruction\n")); } else { Assert(hoistInfo.Loop()); BasicBlock *const landingPad = hoistInfo.Loop()->landingPad; hoistBlock = landingPad; StackSym *indexIntSym; if(hoistInfo.IndexSym() && hoistInfo.IndexSym()->IsVar()) { if(!landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym())) { // Int-specialize the index sym, as the BoundCheck instruction requires int operands. Specialize it // in this block if it is invariant, as the conversion will be hoisted along with value updates. BasicBlock *specializationBlock = hoistInfo.Loop()->landingPad; IR::Instr *specializeBeforeInstr = nullptr; if(!CurrentBlockData()->IsInt32TypeSpecialized(hoistInfo.IndexSym()) && OptIsInvariant( hoistInfo.IndexSym(), currentBlock, hoistInfo.Loop(), CurrentBlockData()->FindValue(hoistInfo.IndexSym()), false, true)) { specializationBlock = currentBlock; specializeBeforeInstr = insertBeforeInstr; } Assert(tempBv->IsEmpty()); tempBv->Set(hoistInfo.IndexSym()->m_id); ToInt32(tempBv, specializationBlock, false, specializeBeforeInstr); tempBv->ClearAll(); Assert(landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym())); } indexIntSym = hoistInfo.IndexSym()->GetInt32EquivSym(nullptr); Assert(indexIntSym); } else { indexIntSym = hoistInfo.IndexSym(); Assert(!indexIntSym || indexIntSym->GetType() == TyInt32 || indexIntSym->GetType() == TyUint32); } // The info in the landing pad may be better than the info in the current block due to changes made to the // index sym inside the loop. Check if the bound check we intend to hoist is unnecessary in the landing pad. 
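// For instance, if the index sym is reassigned inside the loop but enters the
// loop as a small non-negative constant, the landing-pad value info may already
// prove (indexSym <= headSegmentLength + offset), in which case no hoisted
// check needs to be emitted at all.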
if(!ValueInfo::IsLessThanOrEqualTo( hoistInfo.IndexValue(), hoistInfo.IndexConstantBounds().LowerBound(), hoistInfo.IndexConstantBounds().UpperBound(), hoistInfo.HeadSegmentLengthValue(), hoistInfo.HeadSegmentLengthConstantBounds().LowerBound(), hoistInfo.HeadSegmentLengthConstantBounds().UpperBound(), hoistInfo.Offset())) { Assert(hoistInfo.Loop()->bailOutInfo); EnsureBailTarget(hoistInfo.Loop()); if(hoistInfo.LoopCount()) { // Generate the loop count and loop count based bound that will be used for the bound check if(!hoistInfo.LoopCount()->HasBeenGenerated()) { GenerateLoopCount(hoistInfo.Loop(), hoistInfo.LoopCount()); } GenerateSecondaryInductionVariableBound( hoistInfo.Loop(), indexVarSym->GetInt32EquivSym(nullptr), hoistInfo.LoopCount(), hoistInfo.MaxMagnitudeChange(), hoistInfo.IndexSym()); } IR::Opnd* lowerBound = indexIntSym ? static_cast<IR::Opnd *>(IR::RegOpnd::New(indexIntSym, TyInt32, instr->m_func)) : IR::IntConstOpnd::New( hoistInfo.IndexConstantBounds().LowerBound(), TyInt32, instr->m_func); lowerBound->SetIsJITOptimizedReg(true); IR::Opnd* upperBound = IR::RegOpnd::New(headSegmentLengthSym, headSegmentLengthSym->GetType(), instr->m_func); upperBound->SetIsJITOptimizedReg(true); // indexSym <= headSegmentLength + offset (src1 <= src2 + dst) IR::Instr *const boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, hoistInfo.Offset(), hoistInfo.IsLoopCountBasedBound() ? IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck : IR::BailOutOnFailedHoistedBoundCheck, hoistInfo.Loop()->bailOutInfo, hoistInfo.Loop()->bailOutInfo->bailOutFunc); InsertInstrInLandingPad(boundCheck, hoistInfo.Loop()); if(indexIntSym) { TRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array upper bound check out of loop %u to landing pad block %u, as (s%u <= s%u + %d)\n"), hoistInfo.Loop()->GetLoopNumber(), landingPad->GetBlockNum(), hoistInfo.IndexSym()->m_id, headSegmentLengthSym->m_id, hoistInfo.Offset()); } else { TRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array upper bound check out of loop %u to landing pad block %u, as (%d <= s%u + %d)\n"), hoistInfo.Loop()->GetLoopNumber(), landingPad->GetBlockNum(), hoistInfo.IndexConstantBounds().LowerBound(), headSegmentLengthSym->m_id, hoistInfo.Offset()); } TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array upper bound check out of loop\n")); // Record the bound check instruction as available const IntBoundCheck boundCheckInfo( hoistInfo.IndexValue() ? 
hoistInfo.IndexValueNumber() : ZeroValueNumber, hoistInfo.HeadSegmentLengthValue()->GetValueNumber(), boundCheck, landingPad); { const bool added = CurrentBlockData()->availableIntBoundChecks->AddNew(boundCheckInfo) >= 0; Assert(added || failedToUpdateCompatibleUpperBoundCheck); } for(InvariantBlockBackwardIterator it(this, currentBlock, landingPad, nullptr); it.IsValid(); it.MoveNext()) { const bool added = it.Block()->globOptData.availableIntBoundChecks->AddNew(boundCheckInfo) >= 0; Assert(added || failedToUpdateCompatibleUpperBoundCheck); } } } // Update values of the syms involved in the bound check to reflect the bound check Assert(!hoistInfo.Loop() || hoistBlock != currentBlock); if(hoistBlock != currentBlock) { for(InvariantBlockBackwardIterator it(this, currentBlock->next, hoistBlock, nullptr); it.IsValid(); it.MoveNext()) { BasicBlock *const block = it.Block(); Value *leftValue; IntConstantBounds leftConstantBounds; if(hoistInfo.IndexSym()) { leftValue = block->globOptData.FindValue(hoistInfo.IndexSym()); if(!leftValue || leftValue->GetValueNumber() != hoistInfo.IndexValueNumber()) { continue; } AssertVerify(leftValue->GetValueInfo()->TryGetIntConstantBounds(&leftConstantBounds, true)); } else { leftValue = nullptr; leftConstantBounds = hoistInfo.IndexConstantBounds(); } Value *const rightValue = block->globOptData.FindValue(headSegmentLengthSym); if(!rightValue) { continue; } Assert(rightValue->GetValueNumber() == headSegmentLengthValue->GetValueNumber()); IntConstantBounds rightConstantBounds; AssertVerify(rightValue->GetValueInfo()->TryGetIntConstantBounds(&rightConstantBounds)); ValueInfo *const newValueInfoForLessThanOrEqual = UpdateIntBoundsForLessThanOrEqual( leftValue, leftConstantBounds, rightValue, rightConstantBounds, hoistInfo.Offset(), false); if (newValueInfoForLessThanOrEqual) { ChangeValueInfo(nullptr, leftValue, newValueInfoForLessThanOrEqual); AssertVerify(newValueInfoForLessThanOrEqual->TryGetIntConstantBounds(&leftConstantBounds, true)); if(block == currentBlock && leftValue == indexValue) { Assert(newValueInfoForLessThanOrEqual->IsInt()); indexConstantBounds = leftConstantBounds; } } if(hoistInfo.Offset() != INT32_MIN) { ValueInfo *const newValueInfoForGreaterThanOrEqual = UpdateIntBoundsForGreaterThanOrEqual( rightValue, rightConstantBounds, leftValue, leftConstantBounds, -hoistInfo.Offset(), false); if (newValueInfoForGreaterThanOrEqual) { ChangeValueInfo(nullptr, rightValue, newValueInfoForGreaterThanOrEqual); if(block == currentBlock) { Assert(rightValue == headSegmentLengthValue); AssertVerify(newValueInfoForGreaterThanOrEqual->TryGetIntConstantBounds(&headSegmentLengthConstantBounds)); } } } } } } else { IR::Opnd* lowerBound = baseOwnerIndir->GetIndexOpnd() ? 
static_cast<IR::Opnd *>(baseOwnerIndir->GetIndexOpnd()) : IR::IntConstOpnd::New(baseOwnerIndir->GetOffset(), TyInt32, instr->m_func); lowerBound->SetIsJITOptimizedReg(true); IR::Opnd* upperBound = IR::RegOpnd::New(headSegmentLengthSym, headSegmentLengthSym->GetType(), instr->m_func); upperBound->SetIsJITOptimizedReg(true); const int offset = GetBoundCheckOffsetForSimd(newBaseValueType, instr, -1); IR::Instr *boundCheck; // index <= headSegmentLength - 1 (src1 <= src2 + dst) if (shareableBailOutInfo) { ShareBailOut(); boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, offset, IR::BailOutOnArrayAccessHelperCall, shareableBailOutInfo, shareableBailOutInfo->bailOutFunc); } else { boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, offset, instr->m_func); } boundCheck->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(boundCheck); if(!shareableBailOutInfo) { GenerateBailAtOperation(&boundCheck, IR::BailOutOnArrayAccessHelperCall); shareableBailOutInfo = boundCheck->GetBailOutInfo(); shareableBailOutInfoOriginalOwner = boundCheck; } instr->extractedUpperBoundCheckWithoutHoisting = true; if(baseOwnerIndir->GetIndexOpnd()) { TRACE_PHASE_INSTR( Js::Phase::BoundCheckEliminationPhase, instr, _u("Separating array upper bound check, as (s%u < s%u)\n"), indexVarSym->m_id, headSegmentLengthSym->m_id); } else { TRACE_PHASE_INSTR( Js::Phase::BoundCheckEliminationPhase, instr, _u("Separating array upper bound check, as (%d < s%u)\n"), baseOwnerIndir->GetOffset(), headSegmentLengthSym->m_id); } TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckEliminationPhase, instr, _u("Separating array upper bound check\n")); if(DoBoundCheckHoist()) { // Record the bound check instruction as available const bool added = CurrentBlockData()->availableIntBoundChecks->AddNew( IntBoundCheck( indexValue ? indexValue->GetValueNumber() : ZeroValueNumber, headSegmentLengthValue->GetValueNumber(), boundCheck, currentBlock)) >= 0; Assert(added || failedToUpdateCompatibleUpperBoundCheck); } } // Update the index and head segment length values to reflect the bound check ValueInfo *newValueInfo = UpdateIntBoundsForLessThan( indexValue, indexConstantBounds, headSegmentLengthValue, headSegmentLengthConstantBounds, false); if(newValueInfo) { ChangeValueInfo(nullptr, indexValue, newValueInfo); AssertVerify(newValueInfo->TryGetIntConstantBounds(&indexConstantBounds)); } newValueInfo = UpdateIntBoundsForGreaterThan( headSegmentLengthValue, headSegmentLengthConstantBounds, indexValue, indexConstantBounds, false); if(newValueInfo) { ChangeValueInfo(nullptr, headSegmentLengthValue, newValueInfo); } } } if(doHeadSegmentLoad && !isLikelyJsArray) { // For typed arrays, load the length first, followed by the bound checks, and then load the head segment. This // allows the length sym to become dead by the time of the head segment load, freeing up the register for use by the // head segment sym. InsertHeadSegmentLoad(); } if(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad || doLengthLoad) { UpdateValue(newHeadSegmentSym, newHeadSegmentLengthSym, newLengthSym); baseValueInfo = baseValue->GetValueInfo(); baseArrayValueInfo = baseValueInfo->IsArrayValueInfo() ? 
baseValueInfo->AsArrayValueInfo() : nullptr; // Iterate up to the root loop's landing pad until all necessary value info is updated uint hoistItemCount = static_cast<uint>(!!hoistChecksOutOfLoop) + !!hoistHeadSegmentLoadOutOfLoop + !!hoistHeadSegmentLengthLoadOutOfLoop + !!hoistLengthLoadOutOfLoop; if(hoistItemCount != 0) { Loop *rootLoop = nullptr; for(Loop *loop = currentBlock->loop; loop; loop = loop->parent) { rootLoop = loop; } Assert(rootLoop); ValueInfo *valueInfoToHoist = baseValueInfo; bool removeHeadSegment, removeHeadSegmentLength, removeLength; if(baseArrayValueInfo) { removeHeadSegment = baseArrayValueInfo->HeadSegmentSym() && !hoistHeadSegmentLoadOutOfLoop; removeHeadSegmentLength = baseArrayValueInfo->HeadSegmentLengthSym() && !hoistHeadSegmentLengthLoadOutOfLoop; removeLength = baseArrayValueInfo->LengthSym() && !hoistLengthLoadOutOfLoop; } else { removeLength = removeHeadSegmentLength = removeHeadSegment = false; } for(InvariantBlockBackwardIterator it( this, currentBlock, rootLoop->landingPad, baseOpnd->m_sym, baseValue->GetValueNumber()); it.IsValid(); it.MoveNext()) { if(removeHeadSegment || removeHeadSegmentLength || removeLength) { // Remove information that shouldn't be there anymore, from the value info valueInfoToHoist = valueInfoToHoist->AsArrayValueInfo()->Copy( alloc, !removeHeadSegment, !removeHeadSegmentLength, !removeLength); removeLength = removeHeadSegmentLength = removeHeadSegment = false; } BasicBlock *const block = it.Block(); Value *const blockBaseValue = it.InvariantSymValue(); HoistInvariantValueInfo(valueInfoToHoist, blockBaseValue, block); // See if we have completed hoisting value info for one of the items if(hoistChecksOutOfLoop && block == hoistChecksOutOfLoop->landingPad) { // All other items depend on array checks, so we can just stop here hoistChecksOutOfLoop = nullptr; break; } if(hoistHeadSegmentLoadOutOfLoop && block == hoistHeadSegmentLoadOutOfLoop->landingPad) { hoistHeadSegmentLoadOutOfLoop = nullptr; if(--hoistItemCount == 0) break; if(valueInfoToHoist->IsArrayValueInfo() && valueInfoToHoist->AsArrayValueInfo()->HeadSegmentSym()) removeHeadSegment = true; } if(hoistHeadSegmentLengthLoadOutOfLoop && block == hoistHeadSegmentLengthLoadOutOfLoop->landingPad) { hoistHeadSegmentLengthLoadOutOfLoop = nullptr; if(--hoistItemCount == 0) break; if(valueInfoToHoist->IsArrayValueInfo() && valueInfoToHoist->AsArrayValueInfo()->HeadSegmentLengthSym()) removeHeadSegmentLength = true; } if(hoistLengthLoadOutOfLoop && block == hoistLengthLoadOutOfLoop->landingPad) { hoistLengthLoadOutOfLoop = nullptr; if(--hoistItemCount == 0) break; if(valueInfoToHoist->IsArrayValueInfo() && valueInfoToHoist->AsArrayValueInfo()->LengthSym()) removeLength = true; } } } } } IR::ArrayRegOpnd *baseArrayOpnd; if(baseArrayValueInfo) { // Update the opnd to include the associated syms baseArrayOpnd = baseArrayValueInfo->CreateOpnd( baseOpnd, needsHeadSegment, needsHeadSegmentLength || (!isLikelyJsArray && needsLength), needsLength, eliminatedLowerBoundCheck, eliminatedUpperBoundCheck, instr->m_func); if(baseOwnerInstr) { Assert(baseOwnerInstr->GetSrc1() == baseOpnd); baseOwnerInstr->ReplaceSrc1(baseArrayOpnd); } else { Assert(baseOwnerIndir); Assert(baseOwnerIndir->GetBaseOpnd() == baseOpnd); baseOwnerIndir->ReplaceBaseOpnd(baseArrayOpnd); } baseOpnd = baseArrayOpnd; } else { baseArrayOpnd = nullptr; } if(isLikelyJsArray) { // Insert an instruction to indicate to the dead-store pass that implicit calls need to be kept disabled until this // instruction. 
Operations other than LdElem and StElem don't benefit much from arrays having no missing values, so // no need to ensure that the array still has no missing values. For a particular array, if none of the accesses // benefit much from the no-missing-values information, it may be beneficial to avoid checking for no missing // values, especially in the case for a single array access, where the cost of the check could be relatively // significant. An StElem has to do additional checks in the common path if the array may have missing values, and // a StElem that operates on an array that has no missing values is more likely to keep the no-missing-values info // on the array more precise, so it still benefits a little from the no-missing-values info. CaptureNoImplicitCallUses(baseOpnd, isLoad || isStore); } else if(baseArrayOpnd && baseArrayOpnd->HeadSegmentLengthSym()) { // A typed array's array buffer may be transferred to a web worker as part of an implicit call, in which case the typed // array's length is set to zero. Insert an instruction to indicate to the dead-store pass that implicit calls need to // be disabled until this instruction. IR::RegOpnd *const headSegmentLengthOpnd = IR::RegOpnd::New( baseArrayOpnd->HeadSegmentLengthSym(), baseArrayOpnd->HeadSegmentLengthSym()->GetType(), instr->m_func); const IR::AutoReuseOpnd autoReuseHeadSegmentLengthOpnd(headSegmentLengthOpnd, instr->m_func); CaptureNoImplicitCallUses(headSegmentLengthOpnd, false); } const auto OnEliminated = [&](const Js::Phase phase, const char *const eliminatedLoad) { TRACE_TESTTRACE_PHASE_INSTR(phase, instr, _u("Eliminating array %S\n"), eliminatedLoad); }; OnEliminated(Js::Phase::ArrayCheckHoistPhase, "checks"); if(baseArrayOpnd) { if(baseArrayOpnd->HeadSegmentSym()) { OnEliminated(Js::Phase::ArraySegmentHoistPhase, "head segment load"); } if(baseArrayOpnd->HeadSegmentLengthSym()) { OnEliminated(Js::Phase::ArraySegmentHoistPhase, "head segment length load"); } if(baseArrayOpnd->LengthSym()) { OnEliminated(Js::Phase::ArrayLengthHoistPhase, "length load"); } if(baseArrayOpnd->EliminatedLowerBoundCheck()) { OnEliminated(Js::Phase::BoundCheckEliminationPhase, "lower bound check"); } if(baseArrayOpnd->EliminatedUpperBoundCheck()) { OnEliminated(Js::Phase::BoundCheckEliminationPhase, "upper bound check"); } } if(!canBailOutOnArrayAccessHelperCall) { return; } // Bail out instead of generating a helper call. This helps to remove the array reference when the head segment and head // segment length are available, reduces code size, and allows bound checks to be separated. 
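// Rough sketch of the merge below: if the instruction already carries bailout
// info (at most BailOutOnImplicitCallsPreOp outside the kind bits), the
// helper-call kind is folded into it, i.e.
//     newKind = (oldKind & IR::BailOutKindBits) | IR::BailOutOnArrayAccessHelperCall;
// otherwise a fresh bailout is attached at this operation.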
if(instr->HasBailOutInfo()) { const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); Assert( !(bailOutKind & ~IR::BailOutKindBits) || (bailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp); instr->SetBailOutKind(bailOutKind & IR::BailOutKindBits | IR::BailOutOnArrayAccessHelperCall); } else { GenerateBailAtOperation(&instr, IR::BailOutOnArrayAccessHelperCall); } } void GlobOpt::CaptureNoImplicitCallUses( IR::Opnd *opnd, const bool usesNoMissingValuesInfo, IR::Instr *const includeCurrentInstr) { Assert(!IsLoopPrePass()); Assert(noImplicitCallUsesToInsert); Assert(opnd); // The opnd may be deleted later, so make a copy to ensure it is alive for inserting NoImplicitCallUses later opnd = opnd->Copy(func); if(!usesNoMissingValuesInfo) { const ValueType valueType(opnd->GetValueType()); if(valueType.IsArrayOrObjectWithArray() && valueType.HasNoMissingValues()) { // Inserting NoImplicitCallUses for an opnd with a definitely-array-with-no-missing-values value type means that the // instruction following it uses the information that the array has no missing values in some way, for instance, it // may omit missing value checks. Based on that, the dead-store phase in turn ensures that the necessary bailouts // are inserted to ensure that the array still has no missing values until the following instruction. Since // 'usesNoMissingValuesInfo' is false, change the value type to indicate to the dead-store phase that the following // instruction does not use the no-missing-values information. opnd->SetValueType(valueType.SetHasNoMissingValues(false)); } } if(includeCurrentInstr) { IR::Instr *const noImplicitCallUses = IR::PragmaInstr::New(Js::OpCode::NoImplicitCallUses, 0, includeCurrentInstr->m_func); noImplicitCallUses->SetSrc1(opnd); noImplicitCallUses->GetSrc1()->SetIsJITOptimizedReg(true); includeCurrentInstr->InsertAfter(noImplicitCallUses); return; } noImplicitCallUsesToInsert->Add(opnd); } void GlobOpt::InsertNoImplicitCallUses(IR::Instr *const instr) { Assert(noImplicitCallUsesToInsert); const int n = noImplicitCallUsesToInsert->Count(); if(n == 0) { return; } IR::Instr *const insertBeforeInstr = instr->GetInsertBeforeByteCodeUsesInstr(); for(int i = 0; i < n;) { IR::Instr *const noImplicitCallUses = IR::PragmaInstr::New(Js::OpCode::NoImplicitCallUses, 0, instr->m_func); noImplicitCallUses->SetSrc1(noImplicitCallUsesToInsert->Item(i)); noImplicitCallUses->GetSrc1()->SetIsJITOptimizedReg(true); ++i; if(i < n) { noImplicitCallUses->SetSrc2(noImplicitCallUsesToInsert->Item(i)); noImplicitCallUses->GetSrc2()->SetIsJITOptimizedReg(true); ++i; } noImplicitCallUses->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(noImplicitCallUses); } noImplicitCallUsesToInsert->Clear(); } void GlobOpt::PrepareLoopArrayCheckHoist() { if(IsLoopPrePass() || !currentBlock->loop || !currentBlock->isLoopHeader || !currentBlock->loop->parent) { return; } if(currentBlock->loop->parent->needImplicitCallBailoutChecksForJsArrayCheckHoist) { // If the parent loop is an array check elimination candidate, so is the current loop. Even though the current loop may // not have array accesses, if the parent loop hoists array checks, the current loop also needs implicit call checks. 
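// Note the flag propagates top-down: once an outer loop is a candidate for
// hoisting JS-array checks, every nested loop must also keep its implicit-call
// bailout checks, since an implicit call anywhere inside the outer loop could
// invalidate the hoisted checks.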
currentBlock->loop->needImplicitCallBailoutChecksForJsArrayCheckHoist = true; } } JsArrayKills GlobOpt::CheckJsArrayKills(IR::Instr *const instr) { Assert(instr); JsArrayKills kills; if(instr->UsesAllFields()) { // Calls can (but are unlikely to) change a javascript array into an ES5 array, which may have different behavior for // index properties. kills.SetKillsAllArrays(); return kills; } const bool doArrayMissingValueCheckHoist = DoArrayMissingValueCheckHoist(); const bool doNativeArrayTypeSpec = DoNativeArrayTypeSpec(); const bool doArraySegmentHoist = DoArraySegmentHoist(ValueType::GetObject(ObjectType::Array)); Assert(doArraySegmentHoist == DoArraySegmentHoist(ValueType::GetObject(ObjectType::ObjectWithArray))); const bool doArrayLengthHoist = DoArrayLengthHoist(); if(!doArrayMissingValueCheckHoist && !doNativeArrayTypeSpec && !doArraySegmentHoist && !doArrayLengthHoist) { return kills; } // The following operations may create missing values in an array in an unlikely circumstance. Even though they don't kill // the fact that the 'this' parameter is an array (when implicit calls are disabled), we don't have a way to say the value // type is definitely array but it likely has no missing values. So, these will kill the definite value type as well, making // it likely array, such that the array checks will have to be redone. const bool useValueTypes = !IsLoopPrePass(); // Source value types are not guaranteed to be correct in a loop prepass switch(instr->m_opcode) { case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: { Assert(instr->GetDst()); if(!instr->GetDst()->IsIndirOpnd()) { break; } const ValueType baseValueType = useValueTypes ? instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType() : ValueType::Uninitialized; if(useValueTypes && baseValueType.IsNotArrayOrObjectWithArray()) { break; } if(instr->IsProfiledInstr()) { const Js::StElemInfo *const stElemInfo = instr->AsProfiledInstr()->u.stElemInfo; if(doArraySegmentHoist && stElemInfo->LikelyStoresOutsideHeadSegmentBounds()) { kills.SetKillsArrayHeadSegments(); kills.SetKillsArrayHeadSegmentLengths(); } if(doArrayLengthHoist && !(useValueTypes && baseValueType.IsNotArray()) && stElemInfo->LikelyStoresOutsideArrayBounds()) { kills.SetKillsArrayLengths(); } } break; } case Js::OpCode::DeleteElemI_A: case Js::OpCode::DeleteElemIStrict_A: Assert(instr->GetSrc1()); if(!instr->GetSrc1()->IsIndirOpnd() || (useValueTypes && instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsNotArrayOrObjectWithArray())) { break; } if(doArrayMissingValueCheckHoist) { kills.SetKillsArraysWithNoMissingValues(); } if(doArraySegmentHoist) { kills.SetKillsArrayHeadSegmentLengths(); } break; case Js::OpCode::StFld: case Js::OpCode::StFldStrict: { Assert(instr->GetDst()); if(!doArraySegmentHoist && !doArrayLengthHoist) { break; } IR::SymOpnd *const symDst = instr->GetDst()->AsSymOpnd(); if(!symDst->IsPropertySymOpnd()) { break; } IR::PropertySymOpnd *const dst = symDst->AsPropertySymOpnd(); if(dst->m_sym->AsPropertySym()->m_propertyId != Js::PropertyIds::length) { break; } if(useValueTypes && dst->GetPropertyOwnerValueType().IsNotArray()) { // Setting the 'length' property of an object that is not an array, even if it has an internal array, does // not kill the head segment or head segment length of any arrays. 
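// e.g. a.length = 0 on an actual array can shrink it and must kill any hoisted
// head segment length (and, below, length) info, whereas o.length = 0 on a
// non-array object is just an ordinary property store.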
break; } if(doArraySegmentHoist) { kills.SetKillsArrayHeadSegmentLengths(); } if(doArrayLengthHoist) { kills.SetKillsArrayLengths(); } break; } case Js::OpCode::InlineArrayPush: { Assert(instr->GetSrc2()); IR::Opnd *const arrayOpnd = instr->GetSrc1(); Assert(arrayOpnd); const ValueType arrayValueType(arrayOpnd->GetValueType()); if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray())) { break; } if(doArrayMissingValueCheckHoist) { kills.SetKillsArraysWithNoMissingValues(); } if(doArraySegmentHoist) { kills.SetKillsArrayHeadSegments(); kills.SetKillsArrayHeadSegmentLengths(); } if(doArrayLengthHoist && !(useValueTypes && arrayValueType.IsNotArray())) { kills.SetKillsArrayLengths(); } // Don't kill NativeArray, if there is no mismatch between array's type and element's type. if(doNativeArrayTypeSpec && !(useValueTypes && arrayValueType.IsNativeArray() && ((arrayValueType.IsLikelyNativeIntArray() && instr->GetSrc2()->IsInt32()) || (arrayValueType.IsLikelyNativeFloatArray() && instr->GetSrc2()->IsFloat())) ) && !(useValueTypes && arrayValueType.IsNotNativeArray())) { kills.SetKillsNativeArrays(); } break; } case Js::OpCode::InlineArrayPop: { IR::Opnd *const arrayOpnd = instr->GetSrc1(); Assert(arrayOpnd); const ValueType arrayValueType(arrayOpnd->GetValueType()); if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray())) { break; } if(doArraySegmentHoist) { kills.SetKillsArrayHeadSegmentLengths(); } if(doArrayLengthHoist && !(useValueTypes && arrayValueType.IsNotArray())) { kills.SetKillsArrayLengths(); } break; } case Js::OpCode::CallDirect: { Assert(instr->GetSrc1()); // Find the 'this' parameter and check if it's possible for it to be an array IR::Opnd *const arrayOpnd = instr->FindCallArgumentOpnd(1); Assert(arrayOpnd); const ValueType arrayValueType(arrayOpnd->GetValueType()); if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray())) { break; } const IR::JnHelperMethod helperMethod = instr->GetSrc1()->AsHelperCallOpnd()->m_fnHelper; if(doArrayMissingValueCheckHoist) { switch(helperMethod) { case IR::HelperArray_Reverse: case IR::HelperArray_Shift: case IR::HelperArray_Splice: case IR::HelperArray_Unshift: kills.SetKillsArraysWithNoMissingValues(); break; } } if(doArraySegmentHoist) { switch(helperMethod) { case IR::HelperArray_Reverse: case IR::HelperArray_Shift: case IR::HelperArray_Splice: case IR::HelperArray_Unshift: kills.SetKillsArrayHeadSegments(); kills.SetKillsArrayHeadSegmentLengths(); break; } } if(doArrayLengthHoist && !(useValueTypes && arrayValueType.IsNotArray())) { switch(helperMethod) { case IR::HelperArray_Shift: case IR::HelperArray_Splice: case IR::HelperArray_Unshift: kills.SetKillsArrayLengths(); break; } } if(doNativeArrayTypeSpec && !(useValueTypes && arrayValueType.IsNotNativeArray())) { switch(helperMethod) { case IR::HelperArray_Reverse: case IR::HelperArray_Shift: case IR::HelperArray_Slice: // Currently not inlined. 
//case IR::HelperArray_Sort: case IR::HelperArray_Splice: case IR::HelperArray_Unshift: kills.SetKillsNativeArrays(); break; } } break; } } return kills; } GlobOptBlockData const * GlobOpt::CurrentBlockData() const { return &this->currentBlock->globOptData; } GlobOptBlockData * GlobOpt::CurrentBlockData() { return &this->currentBlock->globOptData; } void GlobOpt::CommitCapturedValuesCandidate() { GlobOptBlockData * globOptData = CurrentBlockData(); globOptData->changedSyms->ClearAll(); if (!this->changedSymsAfterIncBailoutCandidate->IsEmpty()) { // // some symbols are changed after the values for current bailout have been // captured (GlobOpt::CapturedValues), need to restore such symbols as changed // for following incremental bailout construction, or we will miss capturing // values for later bailout // // swap changedSyms and changedSymsAfterIncBailoutCandidate // because both are from this->alloc BVSparse<JitArenaAllocator> * tempBvSwap = globOptData->changedSyms; globOptData->changedSyms = this->changedSymsAfterIncBailoutCandidate; this->changedSymsAfterIncBailoutCandidate = tempBvSwap; } if (globOptData->capturedValues) { globOptData->capturedValues->DecrementRefCount(); } globOptData->capturedValues = globOptData->capturedValuesCandidate; // null out capturedValuesCandidate to stop tracking symbols change for it globOptData->capturedValuesCandidate = nullptr; } bool GlobOpt::IsOperationThatLikelyKillsJsArraysWithNoMissingValues(IR::Instr *const instr) { // StElem is profiled with information indicating whether it will likely create a missing value in the array. In that case, // we prefer to kill the no-missing-values information in the value so that we don't bail out in a likely circumstance. return (instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) && DoArrayMissingValueCheckHoist() && instr->IsProfiledInstr() && instr->AsProfiledInstr()->u.stElemInfo->LikelyCreatesMissingValue(); } bool GlobOpt::NeedBailOnImplicitCallForArrayCheckHoist(BasicBlock const * const block, const bool isForwardPass) const { Assert(block); return isForwardPass && block->loop && block->loop->needImplicitCallBailoutChecksForJsArrayCheckHoist; } bool GlobOpt::PrepareForIgnoringIntOverflow(IR::Instr *const instr) { Assert(instr); const bool isBoundary = instr->m_opcode == Js::OpCode::NoIntOverflowBoundary; // Update the instruction's "int overflow matters" flag based on whether we are currently allowing ignoring int overflows. // Some operations convert their srcs to int32s, those can still ignore int overflow. if(instr->ignoreIntOverflowInRange) { instr->ignoreIntOverflowInRange = !intOverflowCurrentlyMattersInRange || OpCodeAttr::IsInt32(instr->m_opcode); } if(!intOverflowDoesNotMatterRange) { Assert(intOverflowCurrentlyMattersInRange); // There are no more ranges of instructions where int overflow does not matter, in this block. return isBoundary; } if(instr == intOverflowDoesNotMatterRange->LastInstr()) { Assert(isBoundary); // Reached the last instruction in the range intOverflowCurrentlyMattersInRange = true; intOverflowDoesNotMatterRange = intOverflowDoesNotMatterRange->Next(); return isBoundary; } if(!intOverflowCurrentlyMattersInRange) { return isBoundary; } if(instr != intOverflowDoesNotMatterRange->FirstInstr()) { // Have not reached the next range return isBoundary; } Assert(isBoundary); // This is the first instruction in a range of instructions where int overflow does not matter. 
There can be many inputs to // instructions in the range, some of which are inputs to the range itself (that is, the values are not defined in the // range). Ignoring int overflow is only valid for int operations, so we need to ensure that all inputs to the range are // int (not "likely int") before ignoring any overflows in the range. Ensuring that a sym with a "likely int" value is an // int requires a bail-out. These bail-out check need to happen before any overflows are ignored, otherwise it's too late. // The backward pass tracked all inputs into the range. Iterate over them and verify the values, and insert lossless // conversions to int as necessary, before the first instruction in the range. If for any reason all values cannot be // guaranteed to be ints, the optimization will be disabled for this range. intOverflowCurrentlyMattersInRange = false; { BVSparse<JitArenaAllocator> tempBv1(tempAlloc); BVSparse<JitArenaAllocator> tempBv2(tempAlloc); { // Just renaming the temp BVs for this section to indicate how they're used so that it makes sense BVSparse<JitArenaAllocator> &symsToExclude = tempBv1; BVSparse<JitArenaAllocator> &symsToInclude = tempBv2; #if DBG_DUMP SymID couldNotConvertSymId = 0; #endif FOREACH_BITSET_IN_SPARSEBV(id, intOverflowDoesNotMatterRange->SymsRequiredToBeInt()) { Sym *const sym = func->m_symTable->Find(id); Assert(sym); // Some instructions with property syms are also tracked by the backward pass, and may be included in the range // (LdSlot for instance). These property syms don't get their values until either copy-prop resolves a value for // them, or a new value is created once the use of the property sym is reached. In either case, we're not that // far yet, so we need to find the future value of the property sym by evaluating copy-prop in reverse. Value *const value = sym->IsStackSym() ? CurrentBlockData()->FindValue(sym) : CurrentBlockData()->FindFuturePropertyValue(sym->AsPropertySym()); if(!value) { #if DBG_DUMP couldNotConvertSymId = id; #endif intOverflowCurrentlyMattersInRange = true; BREAK_BITSET_IN_SPARSEBV; } const bool isInt32OrUInt32Float = value->GetValueInfo()->IsFloatConstant() && Js::JavascriptNumber::IsInt32OrUInt32(value->GetValueInfo()->AsFloatConstant()->FloatValue()); if(value->GetValueInfo()->IsInt() || isInt32OrUInt32Float) { if(!IsLoopPrePass()) { // Input values that are already int can be excluded from int-specialization. We can treat unsigned // int32 values as int32 values (ignoring the overflow), since the values will only be used inside the // range where overflow does not matter. symsToExclude.Set(sym->m_id); } continue; } if(!DoAggressiveIntTypeSpec() || !value->GetValueInfo()->IsLikelyInt()) { // When aggressive int specialization is off, syms with "likely int" values cannot be forced to int since // int bail-out checks are not allowed in that mode. Similarly, with aggressive int specialization on, it // wouldn't make sense to force non-"likely int" values to int since it would almost guarantee a bail-out at // runtime. In both cases, just disable ignoring overflow for this range. #if DBG_DUMP couldNotConvertSymId = id; #endif intOverflowCurrentlyMattersInRange = true; BREAK_BITSET_IN_SPARSEBV; } if(IsLoopPrePass()) { // The loop prepass does not modify bit-vectors. Since it doesn't add bail-out checks, it also does not need // to specialize anything up-front. It only needs to be consistent in how it determines whether to allow // ignoring overflow for a range, based on the values of inputs into the range. 
continue; } // Since input syms are tracked in the backward pass, where there is no value tracking, it will not be aware of // copy-prop. If a copy-prop sym is available, it will be used instead, so exclude the original sym and include // the copy-prop sym for specialization. StackSym *const copyPropSym = CurrentBlockData()->GetCopyPropSym(sym, value); if(copyPropSym) { symsToExclude.Set(sym->m_id); Assert(!symsToExclude.Test(copyPropSym->m_id)); const bool needsToBeLossless = !intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Test(sym->m_id); if(intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Test(copyPropSym->m_id) || symsToInclude.TestAndSet(copyPropSym->m_id)) { // The copy-prop sym is already included if(needsToBeLossless) { // The original sym needs to be lossless, so make the copy-prop sym lossless as well. intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Clear(copyPropSym->m_id); } } else if(!needsToBeLossless) { // The copy-prop sym was not included before, and the original sym can be lossy, so make it lossy. intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Set(copyPropSym->m_id); } } else if(!sym->IsStackSym()) { // Only stack syms can be converted to int, and copy-prop syms are stack syms. If a copy-prop sym was not // found for the property sym, we can't ignore overflows in this range. #if DBG_DUMP couldNotConvertSymId = id; #endif intOverflowCurrentlyMattersInRange = true; BREAK_BITSET_IN_SPARSEBV; } } NEXT_BITSET_IN_SPARSEBV; if(intOverflowCurrentlyMattersInRange) { #if DBG_DUMP if(PHASE_TRACE(Js::TrackCompoundedIntOverflowPhase, func) && !IsLoopPrePass()) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Output::Print( _u("TrackCompoundedIntOverflow - Top function: %s (%s), Phase: %s, Block: %u, Disabled ignoring overflows\n"), func->GetJITFunctionBody()->GetDisplayName(), func->GetDebugNumberSet(debugStringBuffer), Js::PhaseNames[Js::ForwardPhase], currentBlock->GetBlockNum()); Output::Print(_u(" Input sym could not be turned into an int: %u\n"), couldNotConvertSymId); Output::Print(_u(" First instr: ")); instr->m_next->Dump(); Output::Flush(); } #endif intOverflowDoesNotMatterRange = intOverflowDoesNotMatterRange->Next(); return isBoundary; } if(IsLoopPrePass()) { return isBoundary; } // Update the syms to specialize after enumeration intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Minus(&symsToExclude); intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Minus(&symsToExclude); intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Or(&symsToInclude); } { // Exclude syms that are already live as lossless int32, and exclude lossy conversions of syms that are already live // as lossy int32. 
// symsToExclude = liveInt32Syms - liveLossyInt32Syms // syms live as lossless int // lossySymsToExclude = symsRequiredToBeLossyInt & liveLossyInt32Syms; // syms we want as lossy int that are already live as lossy int // symsToExclude |= lossySymsToExclude // symsRequiredToBeInt -= symsToExclude // symsRequiredToBeLossyInt -= symsToExclude BVSparse<JitArenaAllocator> &symsToExclude = tempBv1; BVSparse<JitArenaAllocator> &lossySymsToExclude = tempBv2; symsToExclude.Minus(CurrentBlockData()->liveInt32Syms, CurrentBlockData()->liveLossyInt32Syms); lossySymsToExclude.And( intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt(), CurrentBlockData()->liveLossyInt32Syms); symsToExclude.Or(&lossySymsToExclude); intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Minus(&symsToExclude); intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Minus(&symsToExclude); } #if DBG { // Verify that the syms to be converted are live // liveSyms = liveInt32Syms | liveFloat64Syms | liveVarSyms // deadSymsRequiredToBeInt = symsRequiredToBeInt - liveSyms BVSparse<JitArenaAllocator> &liveSyms = tempBv1; BVSparse<JitArenaAllocator> &deadSymsRequiredToBeInt = tempBv2; liveSyms.Or(CurrentBlockData()->liveInt32Syms, CurrentBlockData()->liveFloat64Syms); liveSyms.Or(CurrentBlockData()->liveVarSyms); deadSymsRequiredToBeInt.Minus(intOverflowDoesNotMatterRange->SymsRequiredToBeInt(), &liveSyms); Assert(deadSymsRequiredToBeInt.IsEmpty()); } #endif } // Int-specialize the syms before the first instruction of the range (the current instruction) intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Minus(intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()); #if DBG_DUMP if(PHASE_TRACE(Js::TrackCompoundedIntOverflowPhase, func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Output::Print( _u("TrackCompoundedIntOverflow - Top function: %s (%s), Phase: %s, Block: %u\n"), func->GetJITFunctionBody()->GetDisplayName(), func->GetDebugNumberSet(debugStringBuffer), Js::PhaseNames[Js::ForwardPhase], currentBlock->GetBlockNum()); Output::Print(_u(" Input syms to be int-specialized (lossless): ")); intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Dump(); Output::Print(_u(" Input syms to be converted to int (lossy): ")); intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Dump(); Output::Print(_u(" First instr: ")); instr->m_next->Dump(); Output::Flush(); } #endif ToInt32(intOverflowDoesNotMatterRange->SymsRequiredToBeInt(), currentBlock, false /* lossy */, instr); ToInt32(intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt(), currentBlock, true /* lossy */, instr); return isBoundary; } void GlobOpt::VerifyIntSpecForIgnoringIntOverflow(IR::Instr *const instr) { if(intOverflowCurrentlyMattersInRange || IsLoopPrePass()) { return; } Assert(instr->m_opcode != Js::OpCode::Mul_I4 || (instr->m_opcode == Js::OpCode::Mul_I4 && !instr->ShouldCheckFor32BitOverflow() && instr->ShouldCheckForNon32BitOverflow() )); // Instructions that are marked as "overflow doesn't matter" in the range must guarantee that they operate on int values and // result in int values, for ignoring overflow to be valid. So, int-specialization is required for such instructions in the // range. Ld_A is an exception because it only specializes if the src sym is available as a required specialized sym, and it // doesn't generate bailouts or cause ignoring int overflow to be invalid. // MULs are allowed to start a region and have BailOutInfo since they will bailout on non-32 bit overflow. 
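// For instance, in a range computing t = ((a + b) + c) + d where only the final
// sum escapes, the intermediate adds may wrap in 32 bits; that is only sound if
// every instruction in the range operates on int32 values, which is what is
// verified below.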
if(instr->m_opcode == Js::OpCode::Ld_A || ((!instr->HasBailOutInfo() || instr->m_opcode == Js::OpCode::Mul_I4) && (!instr->GetDst() || instr->GetDst()->IsInt32()) && (!instr->GetSrc1() || instr->GetSrc1()->IsInt32()) && (!instr->GetSrc2() || instr->GetSrc2()->IsInt32()))) { return; } if (!instr->HasBailOutInfo() && !instr->HasAnySideEffects()) { return; } // This can happen for Neg_A if it needs to bail out on negative zero, and perhaps other cases as well. It's too late to fix // the problem (overflows may already be ignored), so handle it by bailing out at compile-time and disabling tracking int // overflow. Assert(!func->IsTrackCompoundedIntOverflowDisabled()); if(PHASE_TRACE(Js::BailOutPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Output::Print( _u("BailOut (compile-time): function: %s (%s) instr: "), func->GetJITFunctionBody()->GetDisplayName(), func->GetDebugNumberSet(debugStringBuffer)); #if DBG_DUMP instr->Dump(); #else Output::Print(_u("%s "), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); #endif Output::Print(_u("(overflow does not matter but could not int-spec or needed bailout)\n")); Output::Flush(); } if(func->IsTrackCompoundedIntOverflowDisabled()) { // Tracking int overflows is already off for some reason. Prevent trying to rejit again because it won't help and the // same thing will happen again and cause an infinite loop. Just abort jitting this function. if(PHASE_TRACE(Js::BailOutPhase, this->func)) { Output::Print(_u(" Aborting JIT because TrackIntOverflow is already off\n")); Output::Flush(); } throw Js::OperationAbortedException(); } throw Js::RejitException(RejitReason::TrackIntOverflowDisabled); } // It makes lowering easier if it can assume that the first src is never a constant, // at least for commutative operators. For non-commutative, just hoist the constant. void GlobOpt::PreLowerCanonicalize(IR::Instr *instr, Value **pSrc1Val, Value **pSrc2Val) { IR::Opnd *dst = instr->GetDst(); IR::Opnd *src1 = instr->GetSrc1(); IR::Opnd *src2 = instr->GetSrc2(); if (src1->IsImmediateOpnd()) { // Swap for dst, src } else if (src2 && dst && src2->IsRegOpnd()) { if (src2->GetIsDead() && !src1->GetIsDead() && !src1->IsEqual(dst)) { // Swap if src2 is dead, as the reg can be reuse for the dst for opEqs like on x86 (ADD r1, r2) } else if (src2->IsEqual(dst)) { // Helps lowering of opEqs } else { return; } // Make sure we don't swap 2 srcs with valueOf calls. 
if (OpCodeAttr::OpndHasImplicitCall(instr->m_opcode)) { if (instr->IsBranchInstr()) { if (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive()) { return; } } else if (!src1->GetValueType().IsPrimitive() && !src2->GetValueType().IsPrimitive()) { return; } } } else { return; } Js::OpCode opcode = instr->m_opcode; switch (opcode) { case Js::OpCode::And_A: case Js::OpCode::Mul_A: case Js::OpCode::Or_A: case Js::OpCode::Xor_A: case Js::OpCode::And_I4: case Js::OpCode::Mul_I4: case Js::OpCode::Or_I4: case Js::OpCode::Xor_I4: case Js::OpCode::Add_I4: swap_srcs: if (!instr->GetSrc2()->IsImmediateOpnd()) { instr->m_opcode = opcode; instr->SwapOpnds(); Value *tempVal = *pSrc1Val; *pSrc1Val = *pSrc2Val; *pSrc2Val = tempVal; return; } break; case Js::OpCode::BrSrEq_A: case Js::OpCode::BrSrNotNeq_A: case Js::OpCode::BrEq_I4: goto swap_srcs; case Js::OpCode::BrSrNeq_A: case Js::OpCode::BrNeq_A: case Js::OpCode::BrSrNotEq_A: case Js::OpCode::BrNotEq_A: case Js::OpCode::BrNeq_I4: goto swap_srcs; case Js::OpCode::BrGe_A: opcode = Js::OpCode::BrLe_A; goto swap_srcs; case Js::OpCode::BrNotGe_A: opcode = Js::OpCode::BrNotLe_A; goto swap_srcs; case Js::OpCode::BrGe_I4: opcode = Js::OpCode::BrLe_I4; goto swap_srcs; case Js::OpCode::BrGt_A: opcode = Js::OpCode::BrLt_A; goto swap_srcs; case Js::OpCode::BrNotGt_A: opcode = Js::OpCode::BrNotLt_A; goto swap_srcs; case Js::OpCode::BrGt_I4: opcode = Js::OpCode::BrLt_I4; goto swap_srcs; case Js::OpCode::BrLe_A: opcode = Js::OpCode::BrGe_A; goto swap_srcs; case Js::OpCode::BrNotLe_A: opcode = Js::OpCode::BrNotGe_A; goto swap_srcs; case Js::OpCode::BrLe_I4: opcode = Js::OpCode::BrGe_I4; goto swap_srcs; case Js::OpCode::BrLt_A: opcode = Js::OpCode::BrGt_A; goto swap_srcs; case Js::OpCode::BrNotLt_A: opcode = Js::OpCode::BrNotGt_A; goto swap_srcs; case Js::OpCode::BrLt_I4: opcode = Js::OpCode::BrGt_I4; goto swap_srcs; case Js::OpCode::BrEq_A: case Js::OpCode::BrNotNeq_A: case Js::OpCode::CmEq_A: case Js::OpCode::CmNeq_A: // this == "" not the same as "" == this... if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())) { return; } goto swap_srcs; case Js::OpCode::CmGe_A: if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())) { return; } opcode = Js::OpCode::CmLe_A; goto swap_srcs; case Js::OpCode::CmGt_A: if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())) { return; } opcode = Js::OpCode::CmLt_A; goto swap_srcs; case Js::OpCode::CmLe_A: if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())) { return; } opcode = Js::OpCode::CmGe_A; goto swap_srcs; case Js::OpCode::CmLt_A: if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())) { return; } opcode = Js::OpCode::CmGt_A; goto swap_srcs; case Js::OpCode::CallI: case Js::OpCode::CallIFixed: case Js::OpCode::NewScObject: case Js::OpCode::NewScObjectSpread: case Js::OpCode::NewScObjArray: case Js::OpCode::NewScObjArraySpread: case Js::OpCode::NewScObjectNoCtor: // Don't insert load to register if the function operand is a fixed function. if (instr->HasFixedFunctionAddressTarget()) { return; } break; // Can't do add because <32 + "Hello"> isn't equal to <"Hello" + 32> // Lower can do the swap. Other op-codes listed below don't need immediate source hoisting, as the fast paths handle it, // or the lowering handles the hoisting. 
    case Js::OpCode::Add_A:
        if (src1->IsFloat())
        {
            goto swap_srcs;
        }
        return;

    case Js::OpCode::Sub_I4:
    case Js::OpCode::Neg_I4:
    case Js::OpCode::Not_I4:
    case Js::OpCode::NewScFunc:
    case Js::OpCode::NewScGenFunc:
    case Js::OpCode::NewScArray:
    case Js::OpCode::NewScIntArray:
    case Js::OpCode::NewScFltArray:
    case Js::OpCode::NewScArrayWithMissingValues:
    case Js::OpCode::NewRegEx:
    case Js::OpCode::Ld_A:
    case Js::OpCode::Ld_I4:
    case Js::OpCode::ThrowRuntimeError:
    case Js::OpCode::TrapIfMinIntOverNegOne:
    case Js::OpCode::TrapIfTruncOverflow:
    case Js::OpCode::TrapIfZero:
    case Js::OpCode::FromVar:
    case Js::OpCode::Conv_Prim:
    case Js::OpCode::LdC_A_I4:
    case Js::OpCode::LdStr:
    case Js::OpCode::InitFld:
    case Js::OpCode::InitRootFld:
    case Js::OpCode::StartCall:
    case Js::OpCode::ArgOut_A:
    case Js::OpCode::ArgOut_A_Inline:
    case Js::OpCode::ArgOut_A_Dynamic:
    case Js::OpCode::ArgOut_A_FromStackArgs:
    case Js::OpCode::ArgOut_A_InlineBuiltIn:
    case Js::OpCode::ArgOut_A_InlineSpecialized:
    case Js::OpCode::ArgOut_A_SpreadArg:
    case Js::OpCode::InlineeEnd:
    case Js::OpCode::EndCallForPolymorphicInlinee:
    case Js::OpCode::InlineeMetaArg:
    case Js::OpCode::InlineBuiltInEnd:
    case Js::OpCode::InlineNonTrackingBuiltInEnd:
    case Js::OpCode::CallHelper:
    case Js::OpCode::LdElemUndef:
    case Js::OpCode::LdElemUndefScoped:
    case Js::OpCode::RuntimeTypeError:
    case Js::OpCode::RuntimeReferenceError:
    case Js::OpCode::Ret:
    case Js::OpCode::NewScObjectSimple:
    case Js::OpCode::NewScObjectLiteral:
    case Js::OpCode::StFld:
    case Js::OpCode::StRootFld:
    case Js::OpCode::StSlot:
    case Js::OpCode::StSlotChkUndecl:
    case Js::OpCode::StElemC:
    case Js::OpCode::StArrSegElemC:
    case Js::OpCode::StElemI_A:
    case Js::OpCode::StElemI_A_Strict:
    case Js::OpCode::CallDirect:
    case Js::OpCode::BrNotHasSideEffects:
    case Js::OpCode::NewConcatStrMulti:
    case Js::OpCode::NewConcatStrMultiBE:
    case Js::OpCode::ExtendArg_A:
#ifdef ENABLE_DOM_FAST_PATH
    case Js::OpCode::DOMFastPathGetter:
    case Js::OpCode::DOMFastPathSetter:
#endif
    case Js::OpCode::NewScopeSlots:
    case Js::OpCode::NewScopeSlotsWithoutPropIds:
    case Js::OpCode::NewStackScopeSlots:
    case Js::OpCode::IsInst:
    case Js::OpCode::BailOnEqual:
    case Js::OpCode::BailOnNotEqual:
    case Js::OpCode::StArrViewElem:
        return;
    }

    if (!src1->IsImmediateOpnd())
    {
        return;
    }

    // The fast paths or lowering of the remaining instructions may not support handling immediate opnds for the first src. The
    // immediate src1 is hoisted here into a separate instruction.
    if (src1->IsIntConstOpnd())
    {
        IR::Instr *newInstr = instr->HoistSrc1(Js::OpCode::Ld_I4);
        ToInt32Dst(newInstr, newInstr->GetDst()->AsRegOpnd(), this->currentBlock);
    }
    else if (src1->IsInt64ConstOpnd())
    {
        instr->HoistSrc1(Js::OpCode::Ld_I4);
    }
    else
    {
        instr->HoistSrc1(Js::OpCode::Ld_A);
    }
    src1 = instr->GetSrc1();
    src1->AsRegOpnd()->m_sym->SetIsConst();
}

// Clear the ValueMap of the values invalidated by this instr.
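// Three passes: field kills (property syms made stale), value kills (values
// invalidated wholesale, e.g. by calls), and array value kills (segment,
// segment-length and length info invalidated per CheckJsArrayKills above).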
void GlobOpt::ProcessKills(IR::Instr *instr) { this->ProcessFieldKills(instr); this->ProcessValueKills(instr); this->ProcessArrayValueKills(instr); } bool GlobOpt::OptIsInvariant(IR::Opnd *src, BasicBlock *block, Loop *loop, Value *srcVal, bool isNotTypeSpecConv, bool allowNonPrimitives) { if(!loop->CanHoistInvariants()) { return false; } Sym *sym; switch(src->GetKind()) { case IR::OpndKindAddr: case IR::OpndKindFloatConst: case IR::OpndKindIntConst: return true; case IR::OpndKindReg: sym = src->AsRegOpnd()->m_sym; break; case IR::OpndKindSym: sym = src->AsSymOpnd()->m_sym; if (src->AsSymOpnd()->IsPropertySymOpnd()) { if (src->AsSymOpnd()->AsPropertySymOpnd()->IsTypeChecked()) { // We do not handle hoisting these yet. We might be hoisting this across the instr with the type check protecting this one. // And somehow, the dead-store pass now removes the type check on that instr later on... // For CheckFixedFld, there is no benefit hoisting these if they don't have a type check as they won't generate code. return false; } } break; case IR::OpndKindHelperCall: // Helper calls, like the private slot getter, can be invariant. // Consider moving more math builtin to invariant? return HelperMethodAttributes::IsInVariant(src->AsHelperCallOpnd()->m_fnHelper); default: return false; } return OptIsInvariant(sym, block, loop, srcVal, isNotTypeSpecConv, allowNonPrimitives); } bool GlobOpt::OptIsInvariant(Sym *sym, BasicBlock *block, Loop *loop, Value *srcVal, bool isNotTypeSpecConv, bool allowNonPrimitives, Value **loopHeadValRef) { Value *localLoopHeadVal; if(!loopHeadValRef) { loopHeadValRef = &localLoopHeadVal; } Value *&loopHeadVal = *loopHeadValRef; loopHeadVal = nullptr; if(!loop->CanHoistInvariants()) { return false; } if (sym->IsStackSym()) { if (sym->AsStackSym()->IsTypeSpec()) { StackSym *varSym = sym->AsStackSym()->GetVarEquivSym(this->func); // Make sure the int32/float64 version of this is available. // Note: We could handle this by converting the src, but usually the // conversion is hoistable if this is hoistable anyway. // In some weird cases it may not be however, so we'll bail out. if (sym->AsStackSym()->IsInt32()) { Assert(block->globOptData.liveInt32Syms->Test(varSym->m_id)); if (!loop->landingPad->globOptData.liveInt32Syms->Test(varSym->m_id) || (loop->landingPad->globOptData.liveLossyInt32Syms->Test(varSym->m_id) && !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id))) { // Either the int32 sym is not live in the landing pad, or it's lossy in the landing pad and the // instruction's block is using the lossless version. In either case, the instruction cannot be hoisted // without doing a conversion of this operand. return false; } } else if (sym->AsStackSym()->IsFloat64()) { if (!loop->landingPad->globOptData.liveFloat64Syms->Test(varSym->m_id)) { return false; } } #ifdef ENABLE_SIMDJS else { Assert(sym->AsStackSym()->IsSimd128()); if (!loop->landingPad->globOptData.liveSimd128F4Syms->Test(varSym->m_id) && !loop->landingPad->globOptData.liveSimd128I4Syms->Test(varSym->m_id)) { return false; } } #endif sym = sym->AsStackSym()->GetVarEquivSym(this->func); } else { // Make sure the var version of this is available. // Note: We could handle this by converting the src, but usually the // conversion is hoistable if this is hoistable anyway. // In some weird cases it may not be however, so we'll bail out. 
if (!loop->landingPad->globOptData.liveVarSyms->Test(sym->m_id)) { return false; } } } else if (sym->IsPropertySym()) { if (!loop->landingPad->globOptData.liveFields->Test(sym->m_id)) { return false; } } else { return false; } // We rely on having a value. if (srcVal == NULL) { return false; } // A symbol is invariant if its current value is the same as it was upon entering the loop. loopHeadVal = loop->landingPad->globOptData.FindValue(sym); if (loopHeadVal == NULL || loopHeadVal->GetValueNumber() != srcVal->GetValueNumber()) { return false; } // Can't hoist non-primitives, unless we have safeguards against valueof/tostring. Additionally, we need to consider // the value annotations on the source *before* the loop: if we hoist this instruction outside the loop, we can't // necessarily rely on type annotations added (and enforced) earlier in the loop's body. // // It might look as though !loopHeadVal->GetValueInfo()->IsPrimitive() implies // !loop->landingPad->globOptData.IsTypeSpecialized(sym), but it turns out that this is not always the case. We // encountered a test case in which we had previously hoisted a FromVar (to float 64) instruction, but its bailout code was // BailoutPrimitiveButString, rather than BailoutNumberOnly, which would have allowed us to conclude that the dest was // definitely a float64. Instead, it was only *likely* a float64, causing IsPrimitive to return false. if (!allowNonPrimitives && !loopHeadVal->GetValueInfo()->IsPrimitive() && !loop->landingPad->globOptData.IsTypeSpecialized(sym)) { return false; } if(!isNotTypeSpecConv && loop->symsDefInLoop->Test(sym->m_id)) { // Typically, a sym is considered invariant if it has the same value in the current block and in the loop landing pad. // The sym may have had a different value earlier in the loop or on the back-edge, but as long as it's reassigned to its // value outside the loop, it would be considered invariant in this block. Consider that case: // s1 = s2[invariant] // <loop start> // s1 = s2[invariant] // // s1 now has the same value as in the landing pad, and is considered invariant // s1 += s3 // // s1 is not invariant here, or on the back-edge // ++s3 // s3 is not invariant, so the add above cannot be hoisted // <loop end> // // A problem occurs at the point of (s1 += s3) when: // - At (s1 = s2) inside the loop, s1 was made to be the sym store of that value. This by itself is legal, because // after that transfer, s1 and s2 have the same value. // - (s1 += s3) is type-specialized but s1 is not specialized in the loop header. This happens when s1 is not // specialized entering the loop, and since s1 is not used before it's defined in the loop, it's not specialized // on back-edges. // // With that, at (s1 += s3), the conversion of s1 to the type-specialized version would be hoisted because s1 is // invariant just before that instruction. Since this add is specialized, the specialized version of the sym is modified // in the loop without a reassignment at (s1 = s2) inside the loop, and (s1 += s3) would then use an incorrect value of // s1 (it would use the value of s1 from the previous loop iteration, instead of using the value of s2). // // The problem here, is that we cannot hoist the conversion of s1 into its specialized version across the assignment // (s1 = s2) inside the loop. So for the purposes of type specialization, don't consider a sym invariant if it has a def // inside the loop. 
return false; } // For values with an int range, require additionally that the range is the same as in the landing pad, as the range may // have been changed on this path based on branches, and int specialization and invariant hoisting may rely on the range // being the same. For type spec conversions, only require that if the value is an int constant in the current block, that // it is also an int constant with the same value in the landing pad. Other range differences don't matter for type spec. IntConstantBounds srcIntConstantBounds, loopHeadIntConstantBounds; if(srcVal->GetValueInfo()->TryGetIntConstantBounds(&srcIntConstantBounds) && (isNotTypeSpecConv || srcIntConstantBounds.IsConstant()) && ( !loopHeadVal->GetValueInfo()->TryGetIntConstantBounds(&loopHeadIntConstantBounds) || loopHeadIntConstantBounds.LowerBound() != srcIntConstantBounds.LowerBound() || loopHeadIntConstantBounds.UpperBound() != srcIntConstantBounds.UpperBound() )) { return false; } // If the loopHeadVal is primitive, the current value should be as well. This really should be // srcVal->GetValueInfo()->IsPrimitive() instead of IsLikelyPrimitive, but this stronger assertion // doesn't hold in some cases when this method is called out of the array code. Assert((!loopHeadVal->GetValueInfo()->IsPrimitive()) || srcVal->GetValueInfo()->IsLikelyPrimitive()); return true; } bool GlobOpt::OptIsInvariant( IR::Instr *instr, BasicBlock *block, Loop *loop, Value *src1Val, Value *src2Val, bool isNotTypeSpecConv, const bool forceInvariantHoisting) { if (!loop->CanHoistInvariants()) { return false; } if (!OpCodeAttr::CanCSE(instr->m_opcode)) { return false; } bool allowNonPrimitives = !OpCodeAttr::OpndHasImplicitCall(instr->m_opcode); switch(instr->m_opcode) { // Can't legally hoist these case Js::OpCode::LdLen_A: return false; //Can't Hoist BailOnNotStackArgs, as it is necessary as InlineArgsOptimization relies on this opcode //to decide whether to throw rejit exception or not. case Js::OpCode::BailOnNotStackArgs: return false; // Usually not worth hoisting these case Js::OpCode::LdStr: case Js::OpCode::Ld_A: case Js::OpCode::Ld_I4: case Js::OpCode::LdC_A_I4: if(!forceInvariantHoisting) { return false; } break; // Can't hoist these outside the function it's for. The LdArgumentsFromFrame for an inlinee depends on the inlinee meta arg // that holds the arguments object, which is only initialized at the start of the inlinee. So, can't hoist this outside the // inlinee. case Js::OpCode::LdArgumentsFromFrame: if(instr->m_func != loop->GetFunc()) { return false; } break; case Js::OpCode::FromVar: if (instr->HasBailOutInfo()) { allowNonPrimitives = true; } break; case Js::OpCode::CheckObjType: // Bug 11712101: If the operand is a field, ensure that its containing object type is invariant // before hoisting -- that is, don't hoist a CheckObjType over a DeleteFld on that object. // (CheckObjType only checks the operand and its immediate parent, so we don't need to go // any farther up the object graph.) 
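// e.g. hoisting a CheckObjType for o.x above a loop body containing DeleteFld
// on o would let the type check float past the operation that invalidates it,
// hence the object type sym itself must be invariant before hoisting.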
        Assert(instr->GetSrc1());
        PropertySym *propertySym = instr->GetSrc1()->AsPropertySymOpnd()->GetPropertySym();
        if (propertySym->HasObjectTypeSym())
        {
            StackSym *objectTypeSym = propertySym->GetObjectTypeSym();
            if (!this->OptIsInvariant(objectTypeSym, block, loop, this->CurrentBlockData()->FindValue(objectTypeSym), true, true))
            {
                return false;
            }
        }
        break;
    }

    IR::Opnd *dst = instr->GetDst();

    if (dst && !dst->IsRegOpnd())
    {
        return false;
    }

    IR::Opnd *src1 = instr->GetSrc1();

    if (src1)
    {
        if (!this->OptIsInvariant(src1, block, loop, src1Val, isNotTypeSpecConv, allowNonPrimitives))
        {
            return false;
        }

        IR::Opnd *src2 = instr->GetSrc2();

        if (src2)
        {
            if (!this->OptIsInvariant(src2, block, loop, src2Val, isNotTypeSpecConv, allowNonPrimitives))
            {
                return false;
            }
        }
    }

    return true;
}

bool
GlobOpt::OptDstIsInvariant(IR::RegOpnd *dst)
{
    StackSym *dstSym = dst->m_sym;
    if (dstSym->IsTypeSpec())
    {
        // The type-specialized sym may be single def, but not the original...
        dstSym = dstSym->GetVarEquivSym(this->func);
    }

    return (dstSym->m_isSingleDef);
}

void
GlobOpt::OptHoistUpdateValueType(
    Loop* loop,
    IR::Instr* instr,
    IR::Opnd* srcOpnd,
    Value* opndVal)
{
    if (opndVal == nullptr || instr->m_opcode == Js::OpCode::FromVar)
    {
        return;
    }

    Sym* opndSym = srcOpnd->GetSym();

    if (opndSym)
    {
        BasicBlock* landingPad = loop->landingPad;
        Value* opndValueInLandingPad = landingPad->globOptData.FindValue(opndSym);
        Assert(opndVal->GetValueNumber() == opndValueInLandingPad->GetValueNumber());

        ValueType opndValueTypeInLandingPad = opndValueInLandingPad->GetValueInfo()->Type();

        if (srcOpnd->GetValueType() != opndValueTypeInLandingPad)
        {
            if (instr->m_opcode == Js::OpCode::SetConcatStrMultiItemBE)
            {
                Assert(!opndValueTypeInLandingPad.IsString());
                Assert(instr->GetDst());

                IR::RegOpnd* strOpnd = IR::RegOpnd::New(TyVar, instr->m_func);
                strOpnd->SetValueType(ValueType::String);
                strOpnd->SetValueTypeFixed();
                IR::Instr* convPrimStrInstr =
                    IR::Instr::New(Js::OpCode::Conv_PrimStr, strOpnd, srcOpnd->Use(instr->m_func), instr->m_func);
                instr->ReplaceSrc(srcOpnd, strOpnd);

                if (loop->bailOutInfo->bailOutInstr)
                {
                    loop->bailOutInfo->bailOutInstr->InsertBefore(convPrimStrInstr);
                }
                else
                {
                    landingPad->InsertAfter(convPrimStrInstr);
                }
            }

            srcOpnd->SetValueType(opndValueTypeInLandingPad);
        }

        if (opndSym->IsPropertySym())
        {
            // Also fix valueInfo on objPtr
            StackSym* opndObjPtrSym = opndSym->AsPropertySym()->m_stackSym;
            Value* opndObjPtrSymValInLandingPad = landingPad->globOptData.FindValue(opndObjPtrSym);
            ValueInfo* opndObjPtrSymValueInfoInLandingPad = opndObjPtrSymValInLandingPad->GetValueInfo();

            srcOpnd->AsSymOpnd()->SetPropertyOwnerValueType(opndObjPtrSymValueInfoInLandingPad->Type());
        }
    }
}

void
GlobOpt::OptHoistInvariant(
    IR::Instr *instr,
    BasicBlock *block,
    Loop *loop,
    Value *dstVal,
    Value *const src1Val,
    Value *const src2Val,
    bool isNotTypeSpecConv,
    bool lossy,
    IR::BailOutKind bailoutKind)
{
    BasicBlock *landingPad = loop->landingPad;

    IR::Opnd* src1 = instr->GetSrc1();
    if (src1)
    {
        // We are hoisting this instruction possibly past other uses, which might invalidate the last use info. Clear it.
        OptHoistUpdateValueType(loop, instr, src1, src1Val);

        if (src1->IsRegOpnd())
        {
            src1->AsRegOpnd()->m_isTempLastUse = false;
        }

        IR::Opnd* src2 = instr->GetSrc2();
        if (src2)
        {
            OptHoistUpdateValueType(loop, instr, src2, src2Val);

            if (src2->IsRegOpnd())
            {
                src2->AsRegOpnd()->m_isTempLastUse = false;
            }
        }
    }

    IR::RegOpnd *dst = instr->GetDst() ?
        instr->GetDst()->AsRegOpnd() : nullptr;
    if(dst)
    {
        switch (instr->m_opcode)
        {
        case Js::OpCode::CmEq_I4:
        case Js::OpCode::CmNeq_I4:
        case Js::OpCode::CmLt_I4:
        case Js::OpCode::CmLe_I4:
        case Js::OpCode::CmGt_I4:
        case Js::OpCode::CmGe_I4:
        case Js::OpCode::CmUnLt_I4:
        case Js::OpCode::CmUnLe_I4:
        case Js::OpCode::CmUnGt_I4:
        case Js::OpCode::CmUnGe_I4:
            // These operations are a special case. They generate a lossy int value, and the var sym is initialized using
            // Conv_Bool. A sym cannot be live only as a lossy int sym, the var needs to be live as well since the lossy int
            // sym cannot be used to convert to var. We don't know, however, whether the Conv_Bool will be hoisted. The idea
            // currently is that the sym is only used on the path in which it is initialized inside the loop. So, don't
            // hoist any liveness info for the dst.
            if (!this->GetIsAsmJSFunc())
            {
                lossy = true;
            }
            break;

        case Js::OpCode::FromVar:
        {
            StackSym* src1StackSym = IR::RegOpnd::TryGetStackSym(instr->GetSrc1());

            if (instr->HasBailOutInfo())
            {
                IR::BailOutKind instrBailoutKind = instr->GetBailOutKind();
#ifdef ENABLE_SIMDJS
                Assert(instrBailoutKind == IR::BailOutIntOnly ||
                    instrBailoutKind == IR::BailOutExpectingInteger ||
                    instrBailoutKind == IR::BailOutOnNotPrimitive ||
                    instrBailoutKind == IR::BailOutNumberOnly ||
                    instrBailoutKind == IR::BailOutPrimitiveButString ||
                    instrBailoutKind == IR::BailOutSimd128F4Only ||
                    instrBailoutKind == IR::BailOutSimd128I4Only);
#else
                Assert(instrBailoutKind == IR::BailOutIntOnly ||
                    instrBailoutKind == IR::BailOutExpectingInteger ||
                    instrBailoutKind == IR::BailOutOnNotPrimitive ||
                    instrBailoutKind == IR::BailOutNumberOnly ||
                    instrBailoutKind == IR::BailOutPrimitiveButString);
#endif
            }
            else if (src1StackSym && bailoutKind != IR::BailOutInvalid)
            {
                // We may be hoisting FromVar from a region where it didn't need a bailout (src1 had a definite value type) to a region
                // where it would. In such cases, the FromVar needs a bailout based on the value type of src1 in its new position.
                Assert(!src1StackSym->IsTypeSpec());
                Value* landingPadSrc1val = landingPad->globOptData.FindValue(src1StackSym);
                Assert(src1Val->GetValueNumber() == landingPadSrc1val->GetValueNumber());

                ValueInfo *src1ValueInfo = src1Val->GetValueInfo();
                ValueInfo *landingPadSrc1ValueInfo = landingPadSrc1val->GetValueInfo();
                IRType dstType = dst->GetType();

                const auto AddBailOutToFromVar = [&]()
                {
                    instr->GetSrc1()->SetValueType(landingPadSrc1val->GetValueInfo()->Type());
                    EnsureBailTarget(loop);
                    if (block->IsLandingPad())
                    {
                        instr = instr->ConvertToBailOutInstr(instr, bailoutKind, loop->bailOutInfo->bailOutOffset);
                    }
                    else
                    {
                        instr = instr->ConvertToBailOutInstr(instr, bailoutKind);
                    }
                };

                // If the value type is definite in the source position but not in the destination (the landing pad),
                // and the instruction has no bailout, we should put a bailout on the hoisted instruction.
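                // Descriptive note on the cases below: for a lossy int32 conversion the bailout
                // kind is forced to BailOutOnNotPrimitive; for a lossless int32, float64, or SIMD
                // conversion, the caller-supplied bailoutKind is used as-is.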
if (dstType == TyInt32) { if (lossy) { if ((src1ValueInfo->IsPrimitive() || block->globOptData.IsTypeSpecialized(src1StackSym)) && // didn't need a lossy type spec bailout in the source block (!landingPadSrc1ValueInfo->IsPrimitive() && !landingPad->globOptData.IsTypeSpecialized(src1StackSym))) // needs a lossy type spec bailout in the landing pad { bailoutKind = IR::BailOutOnNotPrimitive; AddBailOutToFromVar(); } } else if (src1ValueInfo->IsInt() && !landingPadSrc1ValueInfo->IsInt()) { AddBailOutToFromVar(); } } else if ((dstType == TyFloat64 && src1ValueInfo->IsNumber() && !landingPadSrc1ValueInfo->IsNumber()) || (IRType_IsSimd128(dstType) && src1ValueInfo->IsSimd128() && !landingPadSrc1ValueInfo->IsSimd128())) { AddBailOutToFromVar(); } } break; } } if (dstVal == NULL) { dstVal = this->NewGenericValue(ValueType::Uninitialized, dst); } // ToVar/FromVar don't need a new dst because it has to be invariant if their src is invariant. bool dstDoesntNeedLoad = (!isNotTypeSpecConv && instr->m_opcode != Js::OpCode::LdC_A_I4); StackSym *varSym = dst->m_sym; if (varSym->IsTypeSpec()) { varSym = varSym->GetVarEquivSym(this->func); } Value *const landingPadDstVal = loop->landingPad->globOptData.FindValue(varSym); if(landingPadDstVal ? dstVal->GetValueNumber() != landingPadDstVal->GetValueNumber() : loop->symsDefInLoop->Test(varSym->m_id)) { // We need a temp for FromVar/ToVar if dst changes in the loop. dstDoesntNeedLoad = false; } if (!dstDoesntNeedLoad && this->OptDstIsInvariant(dst) == false) { // Keep dst in place, hoist instr using a new dst. instr->UnlinkDst(); // Set type specialization info correctly for this new sym StackSym *copyVarSym; IR::RegOpnd *copyReg; if (dst->m_sym->IsTypeSpec()) { copyVarSym = StackSym::New(TyVar, instr->m_func); StackSym *copySym = copyVarSym; if (dst->m_sym->IsInt32()) { if(lossy) { // The new sym would only be live as a lossy int since we're only hoisting the store to the int version // of the sym, and cannot be converted to var. It is not legal to have a sym only live as a lossy int, // so don't update liveness info for this sym. 
} else { block->globOptData.liveInt32Syms->Set(copyVarSym->m_id); } copySym = copySym->GetInt32EquivSym(instr->m_func); } else if (dst->m_sym->IsFloat64()) { block->globOptData.liveFloat64Syms->Set(copyVarSym->m_id); copySym = copySym->GetFloat64EquivSym(instr->m_func); } #ifdef ENABLE_SIMDJS else if (dst->IsSimd128()) { // SIMD_JS if (dst->IsSimd128F4()) { block->globOptData.liveSimd128F4Syms->Set(copyVarSym->m_id); copySym = copySym->GetSimd128F4EquivSym(instr->m_func); } else { Assert(dst->IsSimd128I4()); block->globOptData.liveSimd128I4Syms->Set(copyVarSym->m_id); copySym = copySym->GetSimd128I4EquivSym(instr->m_func); } } #endif copyReg = IR::RegOpnd::New(copySym, copySym->GetType(), instr->m_func); } else { copyReg = IR::RegOpnd::New(dst->GetType(), instr->m_func); copyVarSym = copyReg->m_sym; block->globOptData.liveVarSyms->Set(copyVarSym->m_id); } copyReg->SetValueType(dst->GetValueType()); IR::Instr *copyInstr = IR::Instr::New(Js::OpCode::Ld_A, dst, copyReg, instr->m_func); copyInstr->SetByteCodeOffset(instr); instr->SetDst(copyReg); instr->InsertBefore(copyInstr); dst->m_sym->m_mayNotBeTempLastUse = true; if (instr->GetSrc1() && instr->GetSrc1()->IsImmediateOpnd()) { // Propagate IsIntConst if appropriate switch(instr->m_opcode) { case Js::OpCode::Ld_A: case Js::OpCode::Ld_I4: case Js::OpCode::LdC_A_I4: copyReg->m_sym->SetIsConst(); break; } } ValueInfo *dstValueInfo = dstVal->GetValueInfo(); if((!dstValueInfo->GetSymStore() || dstValueInfo->GetSymStore() == varSym) && !lossy) { // The destination's value may have been transferred from one of the invariant sources, in which case we should // keep the sym store intact, as that sym will likely have a better lifetime than this new copy sym. For // instance, if we're inside a conditioned block, because we don't make the copy sym live and set its value in // all preceding blocks, this sym would not be live after exiting this block, causing this value to not // participate in copy-prop after this block. this->SetSymStoreDirect(dstValueInfo, copyVarSym); } block->globOptData.InsertNewValue(dstVal, copyReg); dst = copyReg; } } // Move to landing pad block->UnlinkInstr(instr); if (loop->bailOutInfo->bailOutInstr) { loop->bailOutInfo->bailOutInstr->InsertBefore(instr); } else { landingPad->InsertAfter(instr); } GlobOpt::MarkNonByteCodeUsed(instr); if (instr->HasBailOutInfo() || instr->HasAuxBailOut()) { Assert(loop->bailOutInfo); EnsureBailTarget(loop); // Copy bailout info of loop top. instr->ReplaceBailOutInfo(loop->bailOutInfo); } if(!dst) { return; } // The bailout info's liveness for the dst sym is not updated in loop landing pads because bailout instructions previously // hoisted into the loop's landing pad may bail out before the current type of the dst sym became live (perhaps due to this // instruction). Since the landing pad will have a shared bailout point, the bailout info cannot assume that the current // type of the dst sym was live during every bailout hoisted into the landing pad. StackSym *const dstSym = dst->m_sym; StackSym *const dstVarSym = dstSym->IsTypeSpec() ? dstSym->GetVarEquivSym(nullptr) : dstSym; Assert(dstVarSym); if(isNotTypeSpecConv || !loop->landingPad->globOptData.IsLive(dstVarSym)) { // A new dst is being hoisted, or the same single-def dst that would not be live before this block. So, make it live and // update the value info with the same value info in this block. if(lossy) { // This is a lossy conversion to int. 
The instruction was given a new dst specifically for hoisting, so this new dst // will not be live as a var before this block. A sym cannot be live only as a lossy int sym, the var needs to be // live as well since the lossy int sym cannot be used to convert to var. Since the var version of the sym is not // going to be initialized, don't hoist any liveness info for the dst. The sym is only going to be used on the path // in which it is initialized inside the loop. Assert(dstSym->IsTypeSpec()); Assert(dstSym->IsInt32()); return; } // Check if the dst value was transferred from the src. If so, the value transfer needs to be replicated. bool isTransfer = dstVal == src1Val; StackSym *transferValueOfSym = nullptr; if(isTransfer) { Assert(instr->GetSrc1()); if(instr->GetSrc1()->IsRegOpnd()) { StackSym *src1Sym = instr->GetSrc1()->AsRegOpnd()->m_sym; if(src1Sym->IsTypeSpec()) { src1Sym = src1Sym->GetVarEquivSym(nullptr); Assert(src1Sym); } if(dstVal == block->globOptData.FindValue(src1Sym)) { transferValueOfSym = src1Sym; } } } // SIMD_JS if (instr->m_opcode == Js::OpCode::ExtendArg_A) { // Check if we should have CSE'ed this EA Assert(instr->GetSrc1()); // If the dstVal symstore is not the dst itself, then we copied the Value from another expression. if (dstVal->GetValueInfo()->GetSymStore() != instr->GetDst()->GetStackSym()) { isTransfer = true; transferValueOfSym = dstVal->GetValueInfo()->GetSymStore()->AsStackSym(); } } const ValueNumber dstValueNumber = dstVal->GetValueNumber(); ValueNumber dstNewValueNumber = InvalidValueNumber; for(InvariantBlockBackwardIterator it(this, block, loop->landingPad, nullptr); it.IsValid(); it.MoveNext()) { BasicBlock *const hoistBlock = it.Block(); GlobOptBlockData &hoistBlockData = hoistBlock->globOptData; Assert(!hoistBlockData.IsLive(dstVarSym)); hoistBlockData.MakeLive(dstSym, lossy); Value *newDstValue; do { if(isTransfer) { if(transferValueOfSym) { newDstValue = hoistBlockData.FindValue(transferValueOfSym); if(newDstValue && newDstValue->GetValueNumber() == dstValueNumber) { break; } } // It's a transfer, but we don't have a sym whose value number matches in the target block. Use a new value // number since we don't know if there is already a value with the current number for the target block. if(dstNewValueNumber == InvalidValueNumber) { dstNewValueNumber = NewValueNumber(); } newDstValue = CopyValue(dstVal, dstNewValueNumber); break; } newDstValue = CopyValue(dstVal, dstValueNumber); } while(false); hoistBlockData.SetValue(newDstValue, dstVarSym); } return; } #if DBG if(instr->GetSrc1()->IsRegOpnd()) // Type spec conversion may load a constant into a dst sym { StackSym *const srcSym = instr->GetSrc1()->AsRegOpnd()->m_sym; Assert(srcSym != dstSym); // Type spec conversion must be changing the type, so the syms must be different StackSym *const srcVarSym = srcSym->IsTypeSpec() ? 
srcSym->GetVarEquivSym(nullptr) : srcSym; Assert(srcVarSym == dstVarSym); // Type spec conversion must be between variants of the same var sym } #endif bool changeValueType = false, changeValueTypeToInt = false; if(dstSym->IsTypeSpec()) { if(dst->IsInt32()) { if(!lossy) { Assert( !instr->HasBailOutInfo() || instr->GetBailOutKind() == IR::BailOutIntOnly || instr->GetBailOutKind() == IR::BailOutExpectingInteger); changeValueType = changeValueTypeToInt = true; } } else if (dst->IsFloat64()) { if(instr->HasBailOutInfo() && instr->GetBailOutKind() == IR::BailOutNumberOnly) { changeValueType = true; } } #ifdef ENABLE_SIMDJS else { // SIMD_JS Assert(dst->IsSimd128()); if (instr->HasBailOutInfo() && (instr->GetBailOutKind() == IR::BailOutSimd128F4Only || instr->GetBailOutKind() == IR::BailOutSimd128I4Only)) { changeValueType = true; } } #endif } ValueInfo *previousValueInfoBeforeUpdate = nullptr, *previousValueInfoAfterUpdate = nullptr; for(InvariantBlockBackwardIterator it( this, block, loop->landingPad, dstVarSym, dstVal->GetValueNumber()); it.IsValid(); it.MoveNext()) { BasicBlock *const hoistBlock = it.Block(); GlobOptBlockData &hoistBlockData = hoistBlock->globOptData; #if DBG // TODO: There are some odd cases with field hoisting where the sym is invariant in only part of the loop and the info // does not flow through all blocks. Un-comment the verification below after PRE replaces field hoisting. //// Verify that the src sym is live as the required type, and that the conversion is valid //Assert(IsLive(dstVarSym, &hoistBlockData)); //if(instr->GetSrc1()->IsRegOpnd()) //{ // IR::RegOpnd *const src = instr->GetSrc1()->AsRegOpnd(); // StackSym *const srcSym = instr->GetSrc1()->AsRegOpnd()->m_sym; // if(srcSym->IsTypeSpec()) // { // if(src->IsInt32()) // { // Assert(hoistBlockData.liveInt32Syms->Test(dstVarSym->m_id)); // Assert(!hoistBlockData.liveLossyInt32Syms->Test(dstVarSym->m_id)); // shouldn't try to convert a lossy int32 to anything // } // else // { // Assert(src->IsFloat64()); // Assert(hoistBlockData.liveFloat64Syms->Test(dstVarSym->m_id)); // if(dstSym->IsTypeSpec() && dst->IsInt32()) // { // Assert(lossy); // shouldn't try to do a lossless conversion from float64 to int32 // } // } // } // else // { // Assert(hoistBlockData.liveVarSyms->Test(dstVarSym->m_id)); // } //} //if(dstSym->IsTypeSpec() && dst->IsInt32()) //{ // // If the sym is already specialized as required in the block to which we are attempting to hoist the conversion, // // that info should have flowed into this block // if(lossy) // { // Assert(!hoistBlockData.liveInt32Syms->Test(dstVarSym->m_id)); // } // else // { // Assert(!IsInt32TypeSpecialized(dstVarSym, hoistBlock)); // } //} #endif hoistBlockData.MakeLive(dstSym, lossy); if(!changeValueType) { continue; } Value *const hoistBlockValue = it.InvariantSymValue(); ValueInfo *const hoistBlockValueInfo = hoistBlockValue->GetValueInfo(); if(hoistBlockValueInfo == previousValueInfoBeforeUpdate) { if(hoistBlockValueInfo != previousValueInfoAfterUpdate) { HoistInvariantValueInfo(previousValueInfoAfterUpdate, hoistBlockValue, hoistBlock); } } else { previousValueInfoBeforeUpdate = hoistBlockValueInfo; ValueInfo *const newValueInfo = changeValueTypeToInt ? hoistBlockValueInfo->SpecializeToInt32(alloc) : hoistBlockValueInfo->SpecializeToFloat64(alloc); previousValueInfoAfterUpdate = newValueInfo; ChangeValueInfo(changeValueTypeToInt ? 
                nullptr : hoistBlock,
                hoistBlockValue,
                newValueInfo);
        }
    }
}

bool
GlobOpt::TryHoistInvariant(
    IR::Instr *instr,
    BasicBlock *block,
    Value *dstVal,
    Value *src1Val,
    Value *src2Val,
    bool isNotTypeSpecConv,
    const bool lossy,
    const bool forceInvariantHoisting,
    IR::BailOutKind bailoutKind)
{
    Assert(!this->IsLoopPrePass());

    if (OptIsInvariant(instr, block, block->loop, src1Val, src2Val, isNotTypeSpecConv, forceInvariantHoisting))
    {
#if DBG
        if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::InvariantsPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()))
        {
            Output::Print(_u(" **** INVARIANT *** "));
            instr->Dump();
        }
#endif
#if ENABLE_DEBUG_CONFIG_OPTIONS
        if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::InvariantsPhase))
        {
            Output::Print(_u(" **** INVARIANT *** "));
            Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
        }
#endif
        Loop *loop = block->loop;

        // Try hoisting out to the outermost loop
        while (loop->parent && OptIsInvariant(instr, block, loop->parent, src1Val, src2Val, isNotTypeSpecConv, forceInvariantHoisting))
        {
            loop = loop->parent;
        }

        // Record the byte code use here since we are going to move this instruction up
        if (isNotTypeSpecConv)
        {
            InsertNoImplicitCallUses(instr);
            this->CaptureByteCodeSymUses(instr);
            this->InsertByteCodeUses(instr, true);
        }
#if DBG
        else
        {
            PropertySym *propertySymUse = NULL;
            NoRecoverMemoryJitArenaAllocator tempAllocator(_u("BE-GlobOpt-Temp"), this->alloc->GetPageAllocator(), Js::Throw::OutOfMemory);
            BVSparse<JitArenaAllocator> * tempByteCodeUse = JitAnew(&tempAllocator, BVSparse<JitArenaAllocator>, &tempAllocator);
            GlobOpt::TrackByteCodeSymUsed(instr, tempByteCodeUse, &propertySymUse);
            Assert(tempByteCodeUse->Count() == 0 && propertySymUse == NULL);
        }
#endif
        OptHoistInvariant(instr, block, loop, dstVal, src1Val, src2Val, isNotTypeSpecConv, lossy, bailoutKind);
        return true;
    }

    return false;
}

InvariantBlockBackwardIterator::InvariantBlockBackwardIterator(
    GlobOpt *const globOpt,
    BasicBlock *const exclusiveBeginBlock,
    BasicBlock *const inclusiveEndBlock,
    StackSym *const invariantSym,
    const ValueNumber invariantSymValueNumber)
    : globOpt(globOpt),
    exclusiveEndBlock(inclusiveEndBlock->prev),
    invariantSym(invariantSym),
    invariantSymValueNumber(invariantSymValueNumber),
    block(exclusiveBeginBlock)
#if DBG
    ,
    inclusiveEndBlock(inclusiveEndBlock)
#endif
{
    Assert(exclusiveBeginBlock);
    Assert(inclusiveEndBlock);
    Assert(!inclusiveEndBlock->isDeleted);
    Assert(exclusiveBeginBlock != inclusiveEndBlock);
    Assert(!invariantSym == (invariantSymValueNumber == InvalidValueNumber));

    MoveNext();
}

bool
InvariantBlockBackwardIterator::IsValid() const
{
    return block != exclusiveEndBlock;
}

void
InvariantBlockBackwardIterator::MoveNext()
{
    Assert(IsValid());

    while(true)
    {
#if DBG
        BasicBlock *const previouslyIteratedBlock = block;
#endif
        block = block->prev;
        if(!IsValid())
        {
            Assert(previouslyIteratedBlock == inclusiveEndBlock);
            break;
        }

        if(block->isDeleted)
        {
            continue;
        }

        if(!block->globOptData.HasData())
        {
            // This block's info has already been merged with all of its successors
            continue;
        }

        if(!invariantSym)
        {
            break;
        }

        invariantSymValue = block->globOptData.FindValue(invariantSym);
        if(!invariantSymValue || invariantSymValue->GetValueNumber() != invariantSymValueNumber)
        {
            // BailOnNoProfile and throw blocks are not moved outside loops. A sym table cleanup on these paths may delete the
            // values. Field hoisting also has some odd cases where the hoisted stack sym is invariant in only part of the loop.
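            // (Such blocks are simply skipped below; the iterator only yields blocks in which
            // the sym still has the expected value number.)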
continue; } break; } } BasicBlock * InvariantBlockBackwardIterator::Block() const { Assert(IsValid()); return block; } Value * InvariantBlockBackwardIterator::InvariantSymValue() const { Assert(IsValid()); Assert(invariantSym); return invariantSymValue; } void GlobOpt::HoistInvariantValueInfo( ValueInfo *const invariantValueInfoToHoist, Value *const valueToUpdate, BasicBlock *const targetBlock) { Assert(invariantValueInfoToHoist); Assert(valueToUpdate); Assert(targetBlock); // Why are we trying to change the value type of the type sym value? Asserting here to make sure we don't deep copy the type sym's value info. Assert(!invariantValueInfoToHoist->IsJsType()); Sym *const symStore = valueToUpdate->GetValueInfo()->GetSymStore(); ValueInfo *newValueInfo; if(invariantValueInfoToHoist->GetSymStore() == symStore) { newValueInfo = invariantValueInfoToHoist; } else { newValueInfo = invariantValueInfoToHoist->Copy(alloc); this->SetSymStoreDirect(newValueInfo, symStore); } ChangeValueInfo(targetBlock, valueToUpdate, newValueInfo, true); } // static bool GlobOpt::DoInlineArgsOpt(Func const * func) { Func const * topFunc = func->GetTopFunc(); Assert(topFunc != func); bool doInlineArgsOpt = !PHASE_OFF(Js::InlineArgsOptPhase, topFunc) && !func->GetHasCalls() && !func->GetHasUnoptimizedArgumentsAccess() && func->m_canDoInlineArgsOpt; return doInlineArgsOpt; } bool GlobOpt::IsSwitchOptEnabled(Func const * func) { Assert(func->IsTopFunc()); return !PHASE_OFF(Js::SwitchOptPhase, func) && !func->IsSwitchOptDisabled() && func->DoGlobOpt(); } bool GlobOpt::IsSwitchOptEnabledForIntTypeSpec(Func const * func) { return IsSwitchOptEnabled(func) && !IsTypeSpecPhaseOff(func) && DoAggressiveIntTypeSpec(func); } bool GlobOpt::DoConstFold() const { return !PHASE_OFF(Js::ConstFoldPhase, func); } bool GlobOpt::IsTypeSpecPhaseOff(Func const *func) { return PHASE_OFF(Js::TypeSpecPhase, func) || func->IsJitInDebugMode() || !func->DoGlobOptsForGeneratorFunc(); } bool GlobOpt::DoTypeSpec() const { return doTypeSpec; } bool GlobOpt::DoAggressiveIntTypeSpec(Func const * func) { return !PHASE_OFF(Js::AggressiveIntTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func) && !func->IsAggressiveIntTypeSpecDisabled(); } bool GlobOpt::DoAggressiveIntTypeSpec() const { return doAggressiveIntTypeSpec; } bool GlobOpt::DoAggressiveMulIntTypeSpec() const { return doAggressiveMulIntTypeSpec; } bool GlobOpt::DoDivIntTypeSpec() const { return doDivIntTypeSpec; } // static bool GlobOpt::DoLossyIntTypeSpec(Func const * func) { return !PHASE_OFF(Js::LossyIntTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsLossyIntTypeSpecDisabled()); } bool GlobOpt::DoLossyIntTypeSpec() const { return doLossyIntTypeSpec; } // static bool GlobOpt::DoFloatTypeSpec(Func const * func) { return !PHASE_OFF(Js::FloatTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsFloatTypeSpecDisabled()) && AutoSystemInfo::Data.SSE2Available(); } bool GlobOpt::DoFloatTypeSpec() const { return doFloatTypeSpec; } bool GlobOpt::DoStringTypeSpec(Func const * func) { return !PHASE_OFF(Js::StringTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func); } // static bool GlobOpt::DoTypedArrayTypeSpec(Func const * func) { return !PHASE_OFF(Js::TypedArrayTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsTypedArrayTypeSpecDisabled(func->IsLoopBody())) #if defined(_M_IX86) && AutoSystemInfo::Data.SSE2Available() 
#endif ; } // static bool GlobOpt::DoNativeArrayTypeSpec(Func const * func) { return !PHASE_OFF(Js::NativeArrayPhase, func) && !IsTypeSpecPhaseOff(func) #if defined(_M_IX86) && AutoSystemInfo::Data.SSE2Available() #endif ; } bool GlobOpt::DoArrayCheckHoist(Func const * const func) { Assert(func->IsTopFunc()); return !PHASE_OFF(Js::ArrayCheckHoistPhase, func) && !func->IsArrayCheckHoistDisabled() && !func->IsJitInDebugMode() && // StElemI fast path is not allowed when in debug mode, so it cannot have bailout func->DoGlobOptsForGeneratorFunc(); } bool GlobOpt::DoArrayCheckHoist() const { return doArrayCheckHoist; } bool GlobOpt::DoArrayCheckHoist(const ValueType baseValueType, Loop* loop, IR::Instr const * const instr) const { if(!DoArrayCheckHoist() || (instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func))) { return false; } if(!baseValueType.IsLikelyArrayOrObjectWithArray() || (loop ? ImplicitCallFlagsAllowOpts(loop) : ImplicitCallFlagsAllowOpts(func))) { return true; } // The function or loop does not allow disabling implicit calls, which is required to eliminate redundant JS array checks #if DBG_DUMP if((((loop ? loop->GetImplicitCallFlags() : func->m_fg->implicitCallFlags) & ~Js::ImplicitCall_External) == 0) && Js::Configuration::Global.flags.Trace.IsEnabled(Js::HostOptPhase)) { Output::Print(_u("DoArrayCheckHoist disabled for JS arrays because of external: ")); func->DumpFullFunctionName(); Output::Print(_u("\n")); Output::Flush(); } #endif return false; } bool GlobOpt::DoArrayMissingValueCheckHoist(Func const * const func) { return DoArrayCheckHoist(func) && !PHASE_OFF(Js::ArrayMissingValueCheckHoistPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsArrayMissingValueCheckHoistDisabled(func->IsLoopBody())); } bool GlobOpt::DoArrayMissingValueCheckHoist() const { return doArrayMissingValueCheckHoist; } bool GlobOpt::DoArraySegmentHoist(const ValueType baseValueType, Func const * const func) { Assert(baseValueType.IsLikelyAnyOptimizedArray()); if(!DoArrayCheckHoist(func) || PHASE_OFF(Js::ArraySegmentHoistPhase, func)) { return false; } if(!baseValueType.IsLikelyArrayOrObjectWithArray()) { return true; } return !PHASE_OFF(Js::JsArraySegmentHoistPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsJsArraySegmentHoistDisabled(func->IsLoopBody())); } bool GlobOpt::DoArraySegmentHoist(const ValueType baseValueType) const { Assert(baseValueType.IsLikelyAnyOptimizedArray()); return baseValueType.IsLikelyArrayOrObjectWithArray() ? doJsArraySegmentHoist : doArraySegmentHoist; } bool GlobOpt::DoTypedArraySegmentLengthHoist(Loop *const loop) const { if(!DoArraySegmentHoist(ValueType::GetObject(ObjectType::Int32Array))) { return false; } if(loop ? ImplicitCallFlagsAllowOpts(loop) : ImplicitCallFlagsAllowOpts(func)) { return true; } // The function or loop does not allow disabling implicit calls, which is required to eliminate redundant typed array // segment length loads. #if DBG_DUMP if((((loop ? 
loop->GetImplicitCallFlags() : func->m_fg->implicitCallFlags) & ~Js::ImplicitCall_External) == 0) && Js::Configuration::Global.flags.Trace.IsEnabled(Js::HostOptPhase)) { Output::Print(_u("DoArraySegmentLengthHoist disabled for typed arrays because of external: ")); func->DumpFullFunctionName(); Output::Print(_u("\n")); Output::Flush(); } #endif return false; } bool GlobOpt::DoArrayLengthHoist(Func const * const func) { return DoArrayCheckHoist(func) && !PHASE_OFF(Js::Phase::ArrayLengthHoistPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsArrayLengthHoistDisabled(func->IsLoopBody())); } bool GlobOpt::DoArrayLengthHoist() const { return doArrayLengthHoist; } bool GlobOpt::DoEliminateArrayAccessHelperCall(Func *const func) { return DoArrayCheckHoist(func); } bool GlobOpt::DoEliminateArrayAccessHelperCall() const { return doEliminateArrayAccessHelperCall; } bool GlobOpt::DoLdLenIntSpec(IR::Instr * const instr, const ValueType baseValueType) { Assert(!instr || instr->m_opcode == Js::OpCode::LdLen_A); Assert(!instr || instr->GetDst()); Assert(!instr || instr->GetSrc1()); if(PHASE_OFF(Js::LdLenIntSpecPhase, func) || IsTypeSpecPhaseOff(func) || (func->HasProfileInfo() && func->GetReadOnlyProfileInfo()->IsLdLenIntSpecDisabled()) || (instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func))) { return false; } if(instr && instr->IsProfiledInstr() && ( !instr->AsProfiledInstr()->u.ldElemInfo->GetElementType().IsLikelyInt() || instr->GetDst()->AsRegOpnd()->m_sym->m_isNotInt )) { return false; } Assert(!instr || baseValueType == instr->GetSrc1()->GetValueType()); return baseValueType.HasBeenString() || (baseValueType.IsLikelyAnyOptimizedArray() && baseValueType.GetObjectType() != ObjectType::ObjectWithArray); } bool GlobOpt::DoPathDependentValues() const { return !PHASE_OFF(Js::Phase::PathDependentValuesPhase, func); } bool GlobOpt::DoTrackRelativeIntBounds() const { return doTrackRelativeIntBounds; } bool GlobOpt::DoBoundCheckElimination() const { return doBoundCheckElimination; } bool GlobOpt::DoBoundCheckHoist() const { return doBoundCheckHoist; } bool GlobOpt::DoLoopCountBasedBoundCheckHoist() const { return doLoopCountBasedBoundCheckHoist; } bool GlobOpt::DoPowIntIntTypeSpec() const { return doPowIntIntTypeSpec; } bool GlobOpt::DoTagChecks() const { return doTagChecks; } bool GlobOpt::TrackArgumentsObject() { if (PHASE_OFF(Js::StackArgOptPhase, this->func)) { this->CannotAllocateArgumentsObjectOnStack(); return false; } return func->GetHasStackArgs(); } void GlobOpt::CannotAllocateArgumentsObjectOnStack() { func->SetHasStackArgs(false); #ifdef ENABLE_DEBUG_CONFIG_OPTIONS if (PHASE_TESTTRACE(Js::StackArgOptPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Output::Print(_u("Stack args disabled for function %s(%s)\n"), func->GetJITFunctionBody()->GetDisplayName(), func->GetDebugNumberSet(debugStringBuffer)); Output::Flush(); } #endif } IR::Instr * GlobOpt::PreOptPeep(IR::Instr *instr) { if (OpCodeAttr::HasDeadFallThrough(instr->m_opcode)) { switch (instr->m_opcode) { case Js::OpCode::BailOnNoProfile: { // Handle BailOnNoProfile if (instr->HasBailOutInfo()) { if (!this->prePassLoop) { FillBailOutInfo(this->currentBlock, instr->GetBailOutInfo()); } // Already processed. 
return instr; } // Convert to bailout instr IR::Instr *nextBytecodeOffsetInstr = instr->GetNextRealInstrOrLabel(); while(nextBytecodeOffsetInstr->GetByteCodeOffset() == Js::Constants::NoByteCodeOffset) { nextBytecodeOffsetInstr = nextBytecodeOffsetInstr->GetNextRealInstrOrLabel(); Assert(!nextBytecodeOffsetInstr->IsLabelInstr()); } instr = instr->ConvertToBailOutInstr(nextBytecodeOffsetInstr, IR::BailOutOnNoProfile); instr->ClearByteCodeOffset(); instr->SetByteCodeOffset(nextBytecodeOffsetInstr); if (!this->currentBlock->loop) { FillBailOutInfo(this->currentBlock, instr->GetBailOutInfo()); } else { Assert(this->prePassLoop); } break; } case Js::OpCode::BailOnException: { Assert( ( this->func->HasTry() && this->func->DoOptimizeTry() && instr->m_prev->m_opcode == Js::OpCode::Catch && instr->m_prev->m_prev->IsLabelInstr() && instr->m_prev->m_prev->AsLabelInstr()->GetRegion()->GetType() == RegionType::RegionTypeCatch ) || ( this->func->HasFinally() && this->func->DoOptimizeTry() && instr->m_prev->AsLabelInstr() && instr->m_prev->AsLabelInstr()->GetRegion()->GetType() == RegionType::RegionTypeFinally ) ); break; } case Js::OpCode::BailOnEarlyExit: { Assert(this->func->HasFinally() && this->func->DoOptimizeTry()); break; } default: { if(this->currentBlock->loop && !this->IsLoopPrePass()) { return instr; } break; } } RemoveCodeAfterNoFallthroughInstr(instr); } return instr; } void GlobOpt::RemoveCodeAfterNoFallthroughInstr(IR::Instr *instr) { if (instr != this->currentBlock->GetLastInstr()) { // Remove dead code after bailout IR::Instr *instrDead = instr->m_next; IR::Instr *instrNext; for (; instrDead != this->currentBlock->GetLastInstr(); instrDead = instrNext) { instrNext = instrDead->m_next; if (instrNext->m_opcode == Js::OpCode::FunctionExit) { break; } this->func->m_fg->RemoveInstr(instrDead, this); } IR::Instr *instrNextBlock = instrDead->m_next; this->func->m_fg->RemoveInstr(instrDead, this); this->currentBlock->SetLastInstr(instrNextBlock->m_prev); } // Cleanup dead successors FOREACH_SUCCESSOR_BLOCK_EDITING(deadBlock, this->currentBlock, iter) { this->currentBlock->RemoveDeadSucc(deadBlock, this->func->m_fg); if (this->currentBlock->GetDataUseCount() > 0) { this->currentBlock->DecrementDataUseCount(); } } NEXT_SUCCESSOR_BLOCK_EDITING; } void GlobOpt::ProcessTryHandler(IR::Instr* instr) { Assert(instr->m_next->IsLabelInstr() && instr->m_next->AsLabelInstr()->GetRegion()->GetType() == RegionType::RegionTypeTry); Region* tryRegion = instr->m_next->AsLabelInstr()->GetRegion(); BVSparse<JitArenaAllocator> * writeThroughSymbolsSet = tryRegion->writeThroughSymbolsSet; ToVar(writeThroughSymbolsSet, this->currentBlock); } bool GlobOpt::ProcessExceptionHandlingEdges(IR::Instr* instr) { Assert(instr->m_opcode == Js::OpCode::BrOnException || instr->m_opcode == Js::OpCode::BrOnNoException); if (instr->m_opcode == Js::OpCode::BrOnException) { if (instr->AsBranchInstr()->GetTarget()->GetRegion()->GetType() == RegionType::RegionTypeCatch) { // BrOnException was added to model flow from try region to the catch region to assist // the backward pass in propagating bytecode upward exposed info from the catch block // to the try, and to handle break blocks. Removing it here as it has served its purpose // and keeping it around might also have unintended effects while merging block data for // the catch block's predecessors. // Note that the Deadstore pass will still be able to propagate bytecode upward exposed info // because it doesn't skip dead blocks for that. 
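            // Illustrative sketch (hypothetical JS):
            //     try { x = f(); }       // BrOnException models the edge from the try...
            //     catch (e) { use(x); }  // ...into the catch, so the backward pass sees x as
            //                            // upward exposed inside the try.
            // Once that info has been consumed, the edge and the branch can be removed.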
            this->RemoveFlowEdgeToCatchBlock(instr);
            this->currentBlock->RemoveInstr(instr);
            return true;
        }
        else
        {
            // We add a BrOnException from a finally region to the early exit; remove it here since it has served its purpose
            return this->RemoveFlowEdgeToFinallyOnExceptionBlock(instr);
        }
    }
    else if (instr->m_opcode == Js::OpCode::BrOnNoException)
    {
        if (instr->AsBranchInstr()->GetTarget()->GetRegion()->GetType() == RegionType::RegionTypeCatch)
        {
            this->RemoveFlowEdgeToCatchBlock(instr);
        }
        else
        {
            this->RemoveFlowEdgeToFinallyOnExceptionBlock(instr);
        }
    }
    return false;
}

void
GlobOpt::InsertToVarAtDefInTryRegion(IR::Instr * instr, IR::Opnd * dstOpnd)
{
    if ((this->currentRegion->GetType() == RegionTypeTry || this->currentRegion->GetType() == RegionTypeFinally) &&
        dstOpnd->IsRegOpnd() && dstOpnd->AsRegOpnd()->m_sym->HasByteCodeRegSlot())
    {
        StackSym * sym = dstOpnd->AsRegOpnd()->m_sym;
        if (sym->IsVar())
        {
            return;
        }

        StackSym * varSym = sym->GetVarEquivSym(nullptr);
        if ((this->currentRegion->GetType() == RegionTypeTry && this->currentRegion->writeThroughSymbolsSet->Test(varSym->m_id)) ||
            ((this->currentRegion->GetType() == RegionTypeFinally && this->currentRegion->GetMatchingTryRegion()->writeThroughSymbolsSet->Test(varSym->m_id))))
        {
            IR::RegOpnd * regOpnd = IR::RegOpnd::New(varSym, IRType::TyVar, instr->m_func);
            this->ToVar(instr->m_next, regOpnd, this->currentBlock, NULL, false);
        }
    }
}

void
GlobOpt::RemoveFlowEdgeToCatchBlock(IR::Instr * instr)
{
    Assert(instr->IsBranchInstr());

    BasicBlock * catchBlock = nullptr;
    BasicBlock * predBlock = nullptr;
    if (instr->m_opcode == Js::OpCode::BrOnException)
    {
        catchBlock = instr->AsBranchInstr()->GetTarget()->GetBasicBlock();
        predBlock = this->currentBlock;
    }
    else
    {
        Assert(instr->m_opcode == Js::OpCode::BrOnNoException);
        IR::Instr * nextInstr = instr->GetNextRealInstrOrLabel();
        Assert(nextInstr->IsLabelInstr());
        IR::LabelInstr * nextLabel = nextInstr->AsLabelInstr();

        if (nextLabel->GetRegion() && nextLabel->GetRegion()->GetType() == RegionTypeCatch)
        {
            catchBlock = nextLabel->GetBasicBlock();
            predBlock = this->currentBlock;
        }
        else
        {
            Assert(nextLabel->m_next->IsBranchInstr() && nextLabel->m_next->AsBranchInstr()->IsUnconditional());
            BasicBlock * nextBlock = nextLabel->GetBasicBlock();
            IR::BranchInstr * branchToCatchBlock = nextLabel->m_next->AsBranchInstr();
            IR::LabelInstr * catchBlockLabel = branchToCatchBlock->GetTarget();
            Assert(catchBlockLabel->GetRegion()->GetType() == RegionTypeCatch);
            catchBlock = catchBlockLabel->GetBasicBlock();
            predBlock = nextBlock;
        }
    }

    Assert(catchBlock);
    Assert(predBlock);
    if (this->func->m_fg->FindEdge(predBlock, catchBlock))
    {
        predBlock->RemoveDeadSucc(catchBlock, this->func->m_fg);
        if (predBlock == this->currentBlock)
        {
            predBlock->DecrementDataUseCount();
        }
    }
}

bool
GlobOpt::RemoveFlowEdgeToFinallyOnExceptionBlock(IR::Instr * instr)
{
    Assert(instr->IsBranchInstr());

    if (instr->m_opcode == Js::OpCode::BrOnNoException && instr->AsBranchInstr()->m_brFinallyToEarlyExit)
    {
        // We add an edge from the finally to the early exit block; we should not remove this edge.
        // If a loop has a continue and we add an edge in the finally to the continue, break block removal can move all
        // continues inside the loop to branch to the continue added within the finally. If we get rid of this edge,
        // the loop may lose all of its backedges.
        // Ideally, doing tail duplication before globopt would enable us to remove these edges, but since we do it
        // after globopt, keep it this way for now.
        // See test1() in core/test/tryfinallytests.js
        return false;
    }

    BasicBlock * finallyBlock = nullptr;
    BasicBlock * predBlock = nullptr;
    if (instr->m_opcode == Js::OpCode::BrOnException)
    {
        finallyBlock = instr->AsBranchInstr()->GetTarget()->GetBasicBlock();
        predBlock = this->currentBlock;
    }
    else
    {
        Assert(instr->m_opcode == Js::OpCode::BrOnNoException);
        IR::Instr * nextInstr = instr->GetNextRealInstrOrLabel();
        Assert(nextInstr->IsLabelInstr());
        IR::LabelInstr * nextLabel = nextInstr->AsLabelInstr();

        if (nextLabel->GetRegion() && nextLabel->GetRegion()->GetType() == RegionTypeFinally)
        {
            finallyBlock = nextLabel->GetBasicBlock();
            predBlock = this->currentBlock;
        }
        else
        {
            if (!(nextLabel->m_next->IsBranchInstr() && nextLabel->m_next->AsBranchInstr()->IsUnconditional()))
            {
                return false;
            }
            BasicBlock * nextBlock = nextLabel->GetBasicBlock();
            IR::BranchInstr * branchTofinallyBlockOrEarlyExit = nextLabel->m_next->AsBranchInstr();
            IR::LabelInstr * finallyBlockLabelOrEarlyExitLabel = branchTofinallyBlockOrEarlyExit->GetTarget();
            finallyBlock = finallyBlockLabelOrEarlyExitLabel->GetBasicBlock();
            predBlock = nextBlock;
        }
    }

    Assert(finallyBlock && predBlock);

    if (this->func->m_fg->FindEdge(predBlock, finallyBlock))
    {
        predBlock->RemoveDeadSucc(finallyBlock, this->func->m_fg);

        if (instr->m_opcode == Js::OpCode::BrOnException)
        {
            this->currentBlock->RemoveInstr(instr);
        }

        if (finallyBlock->GetFirstInstr()->AsLabelInstr()->IsUnreferenced())
        {
            // Traverse the preds of finallyBlock; if any of the preds have a different region, set m_hasNonBranchRef to true.
            // If not, this label can get eliminated and an incorrect region from the predecessor can get propagated in lowered code.
            // See test3() in tryfinallytests.js
            Region * finallyRegion = finallyBlock->GetFirstInstr()->AsLabelInstr()->GetRegion();
            FOREACH_PREDECESSOR_BLOCK(pred, finallyBlock)
            {
                Region * predRegion = pred->GetFirstInstr()->AsLabelInstr()->GetRegion();
                if (predRegion != finallyRegion)
                {
                    finallyBlock->GetFirstInstr()->AsLabelInstr()->m_hasNonBranchRef = true;
                }
            } NEXT_PREDECESSOR_BLOCK;
        }

        if (predBlock == this->currentBlock)
        {
            predBlock->DecrementDataUseCount();
        }
    }

    return true;
}

IR::Instr *
GlobOpt::OptPeep(IR::Instr *instr, Value *src1Val, Value *src2Val)
{
    IR::Opnd *dst, *src1, *src2;

    if (this->IsLoopPrePass())
    {
        return instr;
    }

    switch (instr->m_opcode)
    {
    case Js::OpCode::DeadBrEqual:
    case Js::OpCode::DeadBrRelational:
    case Js::OpCode::DeadBrSrEqual:
        src1 = instr->GetSrc1();
        src2 = instr->GetSrc2();

        // These branches were turned into dead branches because they were unnecessary (branch to next, ...).
        // The DeadBr instructions are necessary in case the evaluation of the sources has side effects.
// If we know for sure the srcs are primitive or have been type specialized, we don't need these instructions if (((src1Val && src1Val->GetValueInfo()->IsPrimitive()) || (src1->IsRegOpnd() && CurrentBlockData()->IsTypeSpecialized(src1->AsRegOpnd()->m_sym))) && ((src2Val && src2Val->GetValueInfo()->IsPrimitive()) || (src2->IsRegOpnd() && CurrentBlockData()->IsTypeSpecialized(src2->AsRegOpnd()->m_sym)))) { this->CaptureByteCodeSymUses(instr); instr->m_opcode = Js::OpCode::Nop; } break; case Js::OpCode::DeadBrOnHasProperty: src1 = instr->GetSrc1(); if (((src1Val && src1Val->GetValueInfo()->IsPrimitive()) || (src1->IsRegOpnd() && CurrentBlockData()->IsTypeSpecialized(src1->AsRegOpnd()->m_sym)))) { this->CaptureByteCodeSymUses(instr); instr->m_opcode = Js::OpCode::Nop; } break; case Js::OpCode::Ld_A: case Js::OpCode::Ld_I4: src1 = instr->GetSrc1(); dst = instr->GetDst(); if (dst->IsRegOpnd() && dst->IsEqual(src1)) { dst = instr->UnlinkDst(); if (!dst->GetIsJITOptimizedReg()) { IR::ByteCodeUsesInstr *bytecodeUse = IR::ByteCodeUsesInstr::New(instr); bytecodeUse->SetDst(dst); instr->InsertAfter(bytecodeUse); } instr->FreeSrc1(); instr->m_opcode = Js::OpCode::Nop; } break; } return instr; } void GlobOpt::OptimizeIndirUses(IR::IndirOpnd *indirOpnd, IR::Instr * *pInstr, Value **indirIndexValRef) { IR::Instr * &instr = *pInstr; Assert(!indirIndexValRef || !*indirIndexValRef); // Update value types and copy-prop the base OptSrc(indirOpnd->GetBaseOpnd(), &instr, nullptr, indirOpnd); IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd(); if (!indexOpnd) { return; } // Update value types and copy-prop the index Value *indexVal = OptSrc(indexOpnd, &instr, nullptr, indirOpnd); if(indirIndexValRef) { *indirIndexValRef = indexVal; } } bool GlobOpt::IsPREInstrCandidateLoad(Js::OpCode opcode) { switch (opcode) { case Js::OpCode::LdFld: case Js::OpCode::LdFldForTypeOf: case Js::OpCode::LdRootFld: case Js::OpCode::LdRootFldForTypeOf: case Js::OpCode::LdMethodFld: case Js::OpCode::LdRootMethodFld: case Js::OpCode::LdSlot: case Js::OpCode::LdSlotArr: return true; } return false; } bool GlobOpt::IsPREInstrCandidateStore(Js::OpCode opcode) { switch (opcode) { case Js::OpCode::StFld: case Js::OpCode::StRootFld: case Js::OpCode::StSlot: return true; } return false; } bool GlobOpt::ImplicitCallFlagsAllowOpts(Loop *loop) { return loop->GetImplicitCallFlags() != Js::ImplicitCall_HasNoInfo && (((loop->GetImplicitCallFlags() & ~Js::ImplicitCall_Accessor) | Js::ImplicitCall_None) == Js::ImplicitCall_None); } bool GlobOpt::ImplicitCallFlagsAllowOpts(Func const *func) { return func->m_fg->implicitCallFlags != Js::ImplicitCall_HasNoInfo && (((func->m_fg->implicitCallFlags & ~Js::ImplicitCall_Accessor) | Js::ImplicitCall_None) == Js::ImplicitCall_None); } #if DBG_DUMP void GlobOpt::Dump() const { this->DumpSymToValueMap(); } void GlobOpt::DumpSymToValueMap(BasicBlock const * block) const { Output::Print(_u("\n*** SymToValueMap ***\n")); block->globOptData.DumpSymToValueMap(); } void GlobOpt::DumpSymToValueMap() const { DumpSymToValueMap(this->currentBlock); } void GlobOpt::DumpSymVal(int index) { SymID id = index; extern Func *CurrentFunc; Sym *sym = this->func->m_symTable->Find(id); AssertMsg(sym, "Sym not found!!!"); Output::Print(_u("Sym: ")); sym->Dump(); Output::Print(_u("\t\tValueNumber: ")); Value * pValue = CurrentBlockData()->FindValueFromMapDirect(sym->m_id); pValue->Dump(); Output::Print(_u("\n")); } void GlobOpt::Trace(BasicBlock * block, bool before) const { bool globOptTrace = 
Js::Configuration::Global.flags.Trace.IsEnabled(Js::GlobOptPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool typeSpecTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::TypeSpecPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool floatTypeSpecTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::FloatTypeSpecPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool fieldHoistTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldHoistPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool fieldCopyPropTrace = fieldHoistTrace || Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldCopyPropPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool objTypeSpecTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::ObjTypeSpecPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool valueTableTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::ValueTablePhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool fieldPRETrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldPREPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool anyTrace = globOptTrace || typeSpecTrace || floatTypeSpecTrace || fieldCopyPropTrace || fieldHoistTrace || objTypeSpecTrace || valueTableTrace || fieldPRETrace; if (!anyTrace) { return; } if (fieldPRETrace && this->IsLoopPrePass()) { if (block->isLoopHeader && before) { Output::Print(_u("==== Loop Prepass block header #%-3d, Visiting Loop block head #%-3d\n"), this->prePassLoop->GetHeadBlock()->GetBlockNum(), block->GetBlockNum()); } } if (!typeSpecTrace && !floatTypeSpecTrace && !valueTableTrace && !Js::Configuration::Global.flags.Verbose) { return; } if (before) { Output::Print(_u("========================================================================\n")); Output::Print(_u("Begin OptBlock: Block #%-3d"), block->GetBlockNum()); if (block->loop) { Output::Print(_u(" Loop block header:%-3d currentLoop block head:%-3d %s"), block->loop->GetHeadBlock()->GetBlockNum(), this->prePassLoop ? this->prePassLoop->GetHeadBlock()->GetBlockNum() : 0, this->IsLoopPrePass() ? 
                _u("PrePass") : _u(""));
        }
        Output::Print(_u("\n"));
    }
    else
    {
        Output::Print(_u("-----------------------------------------------------------------------\n"));
        Output::Print(_u("After OptBlock: Block #%-3d\n"), block->GetBlockNum());
    }
    if ((typeSpecTrace || floatTypeSpecTrace) && !block->globOptData.liveVarSyms->IsEmpty())
    {
        Output::Print(_u(" Live var syms: "));
        block->globOptData.liveVarSyms->Dump();
    }
    if (typeSpecTrace && !block->globOptData.liveInt32Syms->IsEmpty())
    {
        Assert(this->tempBv->IsEmpty());
        this->tempBv->Minus(block->globOptData.liveInt32Syms, block->globOptData.liveLossyInt32Syms);
        if(!this->tempBv->IsEmpty())
        {
            Output::Print(_u(" Int32 type specialized (lossless) syms: "));
            this->tempBv->Dump();
        }
        this->tempBv->ClearAll();
        if(!block->globOptData.liveLossyInt32Syms->IsEmpty())
        {
            Output::Print(_u(" Int32 converted (lossy) syms: "));
            block->globOptData.liveLossyInt32Syms->Dump();
        }
    }
    if (floatTypeSpecTrace && !block->globOptData.liveFloat64Syms->IsEmpty())
    {
        Output::Print(_u(" Float64 type specialized syms: "));
        block->globOptData.liveFloat64Syms->Dump();
    }
    if ((fieldCopyPropTrace || objTypeSpecTrace) && this->DoFieldCopyProp(block->loop) && !block->globOptData.liveFields->IsEmpty())
    {
        Output::Print(_u(" Live field syms: "));
        block->globOptData.liveFields->Dump();
    }
    if ((fieldHoistTrace || objTypeSpecTrace) && this->DoFieldHoisting(block->loop) && HasHoistableFields(block))
    {
        Output::Print(_u(" Hoistable field sym: "));
        block->globOptData.hoistableFields->Dump();
    }
    if (objTypeSpecTrace || valueTableTrace)
    {
        Output::Print(_u(" Value table:\n"));
        block->globOptData.DumpSymToValueMap();
    }

    if (before)
    {
        Output::Print(_u("-----------------------------------------------------------------------\n"));
    }

    Output::Flush();
}

void
GlobOpt::TraceSettings() const
{
    Output::Print(_u("GlobOpt Settings:\r\n"));
    Output::Print(_u(" FloatTypeSpec: %s\r\n"), this->DoFloatTypeSpec() ? _u("enabled") : _u("disabled"));
    Output::Print(_u(" AggressiveIntTypeSpec: %s\r\n"), this->DoAggressiveIntTypeSpec() ? _u("enabled") : _u("disabled"));
    Output::Print(_u(" LossyIntTypeSpec: %s\r\n"), this->DoLossyIntTypeSpec() ? _u("enabled") : _u("disabled"));
    Output::Print(_u(" ArrayCheckHoist: %s\r\n"), this->func->IsArrayCheckHoistDisabled() ?
        _u("disabled") : _u("enabled"));
    Output::Print(_u(" ImplicitCallFlags: %s\r\n"), Js::DynamicProfileInfo::GetImplicitCallFlagsString(this->func->m_fg->implicitCallFlags));
    for (Loop * loop = this->func->m_fg->loopList; loop != NULL; loop = loop->next)
    {
        Output::Print(_u(" loop: %d, ImplicitCallFlags: %s\r\n"), loop->GetLoopNumber(),
            Js::DynamicProfileInfo::GetImplicitCallFlagsString(loop->GetImplicitCallFlags()));
    }
    Output::Flush();
}
#endif // DBG_DUMP

IR::Instr *
GlobOpt::TrackMarkTempObject(IR::Instr * instrStart, IR::Instr * instrLast)
{
    if (!this->func->GetHasMarkTempObjects())
    {
        return instrLast;
    }
    IR::Instr * instr = instrStart;
    IR::Instr * instrEnd = instrLast->m_next;
    IR::Instr * lastInstr = nullptr;
    GlobOptBlockData& globOptData = *CurrentBlockData();
    do
    {
        bool mayNeedBailOnImplicitCallsPreOp = !this->IsLoopPrePass()
            && instr->HasAnyImplicitCalls()
            && globOptData.maybeTempObjectSyms != nullptr;
        if (mayNeedBailOnImplicitCallsPreOp)
        {
            IR::Opnd * src1 = instr->GetSrc1();
            if (src1)
            {
                instr = GenerateBailOutMarkTempObjectIfNeeded(instr, src1, false);
                IR::Opnd * src2 = instr->GetSrc2();
                if (src2)
                {
                    instr = GenerateBailOutMarkTempObjectIfNeeded(instr, src2, false);
                }
            }
        }

        IR::Opnd *dst = instr->GetDst();
        if (dst)
        {
            if (dst->IsRegOpnd())
            {
                TrackTempObjectSyms(instr, dst->AsRegOpnd());
            }
            else if (mayNeedBailOnImplicitCallsPreOp)
            {
                instr = GenerateBailOutMarkTempObjectIfNeeded(instr, dst, true);
            }
        }

        lastInstr = instr;
        instr = instr->m_next;
    }
    while (instr != instrEnd);
    return lastInstr;
}

void
GlobOpt::TrackTempObjectSyms(IR::Instr * instr, IR::RegOpnd * opnd)
{
    // If it is marked as dstIsTempObject, we should have mark temped it, or type specialized it to Ld_I4.
    Assert(!instr->dstIsTempObject || ObjectTempVerify::CanMarkTemp(instr, nullptr));
    GlobOptBlockData& globOptData = *CurrentBlockData();
    bool canStoreTemp = false;
    bool maybeTemp = false;
    if (OpCodeAttr::TempObjectProducing(instr->m_opcode))
    {
        maybeTemp = instr->dstIsTempObject;

        // We have to make sure that lower will always generate code to do stack allocation
        // before we can store any other stack instance onto it. Otherwise, we would not
        // walk the object to box the stack property.
        canStoreTemp = instr->dstIsTempObject && ObjectTemp::CanStoreTemp(instr);
    }
    else if (OpCodeAttr::TempObjectTransfer(instr->m_opcode))
    {
        // Need to check both sources; GetNewScObject has two srcs for transfer.
        // No need to get the var equiv sym here, as a transfer of a type spec value does not transfer a mark temp object.
        maybeTemp = globOptData.maybeTempObjectSyms &&
            ((instr->GetSrc1()->IsRegOpnd() && globOptData.maybeTempObjectSyms->Test(instr->GetSrc1()->AsRegOpnd()->m_sym->m_id))
            || (instr->GetSrc2() && instr->GetSrc2()->IsRegOpnd() && globOptData.maybeTempObjectSyms->Test(instr->GetSrc2()->AsRegOpnd()->m_sym->m_id)));

        canStoreTemp = globOptData.canStoreTempObjectSyms &&
            ((instr->GetSrc1()->IsRegOpnd() && globOptData.canStoreTempObjectSyms->Test(instr->GetSrc1()->AsRegOpnd()->m_sym->m_id))
            && (!instr->GetSrc2() || (instr->GetSrc2()->IsRegOpnd() && globOptData.canStoreTempObjectSyms->Test(instr->GetSrc2()->AsRegOpnd()->m_sym->m_id))));

        AssertOrFailFast(!canStoreTemp || instr->dstIsTempObject);
        AssertOrFailFast(!maybeTemp || instr->dstIsTempObject);
    }

    // Need to get the var equiv sym, as an assignment to a type-specialized sym kills the var sym's value anyway.
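    // (Illustrative note: a var sym may have type-specialized equivalent syms -- e.g. an int32 or
    // float64 version. A def through any of those kills the var sym's value, which is why the
    // temp-object tracking below is keyed on the var equivalent sym.)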
    StackSym * sym = opnd->m_sym;
    if (!sym->IsVar())
    {
        sym = sym->GetVarEquivSym(nullptr);
        if (sym == nullptr)
        {
            return;
        }
    }

    SymID symId = sym->m_id;
    if (maybeTemp)
    {
        // Only var syms should be temp objects
        Assert(opnd->m_sym == sym);

        if (globOptData.maybeTempObjectSyms == nullptr)
        {
            globOptData.maybeTempObjectSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
        }
        globOptData.maybeTempObjectSyms->Set(symId);

        if (canStoreTemp)
        {
            if (instr->m_opcode == Js::OpCode::NewScObjectLiteral && !this->IsLoopPrePass())
            {
                // For an object literal, we install the final type up front.
                // If there are bailouts before we finish initializing all the fields, we need to
                // zero out the rest if we stack allocate the literal, so that the boxing would not
                // try to box trash pointers in the properties.
                // Although object literal initialization can be done lexically, BailOnNoProfile may cause some paths
                // to disappear. Doing it flow-based makes it easier to stop propagating those entries.
                IR::IntConstOpnd * propertyArrayIdOpnd = instr->GetSrc1()->AsIntConstOpnd();
                const Js::PropertyIdArray * propIds = instr->m_func->GetJITFunctionBody()->ReadPropertyIdArrayFromAuxData(propertyArrayIdOpnd->AsUint32());

                // Duplicates are removed by the parser
                Assert(!propIds->hadDuplicates);

                if (globOptData.stackLiteralInitFldDataMap == nullptr)
                {
                    globOptData.stackLiteralInitFldDataMap = JitAnew(alloc, StackLiteralInitFldDataMap, alloc);
                }
                else
                {
                    Assert(!globOptData.stackLiteralInitFldDataMap->ContainsKey(sym));
                }
                StackLiteralInitFldData data = { propIds, 0};
                globOptData.stackLiteralInitFldDataMap->AddNew(sym, data);
            }

            if (globOptData.canStoreTempObjectSyms == nullptr)
            {
                globOptData.canStoreTempObjectSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
            }
            globOptData.canStoreTempObjectSyms->Set(symId);
        }
        else if (globOptData.canStoreTempObjectSyms)
        {
            globOptData.canStoreTempObjectSyms->Clear(symId);
        }
    }
    else
    {
        Assert(!canStoreTemp);
        if (globOptData.maybeTempObjectSyms)
        {
            if (globOptData.canStoreTempObjectSyms)
            {
                globOptData.canStoreTempObjectSyms->Clear(symId);
            }
            globOptData.maybeTempObjectSyms->Clear(symId);
        }
        else
        {
            Assert(!globOptData.canStoreTempObjectSyms);
        }

        // The symbol is being assigned to; it shouldn't still be in the stackLiteralInitFldDataMap
        Assert(this->IsLoopPrePass() ||
            globOptData.stackLiteralInitFldDataMap == nullptr ||
            globOptData.stackLiteralInitFldDataMap->Count() == 0 ||
            !globOptData.stackLiteralInitFldDataMap->ContainsKey(sym));
    }
}

IR::Instr *
GlobOpt::GenerateBailOutMarkTempObjectIfNeeded(IR::Instr * instr, IR::Opnd * opnd, bool isDst)
{
    Assert(opnd);
    Assert(isDst == (opnd == instr->GetDst()));
    Assert(opnd != instr->GetDst() || !opnd->IsRegOpnd());
    Assert(!this->IsLoopPrePass());
    Assert(instr->HasAnyImplicitCalls());

    // Only a dst reg opnd opcode or ArgOut_A should have dstIsTempObject marked
    Assert(!isDst || !instr->dstIsTempObject || instr->m_opcode == Js::OpCode::ArgOut_A);

    // The post-op implicit call bailout shouldn't have been installed yet
    Assert(!instr->HasBailOutInfo() || (instr->GetBailOutKind() & IR::BailOutKindBits) != IR::BailOutOnImplicitCalls);

    GlobOptBlockData& globOptData = *CurrentBlockData();
    Assert(globOptData.maybeTempObjectSyms != nullptr);

    IR::PropertySymOpnd * propertySymOpnd = nullptr;
    StackSym * stackSym = ObjectTemp::GetStackSym(opnd, &propertySymOpnd);

    // It is okay to not get the var equiv sym here, as a use of a type specialized sym is not a use of the temp object,
    // so there is no need to add a mark temp bailout.
    // TempObjectSyms doesn't contain any type spec syms, so we will get false here for all type spec syms.
    if (stackSym && globOptData.maybeTempObjectSyms->Test(stackSym->m_id))
    {
        if (instr->HasBailOutInfo())
        {
            instr->SetBailOutKind(instr->GetBailOutKind() | IR::BailOutMarkTempObject);
        }
        else
        {
            // Insert the pre-op bailout here unless this is a type-check-protected (direct) field access;
            // don't check the dst yet. SetTypeCheckBailout will clear this out if it is a direct field access.
            if (isDst
                || (instr->m_opcode == Js::OpCode::FromVar && !opnd->GetValueType().IsPrimitive())
                || propertySymOpnd == nullptr
                || !propertySymOpnd->IsTypeCheckProtected())
            {
                this->GenerateBailAtOperation(&instr, IR::BailOutMarkTempObject);
            }
        }

        if (!opnd->IsRegOpnd() && (!isDst || (globOptData.canStoreTempObjectSyms && globOptData.canStoreTempObjectSyms->Test(stackSym->m_id))))
        {
            // If this opnd is a dst, that means that the object pointer is a stack object,
            // and we can store a temp object/number on it.
            // If the opnd is a src, that means that the object pointer may be a stack object,
            // so the load may be a temp object/number and we need to track its use.

            // Don't mark the start of an indir as can-store-temp, because we don't actually know
            // what it is assigning to.
            if (!isDst || !opnd->IsIndirOpnd())
            {
                opnd->SetCanStoreTemp();
            }

            if (propertySymOpnd)
            {
                // Track InitFld of stack literals
                if (isDst && instr->m_opcode == Js::OpCode::InitFld)
                {
                    const Js::PropertyId propertyId = propertySymOpnd->m_sym->AsPropertySym()->m_propertyId;

                    // We don't need to track the init of numeric properties
                    if (!this->func->GetThreadContextInfo()->IsNumericProperty(propertyId))
                    {
                        DebugOnly(bool found = false);
                        globOptData.stackLiteralInitFldDataMap->RemoveIf(stackSym,
                            [&](StackSym * key, StackLiteralInitFldData & data)
                        {
                            DebugOnly(found = true);
                            Assert(key == stackSym);
                            Assert(data.currentInitFldCount < data.propIds->count);

                            if (data.propIds->elements[data.currentInitFldCount] != propertyId)
                            {
#if DBG
                                bool duplicate = false;
                                for (uint i = 0; i < data.currentInitFldCount; i++)
                                {
                                    if (data.propIds->elements[i] == propertyId)
                                    {
                                        duplicate = true;
                                        break;
                                    }
                                }
                                Assert(duplicate);
#endif
                                // duplicate initialization
                                return false;
                            }
                            bool finished = (++data.currentInitFldCount == data.propIds->count);
#if DBG
                            if (finished)
                            {
                                // We can still track the finished stack literal InitFld lexically.
this->finishedStackLiteralInitFld->Set(stackSym->m_id); } #endif return finished; }); // We might still see InitFld even we have finished with all the property Id because // of duplicate entries at the end Assert(found || finishedStackLiteralInitFld->Test(stackSym->m_id)); } } } } } return instr; } LoopCount * GlobOpt::GetOrGenerateLoopCountForMemOp(Loop *loop) { LoopCount *loopCount = loop->loopCount; if (loopCount && !loopCount->HasGeneratedLoopCountSym()) { Assert(loop->bailOutInfo); EnsureBailTarget(loop); GenerateLoopCountPlusOne(loop, loopCount); } return loopCount; } IR::Opnd * GlobOpt::GenerateInductionVariableChangeForMemOp(Loop *loop, byte unroll, IR::Instr *insertBeforeInstr) { LoopCount *loopCount = loop->loopCount; IR::Opnd *sizeOpnd = nullptr; Assert(loopCount); Assert(loop->memOpInfo->inductionVariableOpndPerUnrollMap); if (loop->memOpInfo->inductionVariableOpndPerUnrollMap->TryGetValue(unroll, &sizeOpnd)) { return sizeOpnd; } Func *localFunc = loop->GetFunc(); const auto InsertInstr = [&](IR::Instr *instr) { if (insertBeforeInstr == nullptr) { loop->landingPad->InsertAfter(instr); } else { insertBeforeInstr->InsertBefore(instr); } }; if (loopCount->LoopCountMinusOneSym()) { IRType type = loopCount->LoopCountSym()->GetType(); // Loop count is off by one, so add one IR::RegOpnd *loopCountOpnd = IR::RegOpnd::New(loopCount->LoopCountSym(), type, localFunc); sizeOpnd = loopCountOpnd; if (unroll != 1) { sizeOpnd = IR::RegOpnd::New(TyUint32, this->func); IR::Opnd *unrollOpnd = IR::IntConstOpnd::New(unroll, type, localFunc); InsertInstr(IR::Instr::New(Js::OpCode::Mul_I4, sizeOpnd, loopCountOpnd, unrollOpnd, localFunc)); } } else { uint size = (loopCount->LoopCountMinusOneConstantValue() + 1) * unroll; sizeOpnd = IR::IntConstOpnd::New(size, IRType::TyUint32, localFunc); } loop->memOpInfo->inductionVariableOpndPerUnrollMap->Add(unroll, sizeOpnd); return sizeOpnd; } IR::RegOpnd* GlobOpt::GenerateStartIndexOpndForMemop(Loop *loop, IR::Opnd *indexOpnd, IR::Opnd *sizeOpnd, bool isInductionVariableChangeIncremental, bool bIndexAlreadyChanged, IR::Instr *insertBeforeInstr) { IR::RegOpnd *startIndexOpnd = nullptr; Func *localFunc = loop->GetFunc(); IRType type = indexOpnd->GetType(); const int cacheIndex = ((int)isInductionVariableChangeIncremental << 1) | (int)bIndexAlreadyChanged; if (loop->memOpInfo->startIndexOpndCache[cacheIndex]) { return loop->memOpInfo->startIndexOpndCache[cacheIndex]; } const auto InsertInstr = [&](IR::Instr *instr) { if (insertBeforeInstr == nullptr) { loop->landingPad->InsertAfter(instr); } else { insertBeforeInstr->InsertBefore(instr); } }; startIndexOpnd = IR::RegOpnd::New(type, localFunc); // If the 2 are different we can simply use indexOpnd if (isInductionVariableChangeIncremental != bIndexAlreadyChanged) { InsertInstr(IR::Instr::New(Js::OpCode::Ld_A, startIndexOpnd, indexOpnd, localFunc)); } else { // Otherwise add 1 to it InsertInstr(IR::Instr::New(Js::OpCode::Add_I4, startIndexOpnd, indexOpnd, IR::IntConstOpnd::New(1, type, localFunc, true), localFunc)); } if (!isInductionVariableChangeIncremental) { InsertInstr(IR::Instr::New(Js::OpCode::Sub_I4, startIndexOpnd, startIndexOpnd, sizeOpnd, localFunc)); } loop->memOpInfo->startIndexOpndCache[cacheIndex] = startIndexOpnd; return startIndexOpnd; } IR::Instr* GlobOpt::FindUpperBoundsCheckInstr(IR::Instr* fromInstr) { IR::Instr *upperBoundCheck = fromInstr; do { upperBoundCheck = upperBoundCheck->m_prev; Assert(upperBoundCheck); Assert(!upperBoundCheck->IsLabelInstr()); } while (upperBoundCheck->m_opcode != 
Js::OpCode::BoundCheck); return upperBoundCheck; } IR::Instr* GlobOpt::FindArraySegmentLoadInstr(IR::Instr* fromInstr) { IR::Instr *headSegmentLengthLoad = fromInstr; do { headSegmentLengthLoad = headSegmentLengthLoad->m_prev; Assert(headSegmentLengthLoad); Assert(!headSegmentLengthLoad->IsLabelInstr()); } while (headSegmentLengthLoad->m_opcode != Js::OpCode::LdIndir); return headSegmentLengthLoad; } void GlobOpt::RemoveMemOpSrcInstr(IR::Instr* memopInstr, IR::Instr* srcInstr, BasicBlock* block) { Assert(srcInstr && (srcInstr->m_opcode == Js::OpCode::LdElemI_A || srcInstr->m_opcode == Js::OpCode::StElemI_A || srcInstr->m_opcode == Js::OpCode::StElemI_A_Strict)); Assert(memopInstr && (memopInstr->m_opcode == Js::OpCode::Memcopy || memopInstr->m_opcode == Js::OpCode::Memset)); Assert(block); const bool isDst = srcInstr->m_opcode == Js::OpCode::StElemI_A || srcInstr->m_opcode == Js::OpCode::StElemI_A_Strict; IR::RegOpnd* opnd = (isDst ? memopInstr->GetDst() : memopInstr->GetSrc1())->AsIndirOpnd()->GetBaseOpnd(); IR::ArrayRegOpnd* arrayOpnd = opnd->IsArrayRegOpnd() ? opnd->AsArrayRegOpnd() : nullptr; IR::Instr* topInstr = srcInstr; if (srcInstr->extractedUpperBoundCheckWithoutHoisting) { IR::Instr *upperBoundCheck = FindUpperBoundsCheckInstr(srcInstr); Assert(upperBoundCheck && upperBoundCheck != srcInstr); topInstr = upperBoundCheck; } if (srcInstr->loadedArrayHeadSegmentLength && arrayOpnd && arrayOpnd->HeadSegmentLengthSym()) { IR::Instr *arrayLoadSegmentHeadLength = FindArraySegmentLoadInstr(topInstr); Assert(arrayLoadSegmentHeadLength); topInstr = arrayLoadSegmentHeadLength; arrayOpnd->RemoveHeadSegmentLengthSym(); } if (srcInstr->loadedArrayHeadSegment && arrayOpnd && arrayOpnd->HeadSegmentSym()) { IR::Instr *arrayLoadSegmentHead = FindArraySegmentLoadInstr(topInstr); Assert(arrayLoadSegmentHead); topInstr = arrayLoadSegmentHead; arrayOpnd->RemoveHeadSegmentSym(); } // If no bounds check are present, simply look up for instruction added for instrumentation if(topInstr == srcInstr) { bool checkPrev = true; while (checkPrev) { switch (topInstr->m_prev->m_opcode) { case Js::OpCode::BailOnNotArray: case Js::OpCode::NoImplicitCallUses: case Js::OpCode::ByteCodeUses: topInstr = topInstr->m_prev; checkPrev = !!topInstr->m_prev; break; default: checkPrev = false; break; } } } while (topInstr != srcInstr) { IR::Instr* removeInstr = topInstr; topInstr = topInstr->m_next; Assert( removeInstr->m_opcode == Js::OpCode::BailOnNotArray || removeInstr->m_opcode == Js::OpCode::NoImplicitCallUses || removeInstr->m_opcode == Js::OpCode::ByteCodeUses || removeInstr->m_opcode == Js::OpCode::LdIndir || removeInstr->m_opcode == Js::OpCode::BoundCheck ); if (removeInstr->m_opcode != Js::OpCode::ByteCodeUses) { block->RemoveInstr(removeInstr); } } this->ConvertToByteCodeUses(srcInstr); } void GlobOpt::GetMemOpSrcInfo(Loop* loop, IR::Instr* instr, IR::RegOpnd*& base, IR::RegOpnd*& index, IRType& arrayType) { Assert(instr && (instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict)); IR::Opnd* arrayOpnd = instr->m_opcode == Js::OpCode::LdElemI_A ? 
instr->GetSrc1() : instr->GetDst(); Assert(arrayOpnd->IsIndirOpnd()); IR::IndirOpnd* indirArrayOpnd = arrayOpnd->AsIndirOpnd(); IR::RegOpnd* baseOpnd = (IR::RegOpnd*)indirArrayOpnd->GetBaseOpnd(); IR::RegOpnd* indexOpnd = (IR::RegOpnd*)indirArrayOpnd->GetIndexOpnd(); Assert(baseOpnd); Assert(indexOpnd); // Process Out Params base = baseOpnd; index = indexOpnd; arrayType = indirArrayOpnd->GetType(); } void GlobOpt::EmitMemop(Loop * loop, LoopCount *loopCount, const MemOpEmitData* emitData) { Assert(emitData); Assert(emitData->candidate); Assert(emitData->stElemInstr); Assert(emitData->stElemInstr->m_opcode == Js::OpCode::StElemI_A || emitData->stElemInstr->m_opcode == Js::OpCode::StElemI_A_Strict); IR::BailOutKind bailOutKind = emitData->bailOutKind; const byte unroll = emitData->inductionVar.unroll; Assert(unroll == 1); const bool isInductionVariableChangeIncremental = emitData->inductionVar.isIncremental; const bool bIndexAlreadyChanged = emitData->candidate->bIndexAlreadyChanged; IR::RegOpnd *baseOpnd = nullptr; IR::RegOpnd *indexOpnd = nullptr; IRType dstType; GetMemOpSrcInfo(loop, emitData->stElemInstr, baseOpnd, indexOpnd, dstType); Func *localFunc = loop->GetFunc(); // Handle bailout info EnsureBailTarget(loop); Assert(bailOutKind != IR::BailOutInvalid); // Keep only Array bits bailOuts. Consider handling these bailouts instead of simply ignoring them bailOutKind &= IR::BailOutForArrayBits; // Add our custom bailout to handle Op_MemCopy return value. bailOutKind |= IR::BailOutOnMemOpError; BailOutInfo *const bailOutInfo = loop->bailOutInfo; Assert(bailOutInfo); IR::Instr *insertBeforeInstr = bailOutInfo->bailOutInstr; Assert(insertBeforeInstr); IR::Opnd *sizeOpnd = GenerateInductionVariableChangeForMemOp(loop, unroll, insertBeforeInstr); IR::RegOpnd *startIndexOpnd = GenerateStartIndexOpndForMemop(loop, indexOpnd, sizeOpnd, isInductionVariableChangeIncremental, bIndexAlreadyChanged, insertBeforeInstr); IR::IndirOpnd* dstOpnd = IR::IndirOpnd::New(baseOpnd, startIndexOpnd, dstType, localFunc); IR::Opnd *src1; const bool isMemset = emitData->candidate->IsMemSet(); // Get the source according to the memop type if (isMemset) { MemSetEmitData* data = (MemSetEmitData*)emitData; const Loop::MemSetCandidate* candidate = data->candidate->AsMemSet(); if (candidate->srcSym) { IR::RegOpnd* regSrc = IR::RegOpnd::New(candidate->srcSym, candidate->srcSym->GetType(), func); regSrc->SetIsJITOptimizedReg(true); src1 = regSrc; } else { src1 = IR::AddrOpnd::New(candidate->constant.ToVar(localFunc), IR::AddrOpndKindConstantAddress, localFunc); } } else { Assert(emitData->candidate->IsMemCopy()); MemCopyEmitData* data = (MemCopyEmitData*)emitData; Assert(data->ldElemInstr); Assert(data->ldElemInstr->m_opcode == Js::OpCode::LdElemI_A); IR::RegOpnd *srcBaseOpnd = nullptr; IR::RegOpnd *srcIndexOpnd = nullptr; IRType srcType; GetMemOpSrcInfo(loop, data->ldElemInstr, srcBaseOpnd, srcIndexOpnd, srcType); Assert(GetVarSymID(srcIndexOpnd->GetStackSym()) == GetVarSymID(indexOpnd->GetStackSym())); src1 = IR::IndirOpnd::New(srcBaseOpnd, startIndexOpnd, srcType, localFunc); } // Generate memcopy IR::Instr* memopInstr = IR::BailOutInstr::New(isMemset ? 
Js::OpCode::Memset : Js::OpCode::Memcopy, bailOutKind, bailOutInfo, localFunc); memopInstr->SetDst(dstOpnd); memopInstr->SetSrc1(src1); memopInstr->SetSrc2(sizeOpnd); insertBeforeInstr->InsertBefore(memopInstr); #if DBG_DUMP if (DO_MEMOP_TRACE()) { char valueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseOpnd->GetValueType().ToString(valueTypeStr); const int loopCountBufSize = 16; char16 loopCountBuf[loopCountBufSize]; if (loopCount->LoopCountMinusOneSym()) { swprintf_s(loopCountBuf, _u("s%u"), loopCount->LoopCountMinusOneSym()->m_id); } else { swprintf_s(loopCountBuf, _u("%u"), loopCount->LoopCountMinusOneConstantValue() + 1); } if (isMemset) { const Loop::MemSetCandidate* candidate = emitData->candidate->AsMemSet(); const int constBufSize = 32; char16 constBuf[constBufSize]; if (candidate->srcSym) { swprintf_s(constBuf, _u("s%u"), candidate->srcSym->m_id); } else { switch (candidate->constant.type) { case TyInt8: case TyInt16: case TyInt32: case TyInt64: swprintf_s(constBuf, sizeof(IntConstType) == 8 ? _u("%lld") : _u("%d"), candidate->constant.u.intConst.value); break; case TyFloat32: case TyFloat64: swprintf_s(constBuf, _u("%.4f"), candidate->constant.u.floatConst.value); break; case TyVar: swprintf_s(constBuf, sizeof(Js::Var) == 8 ? _u("0x%.16llX") : _u("0x%.8X"), candidate->constant.u.varConst.value); break; default: AssertMsg(false, "Unsupported constant type"); swprintf_s(constBuf, _u("Unknown")); break; } } TRACE_MEMOP_PHASE(MemSet, loop, emitData->stElemInstr, _u("ValueType: %S, Base: s%u, Index: s%u, Constant: %s, LoopCount: %s, IsIndexChangedBeforeUse: %d"), valueTypeStr, candidate->base, candidate->index, constBuf, loopCountBuf, bIndexAlreadyChanged); } else { const Loop::MemCopyCandidate* candidate = emitData->candidate->AsMemCopy(); TRACE_MEMOP_PHASE(MemCopy, loop, emitData->stElemInstr, _u("ValueType: %S, StBase: s%u, Index: s%u, LdBase: s%u, LoopCount: %s, IsIndexChangedBeforeUse: %d"), valueTypeStr, candidate->base, candidate->index, candidate->ldBase, loopCountBuf, bIndexAlreadyChanged); } } #endif RemoveMemOpSrcInstr(memopInstr, emitData->stElemInstr, emitData->block); if (!isMemset) { RemoveMemOpSrcInstr(memopInstr, ((MemCopyEmitData*)emitData)->ldElemInstr, emitData->block); } } bool GlobOpt::InspectInstrForMemSetCandidate(Loop* loop, IR::Instr* instr, MemSetEmitData* emitData, bool& errorInInstr) { Assert(emitData && emitData->candidate && emitData->candidate->IsMemSet()); Loop::MemSetCandidate* candidate = (Loop::MemSetCandidate*)emitData->candidate; if (instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) { if (instr->GetDst()->IsIndirOpnd() && (GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetStackSym()) == candidate->base) && (GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetIndexOpnd()->GetStackSym()) == candidate->index) ) { Assert(instr->IsProfiledInstr()); emitData->stElemInstr = instr; emitData->bailOutKind = instr->GetBailOutKind(); return true; } TRACE_MEMOP_PHASE_VERBOSE(MemSet, loop, instr, _u("Orphan StElemI_A detected")); errorInInstr = true; } else if (instr->m_opcode == Js::OpCode::LdElemI_A) { TRACE_MEMOP_PHASE_VERBOSE(MemSet, loop, instr, _u("Orphan LdElemI_A detected")); errorInInstr = true; } return false; } bool GlobOpt::InspectInstrForMemCopyCandidate(Loop* loop, IR::Instr* instr, MemCopyEmitData* emitData, bool& errorInInstr) { Assert(emitData && emitData->candidate && emitData->candidate->IsMemCopy()); Loop::MemCopyCandidate* candidate = (Loop::MemCopyCandidate*)emitData->candidate; if 
(instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) { if ( instr->GetDst()->IsIndirOpnd() && (GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetStackSym()) == candidate->base) && (GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetIndexOpnd()->GetStackSym()) == candidate->index) ) { Assert(instr->IsProfiledInstr()); emitData->stElemInstr = instr; emitData->bailOutKind = instr->GetBailOutKind(); // Still need to find the LdElem return false; } TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Orphan StElemI_A detected")); errorInInstr = true; } else if (instr->m_opcode == Js::OpCode::LdElemI_A) { if ( emitData->stElemInstr && instr->GetSrc1()->IsIndirOpnd() && (GetVarSymID(instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetStackSym()) == candidate->ldBase) && (GetVarSymID(instr->GetSrc1()->AsIndirOpnd()->GetIndexOpnd()->GetStackSym()) == candidate->index) ) { Assert(instr->IsProfiledInstr()); emitData->ldElemInstr = instr; ValueType stValueType = emitData->stElemInstr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType(); ValueType ldValueType = emitData->ldElemInstr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType(); if (stValueType != ldValueType) { #if DBG_DUMP char16 stValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; stValueType.ToString(stValueTypeStr); char16 ldValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; ldValueType.ToString(ldValueTypeStr); TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("for mismatch in Load(%s) and Store(%s) value type"), ldValueTypeStr, stValueTypeStr); #endif errorInInstr = true; return false; } // We found both instruction for this candidate return true; } TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Orphan LdElemI_A detected")); errorInInstr = true; } return false; } // The caller is responsible to free the memory allocated between inOrderEmitData[iEmitData -> end] bool GlobOpt::ValidateMemOpCandidates(Loop * loop, _Out_writes_(iEmitData) MemOpEmitData** inOrderEmitData, int& iEmitData) { AnalysisAssert(iEmitData == (int)loop->memOpInfo->candidates->Count()); // We iterate over the second block of the loop only. 
MemOp Works only if the loop has exactly 2 blocks Assert(loop->blockList.HasTwo()); Loop::MemOpList::Iterator iter(loop->memOpInfo->candidates); BasicBlock* bblock = loop->blockList.Head()->next; Loop::MemOpCandidate* candidate = nullptr; MemOpEmitData* emitData = nullptr; // Iterate backward because the list of candidate is reversed FOREACH_INSTR_BACKWARD_IN_BLOCK(instr, bblock) { if (!candidate) { // Time to check next candidate if (!iter.Next()) { // We have been through the whole list of candidates, finish break; } candidate = iter.Data(); if (!candidate) { continue; } // Common check for memset and memcopy Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 0, 0 }; // Get the inductionVariable changeInfo if (!loop->memOpInfo->inductionVariableChangeInfoMap->TryGetValue(candidate->index, &inductionVariableChangeInfo)) { TRACE_MEMOP_VERBOSE(loop, nullptr, _u("MemOp skipped (s%d): no induction variable"), candidate->base); return false; } if (inductionVariableChangeInfo.unroll != candidate->count) { TRACE_MEMOP_VERBOSE(loop, nullptr, _u("MemOp skipped (s%d): not matching unroll count"), candidate->base); return false; } if (candidate->IsMemSet()) { Assert(!PHASE_OFF(Js::MemSetPhase, this->func)); emitData = JitAnew(this->alloc, MemSetEmitData); } else { Assert(!PHASE_OFF(Js::MemCopyPhase, this->func)); // Specific check for memcopy Assert(candidate->IsMemCopy()); Loop::MemCopyCandidate* memcopyCandidate = candidate->AsMemCopy(); if (memcopyCandidate->base == Js::Constants::InvalidSymID || memcopyCandidate->ldBase == Js::Constants::InvalidSymID || (memcopyCandidate->ldCount != memcopyCandidate->count)) { TRACE_MEMOP_PHASE(MemCopy, loop, nullptr, _u("(s%d): not matching ldElem and stElem"), candidate->base); return false; } emitData = JitAnew(this->alloc, MemCopyEmitData); } Assert(emitData); emitData->block = bblock; emitData->inductionVar = inductionVariableChangeInfo; emitData->candidate = candidate; } bool errorInInstr = false; bool candidateFound = candidate->IsMemSet() ? InspectInstrForMemSetCandidate(loop, instr, (MemSetEmitData*)emitData, errorInInstr) : InspectInstrForMemCopyCandidate(loop, instr, (MemCopyEmitData*)emitData, errorInInstr); if (errorInInstr) { JitAdelete(this->alloc, emitData); return false; } if (candidateFound) { AnalysisAssert(iEmitData > 0); if (iEmitData == 0) { // Explicit for OACR break; } inOrderEmitData[--iEmitData] = emitData; candidate = nullptr; emitData = nullptr; } } NEXT_INSTR_BACKWARD_IN_BLOCK; if (iter.IsValid()) { TRACE_MEMOP(loop, nullptr, _u("Candidates not found in loop while validating")); return false; } return true; } void GlobOpt::ProcessMemOp() { FOREACH_LOOP_IN_FUNC_EDITING(loop, this->func) { if (HasMemOp(loop)) { const int candidateCount = loop->memOpInfo->candidates->Count(); Assert(candidateCount > 0); LoopCount * loopCount = GetOrGenerateLoopCountForMemOp(loop); // If loopCount is not available we can not continue with memop if (!loopCount || !(loopCount->LoopCountMinusOneSym() || loopCount->LoopCountMinusOneConstantValue())) { TRACE_MEMOP(loop, nullptr, _u("MemOp skipped for no loop count")); loop->doMemOp = false; loop->memOpInfo->candidates->Clear(); continue; } // The list is reversed, check them and place them in order in the following array MemOpEmitData** inOrderCandidates = JitAnewArray(this->alloc, MemOpEmitData*, candidateCount); int i = candidateCount; if (ValidateMemOpCandidates(loop, inOrderCandidates, i)) { Assert(i == 0); // Process the valid MemOp candidate in order. 
for (; i < candidateCount; ++i) { // Emit EmitMemop(loop, loopCount, inOrderCandidates[i]); JitAdelete(this->alloc, inOrderCandidates[i]); } } else { Assert(i != 0); for (; i < candidateCount; ++i) { JitAdelete(this->alloc, inOrderCandidates[i]); } // One of the memop candidates did not validate. Do not emit for this loop. loop->doMemOp = false; loop->memOpInfo->candidates->Clear(); } // Free memory JitAdeleteArray(this->alloc, candidateCount, inOrderCandidates); } } NEXT_LOOP_EDITING; }
38.806877
297
0.569813
makepaddev
8b16ca446e51ee7b1c7677d5ce84c53424deeaf6
754
cpp
C++
libs/pika/memory/tests/unit/ip_convertible.cpp
msimberg/pika
f86bc232bca88900dabd931de429f2d1cd3f4cc1
[ "BSL-1.0" ]
null
null
null
libs/pika/memory/tests/unit/ip_convertible.cpp
msimberg/pika
f86bc232bca88900dabd931de429f2d1cd3f4cc1
[ "BSL-1.0" ]
null
null
null
libs/pika/memory/tests/unit/ip_convertible.cpp
msimberg/pika
f86bc232bca88900dabd931de429f2d1cd3f4cc1
[ "BSL-1.0" ]
null
null
null
// wp_convertible_test.cpp // // Copyright (c) 2008 Peter Dimov // // SPDX-License-Identifier: BSL-1.0 // Distributed under the Boost Software License, Version 1.0. // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt #include <pika/local/config.hpp> #include <pika/modules/memory.hpp> #include <pika/modules/testing.hpp> // struct W { }; void intrusive_ptr_add_ref(W*) {} void intrusive_ptr_release(W*) {} struct X : public virtual W { }; struct Y : public virtual W { }; struct Z : public X { }; int f(pika::intrusive_ptr<X>) { return 1; } int f(pika::intrusive_ptr<Y>) { return 2; } int main() { PIKA_TEST_EQ(1, f(pika::intrusive_ptr<Z>())); return pika::util::report_errors(); }
15.08
62
0.675066
msimberg
8b17dcbab2c43a33a89896af9b51f62f9dfd1e46
1,022
cpp
C++
Codeforces/CPP/School Team Contest #2 (Winter Computer School 2010/E.cpp
riship99/codeWith-hacktoberfest
f16fa9dc9a2af0009dea3dea3220e3eaa43d3d2b
[ "MIT" ]
29
2020-10-03T17:41:46.000Z
2021-10-04T17:59:22.000Z
Codeforces/CPP/School Team Contest #2 (Winter Computer School 2010/E.cpp
riship99/codeWith-hacktoberfest
f16fa9dc9a2af0009dea3dea3220e3eaa43d3d2b
[ "MIT" ]
117
2020-10-03T15:39:39.000Z
2021-10-06T08:21:37.000Z
Codeforces/CPP/School Team Contest #2 (Winter Computer School 2010/E.cpp
riship99/codeWith-hacktoberfest
f16fa9dc9a2af0009dea3dea3220e3eaa43d3d2b
[ "MIT" ]
160
2020-10-03T15:39:23.000Z
2021-10-13T09:07:05.000Z
// Tags: DP #include <bits/stdc++.h> #define ll long long #define IO ios_base::sync_with_stdio(0);cin.tie(0);cout.tie(0) using namespace std; const int N = 205; int k, a, b, dp[N][N]; string s; int solve(int i, int l) { if(i == s.size()) return l == k; if(l == k) return 0; if(dp[i][l] != -1) return dp[i][l]; dp[i][l] = 0; for(int j = a; j <= b; j++) if(i + j <= s.size()) dp[i][l] |= solve(i + j, l + 1); return dp[i][l]; } void build(int i, int l) { if(i == s.size()) return; if(l == k) return; for(int j = a; j <= b; j++) { if(i + j > s.size()) break; if(!solve(i + j, l + 1)) continue; cout << s.substr(i, j) << '\n'; build(i + j, l + 1); return; } } int main() { IO; memset(dp, -1, sizeof dp); cin >> k >> a >> b >> s; if(solve(0, 0)) build(0, 0); else cout << "No solution"; }
15.253731
62
0.410959
riship99
8b1906f6d20d17a6e22c455e25756175c8d62dd1
1,321
hpp
C++
boost/boost/fusion/view/nview/detail/distance_impl.hpp
randolphwong/mcsema
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
[ "BSD-3-Clause" ]
71
2015-01-17T00:29:44.000Z
2021-02-09T02:59:16.000Z
boost/boost/fusion/view/nview/detail/distance_impl.hpp
randolphwong/mcsema
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
[ "BSD-3-Clause" ]
38
2015-07-22T07:35:45.000Z
2019-03-14T16:03:06.000Z
boost/boost/fusion/view/nview/detail/distance_impl.hpp
randolphwong/mcsema
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
[ "BSD-3-Clause" ]
44
2015-03-18T09:20:37.000Z
2021-12-21T08:09:17.000Z
/*============================================================================= Copyright (c) 2009 Hartmut Kaiser Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ==============================================================================*/ #if !defined(BOOST_FUSION_NVIEW_DISTANCE_IMPL_SEP_23_2009_0328PM) #define BOOST_FUSION_NVIEW_DISTANCE_IMPL_SEP_23_2009_0328PM #include <boost/fusion/iterator/distance.hpp> namespace boost { namespace fusion { struct nview_iterator_tag; namespace extension { template<typename Tag> struct distance_impl; template<> struct distance_impl<nview_iterator_tag> { template<typename First, typename Last> struct apply : result_of::distance<typename First::first_type, typename Last::first_type> { typedef typename result_of::distance< typename First::first_type, typename Last::first_type >::type type; static type call(First const& /*first*/, Last const& /*last*/) { return type(); } }; }; } }} #endif
29.355556
90
0.530659
randolphwong
8b19920335abcfa8c4c1746aef97e948f21828ec
1,000
cpp
C++
Flick/src/Flick/Renderer/Buffer.cpp
firo1738/FLICK
a6ccb0f23c212d0f1b97f71520beb3a89be57f2d
[ "Apache-2.0" ]
null
null
null
Flick/src/Flick/Renderer/Buffer.cpp
firo1738/FLICK
a6ccb0f23c212d0f1b97f71520beb3a89be57f2d
[ "Apache-2.0" ]
null
null
null
Flick/src/Flick/Renderer/Buffer.cpp
firo1738/FLICK
a6ccb0f23c212d0f1b97f71520beb3a89be57f2d
[ "Apache-2.0" ]
null
null
null
#include "fipch.h" #include "Buffer.h" #include "Renderer.h" #include "Platform/OpenGL/OpenGLBuffer.h" namespace Flick { ///////////////////Index Buffer/////////////////// VertexBuffer* VertexBuffer::Create(float* verticies, uint32_t size) { switch (Renderer::GetAPI()) { case RendererAPI::API::None: FI_CORE_ASSERT(false, "RendererAPi::None is not yet supported by Flick!"); return nullptr; case RendererAPI::API::OpenGL: return new OpenGLVertexBuffer(verticies, size); } FI_CORE_ASSERT(false, "Unknown RendererAPI!"); return nullptr; } ///////////////////Index Buffer/////////////////// IndexBuffer* IndexBuffer::Create(uint32_t* indicies, uint32_t count) { switch (Renderer::GetAPI()) { case RendererAPI::API::None: FI_CORE_ASSERT(false, "RendererAPi::None is not yet supported by Flick!"); return nullptr; case RendererAPI::API::OpenGL: return new OpenGLIndexBuffer(indicies, count); } FI_CORE_ASSERT(false, "Unknown RendererAPI!"); return nullptr; } }
27.027027
121
0.679
firo1738
8b1999ede9d43fe1e1f00f24b5de42b0c9718145
65,474
cc
C++
components/password_manager/core/browser/login_database_unittest.cc
maidiHaitai/haitaibrowser
a232a56bcfb177913a14210e7733e0ea83a6b18d
[ "BSD-3-Clause" ]
1
2020-09-15T08:43:34.000Z
2020-09-15T08:43:34.000Z
components/password_manager/core/browser/login_database_unittest.cc
maidiHaitai/haitaibrowser
a232a56bcfb177913a14210e7733e0ea83a6b18d
[ "BSD-3-Clause" ]
null
null
null
components/password_manager/core/browser/login_database_unittest.cc
maidiHaitai/haitaibrowser
a232a56bcfb177913a14210e7733e0ea83a6b18d
[ "BSD-3-Clause" ]
null
null
null
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/password_manager/core/browser/login_database.h" #include <stddef.h> #include <stdint.h> #include <memory> #include "base/files/file_util.h" #include "base/files/scoped_temp_dir.h" #include "base/memory/scoped_vector.h" #include "base/path_service.h" #include "base/strings/string_number_conversions.h" #include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" #include "base/test/histogram_tester.h" #include "base/time/time.h" #include "build/build_config.h" #include "components/autofill/core/common/password_form.h" #include "components/password_manager/core/browser/psl_matching_helper.h" #include "sql/connection.h" #include "sql/statement.h" #include "sql/test/test_helpers.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" #include "url/origin.h" #if defined(OS_MACOSX) #include "components/os_crypt/os_crypt.h" #endif using autofill::PasswordForm; using base::ASCIIToUTF16; using ::testing::Eq; namespace password_manager { namespace { PasswordStoreChangeList AddChangeForForm(const PasswordForm& form) { return PasswordStoreChangeList( 1, PasswordStoreChange(PasswordStoreChange::ADD, form)); } PasswordStoreChangeList UpdateChangeForForm(const PasswordForm& form) { return PasswordStoreChangeList( 1, PasswordStoreChange(PasswordStoreChange::UPDATE, form)); } void GenerateExamplePasswordForm(PasswordForm* form) { form->origin = GURL("http://accounts.google.com/LoginAuth"); form->action = GURL("http://accounts.google.com/Login"); form->username_element = ASCIIToUTF16("Email"); form->username_value = ASCIIToUTF16("test@gmail.com"); form->password_element = ASCIIToUTF16("Passwd"); form->password_value = ASCIIToUTF16("test"); form->submit_element = ASCIIToUTF16("signIn"); form->signon_realm = "http://www.google.com/"; form->ssl_valid = false; form->preferred = false; form->scheme = PasswordForm::SCHEME_HTML; form->times_used = 1; form->form_data.name = ASCIIToUTF16("form_name"); form->date_synced = base::Time::Now(); form->display_name = ASCIIToUTF16("Mr. Smith"); form->icon_url = GURL("https://accounts.google.com/Icon"); form->federation_origin = url::Origin(GURL("https://accounts.google.com/")); form->skip_zero_click = true; } // Helper functions to read the value of the first column of an executed // statement if we know its type. You must implement a specialization for // every column type you use. template<class T> struct must_be_specialized { static const bool is_specialized = false; }; template<class T> T GetFirstColumn(const sql::Statement& s) { static_assert(must_be_specialized<T>::is_specialized, "Implement a specialization."); } template<> int64_t GetFirstColumn(const sql::Statement& s) { return s.ColumnInt64(0); }; template<> std::string GetFirstColumn(const sql::Statement& s) { return s.ColumnString(0); }; bool AddZeroClickableLogin(LoginDatabase* db, const std::string& unique_string) { // Example password form. 
PasswordForm form; form.origin = GURL("https://example.com/"); form.username_element = ASCIIToUTF16(unique_string); form.username_value = ASCIIToUTF16(unique_string); form.password_element = ASCIIToUTF16(unique_string); form.submit_element = ASCIIToUTF16("signIn"); form.signon_realm = form.origin.spec(); form.display_name = ASCIIToUTF16(unique_string); form.icon_url = GURL("https://example.com/"); form.federation_origin = url::Origin(GURL("https://example.com/")); form.date_created = base::Time::Now(); form.skip_zero_click = false; return db->AddLogin(form) == AddChangeForForm(form); } } // namespace // Serialization routines for vectors implemented in login_database.cc. base::Pickle SerializeVector(const std::vector<base::string16>& vec); std::vector<base::string16> DeserializeVector(const base::Pickle& pickle); class LoginDatabaseTest : public testing::Test { protected: void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); file_ = temp_dir_.path().AppendASCII("TestMetadataStoreMacDatabase"); #if defined(OS_MACOSX) OSCrypt::UseMockKeychain(true); #endif // defined(OS_MACOSX) db_.reset(new LoginDatabase(file_)); ASSERT_TRUE(db_->Init()); } LoginDatabase& db() { return *db_; } void TestNonHTMLFormPSLMatching(const PasswordForm::Scheme& scheme) { ScopedVector<autofill::PasswordForm> result; base::Time now = base::Time::Now(); // Simple non-html auth form. PasswordForm non_html_auth; non_html_auth.origin = GURL("http://example.com"); non_html_auth.username_value = ASCIIToUTF16("test@gmail.com"); non_html_auth.password_value = ASCIIToUTF16("test"); non_html_auth.signon_realm = "http://example.com/Realm"; non_html_auth.scheme = scheme; non_html_auth.date_created = now; // Simple password form. PasswordForm html_form(non_html_auth); html_form.action = GURL("http://example.com/login"); html_form.username_element = ASCIIToUTF16("username"); html_form.username_value = ASCIIToUTF16("test2@gmail.com"); html_form.password_element = ASCIIToUTF16("password"); html_form.submit_element = ASCIIToUTF16(""); html_form.signon_realm = "http://example.com/"; html_form.scheme = PasswordForm::SCHEME_HTML; html_form.date_created = now; // Add them and make sure they are there. EXPECT_EQ(AddChangeForForm(non_html_auth), db().AddLogin(non_html_auth)); EXPECT_EQ(AddChangeForForm(html_form), db().AddLogin(html_form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); result.clear(); PasswordForm second_non_html_auth(non_html_auth); second_non_html_auth.origin = GURL("http://second.example.com"); second_non_html_auth.signon_realm = "http://second.example.com/Realm"; // This shouldn't match anything. EXPECT_TRUE(db().GetLogins(second_non_html_auth, &result)); EXPECT_EQ(0U, result.size()); // non-html auth still matches against itself. EXPECT_TRUE(db().GetLogins(non_html_auth, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(result[0]->signon_realm, "http://example.com/Realm"); // Clear state. db().RemoveLoginsCreatedBetween(now, base::Time()); } // Checks that a form of a given |scheme|, once stored, can be successfully // retrieved from the database. 
void TestRetrievingIPAddress(const PasswordForm::Scheme& scheme) { SCOPED_TRACE(testing::Message() << "scheme = " << scheme); ScopedVector<autofill::PasswordForm> result; base::Time now = base::Time::Now(); std::string origin("http://56.7.8.90"); PasswordForm ip_form; ip_form.origin = GURL(origin); ip_form.username_value = ASCIIToUTF16("test@gmail.com"); ip_form.password_value = ASCIIToUTF16("test"); ip_form.signon_realm = origin; ip_form.scheme = scheme; ip_form.date_created = now; EXPECT_EQ(AddChangeForForm(ip_form), db().AddLogin(ip_form)); EXPECT_TRUE(db().GetLogins(ip_form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(result[0]->signon_realm, origin); // Clear state. db().RemoveLoginsCreatedBetween(now, base::Time()); } base::ScopedTempDir temp_dir_; base::FilePath file_; std::unique_ptr<LoginDatabase> db_; }; TEST_F(LoginDatabaseTest, Logins) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); // Example password form. PasswordForm form; GenerateExamplePasswordForm(&form); // Add it and make sure it is there and that all the fields were retrieved // correctly. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); result.clear(); // Match against an exact copy. EXPECT_TRUE(db().GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); result.clear(); // The example site changes... PasswordForm form2(form); form2.origin = GURL("http://www.google.com/new/accounts/LoginAuth"); form2.submit_element = ASCIIToUTF16("reallySignIn"); // Match against an inexact copy EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // Uh oh, the site changed origin & action URLs all at once! PasswordForm form3(form2); form3.action = GURL("http://www.google.com/new/accounts/Login"); // signon_realm is the same, should match. EXPECT_TRUE(db().GetLogins(form3, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // Imagine the site moves to a secure server for login. PasswordForm form4(form3); form4.signon_realm = "https://www.google.com/"; form4.ssl_valid = true; // We have only an http record, so no match for this. EXPECT_TRUE(db().GetLogins(form4, &result)); EXPECT_EQ(0U, result.size()); // Let's imagine the user logs into the secure site. EXPECT_EQ(AddChangeForForm(form4), db().AddLogin(form4)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); result.clear(); // Now the match works EXPECT_TRUE(db().GetLogins(form4, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // The user chose to forget the original but not the new. EXPECT_TRUE(db().RemoveLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); result.clear(); // The old form wont match the new site (http vs https). EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(0U, result.size()); // The user's request for the HTTPS site is intercepted // by an attacker who presents an invalid SSL cert. PasswordForm form5(form4); form5.ssl_valid = 0; // It will match in this case. EXPECT_TRUE(db().GetLogins(form5, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // User changes his password. PasswordForm form6(form5); form6.password_value = ASCIIToUTF16("test6"); form6.preferred = true; // We update, and check to make sure it matches the // old form, and there is only one record. 
EXPECT_EQ(UpdateChangeForForm(form6), db().UpdateLogin(form6)); // matches EXPECT_TRUE(db().GetLogins(form5, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // Only one record. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); // Password element was updated. EXPECT_EQ(form6.password_value, result[0]->password_value); // Preferred login. EXPECT_TRUE(form6.preferred); result.clear(); // Make sure everything can disappear. EXPECT_TRUE(db().RemoveLogin(form4)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); } TEST_F(LoginDatabaseTest, TestPublicSuffixDomainMatching) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); // Example password form. PasswordForm form; form.origin = GURL("https://foo.com/"); form.action = GURL("https://foo.com/login"); form.username_element = ASCIIToUTF16("username"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_element = ASCIIToUTF16("password"); form.password_value = ASCIIToUTF16("test"); form.submit_element = ASCIIToUTF16(""); form.signon_realm = "https://foo.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); result.clear(); // Match against an exact copy. EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // We go to the mobile site. PasswordForm form2(form); form2.origin = GURL("https://mobile.foo.com/"); form2.action = GURL("https://mobile.foo.com/login"); form2.signon_realm = "https://mobile.foo.com/"; // Match against the mobile site. EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); EXPECT_EQ("https://foo.com/", result[0]->signon_realm); EXPECT_TRUE(result[0]->is_public_suffix_match); // Try to remove PSL matched form EXPECT_FALSE(db().RemoveLogin(*result[0])); result.clear(); // Ensure that the original form is still there EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(1U, result.size()); result.clear(); } TEST_F(LoginDatabaseTest, TestFederatedMatching) { ScopedVector<autofill::PasswordForm> result; // Example password form. PasswordForm form; form.origin = GURL("https://foo.com/"); form.action = GURL("https://foo.com/login"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_value = ASCIIToUTF16("test"); form.signon_realm = "https://foo.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // We go to the mobile site. PasswordForm form2(form); form2.origin = GURL("https://mobile.foo.com/"); form2.action = GURL("https://mobile.foo.com/login"); form2.signon_realm = "federation://mobile.foo.com/accounts.google.com"; form2.username_value = ASCIIToUTF16("test1@gmail.com"); form2.type = autofill::PasswordForm::TYPE_API; form2.federation_origin = url::Origin(GURL("https://accounts.google.com/")); // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_EQ(AddChangeForForm(form2), db().AddLogin(form2)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); // Match against desktop. 
PasswordForm form_request; form_request.origin = GURL("https://foo.com/"); form_request.signon_realm = "https://foo.com/"; form_request.scheme = PasswordForm::SCHEME_HTML; EXPECT_TRUE(db().GetLogins(form_request, &result)); EXPECT_THAT(result, testing::ElementsAre(testing::Pointee(form))); // Match against the mobile site. form_request.origin = GURL("https://mobile.foo.com/"); form_request.signon_realm = "https://mobile.foo.com/"; EXPECT_TRUE(db().GetLogins(form_request, &result)); form.is_public_suffix_match = true; EXPECT_THAT(result, testing::UnorderedElementsAre(testing::Pointee(form), testing::Pointee(form2))); } TEST_F(LoginDatabaseTest, TestPublicSuffixDisabledForNonHTMLForms) { TestNonHTMLFormPSLMatching(PasswordForm::SCHEME_BASIC); TestNonHTMLFormPSLMatching(PasswordForm::SCHEME_DIGEST); TestNonHTMLFormPSLMatching(PasswordForm::SCHEME_OTHER); } TEST_F(LoginDatabaseTest, TestIPAddressMatches_HTML) { TestRetrievingIPAddress(PasswordForm::SCHEME_HTML); } TEST_F(LoginDatabaseTest, TestIPAddressMatches_basic) { TestRetrievingIPAddress(PasswordForm::SCHEME_BASIC); } TEST_F(LoginDatabaseTest, TestIPAddressMatches_digest) { TestRetrievingIPAddress(PasswordForm::SCHEME_DIGEST); } TEST_F(LoginDatabaseTest, TestIPAddressMatches_other) { TestRetrievingIPAddress(PasswordForm::SCHEME_OTHER); } TEST_F(LoginDatabaseTest, TestPublicSuffixDomainMatchingShouldMatchingApply) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); // Example password form. PasswordForm form; form.origin = GURL("https://accounts.google.com/"); form.action = GURL("https://accounts.google.com/login"); form.username_element = ASCIIToUTF16("username"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_element = ASCIIToUTF16("password"); form.password_value = ASCIIToUTF16("test"); form.submit_element = ASCIIToUTF16(""); form.signon_realm = "https://accounts.google.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); result.clear(); // Match against an exact copy. EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // We go to a different site on the same domain where feature is not needed. PasswordForm form2(form); form2.origin = GURL("https://some.other.google.com/"); form2.action = GURL("https://some.other.google.com/login"); form2.signon_realm = "https://some.other.google.com/"; // Match against the other site. Should not match since feature should not be // enabled for this domain. ASSERT_FALSE(ShouldPSLDomainMatchingApply( GetRegistryControlledDomain(GURL(form2.signon_realm)))); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); } TEST_F(LoginDatabaseTest, TestFederatedMatchingWithoutPSLMatching) { ScopedVector<autofill::PasswordForm> result; // Example password form. PasswordForm form; form.origin = GURL("https://accounts.google.com/"); form.action = GURL("https://accounts.google.com/login"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_value = ASCIIToUTF16("test"); form.signon_realm = "https://accounts.google.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // We go to a different site on the same domain where PSL is disabled. 
PasswordForm form2(form); form2.origin = GURL("https://some.other.google.com/"); form2.action = GURL("https://some.other.google.com/login"); form2.signon_realm = "federation://some.other.google.com/accounts.google.com"; form2.username_value = ASCIIToUTF16("test1@gmail.com"); form2.type = autofill::PasswordForm::TYPE_API; form2.federation_origin = url::Origin(GURL("https://accounts.google.com/")); // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_EQ(AddChangeForForm(form2), db().AddLogin(form2)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); // Match against the first one. PasswordForm form_request; form_request.origin = form.origin; form_request.signon_realm = form.signon_realm; form_request.scheme = PasswordForm::SCHEME_HTML; EXPECT_TRUE(db().GetLogins(form_request, &result)); EXPECT_THAT(result, testing::ElementsAre(testing::Pointee(form))); // Match against the second one. ASSERT_FALSE(ShouldPSLDomainMatchingApply( GetRegistryControlledDomain(GURL(form2.signon_realm)))); form_request.origin = form2.origin; form_request.signon_realm = form2.signon_realm; EXPECT_TRUE(db().GetLogins(form_request, &result)); form.is_public_suffix_match = true; EXPECT_THAT(result, testing::ElementsAre(testing::Pointee(form2))); } // This test fails if the implementation of GetLogins uses GetCachedStatement // instead of GetUniqueStatement, since REGEXP is in use. See // http://crbug.com/248608. TEST_F(LoginDatabaseTest, TestPublicSuffixDomainMatchingDifferentSites) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); // Example password form. PasswordForm form; form.origin = GURL("https://foo.com/"); form.action = GURL("https://foo.com/login"); form.username_element = ASCIIToUTF16("username"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_element = ASCIIToUTF16("password"); form.password_value = ASCIIToUTF16("test"); form.submit_element = ASCIIToUTF16(""); form.signon_realm = "https://foo.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); result.clear(); // Match against an exact copy. EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // We go to the mobile site. PasswordForm form2(form); form2.origin = GURL("https://mobile.foo.com/"); form2.action = GURL("https://mobile.foo.com/login"); form2.signon_realm = "https://mobile.foo.com/"; // Match against the mobile site. EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); EXPECT_EQ("https://foo.com/", result[0]->signon_realm); EXPECT_TRUE(result[0]->is_public_suffix_match); result.clear(); // Add baz.com desktop site. form.origin = GURL("https://baz.com/login/"); form.action = GURL("https://baz.com/login/"); form.username_element = ASCIIToUTF16("email"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_element = ASCIIToUTF16("password"); form.password_value = ASCIIToUTF16("test"); form.submit_element = ASCIIToUTF16(""); form.signon_realm = "https://baz.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // Add it and make sure it is there. 
EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); result.clear(); // We go to the mobile site of baz.com. PasswordForm form3(form); form3.origin = GURL("https://m.baz.com/login/"); form3.action = GURL("https://m.baz.com/login/"); form3.signon_realm = "https://m.baz.com/"; // Match against the mobile site of baz.com. EXPECT_TRUE(db().GetLogins(form3, &result)); EXPECT_EQ(1U, result.size()); EXPECT_EQ("https://baz.com/", result[0]->signon_realm); EXPECT_TRUE(result[0]->is_public_suffix_match); result.clear(); } PasswordForm GetFormWithNewSignonRealm(PasswordForm form, std::string signon_realm) { PasswordForm form2(form); form2.origin = GURL(signon_realm); form2.action = GURL(signon_realm); form2.signon_realm = signon_realm; return form2; } TEST_F(LoginDatabaseTest, TestPublicSuffixDomainMatchingRegexp) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); // Example password form. PasswordForm form; form.origin = GURL("http://foo.com/"); form.action = GURL("http://foo.com/login"); form.username_element = ASCIIToUTF16("username"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_element = ASCIIToUTF16("password"); form.password_value = ASCIIToUTF16("test"); form.submit_element = ASCIIToUTF16(""); form.signon_realm = "http://foo.com/"; form.ssl_valid = false; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); result.clear(); // Example password form that has - in the domain name. PasswordForm form_dash = GetFormWithNewSignonRealm(form, "http://www.foo-bar.com/"); // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form_dash), db().AddLogin(form_dash)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); result.clear(); // Match against an exact copy. EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // www.foo.com should match. PasswordForm form2 = GetFormWithNewSignonRealm(form, "http://www.foo.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // a.b.foo.com should match. form2 = GetFormWithNewSignonRealm(form, "http://a.b.foo.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // a-b.foo.com should match. form2 = GetFormWithNewSignonRealm(form, "http://a-b.foo.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // foo-bar.com should match. form2 = GetFormWithNewSignonRealm(form, "http://foo-bar.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // www.foo-bar.com should match. form2 = GetFormWithNewSignonRealm(form, "http://www.foo-bar.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // a.b.foo-bar.com should match. form2 = GetFormWithNewSignonRealm(form, "http://a.b.foo-bar.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // a-b.foo-bar.com should match. 
form2 = GetFormWithNewSignonRealm(form, "http://a-b.foo-bar.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // foo.com with port 1337 should not match. form2 = GetFormWithNewSignonRealm(form, "http://foo.com:1337/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); // http://foo.com should not match since the scheme is wrong. form2 = GetFormWithNewSignonRealm(form, "https://foo.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); // notfoo.com should not match. form2 = GetFormWithNewSignonRealm(form, "http://notfoo.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); // baz.com should not match. form2 = GetFormWithNewSignonRealm(form, "http://baz.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); // foo-baz.com should not match. form2 = GetFormWithNewSignonRealm(form, "http://foo-baz.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); } static bool AddTimestampedLogin(LoginDatabase* db, std::string url, const std::string& unique_string, const base::Time& time, bool date_is_creation) { // Example password form. PasswordForm form; form.origin = GURL(url + std::string("/LoginAuth")); form.username_element = ASCIIToUTF16(unique_string); form.username_value = ASCIIToUTF16(unique_string); form.password_element = ASCIIToUTF16(unique_string); form.submit_element = ASCIIToUTF16("signIn"); form.signon_realm = url; form.display_name = ASCIIToUTF16(unique_string); form.icon_url = GURL("https://accounts.google.com/Icon"); form.federation_origin = url::Origin(GURL("https://accounts.google.com/")); form.skip_zero_click = true; if (date_is_creation) form.date_created = time; else form.date_synced = time; return db->AddLogin(form) == AddChangeForForm(form); } TEST_F(LoginDatabaseTest, ClearPrivateData_SavedPasswords) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); base::Time now = base::Time::Now(); base::TimeDelta one_day = base::TimeDelta::FromDays(1); // Create one with a 0 time. EXPECT_TRUE( AddTimestampedLogin(&db(), "http://1.com", "foo1", base::Time(), true)); // Create one for now and +/- 1 day. EXPECT_TRUE( AddTimestampedLogin(&db(), "http://2.com", "foo2", now - one_day, true)); EXPECT_TRUE(AddTimestampedLogin(&db(), "http://3.com", "foo3", now, true)); EXPECT_TRUE( AddTimestampedLogin(&db(), "http://4.com", "foo4", now + one_day, true)); // Verify inserts worked. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(4U, result.size()); result.clear(); // Get everything from today's date and on. EXPECT_TRUE(db().GetLoginsCreatedBetween(now, base::Time(), &result)); EXPECT_EQ(2U, result.size()); result.clear(); // Delete everything from today's date and on. db().RemoveLoginsCreatedBetween(now, base::Time()); // Should have deleted half of what we inserted. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); result.clear(); // Delete with 0 date (should delete all). db().RemoveLoginsCreatedBetween(base::Time(), base::Time()); // Verify nothing is left. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); } TEST_F(LoginDatabaseTest, RemoveLoginsSyncedBetween) { ScopedVector<autofill::PasswordForm> result; base::Time now = base::Time::Now(); base::TimeDelta one_day = base::TimeDelta::FromDays(1); // Create one with a 0 time. 
EXPECT_TRUE( AddTimestampedLogin(&db(), "http://1.com", "foo1", base::Time(), false)); // Create one for now and +/- 1 day. EXPECT_TRUE( AddTimestampedLogin(&db(), "http://2.com", "foo2", now - one_day, false)); EXPECT_TRUE(AddTimestampedLogin(&db(), "http://3.com", "foo3", now, false)); EXPECT_TRUE( AddTimestampedLogin(&db(), "http://4.com", "foo4", now + one_day, false)); // Verify inserts worked. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(4U, result.size()); result.clear(); // Get everything from today's date and on. EXPECT_TRUE(db().GetLoginsSyncedBetween(now, base::Time(), &result)); ASSERT_EQ(2U, result.size()); EXPECT_EQ("http://3.com", result[0]->signon_realm); EXPECT_EQ("http://4.com", result[1]->signon_realm); result.clear(); // Delete everything from today's date and on. db().RemoveLoginsSyncedBetween(now, base::Time()); // Should have deleted half of what we inserted. EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(2U, result.size()); EXPECT_EQ("http://1.com", result[0]->signon_realm); EXPECT_EQ("http://2.com", result[1]->signon_realm); result.clear(); // Delete with 0 date (should delete all). db().RemoveLoginsSyncedBetween(base::Time(), now); // Verify nothing is left. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); } TEST_F(LoginDatabaseTest, GetAutoSignInLogins) { ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo1")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo2")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo3")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo4")); EXPECT_TRUE(db().GetAutoSignInLogins(&result)); EXPECT_EQ(4U, result.size()); for (const auto& form : result) EXPECT_FALSE(form->skip_zero_click); EXPECT_TRUE(db().DisableAutoSignInForAllLogins()); EXPECT_TRUE(db().GetAutoSignInLogins(&result)); EXPECT_EQ(0U, result.size()); } TEST_F(LoginDatabaseTest, DisableAutoSignInForAllLogins) { ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo1")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo2")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo3")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo4")); EXPECT_TRUE(db().GetAutofillableLogins(&result)); for (const auto& form : result) EXPECT_FALSE(form->skip_zero_click); EXPECT_TRUE(db().DisableAutoSignInForAllLogins()); EXPECT_TRUE(db().GetAutofillableLogins(&result)); for (const auto& form : result) EXPECT_TRUE(form->skip_zero_click); } TEST_F(LoginDatabaseTest, BlacklistedLogins) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetBlacklistLogins(&result)); ASSERT_EQ(0U, result.size()); // Save a form as blacklisted. PasswordForm form; form.origin = GURL("http://accounts.google.com/LoginAuth"); form.action = GURL("http://accounts.google.com/Login"); form.username_element = ASCIIToUTF16("Email"); form.password_element = ASCIIToUTF16("Passwd"); form.submit_element = ASCIIToUTF16("signIn"); form.signon_realm = "http://www.google.com/"; form.ssl_valid = false; form.preferred = true; form.blacklisted_by_user = true; form.scheme = PasswordForm::SCHEME_HTML; form.date_synced = base::Time::Now(); form.display_name = ASCIIToUTF16("Mr. Smith"); form.icon_url = GURL("https://accounts.google.com/Icon"); form.federation_origin = url::Origin(GURL("https://accounts.google.com/")); form.skip_zero_click = true; EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); // Get all non-blacklisted logins (should be none). 
EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(0U, result.size()); // GetLogins should give the blacklisted result. EXPECT_TRUE(db().GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); result.clear(); // So should GetAllBlacklistedLogins. EXPECT_TRUE(db().GetBlacklistLogins(&result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); result.clear(); } TEST_F(LoginDatabaseTest, VectorSerialization) { // Empty vector. std::vector<base::string16> vec; base::Pickle temp = SerializeVector(vec); std::vector<base::string16> output = DeserializeVector(temp); EXPECT_THAT(output, Eq(vec)); // Normal data. vec.push_back(ASCIIToUTF16("first")); vec.push_back(ASCIIToUTF16("second")); vec.push_back(ASCIIToUTF16("third")); temp = SerializeVector(vec); output = DeserializeVector(temp); EXPECT_THAT(output, Eq(vec)); } TEST_F(LoginDatabaseTest, UpdateIncompleteCredentials) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(0U, result.size()); // Save an incomplete form. Note that it only has a few fields set, ex. it's // missing 'action', 'username_element' and 'password_element'. Such forms // are sometimes inserted during import from other browsers (which may not // store this info). PasswordForm incomplete_form; incomplete_form.origin = GURL("http://accounts.google.com/LoginAuth"); incomplete_form.signon_realm = "http://accounts.google.com/"; incomplete_form.username_value = ASCIIToUTF16("my_username"); incomplete_form.password_value = ASCIIToUTF16("my_password"); incomplete_form.ssl_valid = false; incomplete_form.preferred = true; incomplete_form.blacklisted_by_user = false; incomplete_form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(AddChangeForForm(incomplete_form), db().AddLogin(incomplete_form)); // A form on some website. It should trigger a match with the stored one. PasswordForm encountered_form; encountered_form.origin = GURL("http://accounts.google.com/LoginAuth"); encountered_form.signon_realm = "http://accounts.google.com/"; encountered_form.action = GURL("http://accounts.google.com/Login"); encountered_form.username_element = ASCIIToUTF16("Email"); encountered_form.password_element = ASCIIToUTF16("Passwd"); encountered_form.submit_element = ASCIIToUTF16("signIn"); // Get matches for encountered_form. EXPECT_TRUE(db().GetLogins(encountered_form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(incomplete_form.origin, result[0]->origin); EXPECT_EQ(incomplete_form.signon_realm, result[0]->signon_realm); EXPECT_EQ(incomplete_form.username_value, result[0]->username_value); EXPECT_EQ(incomplete_form.password_value, result[0]->password_value); EXPECT_TRUE(result[0]->preferred); EXPECT_FALSE(result[0]->ssl_valid); // We should return empty 'action', 'username_element', 'password_element' // and 'submit_element' as we can't be sure if the credentials were entered // in this particular form on the page. EXPECT_EQ(GURL(), result[0]->action); EXPECT_TRUE(result[0]->username_element.empty()); EXPECT_TRUE(result[0]->password_element.empty()); EXPECT_TRUE(result[0]->submit_element.empty()); result.clear(); // Let's say this login form worked. Now update the stored credentials with // 'action', 'username_element', 'password_element' and 'submit_element' from // the encountered form. 
PasswordForm completed_form(incomplete_form); completed_form.action = encountered_form.action; completed_form.username_element = encountered_form.username_element; completed_form.password_element = encountered_form.password_element; completed_form.submit_element = encountered_form.submit_element; EXPECT_EQ(AddChangeForForm(completed_form), db().AddLogin(completed_form)); EXPECT_TRUE(db().RemoveLogin(incomplete_form)); // Get matches for encountered_form again. EXPECT_TRUE(db().GetLogins(encountered_form, &result)); ASSERT_EQ(1U, result.size()); // This time we should have all the info available. PasswordForm expected_form(completed_form); EXPECT_EQ(expected_form, *result[0]); result.clear(); } TEST_F(LoginDatabaseTest, UpdateOverlappingCredentials) { // Save an incomplete form. Note that it only has a few fields set, ex. it's // missing 'action', 'username_element' and 'password_element'. Such forms // are sometimes inserted during import from other browsers (which may not // store this info). PasswordForm incomplete_form; incomplete_form.origin = GURL("http://accounts.google.com/LoginAuth"); incomplete_form.signon_realm = "http://accounts.google.com/"; incomplete_form.username_value = ASCIIToUTF16("my_username"); incomplete_form.password_value = ASCIIToUTF16("my_password"); incomplete_form.ssl_valid = false; incomplete_form.preferred = true; incomplete_form.blacklisted_by_user = false; incomplete_form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(AddChangeForForm(incomplete_form), db().AddLogin(incomplete_form)); // Save a complete version of the previous form. Both forms could exist if // the user created the complete version before importing the incomplete // version from a different browser. PasswordForm complete_form = incomplete_form; complete_form.action = GURL("http://accounts.google.com/Login"); complete_form.username_element = ASCIIToUTF16("username_element"); complete_form.password_element = ASCIIToUTF16("password_element"); complete_form.submit_element = ASCIIToUTF16("submit"); // An update fails because the primary key for |complete_form| is different. EXPECT_EQ(PasswordStoreChangeList(), db().UpdateLogin(complete_form)); EXPECT_EQ(AddChangeForForm(complete_form), db().AddLogin(complete_form)); // Make sure both passwords exist. ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(2U, result.size()); result.clear(); // Simulate the user changing their password. complete_form.password_value = ASCIIToUTF16("new_password"); complete_form.date_synced = base::Time::Now(); EXPECT_EQ(UpdateChangeForForm(complete_form), db().UpdateLogin(complete_form)); // Both still exist now. EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(2U, result.size()); if (result[0]->username_element.empty()) std::swap(result[0], result[1]); EXPECT_EQ(complete_form, *result[0]); EXPECT_EQ(incomplete_form, *result[1]); } TEST_F(LoginDatabaseTest, DoubleAdd) { PasswordForm form; form.origin = GURL("http://accounts.google.com/LoginAuth"); form.signon_realm = "http://accounts.google.com/"; form.username_value = ASCIIToUTF16("my_username"); form.password_value = ASCIIToUTF16("my_password"); form.ssl_valid = false; form.preferred = true; form.blacklisted_by_user = false; form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); // Add almost the same form again. 
form.times_used++; PasswordStoreChangeList list; list.push_back(PasswordStoreChange(PasswordStoreChange::REMOVE, form)); list.push_back(PasswordStoreChange(PasswordStoreChange::ADD, form)); EXPECT_EQ(list, db().AddLogin(form)); } TEST_F(LoginDatabaseTest, AddWrongForm) { PasswordForm form; // |origin| shouldn't be empty. form.origin = GURL(); form.signon_realm = "http://accounts.google.com/"; form.username_value = ASCIIToUTF16("my_username"); form.password_value = ASCIIToUTF16("my_password"); form.ssl_valid = false; form.preferred = true; form.blacklisted_by_user = false; form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(PasswordStoreChangeList(), db().AddLogin(form)); // |signon_realm| shouldn't be empty. form.origin = GURL("http://accounts.google.com/LoginAuth"); form.signon_realm.clear(); EXPECT_EQ(PasswordStoreChangeList(), db().AddLogin(form)); } TEST_F(LoginDatabaseTest, UpdateLogin) { PasswordForm form; form.origin = GURL("http://accounts.google.com/LoginAuth"); form.signon_realm = "http://accounts.google.com/"; form.username_value = ASCIIToUTF16("my_username"); form.password_value = ASCIIToUTF16("my_password"); form.ssl_valid = false; form.preferred = true; form.blacklisted_by_user = false; form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); form.action = GURL("http://accounts.google.com/login"); form.password_value = ASCIIToUTF16("my_new_password"); form.ssl_valid = true; form.preferred = false; form.other_possible_usernames.push_back(ASCIIToUTF16("my_new_username")); form.times_used = 20; form.submit_element = ASCIIToUTF16("submit_element"); form.date_synced = base::Time::Now(); form.date_created = base::Time::Now() - base::TimeDelta::FromDays(1); form.blacklisted_by_user = true; form.scheme = PasswordForm::SCHEME_BASIC; form.type = PasswordForm::TYPE_GENERATED; form.display_name = ASCIIToUTF16("Mr. Smith"); form.icon_url = GURL("https://accounts.google.com/Icon"); form.federation_origin = url::Origin(GURL("https://accounts.google.com/")); form.skip_zero_click = true; EXPECT_EQ(UpdateChangeForForm(form), db().UpdateLogin(form)); ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(db().GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); } TEST_F(LoginDatabaseTest, RemoveWrongForm) { PasswordForm form; // |origin| shouldn't be empty. form.origin = GURL("http://accounts.google.com/LoginAuth"); form.signon_realm = "http://accounts.google.com/"; form.username_value = ASCIIToUTF16("my_username"); form.password_value = ASCIIToUTF16("my_password"); form.ssl_valid = false; form.preferred = true; form.blacklisted_by_user = false; form.scheme = PasswordForm::SCHEME_HTML; // The form isn't in the database. 
EXPECT_FALSE(db().RemoveLogin(form)); EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().RemoveLogin(form)); EXPECT_FALSE(db().RemoveLogin(form)); } TEST_F(LoginDatabaseTest, ReportMetricsTest) { PasswordForm password_form; password_form.origin = GURL("http://example.com"); password_form.username_value = ASCIIToUTF16("test1@gmail.com"); password_form.password_value = ASCIIToUTF16("test"); password_form.signon_realm = "http://example.com/"; password_form.times_used = 0; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.username_value = ASCIIToUTF16("test2@gmail.com"); password_form.times_used = 1; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("http://second.example.com"); password_form.signon_realm = "http://second.example.com"; password_form.times_used = 3; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.username_value = ASCIIToUTF16("test3@gmail.com"); password_form.type = PasswordForm::TYPE_GENERATED; password_form.times_used = 2; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("ftp://third.example.com/"); password_form.signon_realm = "ftp://third.example.com/"; password_form.times_used = 4; password_form.scheme = PasswordForm::SCHEME_OTHER; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("http://fourth.example.com/"); password_form.signon_realm = "http://fourth.example.com/"; password_form.type = PasswordForm::TYPE_MANUAL; password_form.username_value = ASCIIToUTF16(""); password_form.times_used = 10; password_form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("https://fifth.example.com/"); password_form.signon_realm = "https://fifth.example.com/"; password_form.password_value = ASCIIToUTF16(""); password_form.blacklisted_by_user = true; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("https://sixth.example.com/"); password_form.signon_realm = "https://sixth.example.com/"; password_form.username_value = ASCIIToUTF16(""); password_form.password_value = ASCIIToUTF16("my_password"); password_form.blacklisted_by_user = false; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.username_element = ASCIIToUTF16("some_other_input"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.username_value = ASCIIToUTF16("my_username"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL(); password_form.signon_realm = "android://hash@com.example.android/"; password_form.username_value = ASCIIToUTF16("JohnDoe"); password_form.password_value = ASCIIToUTF16("my_password"); password_form.blacklisted_by_user = false; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.username_value = ASCIIToUTF16("JaneDoe"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); base::HistogramTester histogram_tester; db().ReportMetrics("", false); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccounts.UserCreated.WithoutCustomPassphrase", 9, 1); histogram_tester.ExpectBucketCount( "PasswordManager.AccountsPerSite.UserCreated.WithoutCustomPassphrase", 1, 2); histogram_tester.ExpectBucketCount( 
"PasswordManager.AccountsPerSite.UserCreated.WithoutCustomPassphrase", 2, 3); histogram_tester.ExpectBucketCount( "PasswordManager.TimesPasswordUsed.UserCreated.WithoutCustomPassphrase", 0, 1); histogram_tester.ExpectBucketCount( "PasswordManager.TimesPasswordUsed.UserCreated.WithoutCustomPassphrase", 1, 1); histogram_tester.ExpectBucketCount( "PasswordManager.TimesPasswordUsed.UserCreated.WithoutCustomPassphrase", 3, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccounts.AutoGenerated.WithoutCustomPassphrase", 2, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccountsHiRes.WithScheme.Android", 2, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccountsHiRes.WithScheme.Ftp", 1, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccountsHiRes.WithScheme.Http", 5, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccountsHiRes.WithScheme.Https", 3, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccountsHiRes.WithScheme.Other", 0, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.AccountsPerSite.AutoGenerated.WithoutCustomPassphrase", 1, 2); histogram_tester.ExpectBucketCount( "PasswordManager.TimesPasswordUsed.AutoGenerated.WithoutCustomPassphrase", 2, 1); histogram_tester.ExpectBucketCount( "PasswordManager.TimesPasswordUsed.AutoGenerated.WithoutCustomPassphrase", 4, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.EmptyUsernames.CountInDatabase", 3, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.EmptyUsernames.WithoutCorrespondingNonempty", 1, 1); } TEST_F(LoginDatabaseTest, PasswordReuseMetrics) { // -- Group of accounts that are reusing password #1. // // Destination account // +-----------------+-------+-------+-------+-------+-------+-------+-------+ // | | 1 | 2 | 3 | 4 | 5 | 6 | 7 | // +-----------------+-------+-------+-------+-------+-------+-------+-------+ // | Scheme? | HTTP | HTTP | HTTP | HTTP | HTTPS | HTTPS | HTTPS | // +-----------------+-------+-------+-------+-------+-------+-------+-------+ // | | 1 | - | Same | PSL | Diff. | Same | Diff. | Diff. | // | | 2 | Same | - | PSL | Diff. | Same | Diff. | Diff. | // | Relation | 3 | PSL | PSL | - | Diff. | Diff. | Same | Diff. | // | to host | 4 | Diff. | Diff. | Diff. | - | Diff. | Diff. | Same | // | of source | 5 | Same | Same | Diff. | Diff. | - | PSL | Diff. | // | account: | 6 | Diff. | Diff. | Same | Diff. | PSL | - | Diff. | // | | 7 | Diff. | Diff. | Diff. | Same | Diff. | Diff. | - | // +-----------------+-------+-------+-------+-------+-------+-------+-------+ PasswordForm password_form; password_form.signon_realm = "http://example.com/"; password_form.origin = GURL("http://example.com/"); password_form.username_value = ASCIIToUTF16("username_1"); password_form.password_value = ASCIIToUTF16("password_1"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("http://example.com/"); password_form.username_value = ASCIIToUTF16("username_2"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); // Note: This PSL matches http://example.com, but not https://example.com. 
password_form.signon_realm = "http://www.example.com/"; password_form.origin = GURL("http://www.example.com/"); password_form.username_value = ASCIIToUTF16("username_3"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.signon_realm = "http://not-example.com/"; password_form.origin = GURL("http://not-example.com/"); password_form.username_value = ASCIIToUTF16("username_4"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.signon_realm = "https://example.com/"; password_form.origin = GURL("https://example.com/"); password_form.username_value = ASCIIToUTF16("username_5"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); // Note: This PSL matches https://example.com, but not http://example.com. password_form.signon_realm = "https://www.example.com/"; password_form.origin = GURL("https://www.example.com/"); password_form.username_value = ASCIIToUTF16("username_6"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.signon_realm = "https://not-example.com/"; password_form.origin = GURL("https://not-example.com/"); password_form.username_value = ASCIIToUTF16("username_7"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); // -- Group of accounts that are reusing password #2. // Both HTTP, different host. password_form.signon_realm = "http://example.com/"; password_form.origin = GURL("http://example.com/"); password_form.username_value = ASCIIToUTF16("username_8"); password_form.password_value = ASCIIToUTF16("password_2"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.signon_realm = "http://not-example.com/"; password_form.origin = GURL("http://not-example.com/"); password_form.username_value = ASCIIToUTF16("username_9"); password_form.password_value = ASCIIToUTF16("password_2"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); // -- Group of accounts that are reusing password #3. // HTTP sites identified by different IP addresses, so they should not be // considered a public suffix match. password_form.signon_realm = "http://1.2.3.4/"; password_form.origin = GURL("http://1.2.3.4/"); password_form.username_value = ASCIIToUTF16("username_10"); password_form.password_value = ASCIIToUTF16("password_3"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.signon_realm = "http://2.2.3.4/"; password_form.origin = GURL("http://2.2.3.4/"); password_form.username_value = ASCIIToUTF16("username_11"); password_form.password_value = ASCIIToUTF16("password_3"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); // -- Not HTML form based logins or blacklisted logins. Should be ignored. 
PasswordForm ignored_form; ignored_form.scheme = PasswordForm::SCHEME_HTML; ignored_form.signon_realm = "http://example.org/"; ignored_form.origin = GURL("http://example.org/blacklist"); ignored_form.blacklisted_by_user = true; ignored_form.username_value = ASCIIToUTF16("username_x"); ignored_form.password_value = ASCIIToUTF16("password_y"); EXPECT_EQ(AddChangeForForm(ignored_form), db().AddLogin(ignored_form)); ignored_form.scheme = PasswordForm::SCHEME_BASIC; ignored_form.signon_realm = "http://example.org/HTTP Auth Realm"; ignored_form.origin = GURL("http://example.org/"); ignored_form.blacklisted_by_user = false; EXPECT_EQ(AddChangeForForm(ignored_form), db().AddLogin(ignored_form)); ignored_form.scheme = PasswordForm::SCHEME_HTML; ignored_form.signon_realm = "android://hash@com.example/"; ignored_form.origin = GURL(); ignored_form.blacklisted_by_user = false; EXPECT_EQ(AddChangeForForm(ignored_form), db().AddLogin(ignored_form)); ignored_form.scheme = PasswordForm::SCHEME_HTML; ignored_form.signon_realm = "federation://example.com/federation.com"; ignored_form.origin = GURL("https://example.com/"); ignored_form.blacklisted_by_user = false; EXPECT_EQ(AddChangeForForm(ignored_form), db().AddLogin(ignored_form)); base::HistogramTester histogram_tester; db().ReportMetrics("", false); const std::string kPrefix("PasswordManager.AccountsReusingPassword."); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnHttpRealmWithSameHost"), testing::ElementsAre(base::Bucket(0, 6), base::Bucket(1, 2))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnHttpsRealmWithSameHost"), testing::ElementsAre(base::Bucket(0, 4), base::Bucket(1, 4))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnPSLMatchingRealm"), testing::ElementsAre(base::Bucket(0, 5), base::Bucket(1, 2), base::Bucket(2, 1))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnHttpsRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(0, 4), base::Bucket(2, 4))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnHttpRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(1, 7), base::Bucket(3, 1))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnAnyRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(1, 4), base::Bucket(3, 3), base::Bucket(5, 1))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnHttpRealmWithSameHost"), testing::ElementsAre(base::Bucket(1, 2), base::Bucket(2, 1))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnHttpsRealmWithSameHost"), testing::ElementsAre(base::Bucket(0, 3))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnPSLMatchingRealm"), testing::ElementsAre(base::Bucket(0, 1), base::Bucket(1, 2))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnHttpRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(2, 1), base::Bucket(3, 2))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnHttpsRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(1, 2), base::Bucket(2, 1))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnAnyRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(3, 1), base::Bucket(4, 1), base::Bucket(5, 1))); } TEST_F(LoginDatabaseTest, ClearPasswordValues) { db().set_clear_password_values(true); // Add a PasswordForm, the password should be cleared. 
base::HistogramTester histogram_tester; PasswordForm form; form.origin = GURL("http://accounts.google.com/LoginAuth"); form.signon_realm = "http://accounts.google.com/"; form.username_value = ASCIIToUTF16("my_username"); form.password_value = ASCIIToUTF16("12345"); EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(db().GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); PasswordForm expected_form = form; expected_form.password_value.clear(); EXPECT_EQ(expected_form, *result[0]); // Update the password, it should stay empty. form.password_value = ASCIIToUTF16("password"); EXPECT_EQ(UpdateChangeForForm(form), db().UpdateLogin(form)); EXPECT_TRUE(db().GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(expected_form, *result[0]); // Encrypting/decrypting shouldn't happen. Thus there should be no keychain // access on Mac. histogram_tester.ExpectTotalCount("OSX.Keychain.Access", 0); } #if defined(OS_POSIX) // Only the current user has permission to read the database. // // Only POSIX because GetPosixFilePermissions() only exists on POSIX. // This tests that sql::Connection::set_restrict_to_user() was called, // and that function is a noop on non-POSIX platforms in any case. TEST_F(LoginDatabaseTest, FilePermissions) { int mode = base::FILE_PERMISSION_MASK; EXPECT_TRUE(base::GetPosixFilePermissions(file_, &mode)); EXPECT_EQ((mode & base::FILE_PERMISSION_USER_MASK), mode); } #endif // defined(OS_POSIX) // Test the migration from GetParam() version to kCurrentVersionNumber. class LoginDatabaseMigrationTest : public testing::TestWithParam<int> { protected: void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); database_dump_location_ = database_dump_location_.AppendASCII("components") .AppendASCII("test") .AppendASCII("data") .AppendASCII("password_manager"); database_path_ = temp_dir_.path().AppendASCII("test.db"); #if defined(OS_MACOSX) OSCrypt::UseMockKeychain(true); #endif // defined(OS_MACOSX) } // Creates the databse from |sql_file|. void CreateDatabase(base::StringPiece sql_file) { base::FilePath database_dump; ASSERT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &database_dump)); database_dump = database_dump.Append(database_dump_location_).AppendASCII(sql_file); ASSERT_TRUE( sql::test::CreateDatabaseFromSQL(database_path_, database_dump)); } void DestroyDatabase() { if (!database_path_.empty()) sql::Connection::Delete(database_path_); } // Returns an empty vector on failure. Otherwise returns values in the column // |column_name| of the logins table. The order of the // returned rows is well-defined. template <class T> std::vector<T> GetValues(const std::string& column_name) { sql::Connection db; std::vector<T> results; if (!db.Open(database_path_)) return results; std::string statement = base::StringPrintf( "SELECT %s FROM logins ORDER BY username_value, %s DESC", column_name.c_str(), column_name.c_str()); sql::Statement s(db.GetCachedStatement(SQL_FROM_HERE, statement.c_str())); if (!s.is_valid()) { db.Close(); return results; } while (s.Step()) results.push_back(GetFirstColumn<T>(s)); s.Clear(); db.Close(); return results; } // Returns the database version for the test. int version() const { return GetParam(); } // Actual test body. 
void MigrationToVCurrent(base::StringPiece sql_file); base::FilePath database_path_; private: base::FilePath database_dump_location_; base::ScopedTempDir temp_dir_; }; void LoginDatabaseMigrationTest::MigrationToVCurrent( base::StringPiece sql_file) { SCOPED_TRACE(testing::Message("Version file = ") << sql_file); CreateDatabase(sql_file); // Original date, in seconds since UTC epoch. std::vector<int64_t> date_created(GetValues<int64_t>("date_created")); ASSERT_EQ(2U, date_created.size()); // Migration to version 8 performs changes dates to the new format. // So for versions less of equal to 8 create date should be in old // format before migration and in new format after. if (version() <= 8) { ASSERT_EQ(1402955745, date_created[0]); ASSERT_EQ(1402950000, date_created[1]); } else { ASSERT_EQ(13047429345000000, date_created[0]); ASSERT_EQ(13047423600000000, date_created[1]); } { // Assert that the database was successfully opened and updated // to current version. LoginDatabase db(database_path_); ASSERT_TRUE(db.Init()); // Verifies that the final version can save all the appropriate fields. PasswordForm form; GenerateExamplePasswordForm(&form); // Add the same form twice to test the constraints in the database. EXPECT_EQ(AddChangeForForm(form), db.AddLogin(form)); PasswordStoreChangeList list; list.push_back(PasswordStoreChange(PasswordStoreChange::REMOVE, form)); list.push_back(PasswordStoreChange(PasswordStoreChange::ADD, form)); EXPECT_EQ(list, db.AddLogin(form)); ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(db.GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); EXPECT_TRUE(db.RemoveLogin(form)); } // New date, in microseconds since platform independent epoch. std::vector<int64_t> new_date_created(GetValues<int64_t>("date_created")); if (version() <= 8) { ASSERT_EQ(2U, new_date_created.size()); // Check that the two dates match up. for (size_t i = 0; i < date_created.size(); ++i) { EXPECT_EQ(base::Time::FromInternalValue(new_date_created[i]), base::Time::FromTimeT(date_created[i])); } } else if (version() == 10) { // The test data is setup on this version to cause a unique key collision. EXPECT_EQ(1U, new_date_created.size()); } else { ASSERT_EQ(2U, new_date_created.size()); ASSERT_EQ(13047429345000000, new_date_created[0]); ASSERT_EQ(13047423600000000, new_date_created[1]); } if (version() >= 7 && version() <= 13) { // The "avatar_url" column first appeared in version 7. In version 14, // it was renamed to "icon_url". Migration from a version <= 13 // to >= 14 should not break theses URLs. std::vector<std::string> urls(GetValues<std::string>("icon_url")); if (version() == 10) { // The testcase for version 10 tests duplicate entries, so we only expect // one URL. EXPECT_THAT(urls, testing::ElementsAre("https://www.google.com/icon")); } else { // Otherwise, we expect one empty and one valid URL. EXPECT_THAT( urls, testing::ElementsAre("", "https://www.google.com/icon")); } } { // On versions < 15 |kCompatibleVersionNumber| was set to 1, but // the migration should bring it to the correct value. sql::Connection db; sql::MetaTable meta_table; ASSERT_TRUE(db.Open(database_path_)); ASSERT_TRUE( meta_table.Init(&db, kCurrentVersionNumber, kCompatibleVersionNumber)); EXPECT_EQ(password_manager::kCompatibleVersionNumber, meta_table.GetCompatibleVersionNumber()); } DestroyDatabase(); } // Tests the migration of the login database from version() to // kCurrentVersionNumber. 
TEST_P(LoginDatabaseMigrationTest, MigrationToVCurrent) {
  MigrationToVCurrent(base::StringPrintf("login_db_v%d.sql", version()));
}

class LoginDatabaseMigrationTestV9 : public LoginDatabaseMigrationTest {
};

// Tests migration from the alternative version #9, see crbug.com/423716.
TEST_P(LoginDatabaseMigrationTestV9, V9WithoutUseAdditionalAuthField) {
  ASSERT_EQ(9, version());
  MigrationToVCurrent("login_db_v9_without_use_additional_auth_field.sql");
}

class LoginDatabaseMigrationTestBroken : public LoginDatabaseMigrationTest {};

// Test migrating certain databases with incorrect version.
// http://crbug.com/295851
TEST_P(LoginDatabaseMigrationTestBroken, Broken) {
  MigrationToVCurrent(base::StringPrintf("login_db_v%d_broken.sql", version()));
}

INSTANTIATE_TEST_CASE_P(MigrationToVCurrent,
                        LoginDatabaseMigrationTest,
                        testing::Range(1, kCurrentVersionNumber + 1));
INSTANTIATE_TEST_CASE_P(MigrationToVCurrent,
                        LoginDatabaseMigrationTestV9,
                        testing::Values(9));
INSTANTIATE_TEST_CASE_P(MigrationToVCurrent,
                        LoginDatabaseMigrationTestBroken,
                        testing::Range(1, 4));

}  // namespace password_manager
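A note on the date fixtures used in MigrationToVCurrent above: the pairs 1402955745 / 13047429345000000 and 1402950000 / 13047423600000000 encode the same instants in two formats. time_t counts seconds since 1970-01-01, while base::Time's internal value counts microseconds since 1601-01-01, and the two epochs differ by 11644473600 seconds. A standalone check of that relationship (illustrative only, not part of the test suite):

#include <cassert>
#include <cstdint>

int main() {
  // Seconds between the 1601-01-01 epoch (base::Time internal value, in
  // microseconds) and the 1970-01-01 time_t epoch.
  const int64_t kEpochDeltaSeconds = 11644473600LL;
  const int64_t old_format_seconds = 1402955745;          // time_t, seconds
  const int64_t new_format_micros = 13047429345000000LL;  // base::Time
  assert(new_format_micros / 1000000 - kEpochDeltaSeconds ==
         old_format_seconds);
  return 0;
}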
--- tess-two/jni/com_googlecode_tesseract_android/src/ccstruct/statistc.cpp (repo: leejoo71/tess-two, license: Apache-2.0) ---
/********************************************************************** * File: statistc.c (Formerly stats.c) * Description: Simple statistical package for integer values. * Author: Ray Smith * Created: Mon Feb 04 16:56:05 GMT 1991 * * (C) Copyright 1991, Hewlett-Packard Ltd. ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. ** You may obtain a copy of the License at ** http://www.apache.org/licenses/LICENSE-2.0 ** Unless required by applicable law or agreed to in writing, software ** distributed under the License is distributed on an "AS IS" BASIS, ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ** See the License for the specific language governing permissions and ** limitations under the License. * **********************************************************************/ // Include automatically generated configuration file if running autoconf. #ifdef HAVE_CONFIG_H #include "config_auto.h" #endif #include "statistc.h" #include <string.h> #include <math.h> #include <stdlib.h> #include "helpers.h" #include "scrollview.h" #include "tprintf.h" using tesseract::KDPairInc; /********************************************************************** * STATS::STATS * * Construct a new stats element by allocating and zeroing the memory. **********************************************************************/ STATS::STATS(inT32 min_bucket_value, inT32 max_bucket_value_plus_1) { if (max_bucket_value_plus_1 <= min_bucket_value) { min_bucket_value = 0; max_bucket_value_plus_1 = 1; } rangemin_ = min_bucket_value; // setup rangemax_ = max_bucket_value_plus_1; buckets_ = new inT32[rangemax_ - rangemin_]; clear(); } STATS::STATS() { rangemax_ = 0; rangemin_ = 0; buckets_ = NULL; } /********************************************************************** * STATS::set_range * * Alter the range on an existing stats element. **********************************************************************/ bool STATS::set_range(inT32 min_bucket_value, inT32 max_bucket_value_plus_1) { if (max_bucket_value_plus_1 <= min_bucket_value) { return false; } if (rangemax_ - rangemin_ != max_bucket_value_plus_1 - min_bucket_value) { delete [] buckets_; buckets_ = new inT32[max_bucket_value_plus_1 - min_bucket_value]; } rangemin_ = min_bucket_value; // setup rangemax_ = max_bucket_value_plus_1; clear(); // zero it return true; } /********************************************************************** * STATS::clear * * Clear out the STATS class by zeroing all the buckets. **********************************************************************/ void STATS::clear() { // clear out buckets total_count_ = 0; if (buckets_ != NULL) memset(buckets_, 0, (rangemax_ - rangemin_) * sizeof(buckets_[0])); } /********************************************************************** * STATS::~STATS * * Destructor for a stats class. **********************************************************************/ STATS::~STATS () { if (buckets_ != NULL) { delete [] buckets_; buckets_ = NULL; } } /********************************************************************** * STATS::add * * Add a set of samples to (or delete from) a pile. 
**********************************************************************/ void STATS::add(inT32 value, inT32 count) { if (buckets_ == NULL) { return; } value = ClipToRange(value, rangemin_, rangemax_ - 1); buckets_[value - rangemin_] += count; total_count_ += count; // keep count of total } /********************************************************************** * STATS::mode * * Find the mode of a stats class. **********************************************************************/ inT32 STATS::mode() const { // get mode of samples if (buckets_ == NULL) { return rangemin_; } inT32 max = buckets_[0]; // max cell count inT32 maxindex = 0; // index of max for (int index = rangemax_ - rangemin_ - 1; index > 0; --index) { if (buckets_[index] > max) { max = buckets_[index]; // find biggest maxindex = index; } } return maxindex + rangemin_; // index of biggest } /********************************************************************** * STATS::mean * * Find the mean of a stats class. **********************************************************************/ double STATS::mean() const { //get mean of samples if (buckets_ == NULL || total_count_ <= 0) { return static_cast<double>(rangemin_); } inT64 sum = 0; for (int index = rangemax_ - rangemin_ - 1; index >= 0; --index) { sum += static_cast<inT64>(index) * buckets_[index]; } return static_cast<double>(sum) / total_count_ + rangemin_; } /********************************************************************** * STATS::sd * * Find the standard deviation of a stats class. **********************************************************************/ double STATS::sd() const { //standard deviation if (buckets_ == NULL || total_count_ <= 0) { return 0.0; } inT64 sum = 0; double sqsum = 0.0; for (int index = rangemax_ - rangemin_ - 1; index >= 0; --index) { sum += static_cast<inT64>(index) * buckets_[index]; sqsum += static_cast<double>(index) * index * buckets_[index]; } double variance = static_cast<double>(sum) / total_count_; variance = sqsum / total_count_ - variance * variance; if (variance > 0.0) return sqrt(variance); return 0.0; } /********************************************************************** * STATS::ile * * Returns the fractile value such that frac fraction (in [0,1]) of samples * has a value less than the return value. **********************************************************************/ double STATS::ile(double frac) const { if (buckets_ == NULL || total_count_ == 0) { return static_cast<double>(rangemin_); } #if 0 // TODO(rays) The existing code doesn't seem to be doing the right thing // with target a double but this substitute crashes the code that uses it. // Investigate and fix properly. 
int target = IntCastRounded(frac * total_count_); target = ClipToRange(target, 1, total_count_); #else double target = frac * total_count_; target = ClipToRange(target, 1.0, static_cast<double>(total_count_)); #endif int sum = 0; int index = 0; for (index = 0; index < rangemax_ - rangemin_ && sum < target; sum += buckets_[index++]); if (index > 0) { ASSERT_HOST(buckets_[index - 1] > 0); return rangemin_ + index - static_cast<double>(sum - target) / buckets_[index - 1]; } else { return static_cast<double>(rangemin_); } } /********************************************************************** * STATS::min_bucket * * Find REAL minimum bucket - ile(0.0) isn't necessarily correct **********************************************************************/ inT32 STATS::min_bucket() const { // Find min if (buckets_ == NULL || total_count_ == 0) { return rangemin_; } inT32 min = 0; for (min = 0; (min < rangemax_ - rangemin_) && (buckets_[min] == 0); min++); return rangemin_ + min; } /********************************************************************** * STATS::max_bucket * * Find REAL maximum bucket - ile(1.0) isn't necessarily correct **********************************************************************/ inT32 STATS::max_bucket() const { // Find max if (buckets_ == NULL || total_count_ == 0) { return rangemin_; } inT32 max; for (max = rangemax_ - rangemin_ - 1; max > 0 && buckets_[max] == 0; max--); return rangemin_ + max; } /********************************************************************** * STATS::median * * Finds a more useful estimate of median than ile(0.5). * * Overcomes a problem with ile() - if the samples are, for example, * 6,6,13,14 ile(0.5) return 7.0 - when a more useful value would be midway * between 6 and 13 = 9.5 **********************************************************************/ double STATS::median() const { //get median if (buckets_ == NULL) { return static_cast<double>(rangemin_); } double median = ile(0.5); int median_pile = static_cast<int>(floor(median)); if ((total_count_ > 1) && (pile_count(median_pile) == 0)) { inT32 min_pile; inT32 max_pile; /* Find preceding non zero pile */ for (min_pile = median_pile; pile_count(min_pile) == 0; min_pile--); /* Find following non zero pile */ for (max_pile = median_pile; pile_count(max_pile) == 0; max_pile++); median = (min_pile + max_pile) / 2.0; } return median; } /********************************************************************** * STATS::local_min * * Return TRUE if this point is a local min. **********************************************************************/ bool STATS::local_min(inT32 x) const { if (buckets_ == NULL) { return false; } x = ClipToRange(x, rangemin_, rangemax_ - 1) - rangemin_; if (buckets_[x] == 0) return true; inT32 index; // table index for (index = x - 1; index >= 0 && buckets_[index] == buckets_[x]; --index); if (index >= 0 && buckets_[index] < buckets_[x]) return false; for (index = x + 1; index < rangemax_ - rangemin_ && buckets_[index] == buckets_[x]; ++index); if (index < rangemax_ - rangemin_ && buckets_[index] < buckets_[x]) return false; else return true; } /********************************************************************** * STATS::smooth * * Apply a triangular smoothing filter to the stats. * This makes the modes a bit more useful. * The factor gives the height of the triangle, i.e. the weight of the * centre. 
**********************************************************************/ void STATS::smooth(inT32 factor) { if (buckets_ == NULL || factor < 2) { return; } STATS result(rangemin_, rangemax_); int entrycount = rangemax_ - rangemin_; for (int entry = 0; entry < entrycount; entry++) { //centre weight int count = buckets_[entry] * factor; for (int offset = 1; offset < factor; offset++) { if (entry - offset >= 0) count += buckets_[entry - offset] * (factor - offset); if (entry + offset < entrycount) count += buckets_[entry + offset] * (factor - offset); } result.add(entry + rangemin_, count); } total_count_ = result.total_count_; memcpy(buckets_, result.buckets_, entrycount * sizeof(buckets_[0])); } /********************************************************************** * STATS::cluster * * Cluster the samples into max_cluster clusters. * Each call runs one iteration. The array of clusters must be * max_clusters+1 in size as cluster 0 is used to indicate which samples * have been used. * The return value is the current number of clusters. **********************************************************************/ inT32 STATS::cluster(float lower, // thresholds float upper, float multiple, // distance threshold inT32 max_clusters, // max no to make STATS *clusters) { // array of clusters BOOL8 new_cluster; // added one float *centres; // cluster centres inT32 entry; // bucket index inT32 cluster; // cluster index inT32 best_cluster; // one to assign to inT32 new_centre = 0; // residual mode inT32 new_mode; // pile count of new_centre inT32 count; // pile to place float dist; // from cluster float min_dist; // from best_cluster inT32 cluster_count; // no of clusters if (buckets_ == NULL || max_clusters < 1) return 0; centres = new float[max_clusters + 1]; for (cluster_count = 1; cluster_count <= max_clusters && clusters[cluster_count].buckets_ != NULL && clusters[cluster_count].total_count_ > 0; cluster_count++) { centres[cluster_count] = static_cast<float>(clusters[cluster_count].ile(0.5)); new_centre = clusters[cluster_count].mode(); for (entry = new_centre - 1; centres[cluster_count] - entry < lower && entry >= rangemin_ && pile_count(entry) <= pile_count(entry + 1); entry--) { count = pile_count(entry) - clusters[0].pile_count(entry); if (count > 0) { clusters[cluster_count].add(entry, count); clusters[0].add (entry, count); } } for (entry = new_centre + 1; entry - centres[cluster_count] < lower && entry < rangemax_ && pile_count(entry) <= pile_count(entry - 1); entry++) { count = pile_count(entry) - clusters[0].pile_count(entry); if (count > 0) { clusters[cluster_count].add(entry, count); clusters[0].add(entry, count); } } } cluster_count--; if (cluster_count == 0) { clusters[0].set_range(rangemin_, rangemax_); } do { new_cluster = FALSE; new_mode = 0; for (entry = 0; entry < rangemax_ - rangemin_; entry++) { count = buckets_[entry] - clusters[0].buckets_[entry]; //remaining pile if (count > 0) { //any to handle min_dist = static_cast<float>(MAX_INT32); best_cluster = 0; for (cluster = 1; cluster <= cluster_count; cluster++) { dist = entry + rangemin_ - centres[cluster]; //find distance if (dist < 0) dist = -dist; if (dist < min_dist) { min_dist = dist; //find least best_cluster = cluster; } } if (min_dist > upper //far enough for new && (best_cluster == 0 || entry + rangemin_ > centres[best_cluster] * multiple || entry + rangemin_ < centres[best_cluster] / multiple)) { if (count > new_mode) { new_mode = count; new_centre = entry + rangemin_; } } } } // need new and room if (new_mode > 0 && 
cluster_count < max_clusters) { cluster_count++; new_cluster = TRUE; if (!clusters[cluster_count].set_range(rangemin_, rangemax_)) { delete [] centres; return 0; } centres[cluster_count] = static_cast<float>(new_centre); clusters[cluster_count].add(new_centre, new_mode); clusters[0].add(new_centre, new_mode); for (entry = new_centre - 1; centres[cluster_count] - entry < lower && entry >= rangemin_ && pile_count (entry) <= pile_count(entry + 1); entry--) { count = pile_count(entry) - clusters[0].pile_count(entry); if (count > 0) { clusters[cluster_count].add(entry, count); clusters[0].add(entry, count); } } for (entry = new_centre + 1; entry - centres[cluster_count] < lower && entry < rangemax_ && pile_count (entry) <= pile_count(entry - 1); entry++) { count = pile_count(entry) - clusters[0].pile_count(entry); if (count > 0) { clusters[cluster_count].add(entry, count); clusters[0].add (entry, count); } } centres[cluster_count] = static_cast<float>(clusters[cluster_count].ile(0.5)); } } while (new_cluster && cluster_count < max_clusters); delete [] centres; return cluster_count; } // Helper tests that the current index is still part of the peak and gathers // the data into the peak, returning false when the peak is ended. // src_buckets[index] - used_buckets[index] is the unused part of the histogram. // prev_count is the histogram count of the previous index on entry and is // updated to the current index on return. // total_count and total_value are accumulating the mean of the peak. static bool GatherPeak(int index, const int* src_buckets, int* used_buckets, int* prev_count, int* total_count, double* total_value) { int pile_count = src_buckets[index] - used_buckets[index]; if (pile_count <= *prev_count && pile_count > 0) { // Accumulate count and index.count product. *total_count += pile_count; *total_value += index * pile_count; // Mark this index as used used_buckets[index] = src_buckets[index]; *prev_count = pile_count; return true; } else { return false; } } // Finds (at most) the top max_modes modes, well actually the whole peak around // each mode, returning them in the given modes vector as a <mean of peak, // total count of peak> pair in order of decreasing total count. // Since the mean is the key and the count the data in the pair, a single call // to sort on the output will re-sort by increasing mean of peak if that is // more useful than decreasing total count. // Returns the actual number of modes found. int STATS::top_n_modes(int max_modes, GenericVector<KDPairInc<float, int> >* modes) const { if (max_modes <= 0) return 0; int src_count = rangemax_ - rangemin_; // Used copies the counts in buckets_ as they get used. STATS used(rangemin_, rangemax_); modes->truncate(0); // Total count of the smallest peak found so far. int least_count = 1; // Mode that is used as a seed for each peak int max_count = 0; do { // Find an unused mode. max_count = 0; int max_index = 0; for (int src_index = 0; src_index < src_count; src_index++) { int pile_count = buckets_[src_index] - used.buckets_[src_index]; if (pile_count > max_count) { max_count = pile_count; max_index = src_index; } } if (max_count > 0) { // Copy the bucket count to used so it doesn't get found again. used.buckets_[max_index] = max_count; // Get the entire peak. 
double total_value = max_index * max_count; int total_count = max_count; int prev_pile = max_count; for (int offset = 1; max_index + offset < src_count; ++offset) { if (!GatherPeak(max_index + offset, buckets_, used.buckets_, &prev_pile, &total_count, &total_value)) break; } prev_pile = buckets_[max_index]; for (int offset = 1; max_index - offset >= 0; ++offset) { if (!GatherPeak(max_index - offset, buckets_, used.buckets_, &prev_pile, &total_count, &total_value)) break; } if (total_count > least_count || modes->size() < max_modes) { // We definitely want this mode, so if we have enough discard the least. if (modes->size() == max_modes) modes->truncate(max_modes - 1); int target_index = 0; // Linear search for the target insertion point. while (target_index < modes->size() && (*modes)[target_index].data >= total_count) ++target_index; float peak_mean = static_cast<float>(total_value / total_count + rangemin_); modes->insert(KDPairInc<float, int>(peak_mean, total_count), target_index); least_count = modes->back().data; } } } while (max_count > 0); return modes->size(); } /********************************************************************** * STATS::print * * Prints a summary and table of the histogram. **********************************************************************/ void STATS::print() const { if (buckets_ == NULL) { return; } inT32 min = min_bucket() - rangemin_; inT32 max = max_bucket() - rangemin_; int num_printed = 0; for (int index = min; index <= max; index++) { if (buckets_[index] != 0) { tprintf("%4d:%-3d ", rangemin_ + index, buckets_[index]); if (++num_printed % 8 == 0) tprintf ("\n"); } } tprintf ("\n"); print_summary(); } /********************************************************************** * STATS::print_summary * * Print a summary of the stats. **********************************************************************/ void STATS::print_summary() const { if (buckets_ == NULL) { return; } inT32 min = min_bucket(); inT32 max = max_bucket(); tprintf("Total count=%d\n", total_count_); tprintf("Min=%.2f Really=%d\n", ile(0.0), min); tprintf("Lower quartile=%.2f\n", ile(0.25)); tprintf("Median=%.2f, ile(0.5)=%.2f\n", median(), ile(0.5)); tprintf("Upper quartile=%.2f\n", ile(0.75)); tprintf("Max=%.2f Really=%d\n", ile(1.0), max); tprintf("Range=%d\n", max + 1 - min); tprintf("Mean= %.2f\n", mean()); tprintf("SD= %.2f\n", sd()); } /********************************************************************** * STATS::plot * * Draw a histogram of the stats table. **********************************************************************/ #ifndef GRAPHICS_DISABLED void STATS::plot(ScrollView* window, // to draw in float xorigin, // bottom left float yorigin, float xscale, // one x unit float yscale, // one y unit ScrollView::Color colour) const { // colour to draw in if (buckets_ == NULL) { return; } window->Pen(colour); for (int index = 0; index < rangemax_ - rangemin_; index++) { window->Rectangle( xorigin + xscale * index, yorigin, xorigin + xscale * (index + 1), yorigin + yscale * buckets_[index]); } } #endif /********************************************************************** * STATS::plotline * * Draw a histogram of the stats table. 
(Line only) **********************************************************************/ #ifndef GRAPHICS_DISABLED void STATS::plotline(ScrollView* window, // to draw in float xorigin, // bottom left float yorigin, float xscale, // one x unit float yscale, // one y unit ScrollView::Color colour) const { // colour to draw in if (buckets_ == NULL) { return; } window->Pen(colour); window->SetCursor(xorigin, yorigin + yscale * buckets_[0]); for (int index = 0; index < rangemax_ - rangemin_; index++) { window->DrawTo(xorigin + xscale * index, yorigin + yscale * buckets_[index]); } } #endif /********************************************************************** * choose_nth_item * * Returns the index of what would b the nth item in the array * if the members were sorted, without actually sorting. **********************************************************************/ inT32 choose_nth_item(inT32 index, float *array, inT32 count) { inT32 next_sample; // next one to do inT32 next_lesser; // space for new inT32 prev_greater; // last one saved inT32 equal_count; // no of equal ones float pivot; // proposed median float sample; // current sample if (count <= 1) return 0; if (count == 2) { if (array[0] < array[1]) { return index >= 1 ? 1 : 0; } else { return index >= 1 ? 0 : 1; } } else { if (index < 0) index = 0; // ensure legal else if (index >= count) index = count - 1; equal_count = (inT32) (rand() % count); pivot = array[equal_count]; // fill gap array[equal_count] = array[0]; next_lesser = 0; prev_greater = count; equal_count = 1; for (next_sample = 1; next_sample < prev_greater;) { sample = array[next_sample]; if (sample < pivot) { // shuffle array[next_lesser++] = sample; next_sample++; } else if (sample > pivot) { prev_greater--; // juggle array[next_sample] = array[prev_greater]; array[prev_greater] = sample; } else { equal_count++; next_sample++; } } for (next_sample = next_lesser; next_sample < prev_greater;) array[next_sample++] = pivot; if (index < next_lesser) return choose_nth_item (index, array, next_lesser); else if (index < prev_greater) return next_lesser; // in equal bracket else return choose_nth_item (index - prev_greater, array + prev_greater, count - prev_greater) + prev_greater; } } /********************************************************************** * choose_nth_item * * Returns the index of what would be the nth item in the array * if the members were sorted, without actually sorting. **********************************************************************/ inT32 choose_nth_item(inT32 index, void *array, inT32 count, size_t size, int (*compar)(const void*, const void*)) { int result; // of compar inT32 next_sample; // next one to do inT32 next_lesser; // space for new inT32 prev_greater; // last one saved inT32 equal_count; // no of equal ones inT32 pivot; // proposed median if (count <= 1) return 0; if (count == 2) { if (compar (array, (char *) array + size) < 0) { return index >= 1 ? 1 : 0; } else { return index >= 1 ? 
                          0 : 1;
    }
  }
  if (index < 0)
    index = 0;                   // ensure legal
  else if (index >= count)
    index = count - 1;
  pivot = (inT32) (rand () % count);
  swap_entries (array, size, pivot, 0);
  next_lesser = 0;
  prev_greater = count;
  equal_count = 1;
  for (next_sample = 1; next_sample < prev_greater;) {
    result = compar ((char *) array + size * next_sample,
                     (char *) array + size * next_lesser);
    if (result < 0) {
      swap_entries (array, size, next_lesser++, next_sample++);  // shuffle
    }
    else if (result > 0) {
      prev_greater--;
      swap_entries(array, size, prev_greater, next_sample);
    }
    else {
      equal_count++;
      next_sample++;
    }
  }
  if (index < next_lesser)
    return choose_nth_item (index, array, next_lesser, size, compar);
  else if (index < prev_greater)
    return next_lesser;          // in equal bracket
  else
    return choose_nth_item (index - prev_greater,
                            (char *) array + size * prev_greater,
                            count - prev_greater, size,
                            compar) + prev_greater;
}

/**********************************************************************
 * swap_entries
 *
 * Swap 2 entries of arbitrary size in-place in a table.
 **********************************************************************/
void swap_entries(void *array,   // array of entries
                  size_t size,   // size of entry
                  inT32 index1,  // entries to swap
                  inT32 index2) {
  char tmp;
  char *ptr1;                    // to entries
  char *ptr2;
  size_t count;                  // of bytes

  ptr1 = reinterpret_cast<char*>(array) + index1 * size;
  ptr2 = reinterpret_cast<char*>(array) + index2 * size;
  for (count = 0; count < size; count++) {
    tmp = *ptr1;
    *ptr1++ = *ptr2;
    *ptr2++ = tmp;               // tedious!
  }
}
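The two choose_nth_item overloads above are hand-rolled quickselect: partition around a random pivot, then recurse only into the side containing the requested rank, which takes O(n) time on average without fully sorting the array. The standard library offers the same contract as std::nth_element; a minimal self-contained demonstration of that contract (not Tesseract code):

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<float> v = {7.f, 1.f, 5.f, 3.f, 9.f};
  // After the call, position 2 holds the value it would have in a fully
  // sorted array -- here the median of the five values.
  std::nth_element(v.begin(), v.begin() + 2, v.end());
  assert(v[2] == 5.f);
  return 0;
}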
--- references/aoapc-book/BeginningAlgorithmContests/bookcodes/ch5/642_uva.cpp (repo: voleking/ICPC, license: MIT) ---
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int n;
char word[2000][10], sorted[2000][10];

// Comparators for qsort: single characters and whole strings.
int cmp_char(const void* _a, const void* _b) {
  char* a = (char*)_a;
  char* b = (char*)_b;
  return *a - *b;
}

int cmp_string(const void* _a, const void* _b) {
  char* a = (char*)_a;
  char* b = (char*)_b;
  return strcmp(a, b);
}

int main() {
  // Read the dictionary until the sentinel line.
  n = 0;
  for (;;) {
    scanf("%s", word[n]);
    if (word[n][0] == 'X') break;
    n++;
  }
  qsort(word, n, sizeof(word[0]), cmp_string);
  // Precompute each word's "signature": its letters in sorted order.
  // Two words are anagrams exactly when their signatures are equal.
  for (int i = 0; i < n; i++) {
    strcpy(sorted[i], word[i]);
    qsort(sorted[i], strlen(sorted[i]), sizeof(char), cmp_char);
  }
  // Answer queries: sort the query's letters, then report every dictionary
  // word with a matching signature (dictionary order, thanks to the sort).
  char s[10];
  while (scanf("%s", s) == 1) {
    if (s[0] == 'X') break;
    qsort(s, strlen(s), sizeof(char), cmp_char);
    int found = 0;
    for (int i = 0; i < n; i++)
      if (strcmp(sorted[i], s) == 0) {
        found = 1;
        printf("%s\n", word[i]);
      }
    if (!found) printf("NOT A VALID WORD\n");
    printf("******\n");
  }
  return 0;
}
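The same signature trick in more idiomatic C++ -- a sketch, assuming the standard UVa 642 sentinel word "XXXXXX" (the book code above only tests the first character). Filing each word under its sorted letters turns every anagram query into a single map lookup instead of a linear scan:

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  std::map<std::string, std::vector<std::string>> by_signature;
  std::string w;
  while (std::cin >> w && w != "XXXXXX") {
    std::string sig = w;
    std::sort(sig.begin(), sig.end());
    by_signature[sig].push_back(w);
  }
  while (std::cin >> w && w != "XXXXXX") {
    std::string sig = w;
    std::sort(sig.begin(), sig.end());
    auto it = by_signature.find(sig);
    if (it == by_signature.end()) {
      std::cout << "NOT A VALID WORD\n";
    } else {
      // Match the original's output order: dictionary order.
      std::sort(it->second.begin(), it->second.end());
      for (const auto& word : it->second) std::cout << word << '\n';
    }
    std::cout << "******\n";
  }
  return 0;
}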
--- apps/linear_algebra/tests/test_halide_blas.cpp (repo: josephwinston/Halide, license: MIT) ---
#include <cmath> #include <iomanip> #include <iostream> #include <limits> #include <random> #include <string> #include <cblas.h> #include <halide_blas.h> #include "Halide.h" #define RUN_TEST(method) \ std::cout << std::setw(30) << ("Testing " #method ": ") << std::flush; \ if (test_##method(N)) { \ std::cout << "PASSED\n"; \ } \ #define L1_VECTOR_TEST(method, code) \ bool test_##method(int N) { \ Scalar alpha = random_scalar(); \ Vector ex(random_vector(N)); \ Vector ey(random_vector(N)); \ Vector ax(ex), ay(ey); \ \ { \ Scalar *x = &(ex[0]); \ Scalar *y = &(ey[0]); \ cblas_##code; \ } \ \ { \ Scalar *x = &(ax[0]); \ Scalar *y = &(ay[0]); \ hblas_##code; \ } \ \ return compareVectors(N, ey, ay); \ } #define L1_SCALAR_TEST(method, code) \ bool test_##method(int N) { \ Scalar alpha = random_scalar(); \ Vector ex(random_vector(N)); \ Vector ey(random_vector(N)); \ Vector ax(ex), ay(ey); \ Scalar er, ar; \ \ { \ Scalar *x = &(ex[0]); \ Scalar *y = &(ey[0]); \ er = cblas_##code; \ } \ \ { \ Scalar *x = &(ax[0]); \ Scalar *y = &(ay[0]); \ ar = hblas_##code; \ } \ \ return compareScalars(er, ar); \ } #define L2_TEST(method, cblas_code, hblas_code) \ bool test_##method(int N) { \ Scalar alpha = random_scalar(); \ Scalar beta = random_scalar(); \ Vector ex(random_vector(N)); \ Vector ey(random_vector(N)); \ Matrix eA(random_matrix(N)); \ Vector ax(ex), ay(ey); \ Matrix aA(eA); \ \ { \ Scalar *x = &(ex[0]); \ Scalar *y = &(ey[0]); \ Scalar *A = &(eA[0]); \ cblas_code; \ } \ \ { \ Scalar *x = &(ax[0]); \ Scalar *y = &(ay[0]); \ Scalar *A = &(aA[0]); \ hblas_code; \ } \ \ return compareVectors(N, ey, ay); \ } #define L3_TEST(method, cblas_code, hblas_code) \ bool test_##method(int N) { \ Scalar alpha = random_scalar(); \ Scalar beta = random_scalar(); \ Matrix eA(random_matrix(N)); \ Matrix eB(random_matrix(N)); \ Matrix eC(random_matrix(N)); \ Matrix aA(eA), aB(eB), aC(eC); \ \ { \ Scalar *A = &(eA[0]); \ Scalar *B = &(eB[0]); \ Scalar *C = &(eC[0]); \ cblas_code; \ } \ \ { \ Scalar *A = &(aA[0]); \ Scalar *B = &(aB[0]); \ Scalar *C = &(aC[0]); \ hblas_code; \ } \ \ return compareMatrices(N, eC, aC); \ } template<class T> struct BLASTestBase { typedef T Scalar; typedef std::vector<T> Vector; typedef std::vector<T> Matrix; std::random_device rand_dev; std::default_random_engine rand_eng; BLASTestBase() : rand_eng(rand_dev()) {} Scalar random_scalar() { std::uniform_real_distribution<T> uniform_dist(0.0, 1.0); return uniform_dist(rand_eng); } Vector random_vector(int N) { Vector buff(N); for (int i=0; i<N; ++i) { buff[i] = random_scalar(); } return buff; } Matrix random_matrix(int N) { Matrix buff(N * N); for (int i=0; i<N*N; ++i) { buff[i] = random_scalar(); } return buff; } bool compareScalars(Scalar x, Scalar y, Scalar epsilon = 4 * std::numeric_limits<Scalar>::epsilon()) { if (x == y) { return true; } else { const Scalar min_normal = std::numeric_limits<Scalar>::min(); Scalar ax = std::abs(x); Scalar ay = std::abs(y); Scalar diff = std::abs(x - y); bool equal = false; if (x == 0.0 || y == 0.0 || diff < min_normal) { equal = diff < (epsilon * min_normal); } else { equal = diff / (ax + ay) < epsilon; } if (!equal) { std::cerr << "FAIL! 
expected = " << x << ", actual = " << y << "\n"; } return equal; } } bool compareVectors(int N, const Vector &x, const Vector &y, Scalar epsilon = 16 * std::numeric_limits<Scalar>::epsilon()) { bool equal = true; for (int i = 0; i < N; ++i) { if (!compareScalars(x[i], y[i], epsilon)) { std::cerr << "Vectors differ at index: " << i << "\n"; equal = false; break; } } return equal; } bool compareMatrices(int N, const Matrix &A, const Matrix &B, Scalar epsilon = 16 * std::numeric_limits<Scalar>::epsilon()) { bool equal = true; for (int i = 0; i < N*N; ++i) { if (!compareScalars(A[i], A[i], epsilon)) { std::cerr << "Matrices differ at coords: (" << i%N << ", " << i/N << ")\n"; equal = false; break; } } return equal; } }; struct BLASFloatTests : public BLASTestBase<float> { void run_tests(int N) { RUN_TEST(scopy); RUN_TEST(sscal); RUN_TEST(saxpy); RUN_TEST(sdot); RUN_TEST(sasum); RUN_TEST(sgemv_notrans); RUN_TEST(sgemv_trans); RUN_TEST(sger); RUN_TEST(sgemm_notrans); RUN_TEST(sgemm_transA); RUN_TEST(sgemm_transB); RUN_TEST(sgemm_transAB); } L1_VECTOR_TEST(scopy, scopy(N, x, 1, y, 1)) L1_VECTOR_TEST(sscal, sscal(N, alpha, y, 1)) L1_VECTOR_TEST(saxpy, saxpy(N, alpha, x, 1, y, 1)) L1_SCALAR_TEST(sdot, sdot(N, x, 1, y, 1)) L1_SCALAR_TEST(sasum, sasum(N, x, 1)) L2_TEST(sgemv_notrans, cblas_sgemv(CblasColMajor, CblasNoTrans, N, N, alpha, A, N, x, 1, beta, y, 1), hblas_sgemv(HblasColMajor, HblasNoTrans, N, N, alpha, A, N, x, 1, beta, y, 1)); L2_TEST(sgemv_trans, cblas_sgemv(CblasColMajor, CblasTrans, N, N, alpha, A, N, x, 1, beta, y, 1), hblas_sgemv(HblasColMajor, HblasTrans, N, N, alpha, A, N, x, 1, beta, y, 1)); L2_TEST(sger, cblas_sger(CblasColMajor, N, N, alpha, x, 1, y, 1, A, N), hblas_sger(HblasColMajor, N, N, alpha, x, 1, y, 1, A, N)); L3_TEST(sgemm_notrans, cblas_sgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_sgemm(HblasColMajor, HblasNoTrans, HblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(sgemm_transA, cblas_sgemm(CblasColMajor, CblasTrans, CblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_sgemm(HblasColMajor, HblasTrans, HblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(sgemm_transB, cblas_sgemm(CblasColMajor, CblasNoTrans, CblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_sgemm(HblasColMajor, HblasNoTrans, HblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(sgemm_transAB, cblas_sgemm(CblasColMajor, CblasTrans, CblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_sgemm(HblasColMajor, HblasTrans, HblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); }; struct BLASDoubleTests : public BLASTestBase<double> { void run_tests(int N) { RUN_TEST(dcopy); RUN_TEST(dscal); RUN_TEST(daxpy); RUN_TEST(ddot); RUN_TEST(dasum); RUN_TEST(dgemv_notrans); RUN_TEST(dgemv_trans); RUN_TEST(dger); RUN_TEST(dgemm_notrans); RUN_TEST(dgemm_transA); RUN_TEST(dgemm_transB); RUN_TEST(dgemm_transAB); } L1_VECTOR_TEST(dcopy, dcopy(N, x, 1, y, 1)) L1_VECTOR_TEST(dscal, dscal(N, alpha, y, 1)) L1_VECTOR_TEST(daxpy, daxpy(N, alpha, x, 1, y, 1)) L1_SCALAR_TEST(ddot, ddot(N, x, 1, y, 1)) L1_SCALAR_TEST(dasum, dasum(N, x, 1)) L2_TEST(dgemv_notrans, cblas_dgemv(CblasColMajor, CblasNoTrans, N, N, alpha, A, N, x, 1, beta, y, 1), hblas_dgemv(HblasColMajor, HblasNoTrans, N, N, alpha, A, N, x, 1, beta, y, 1)); L2_TEST(dgemv_trans, cblas_dgemv(CblasColMajor, CblasTrans, N, N, alpha, A, N, x, 1, beta, y, 1), hblas_dgemv(HblasColMajor, HblasTrans, N, N, alpha, A, N, x, 1, beta, y, 1)); L2_TEST(dger, cblas_dger(CblasColMajor, 
N, N, alpha, x, 1, y, 1, A, N), hblas_dger(HblasColMajor, N, N, alpha, x, 1, y, 1, A, N)); L3_TEST(dgemm_notrans, cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_dgemm(HblasColMajor, HblasNoTrans, HblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(dgemm_transA, cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_dgemm(HblasColMajor, HblasTrans, HblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(dgemm_transB, cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_dgemm(HblasColMajor, HblasNoTrans, HblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(dgemm_transAB, cblas_dgemm(CblasColMajor, CblasTrans, CblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_dgemm(HblasColMajor, HblasTrans, HblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); }; int main(int argc, char *argv[]) { BLASFloatTests s; BLASDoubleTests d; if (argc > 1) { for (int i = 1; i < argc; ++i) { int size = std::stoi(argv[i]); std::cout << "Testing halide_blas with N = " << size << ":\n"; s.run_tests(size); d.run_tests(size); } } else { int size = 277; std::cout << "Testing halide_blas with N = " << size << ":\n"; s.run_tests(size); d.run_tests(size); } }
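// A note on the harness above (a sketch, not the actual definitions): the
// RUN_TEST / L1_VECTOR_TEST / L1_SCALAR_TEST / L2_TEST / L3_TEST macros are
// defined earlier in this file and are not shown here. Each *_TEST macro
// presumably expands to a member test function that fills the operands with
// random data, runs the reference cblas_* call and the hblas_* call on
// separate copies, and diffs the results with compareVectors()/compareMatrices().
// Hypothetical shape, for orientation only:
//
//   #define L2_TEST(name, cblas_call, hblas_call) \
//       void test_##name(int N) {                 \
//           /* init alpha, beta, x, y, A ... */   \
//           cblas_call; /* reference buffers  */  \
//           hblas_call; /* Halide buffers     */  \
//           assert(compareVectors(N, y_ref, y));  \
//       }
//
// RUN_TEST(name) then presumably just invokes test_##name(N).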
39.744409
108
0.423955
josephwinston
8b1e4ab07b56c2014f36e5c645e1b7e8386b4de5
1,476
cpp
C++
controls.extend/SRatingBar.cpp
fengjixuchui/soui
360d9b63cab96e7c01d600ff772578c2fdc9af24
[ "MIT" ]
4
2018-01-06T13:16:50.000Z
2018-05-14T01:20:00.000Z
controls.extend/SRatingBar.cpp
fengjixuchui/soui
360d9b63cab96e7c01d600ff772578c2fdc9af24
[ "MIT" ]
null
null
null
controls.extend/SRatingBar.cpp
fengjixuchui/soui
360d9b63cab96e7c01d600ff772578c2fdc9af24
[ "MIT" ]
3
2017-10-12T05:50:15.000Z
2018-08-14T03:32:06.000Z
#include "stdafx.h" #include "SRatingBar.h" namespace SOUI { SRatingBar::SRatingBar(void):m_pStar(NULL),m_nStars(5),m_fValue(0.0f) { } SRatingBar::~SRatingBar(void) { } void SRatingBar::OnPaint(IRenderTarget *pRT) { CRect rcClient = GetClientRect(); int nWid = (int)(rcClient.Width()*m_fValue/m_nStars); CRect rcFore = rcClient; rcFore.right = rcFore.left + nWid; pRT->PushClipRect(rcFore); DrawStars(pRT,rcClient,TRUE); pRT->PopClip(); CRect rcBack = rcClient; rcBack.left = rcFore.right; pRT->PushClipRect(rcBack); DrawStars(pRT,rcClient,FALSE); pRT->PopClip(); } CSize SRatingBar::GetDesiredSize(LPCRECT pRcContainer) { if (!m_pStar) return CSize(16,16); CSize szStar = m_pStar->GetSkinSize(); szStar.cx *= m_nStars; return szStar; } void SRatingBar::DrawStars(IRenderTarget *pRT,CRect rc,BOOL bForeground) { if (!m_pStar) return; CSize szStar = rc.Size(); szStar.cx/=m_nStars; CRect rcStar(rc.TopLeft(),szStar); for(int i=0;i<m_nStars;i++) { m_pStar->Draw(pRT,rcStar,bForeground?0:1); rcStar.OffsetRect(szStar.cx,0); } } void SRatingBar::SetValue(float fValue) { m_fValue = fValue; if(m_fValue>(float)m_nStars) m_fValue = (float)m_nStars; Invalidate(); } }
23.428571
76
0.584011
fengjixuchui
8b1f53a5afc248f3940158763926e017b884f9f0
1,033
cpp
C++
ceng213/hw2/mains/tpbst_case_19.cpp
zeynepozalp/Coursework
d2526229a757a926c311e49c7ffec995ebb9f365
[ "MIT" ]
null
null
null
ceng213/hw2/mains/tpbst_case_19.cpp
zeynepozalp/Coursework
d2526229a757a926c311e49c7ffec995ebb9f365
[ "MIT" ]
null
null
null
ceng213/hw2/mains/tpbst_case_19.cpp
zeynepozalp/Coursework
d2526229a757a926c311e49c7ffec995ebb9f365
[ "MIT" ]
null
null
null
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstdlib>

#include "tpbst.hpp"

/*
 * Case 19 : Empty tree, insert many items, create primary node with empty
 * secondary tree, remove item with two children, print.
 */

int main() {
    TwoPhaseBST<int> tpbst;

    tpbst.insert("ceng351", "sec2", 32)
         .insert("ceng351", "sec1", 31)
         .insert("ceng351", "sec3", 33)
         .insert("ceng351", "sec4", 34)
         .insert("ceng213", "sec1", 21)
         .remove("ceng213", "sec1")
         .insert("ceng435", "sec1", 41)
         .insert("ceng435", "sec2", 42)
         .insert("ceng435", "sec4", 44)
         .insert("ceng435", "sec3", 43)
         .insert("ceng477", "sec3", 73)
         .insert("ceng477", "sec4", 74)
         .insert("ceng477", "sec1", 71)
         .insert("ceng477", "sec2", 72)
         .insert("ceng453", "sec1", 91)
         .remove("ceng351", "sec2")
         .remove("ceng477", "sec3")
         .print();

    return 0;
}
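// A note on the scenario (an inference from the calls above, not part of the
// original test): "ceng351"/"sec2" is inserted first and so sits at the root of
// ceng351's secondary tree, with "sec1" to its left and "sec3"/"sec4" to its
// right. Removing it therefore deletes a node with two children, forcing the
// remove to splice in a replacement (classically the in-order successor) rather
// than simply unlinking a leaf -- which is what "remove item with two children"
// in the case description exercises.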
29.514286
128
0.514037
zeynepozalp
8b1ffff653407c7e8895c8aeb20113097d30754b
32,159
cpp
C++
src/ZNCString.cpp
md-5/znc
39c741fcd2307d707a0d1bebbed3d80be9b1899b
[ "Apache-2.0" ]
1
2021-11-11T04:49:01.000Z
2021-11-11T04:49:01.000Z
src/ZNCString.cpp
md-5/znc
39c741fcd2307d707a0d1bebbed3d80be9b1899b
[ "Apache-2.0" ]
null
null
null
src/ZNCString.cpp
md-5/znc
39c741fcd2307d707a0d1bebbed3d80be9b1899b
[ "Apache-2.0" ]
1
2021-11-11T04:48:51.000Z
2021-11-11T04:48:51.000Z
/*
 * Copyright (C) 2004-2015 ZNC, see the NOTICE file for details.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <znc/FileUtils.h>
#include <znc/Utils.h>
#include <znc/MD5.h>
#include <znc/SHA256.h>
#include <sstream>

using std::stringstream;

CString::CString(char c) : string() { stringstream s; s << c; *this = s.str(); }
CString::CString(unsigned char c) : string() { stringstream s; s << c; *this = s.str(); }
CString::CString(short i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(unsigned short i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(int i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(unsigned int i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(long i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(unsigned long i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(long long i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(unsigned long long i) : string() { stringstream s; s << i; *this = s.str(); }

CString::CString(double i, int precision) : string() {
    stringstream s;
    s.precision(precision);
    s << std::fixed << i;
    *this = s.str();
}

CString::CString(float i, int precision) : string() {
    stringstream s;
    s.precision(precision);
    s << std::fixed << i;
    *this = s.str();
}

unsigned char* CString::strnchr(const unsigned char* src, unsigned char c, unsigned int iMaxBytes,
                                unsigned char* pFill, unsigned int* piCount) const {
    for (unsigned int a = 0; a < iMaxBytes && *src; a++, src++) {
        if (pFill) {
            pFill[a] = *src;
        }

        if (*src == c) {
            if (pFill) {
                pFill[a + 1] = 0;
            }

            if (piCount) {
                *piCount = a;
            }

            return (unsigned char*) src;
        }
    }

    if (pFill) {
        *pFill = 0;
    }

    if (piCount) {
        *piCount = 0;
    }

    return NULL;
}

int CString::CaseCmp(const CString& s, CString::size_type uLen) const {
    if (uLen != CString::npos) {
        return strncasecmp(c_str(), s.c_str(), uLen);
    }
    return strcasecmp(c_str(), s.c_str());
}

int CString::StrCmp(const CString& s, CString::size_type uLen) const {
    if (uLen != CString::npos) {
        return strncmp(c_str(), s.c_str(), uLen);
    }
    return strcmp(c_str(), s.c_str());
}

bool CString::Equals(const CString& s, CaseSensitivity cs) const {
    if (cs == CaseSensitive) {
        return (StrCmp(s) == 0);
    } else {
        return (CaseCmp(s) == 0);
    }
}

bool CString::Equals(const CString& s, bool bCaseSensitive, CString::size_type uLen) const {
    if (bCaseSensitive) {
        return (StrCmp(s, uLen) == 0);
    } else {
        return (CaseCmp(s, uLen) == 0);
    }
}

bool CString::WildCmp(const CString& sWild, const CString& sString) {
    // Written by Jack Handy - jakkhandy@hotmail.com
    const char *wild = sWild.c_str(), *CString = sString.c_str();
    const char *cp = NULL, *mp = NULL;

    while ((*CString) && (*wild != '*')) {
        if ((*wild != *CString) && (*wild != '?')) {
            return false;
        }
        wild++;
        CString++;
    }

    while (*CString) {
        if (*wild == '*') {
            if (!*++wild) {
                return true;
            }
            mp = wild;
            cp = CString + 1;
        } else if ((*wild == *CString) || (*wild == '?')) {
            wild++;
            CString++;
        } else {
            wild = mp;
            CString = cp++;
        }
    }

    while (*wild == '*') {
        wild++;
    }

    return (*wild == 0);
}

bool CString::WildCmp(const CString& sWild) const {
    return CString::WildCmp(sWild, *this);
}

CString& CString::MakeUpper() {
    for (size_type a = 0; a < length(); a++) {
        char& c = (*this)[a];
        //TODO use unicode
        c = (char)toupper(c);
    }

    return *this;
}

CString& CString::MakeLower() {
    for (size_type a = 0; a < length(); a++) {
        char& c = (*this)[a];
        //TODO use unicode
        c = (char)tolower(c);
    }

    return *this;
}

CString CString::AsUpper() const { CString sRet = *this; sRet.MakeUpper(); return sRet; }
CString CString::AsLower() const { CString sRet = *this; sRet.MakeLower(); return sRet; }

CString::EEscape CString::ToEscape(const CString& sEsc) {
    if (sEsc.Equals("ASCII")) { return EASCII; }
    else if (sEsc.Equals("HTML")) { return EHTML; }
    else if (sEsc.Equals("URL")) { return EURL; }
    else if (sEsc.Equals("SQL")) { return ESQL; }
    else if (sEsc.Equals("NAMEDFMT")) { return ENAMEDFMT; }
    else if (sEsc.Equals("DEBUG")) { return EDEBUG; }
    else if (sEsc.Equals("MSGTAG")) { return EMSGTAG; }
    else if (sEsc.Equals("HEXCOLON")) { return EHEXCOLON; }

    return EASCII;
}

CString CString::Escape_n(EEscape eFrom, EEscape eTo) const {
    CString sRet;
    const char szHex[] = "0123456789ABCDEF";
    const unsigned char *pStart = (const unsigned char*) data();
    const unsigned char *p = (const unsigned char*) data();
    size_type iLength = length();
    sRet.reserve(iLength * 3);
    unsigned char pTmp[21];
    unsigned int iCounted = 0;

    for (unsigned int a = 0; a < iLength; a++, p = pStart + a) {
        unsigned char ch = 0;

        switch (eFrom) {
            case EHTML:
                if ((*p == '&') && (strnchr((unsigned char*) p, ';', sizeof(pTmp) - 1, pTmp, &iCounted))) {
                    // please note that we do not have any Unicode or UTF-8 support here at all.
                    if ((iCounted >= 3) && (pTmp[1] == '#')) {
                        // do XML and HTML &#97; &#x3c
                        int base = 10;

                        if ((pTmp[2] & 0xDF) == 'X') {
                            base = 16;
                        }

                        char* endptr = NULL;
                        unsigned long int b = strtol((const char*) (pTmp + 2 + (base == 16)), &endptr, base);

                        if ((*endptr == ';') && (b <= 255)) {
                            // in case they do something like &#7777777777;
                            ch = (unsigned char)b;
                            a += iCounted;
                            break;
                        }
                    }

                    if (ch == 0) {
                        if (!strncasecmp((const char*) &pTmp, "&lt;", 2)) ch = '<';
                        else if (!strncasecmp((const char*) &pTmp, "&gt;", 2)) ch = '>';
                        else if (!strncasecmp((const char*) &pTmp, "&quot;", 4)) ch = '"';
                        else if (!strncasecmp((const char*) &pTmp, "&amp;", 3)) ch = '&';
                    }

                    if (ch > 0) {
                        a += iCounted;
                    } else {
                        ch = *p; // Not a valid escape, just record the &
                    }
                } else {
                    ch = *p;
                }
                break;
            case EASCII:
                ch = *p;
                break;
            case EURL:
                if (*p == '%' && (a + 2) < iLength && isxdigit(*(p + 1)) && isxdigit(*(p + 2))) {
                    p++;
                    if (isdigit(*p)) {
                        ch = (unsigned char)((*p - '0') << 4);
                    } else {
                        ch = (unsigned char)((tolower(*p) - 'a' + 10) << 4);
                    }

                    p++;
                    if (isdigit(*p)) {
                        ch |= (unsigned char)(*p - '0');
                    } else {
                        ch |= (unsigned char)(tolower(*p) - 'a' + 10);
                    }

                    a += 2;
                } else if (pStart[a] == '+') {
                    ch = ' ';
                } else {
                    ch = *p;
                }
                break;
            case ESQL:
                if (*p != '\\' || iLength < (a + 1)) {
                    ch = *p;
                } else {
                    a++;
                    p++;

                    if (*p == 'n') { ch = '\n'; }
                    else if (*p == 'r') { ch = '\r'; }
                    else if (*p == '0') { ch = '\0'; }
                    else if (*p == 't') { ch = '\t'; }
                    else if (*p == 'b') { ch = '\b'; }
                    else { ch = *p; }
                }
                break;
            case ENAMEDFMT:
                if (*p != '\\' || iLength < (a + 1)) {
                    ch = *p;
                } else {
                    a++;
                    p++;
                    ch = *p;
                }
                break;
            case EDEBUG:
                if (*p == '\\' && (a + 3) < iLength && *(p + 1) == 'x' && isxdigit(*(p + 2)) && isxdigit(*(p + 3))) {
                    p += 2;
                    if (isdigit(*p)) {
                        ch = (unsigned char)((*p - '0') << 4);
                    } else {
                        ch = (unsigned
char)((tolower(*p) - 'a' + 10) << 4);
                    }

                    p++;
                    if (isdigit(*p)) {
                        ch |= (unsigned char)(*p - '0');
                    } else {
                        ch |= (unsigned char)(tolower(*p) - 'a' + 10);
                    }

                    a += 3;
                } else if (*p == '\\' && a + 1 < iLength && *(p + 1) == '.') {
                    a++;
                    p++;
                    ch = '\\';
                } else {
                    ch = *p;
                }
                break;
            case EMSGTAG:
                if (*p != '\\' || iLength < (a + 1)) {
                    ch = *p;
                } else {
                    a++;
                    p++;

                    if (*p == ':') { ch = ';'; }
                    else if (*p == 's') { ch = ' '; }
                    else if (*p == '0') { ch = '\0'; }
                    else if (*p == '\\') { ch = '\\'; }
                    else if (*p == 'r') { ch = '\r'; }
                    else if (*p == 'n') { ch = '\n'; }
                    else { ch = *p; }
                }
                break;
            case EHEXCOLON: {
                while (!isxdigit(*p) && a < iLength) {
                    a++;
                    p++;
                }
                if (a == iLength) {
                    continue;
                }
                if (isdigit(*p)) {
                    ch = (unsigned char)((*p - '0') << 4);
                } else {
                    ch = (unsigned char)((tolower(*p) - 'a' + 10) << 4);
                }
                a++;
                p++;
                while (!isxdigit(*p) && a < iLength) {
                    a++;
                    p++;
                }
                if (a == iLength) {
                    continue;
                }
                if (isdigit(*p)) {
                    ch |= (unsigned char)(*p - '0');
                } else {
                    ch |= (unsigned char)(tolower(*p) - 'a' + 10);
                }
            } break;
        }

        switch (eTo) {
            case EHTML:
                if (ch == '<') sRet += "&lt;";
                else if (ch == '>') sRet += "&gt;";
                else if (ch == '"') sRet += "&quot;";
                else if (ch == '&') sRet += "&amp;";
                else {
                    sRet += ch;
                }
                break;
            case EASCII:
                sRet += ch;
                break;
            case EURL:
                if (isalnum(ch) || ch == '_' || ch == '.' || ch == '-') {
                    sRet += ch;
                } else if (ch == ' ') {
                    sRet += '+';
                } else {
                    sRet += '%';
                    sRet += szHex[ch >> 4];
                    sRet += szHex[ch & 0xf];
                }
                break;
            case ESQL:
                if (ch == '\0') { sRet += '\\'; sRet += '0'; }
                else if (ch == '\n') { sRet += '\\'; sRet += 'n'; }
                else if (ch == '\t') { sRet += '\\'; sRet += 't'; }
                else if (ch == '\r') { sRet += '\\'; sRet += 'r'; }
                else if (ch == '\b') { sRet += '\\'; sRet += 'b'; }
                else if (ch == '\"') { sRet += '\\'; sRet += '\"'; }
                else if (ch == '\'') { sRet += '\\'; sRet += '\''; }
                else if (ch == '\\') { sRet += '\\'; sRet += '\\'; }
                else { sRet += ch; }
                break;
            case ENAMEDFMT:
                if (ch == '\\') { sRet += '\\'; sRet += '\\'; }
                else if (ch == '{') { sRet += '\\'; sRet += '{'; }
                else if (ch == '}') { sRet += '\\'; sRet += '}'; }
                else { sRet += ch; }
                break;
            case EDEBUG:
                if (ch < 0x20 || ch == 0x7F) {
                    sRet += "\\x";
                    sRet += szHex[ch >> 4];
                    sRet += szHex[ch & 0xf];
                } else if (ch == '\\') {
                    sRet += "\\.";
                } else {
                    sRet += ch;
                }
                break;
            case EMSGTAG:
                if (ch == ';') { sRet += '\\'; sRet += ':'; }
                else if (ch == ' ') { sRet += '\\'; sRet += 's'; }
                else if (ch == '\0') { sRet += '\\'; sRet += '0'; }
                else if (ch == '\\') { sRet += '\\'; sRet += '\\'; }
                else if (ch == '\r') { sRet += '\\'; sRet += 'r'; }
                else if (ch == '\n') { sRet += '\\'; sRet += 'n'; }
                else { sRet += ch; }
                break;
            case EHEXCOLON: {
                sRet += tolower(szHex[ch >> 4]);
                sRet += tolower(szHex[ch & 0xf]);
                sRet += ":";
            } break;
        }
    }

    if (eTo == EHEXCOLON) {
        sRet.TrimRight(":");
    }

    return sRet;
}

CString CString::Escape_n(EEscape eTo) const { return Escape_n(EASCII, eTo); }
CString& CString::Escape(EEscape eFrom, EEscape eTo) { return (*this = Escape_n(eFrom, eTo)); }
CString& CString::Escape(EEscape eTo) { return (*this = Escape_n(eTo)); }

CString CString::Replace_n(const CString& sReplace, const CString& sWith, const CString& sLeft,
                           const CString& sRight, bool bRemoveDelims) const {
    CString sRet = *this;
    CString::Replace(sRet, sReplace, sWith, sLeft, sRight, bRemoveDelims);
    return sRet;
}

unsigned int CString::Replace(const CString& sReplace, const CString& sWith, const CString& sLeft,
                              const CString& sRight, bool bRemoveDelims) {
    return CString::Replace(*this, sReplace, sWith, sLeft, sRight, bRemoveDelims);
}

unsigned int CString::Replace(CString&
sStr, const CString& sReplace, const CString& sWith,
                              const CString& sLeft, const CString& sRight, bool bRemoveDelims) {
    unsigned int uRet = 0;
    CString sCopy = sStr;
    sStr.clear();

    size_type uReplaceWidth = sReplace.length();
    size_type uLeftWidth = sLeft.length();
    size_type uRightWidth = sRight.length();
    const char* p = sCopy.c_str();
    bool bInside = false;

    while (*p) {
        if (!bInside && uLeftWidth && strncmp(p, sLeft.c_str(), uLeftWidth) == 0) {
            if (!bRemoveDelims) {
                sStr += sLeft;
            }

            p += uLeftWidth - 1;
            bInside = true;
        } else if (bInside && uRightWidth && strncmp(p, sRight.c_str(), uRightWidth) == 0) {
            if (!bRemoveDelims) {
                sStr += sRight;
            }

            p += uRightWidth - 1;
            bInside = false;
        } else if (!bInside && strncmp(p, sReplace.c_str(), uReplaceWidth) == 0) {
            sStr += sWith;
            p += uReplaceWidth - 1;
            uRet++;
        } else {
            sStr.append(p, 1);
        }

        p++;
    }

    return uRet;
}

CString CString::Token(size_t uPos, bool bRest, const CString& sSep, bool bAllowEmpty,
                       const CString& sLeft, const CString& sRight, bool bTrimQuotes) const {
    VCString vsTokens;

    if (Split(sSep, vsTokens, bAllowEmpty, sLeft, sRight, bTrimQuotes) > uPos) {
        CString sRet;

        for (size_t a = uPos; a < vsTokens.size(); a++) {
            if (a > uPos) {
                sRet += sSep;
            }

            sRet += vsTokens[a];

            if (!bRest) {
                break;
            }
        }

        return sRet;
    }

    return Token(uPos, bRest, sSep, bAllowEmpty);
}

CString CString::Token(size_t uPos, bool bRest, const CString& sSep, bool bAllowEmpty) const {
    const char *sep_str = sSep.c_str();
    size_t sep_len = sSep.length();
    const char *str = c_str();
    size_t str_len = length();
    size_t start_pos = 0;
    size_t end_pos;

    if (!bAllowEmpty) {
        while (strncmp(&str[start_pos], sep_str, sep_len) == 0) {
            start_pos += sep_len;
        }
    }

    // First, find the start of our token
    while (uPos != 0 && start_pos < str_len) {
        bool bFoundSep = false;

        while (strncmp(&str[start_pos], sep_str, sep_len) == 0 && (!bFoundSep || !bAllowEmpty)) {
            start_pos += sep_len;
            bFoundSep = true;
        }

        if (bFoundSep) {
            uPos--;
        } else {
            start_pos++;
        }
    }

    // String is over?
    if (start_pos >= str_len)
        return "";

    // If they want everything from here on, give it to them
    if (bRest) {
        return substr(start_pos);
    }

    // Now look for the end of the token they want
    end_pos = start_pos;
    while (end_pos < str_len) {
        if (strncmp(&str[end_pos], sep_str, sep_len) == 0)
            return substr(start_pos, end_pos - start_pos);
        end_pos++;
    }

    // They want the last token in the string, not something in between
    return substr(start_pos);
}

CString CString::Ellipsize(unsigned int uLen) const {
    if (uLen >= size()) {
        return *this;
    }

    string sRet;

    // @todo this looks suspect
    if (uLen < 4) {
        for (unsigned int a = 0; a < uLen; a++) {
            sRet += ".";
        }
        return sRet;
    }

    sRet = substr(0, uLen - 3) + "...";

    return sRet;
}

CString CString::Left(size_type uCount) const {
    uCount = (uCount > length()) ? length() : uCount;
    return substr(0, uCount);
}

CString CString::Right(size_type uCount) const {
    uCount = (uCount > length()) ?
length() : uCount;
    return substr(length() - uCount, uCount);
}

CString::size_type CString::URLSplit(MCString& msRet) const {
    msRet.clear();

    VCString vsPairs;
    Split("&", vsPairs);

    for (size_t a = 0; a < vsPairs.size(); a++) {
        const CString& sPair = vsPairs[a];

        msRet[sPair.Token(0, false, "=").Escape(CString::EURL, CString::EASCII)] =
            sPair.Token(1, true, "=").Escape(CString::EURL, CString::EASCII);
    }

    return msRet.size();
}

CString::size_type CString::OptionSplit(MCString& msRet, bool bUpperKeys) const {
    CString sName;
    CString sCopy(*this);
    msRet.clear();

    while (!sCopy.empty()) {
        sName = sCopy.Token(0, false, "=", false, "\"", "\"", false).Trim_n();
        sCopy = sCopy.Token(1, true, "=", false, "\"", "\"", false).TrimLeft_n();

        if (sName.empty()) {
            continue;
        }

        VCString vsNames;
        sName.Split(" ", vsNames, false, "\"", "\"");

        for (unsigned int a = 0; a < vsNames.size(); a++) {
            CString sKeyName = vsNames[a];

            if (bUpperKeys) {
                sKeyName.MakeUpper();
            }

            if ((a + 1) == vsNames.size()) {
                msRet[sKeyName] = sCopy.Token(0, false, " ", false, "\"", "\"");
                sCopy = sCopy.Token(1, true, " ", false, "\"", "\"", false);
            } else {
                msRet[sKeyName] = "";
            }
        }
    }

    return msRet.size();
}

CString::size_type CString::QuoteSplit(VCString& vsRet) const {
    vsRet.clear();
    return Split(" ", vsRet, false, "\"", "\"", true);
}

CString::size_type CString::Split(const CString& sDelim, VCString& vsRet, bool bAllowEmpty,
                                  const CString& sLeft, const CString& sRight,
                                  bool bTrimQuotes, bool bTrimWhiteSpace) const {
    vsRet.clear();

    if (empty()) {
        return 0;
    }

    CString sTmp;
    bool bInside = false;
    size_type uDelimLen = sDelim.length();
    size_type uLeftLen = sLeft.length();
    size_type uRightLen = sRight.length();
    const char* p = c_str();

    if (!bAllowEmpty) {
        while (strncasecmp(p, sDelim.c_str(), uDelimLen) == 0) {
            p += uDelimLen;
        }
    }

    while (*p) {
        if (uLeftLen && uRightLen && !bInside && strncasecmp(p, sLeft.c_str(), uLeftLen) == 0) {
            if (!bTrimQuotes) {
                sTmp += sLeft;
            }

            p += uLeftLen;
            bInside = true;
            continue;
        }

        if (uLeftLen && uRightLen && bInside && strncasecmp(p, sRight.c_str(), uRightLen) == 0) {
            if (!bTrimQuotes) {
                sTmp += sRight;
            }

            p += uRightLen;
            bInside = false;
            continue;
        }

        if (uDelimLen && !bInside && strncasecmp(p, sDelim.c_str(), uDelimLen) == 0) {
            if (bTrimWhiteSpace) {
                sTmp.Trim();
            }

            vsRet.push_back(sTmp);
            sTmp.clear();
            p += uDelimLen;

            if (!bAllowEmpty) {
                while (strncasecmp(p, sDelim.c_str(), uDelimLen) == 0) {
                    p += uDelimLen;
                }
            }

            bInside = false;
            continue;
        } else {
            sTmp += *p;
        }

        p++;
    }

    if (!sTmp.empty()) {
        if (bTrimWhiteSpace) {
            sTmp.Trim();
        }

        vsRet.push_back(sTmp);
    }

    return vsRet.size();
}

CString::size_type CString::Split(const CString& sDelim, SCString& ssRet, bool bAllowEmpty,
                                  const CString& sLeft, const CString& sRight,
                                  bool bTrimQuotes, bool bTrimWhiteSpace) const {
    VCString vsTokens;

    Split(sDelim, vsTokens, bAllowEmpty, sLeft, sRight, bTrimQuotes, bTrimWhiteSpace);

    ssRet.clear();

    for (size_t a = 0; a < vsTokens.size(); a++) {
        ssRet.insert(vsTokens[a]);
    }

    return ssRet.size();
}

CString CString::NamedFormat(const CString& sFormat, const MCString& msValues) {
    CString sRet;
    CString sKey;
    bool bEscape = false;
    bool bParam = false;
    const char* p = sFormat.c_str();

    while (*p) {
        if (!bParam) {
            if (bEscape) { sRet += *p; bEscape = false; }
            else if (*p == '\\') { bEscape = true; }
            else if (*p == '{') { bParam = true; sKey.clear(); }
            else { sRet += *p; }
        } else {
            if (bEscape) { sKey += *p; bEscape = false; }
            else if (*p == '\\') { bEscape = true; }
            else if (*p == '}') {
                bParam = false;
                MCString::const_iterator it = msValues.find(sKey);
                if (it
!= msValues.end()) {
                    sRet += (*it).second;
                }
            } else {
                sKey += *p;
            }
        }

        p++;
    }

    return sRet;
}

CString CString::RandomString(unsigned int uLength) {
    const char chars[] = "abcdefghijklmnopqrstuvwxyz"
                         "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                         "0123456789!?.,:;/*-+_()";
    // -1 because sizeof() includes the trailing '\0' byte
    const size_t len = sizeof(chars) / sizeof(chars[0]) - 1;

    size_t p;
    CString sRet;

    for (unsigned int a = 0; a < uLength; a++) {
        p = (size_t) (len * (rand() / (RAND_MAX + 1.0)));
        sRet += chars[p];
    }

    return sRet;
}

bool CString::Base64Encode(unsigned int uWrap) { CString sCopy(*this); return sCopy.Base64Encode(*this, uWrap); }
unsigned long CString::Base64Decode() { CString sCopy(*this); return sCopy.Base64Decode(*this); }
CString CString::Base64Encode_n(unsigned int uWrap) const { CString sRet; Base64Encode(sRet, uWrap); return sRet; }
CString CString::Base64Decode_n() const { CString sRet; Base64Decode(sRet); return sRet; }

bool CString::Base64Encode(CString& sRet, unsigned int uWrap) const {
    const char b64table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    sRet.clear();
    size_t len = size();
    const unsigned char* input = (const unsigned char*) c_str();
    unsigned char *output, *p;
    size_t i = 0, mod = len % 3, toalloc;
    toalloc = (len / 3) * 4 + (3 - mod) % 3 + 1 + 8;

    if (uWrap) {
        toalloc += len / 57;
        if (len % 57) {
            toalloc++;
        }
    }

    if (toalloc < len) {
        return 0;
    }

    p = output = new unsigned char[toalloc];

    while (i < len - mod) {
        *p++ = b64table[input[i++] >> 2];
        *p++ = b64table[((input[i - 1] << 4) | (input[i] >> 4)) & 0x3f];
        *p++ = b64table[((input[i] << 2) | (input[i + 1] >> 6)) & 0x3f];
        *p++ = b64table[input[i + 1] & 0x3f];
        i += 2;

        if (uWrap && !(i % 57)) {
            *p++ = '\n';
        }
    }

    if (!mod) {
        if (uWrap && i % 57) {
            *p++ = '\n';
        }
    } else {
        *p++ = b64table[input[i++] >> 2];
        *p++ = b64table[((input[i - 1] << 4) | (input[i] >> 4)) & 0x3f];

        if (mod == 1) {
            *p++ = '=';
        } else {
            *p++ = b64table[(input[i] << 2) & 0x3f];
        }

        *p++ = '=';

        if (uWrap) {
            *p++ = '\n';
        }
    }

    *p = 0;
    sRet = (char*) output;
    delete[] output;
    return true;
}

unsigned long CString::Base64Decode(CString& sRet) const {
    CString sTmp(*this);
    // remove new lines
    sTmp.Replace("\r", "");
    sTmp.Replace("\n", "");

    const char* in = sTmp.c_str();
    char c, c1, *p;
    unsigned long i;
    unsigned long uLen = sTmp.size();
    char* out = new char[uLen + 1];

    for (i = 0, p = out; i < uLen; i++) {
        c = (char)base64_table[(unsigned char)in[i++]];
        c1 = (char)base64_table[(unsigned char)in[i++]];
        *p++ = char((c << 2) | ((c1 >> 4) & 0x3));

        if (i < uLen) {
            if (in[i] == '=') {
                break;
            }
            c = (char)base64_table[(unsigned char)in[i]];
            *p++ = char(((c1 << 4) & 0xf0) | ((c >> 2) & 0xf));
        }

        if (++i < uLen) {
            if (in[i] == '=') {
                break;
            }
            *p++ = char(((c << 6) & 0xc0) | (char)base64_table[(unsigned char)in[i]]);
        }
    }

    *p = '\0';
    unsigned long uRet = p - out;
    sRet.clear();
    sRet.append(out, uRet);
    delete[] out;

    return uRet;
}

CString CString::MD5() const { return (const char*) CMD5(*this); }

CString CString::SHA256() const {
    unsigned char digest[SHA256_DIGEST_SIZE];
    char digest_hex[SHA256_DIGEST_SIZE * 2 + 1];
    const unsigned char *message = (const unsigned char *) c_str();

    sha256(message, length(), digest);

    snprintf(digest_hex, sizeof(digest_hex),
             "%02x%02x%02x%02x%02x%02x%02x%02x"
             "%02x%02x%02x%02x%02x%02x%02x%02x"
             "%02x%02x%02x%02x%02x%02x%02x%02x"
             "%02x%02x%02x%02x%02x%02x%02x%02x",
             digest[ 0], digest[ 1], digest[ 2], digest[ 3],
             digest[ 4], digest[ 5], digest[ 6], digest[ 7],
             digest[ 8], digest[ 9], digest[10], digest[11],
             digest[12], digest[13], digest[14],
digest[15],
             digest[16], digest[17], digest[18], digest[19],
             digest[20], digest[21], digest[22], digest[23],
             digest[24], digest[25], digest[26], digest[27],
             digest[28], digest[29], digest[30], digest[31]);

    return digest_hex;
}

#ifdef HAVE_LIBSSL
CString CString::Encrypt_n(const CString& sPass, const CString& sIvec) const {
    // Copy *this first; otherwise an empty string would be encrypted and
    // returned, which defeats the purpose of the non-destructive variant.
    CString sRet = *this;
    sRet.Encrypt(sPass, sIvec);
    return sRet;
}

CString CString::Decrypt_n(const CString& sPass, const CString& sIvec) const {
    CString sRet = *this;
    sRet.Decrypt(sPass, sIvec);
    return sRet;
}

void CString::Encrypt(const CString& sPass, const CString& sIvec) { Crypt(sPass, true, sIvec); }
void CString::Decrypt(const CString& sPass, const CString& sIvec) { Crypt(sPass, false, sIvec); }

void CString::Crypt(const CString& sPass, bool bEncrypt, const CString& sIvec) {
    unsigned char szIvec[8] = {0, 0, 0, 0, 0, 0, 0, 0};
    BF_KEY bKey;

    if (sIvec.length() >= 8) {
        memcpy(szIvec, sIvec.data(), 8);
    }

    BF_set_key(&bKey, (unsigned int)sPass.length(), (unsigned char*) sPass.data());
    unsigned int uPad = (length() % 8);

    if (uPad) {
        uPad = 8 - uPad;
        append(uPad, '\0');
    }

    size_t uLen = length();
    unsigned char* szBuff = (unsigned char*) malloc(uLen);
    BF_cbc_encrypt((const unsigned char*) data(), szBuff, uLen, &bKey, szIvec, ((bEncrypt) ? BF_ENCRYPT : BF_DECRYPT));

    clear();
    append((const char*) szBuff, uLen);
    free(szBuff);
}
#endif // HAVE_LIBSSL

CString CString::ToPercent(double d) {
    char szRet[32];
    snprintf(szRet, 32, "%.02f%%", d);
    return szRet;
}

CString CString::ToByteStr(unsigned long long d) {
    const unsigned long long KiB = 1024;
    const unsigned long long MiB = KiB * 1024;
    const unsigned long long GiB = MiB * 1024;
    const unsigned long long TiB = GiB * 1024;

    if (d > TiB) {
        return CString(d / TiB) + " TiB";
    } else if (d > GiB) {
        return CString(d / GiB) + " GiB";
    } else if (d > MiB) {
        return CString(d / MiB) + " MiB";
    } else if (d > KiB) {
        return CString(d / KiB) + " KiB";
    }

    return CString(d) + " B";
}

CString CString::ToTimeStr(unsigned long s) {
    const unsigned long m = 60;
    const unsigned long h = m * 60;
    const unsigned long d = h * 24;
    const unsigned long w = d * 7;
    const unsigned long y = d * 365;
    CString sRet;

#define TIMESPAN(time, str)                  \
    if (s >= time) {                         \
        sRet += CString(s / time) + str " "; \
        s = s % time;                        \
    }
    TIMESPAN(y, "y");
    TIMESPAN(w, "w");
    TIMESPAN(d, "d");
    TIMESPAN(h, "h");
    TIMESPAN(m, "m");
    TIMESPAN(1, "s");

    if (sRet.empty())
        return "0s";

    return sRet.RightChomp_n();
}

bool CString::ToBool() const {
    CString sTrimmed = Trim_n();
    return (!sTrimmed.Trim_n("0").empty() &&
            !sTrimmed.Equals("false") && !sTrimmed.Equals("off") &&
            !sTrimmed.Equals("no") && !sTrimmed.Equals("n"));
}

short CString::ToShort() const { return (short int)strtol(this->c_str(), (char**) NULL, 10); }
unsigned short CString::ToUShort() const { return (unsigned short int)strtoul(this->c_str(), (char**) NULL, 10); }
unsigned int CString::ToUInt() const { return (unsigned int)strtoul(this->c_str(), (char**) NULL, 10); }
int CString::ToInt() const { return (int)strtol(this->c_str(), (char**) NULL, 10); }
long CString::ToLong() const { return strtol(this->c_str(), (char**) NULL, 10); }
unsigned long CString::ToULong() const { return strtoul(c_str(), NULL, 10); }
unsigned long long CString::ToULongLong() const { return strtoull(c_str(), NULL, 10); }
long long CString::ToLongLong() const { return strtoll(c_str(), NULL, 10); }
double CString::ToDouble() const { return strtod(c_str(), NULL); }

bool CString::Trim(const CString& s) {
    bool bLeft = TrimLeft(s);
    return (TrimRight(s) || bLeft);
}

bool CString::TrimLeft(const
CString& s) {
    size_type i = find_first_not_of(s);

    if (i == 0)
        return false;

    if (i != npos)
        this->erase(0, i);
    else
        this->clear();

    return true;
}

bool CString::TrimRight(const CString& s) {
    size_type i = find_last_not_of(s);

    if (i + 1 == length())
        return false;

    if (i != npos)
        this->erase(i + 1, npos);
    else
        this->clear();

    return true;
}

CString CString::Trim_n(const CString& s) const { CString sRet = *this; sRet.Trim(s); return sRet; }
CString CString::TrimLeft_n(const CString& s) const { CString sRet = *this; sRet.TrimLeft(s); return sRet; }
CString CString::TrimRight_n(const CString& s) const { CString sRet = *this; sRet.TrimRight(s); return sRet; }

bool CString::TrimPrefix(const CString& sPrefix) {
    if (Equals(sPrefix, false, sPrefix.length())) {
        LeftChomp(sPrefix.length());
        return true;
    } else {
        return false;
    }
}

bool CString::TrimSuffix(const CString& sSuffix) {
    if (Right(sSuffix.length()).Equals(sSuffix)) {
        RightChomp(sSuffix.length());
        return true;
    } else {
        return false;
    }
}

size_t CString::Find(const CString& s, CaseSensitivity cs) const {
    if (cs == CaseSensitive) {
        return find(s);
    } else {
        return AsLower().find(s.AsLower());
    }
}

bool CString::StartsWith(const CString& sPrefix, CaseSensitivity cs) const {
    return Left(sPrefix.length()).Equals(sPrefix, cs);
}

bool CString::EndsWith(const CString& sSuffix, CaseSensitivity cs) const {
    return Right(sSuffix.length()).Equals(sSuffix, cs);
}

bool CString::Contains(const CString& s, CaseSensitivity cs) const {
    return Find(s, cs) != npos;
}

CString CString::TrimPrefix_n(const CString& sPrefix) const { CString sRet = *this; sRet.TrimPrefix(sPrefix); return sRet; }
CString CString::TrimSuffix_n(const CString& sSuffix) const { CString sRet = *this; sRet.TrimSuffix(sSuffix); return sRet; }
CString CString::LeftChomp_n(size_type uLen) const { CString sRet = *this; sRet.LeftChomp(uLen); return sRet; }
CString CString::RightChomp_n(size_type uLen) const { CString sRet = *this; sRet.RightChomp(uLen); return sRet; }

bool CString::LeftChomp(size_type uLen) {
    bool bRet = false;

    while ((uLen--) && (length())) {
        erase(0, 1);
        bRet = true;
    }

    return bRet;
}

bool CString::RightChomp(size_type uLen) {
    bool bRet = false;

    while ((uLen--) && (length())) {
        erase(length() - 1);
        bRet = true;
    }

    return bRet;
}

CString CString::StripControls_n() const {
    CString sRet;
    const unsigned char *pStart = (const unsigned char*) data();
    unsigned char ch = *pStart;
    size_type iLength = length();
    sRet.reserve(iLength);
    bool colorCode = false;
    unsigned int digits = 0;
    bool comma = false;

    for (unsigned int a = 0; a < iLength; a++, ch = pStart[a]) {
        // Color code. Format: \x03([0-9]{1,2}(,[0-9]{1,2})?)?
        if (ch == 0x03) {
            colorCode = true;
            digits = 0;
            comma = false;
            continue;
        }
        if (colorCode) {
            if (isdigit(ch) && digits < 2) {
                digits++;
                continue;
            }
            if (ch == ',' && !comma && digits > 0) {
                comma = true;
                digits = 0;
                continue;
            }

            colorCode = false;

            if (digits == 0 && comma) {
                // There was a ',' which wasn't followed by digits, we should print it.
sRet += ',';
            }
        }
        // C0 control codes
        if (ch < 0x20 || ch == 0x7F)
            continue;
        sRet += ch;
    }
    if (colorCode && digits == 0 && comma) {
        sRet += ',';
    }
    sRet.reserve(0);
    return sRet;
}

CString& CString::StripControls() {
    return (*this = StripControls_n());
}

//////////////// MCString ////////////////
const MCString MCString::EmptyMap;

MCString::status_t MCString::WriteToDisk(const CString& sPath, mode_t iMode) const {
    CFile cFile(sPath);

    if (this->empty()) {
        if (!cFile.Exists())
            return MCS_SUCCESS;
        if (cFile.Delete())
            return MCS_SUCCESS;
    }

    if (!cFile.Open(O_WRONLY|O_CREAT|O_TRUNC, iMode)) {
        return MCS_EOPEN;
    }

    for (MCString::const_iterator it = this->begin(); it != this->end(); ++it) {
        CString sKey = it->first;
        CString sValue = it->second;

        if (!WriteFilter(sKey, sValue)) {
            return MCS_EWRITEFIL;
        }

        if (sKey.empty()) {
            continue;
        }

        if (cFile.Write(Encode(sKey) + " " + Encode(sValue) + "\n") <= 0) {
            return MCS_EWRITE;
        }
    }

    cFile.Close();

    return MCS_SUCCESS;
}

MCString::status_t MCString::ReadFromDisk(const CString& sPath) {
    clear();
    CFile cFile(sPath);

    if (!cFile.Open(O_RDONLY)) {
        return MCS_EOPEN;
    }

    CString sBuffer;

    while (cFile.ReadLine(sBuffer)) {
        sBuffer.Trim();
        CString sKey = sBuffer.Token(0);
        CString sValue = sBuffer.Token(1);
        Decode(sKey);
        Decode(sValue);

        if (!ReadFilter(sKey, sValue))
            return MCS_EREADFIL;

        (*this)[sKey] = sValue;
    }

    cFile.Close();

    return MCS_SUCCESS;
}

static const char hexdigits[] = "0123456789abcdef";

CString& MCString::Encode(CString& sValue) const {
    CString sTmp;

    for (CString::iterator it = sValue.begin(); it != sValue.end(); ++it) {
        // isalnum() needs unsigned char as argument and this code
        // assumes unsigned, too.
        unsigned char c = *it;

        if (isalnum(c)) {
            sTmp += c;
        } else {
            sTmp += "%";
            sTmp += hexdigits[c >> 4];
            sTmp += hexdigits[c & 0xf];
            sTmp += ";";
        }
    }

    sValue = sTmp;
    return sValue;
}

CString& MCString::Decode(CString& sValue) const {
    const char *pTmp = sValue.c_str();
    char *endptr;
    CString sTmp;

    while (*pTmp) {
        if (*pTmp != '%') {
            sTmp += *pTmp++;
        } else {
            char ch = (char) strtol(pTmp + 1, &endptr, 16);

            if (*endptr == ';') {
                sTmp += ch;
                pTmp = ++endptr;
            } else {
                sTmp += *pTmp++;
            }
        }
    }

    sValue = sTmp;
    return sValue;
}
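// Illustrative usage of the tokenizing helpers above (a sketch; the behavior
// follows from the Token()/Split() implementations in this file, assuming the
// usual defaults of sSep = " " and bRest = false declared in the header):
//
//   CString sLine = "PRIVMSG #znc :hello world";
//   CString sCmd    = sLine.Token(0);        // "PRIVMSG"
//   CString sTarget = sLine.Token(1);        // "#znc"
//   CString sRest   = sLine.Token(2, true);  // ":hello world"
//
//   VCString vsParts;
//   sLine.Split(" ", vsParts);               // "PRIVMSG", "#znc", ":hello", "world"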
22.823989
184
0.57225
md-5
8b21cdf51788de20f811639ed0f7e4eeda1560b1
21,287
cpp
C++
opencl/source/helpers/task_information.cpp
8tab/compute-runtime
71bd96ad7184df83c7af04ffa8e0d6678ab26f99
[ "MIT" ]
1
2020-04-17T05:46:04.000Z
2020-04-17T05:46:04.000Z
opencl/source/helpers/task_information.cpp
8tab/compute-runtime
71bd96ad7184df83c7af04ffa8e0d6678ab26f99
[ "MIT" ]
null
null
null
opencl/source/helpers/task_information.cpp
8tab/compute-runtime
71bd96ad7184df83c7af04ffa8e0d6678ab26f99
[ "MIT" ]
null
null
null
/*
 * Copyright (C) 2017-2020 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "opencl/source/helpers/task_information.h"

#include "shared/source/command_stream/command_stream_receiver.h"
#include "shared/source/command_stream/csr_deps.h"
#include "shared/source/command_stream/linear_stream.h"
#include "shared/source/command_stream/preemption.h"
#include "shared/source/helpers/aligned_memory.h"
#include "shared/source/helpers/engine_node_helper.h"
#include "shared/source/helpers/string.h"
#include "shared/source/memory_manager/internal_allocation_storage.h"
#include "shared/source/memory_manager/surface.h"

#include "opencl/source/built_ins/builtins_dispatch_builder.h"
#include "opencl/source/cl_device/cl_device.h"
#include "opencl/source/command_queue/command_queue.h"
#include "opencl/source/command_queue/enqueue_common.h"
#include "opencl/source/device_queue/device_queue.h"
#include "opencl/source/gtpin/gtpin_notify.h"
#include "opencl/source/helpers/enqueue_properties.h"
#include "opencl/source/helpers/task_information.inl"
#include "opencl/source/mem_obj/mem_obj.h"

namespace NEO {
template void KernelOperation::ResourceCleaner::operator()<LinearStream>(LinearStream *);
template void KernelOperation::ResourceCleaner::operator()<IndirectHeap>(IndirectHeap *);

CommandMapUnmap::CommandMapUnmap(MapOperationType operationType, MemObj &memObj, MemObjSizeArray &copySize,
                                 MemObjOffsetArray &copyOffset, bool readOnly, CommandQueue &commandQueue)
    : Command(commandQueue), memObj(memObj), copySize(copySize), copyOffset(copyOffset),
      readOnly(readOnly), operationType(operationType) {
    memObj.incRefInternal();
}

CompletionStamp &CommandMapUnmap::submit(uint32_t taskLevel, bool terminated) {
    if (terminated) {
        memObj.decRefInternal();
        return completionStamp;
    }

    auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver();
    auto commandStreamReceiverOwnership = commandStreamReceiver.obtainUniqueOwnership();
    auto &queueCommandStream = commandQueue.getCS(0);
    size_t offset = queueCommandStream.getUsed();
    MultiDispatchInfo multiDispatch;
    Device &device = commandQueue.getDevice();

    DispatchFlags dispatchFlags(
        {},                                                                          //csrDependencies
        nullptr,                                                                     //barrierTimestampPacketNodes
        {},                                                                          //pipelineSelectArgs
        commandQueue.flushStamp->getStampReference(),                                //flushStampReference
        commandQueue.getThrottle(),                                                  //throttle
        PreemptionHelper::taskPreemptionMode(device, multiDispatch),                 //preemptionMode
        GrfConfig::DefaultGrfNumber,                                                 //numGrfRequired
        L3CachingSettings::l3CacheOn,                                                //l3CacheSettings
        ThreadArbitrationPolicy::NotPresent,                                         //threadArbitrationPolicy
        commandQueue.getSliceCount(),                                                //sliceCount
        true,                                                                        //blocking
        true,                                                                        //dcFlush
        false,                                                                       //useSLM
        true,                                                                        //guardCommandBufferWithPipeControl
        false,                                                                       //GSBA32BitRequired
        false,                                                                       //requiresCoherency
        commandQueue.getPriority() == QueuePriority::LOW,                            //lowPriority
        false,                                                                       //implicitFlush
        commandQueue.getGpgpuCommandStreamReceiver().isNTo1SubmissionModelEnabled(), //outOfOrderExecutionAllowed
        false,                                                                       //epilogueRequired
        false                                                                        //usePerDssBackedBuffer
    );

    DEBUG_BREAK_IF(taskLevel >= CompletionStamp::levelNotReady);

    gtpinNotifyPreFlushTask(&commandQueue);

    completionStamp = commandStreamReceiver.flushTask(queueCommandStream,
                                                      offset,
                                                      commandQueue.getIndirectHeap(IndirectHeap::DYNAMIC_STATE, 0u),
                                                      commandQueue.getIndirectHeap(IndirectHeap::INDIRECT_OBJECT, 0u),
                                                      commandQueue.getIndirectHeap(IndirectHeap::SURFACE_STATE, 0u),
                                                      taskLevel,
                                                      dispatchFlags,
                                                      commandQueue.getDevice());

    if (!memObj.isMemObjZeroCopy()) {
        commandQueue.waitUntilComplete(completionStamp.taskCount,
completionStamp.flushStamp, false);

        if (operationType == MAP) {
            memObj.transferDataToHostPtr(copySize, copyOffset);
        } else if (!readOnly) {
            DEBUG_BREAK_IF(operationType != UNMAP);
            memObj.transferDataFromHostPtr(copySize, copyOffset);
        }
    }

    memObj.decRefInternal();

    return completionStamp;
}

CommandComputeKernel::CommandComputeKernel(CommandQueue &commandQueue, std::unique_ptr<KernelOperation> &kernelOperation,
                                           std::vector<Surface *> &surfaces, bool flushDC, bool usesSLM,
                                           bool ndRangeKernel, std::unique_ptr<PrintfHandler> printfHandler,
                                           PreemptionMode preemptionMode, Kernel *kernel, uint32_t kernelCount)
    : Command(commandQueue, kernelOperation), flushDC(flushDC), slmUsed(usesSLM),
      NDRangeKernel(ndRangeKernel), printfHandler(std::move(printfHandler)), kernel(kernel),
      kernelCount(kernelCount), preemptionMode(preemptionMode) {
    for (auto surface : surfaces) {
        this->surfaces.push_back(surface);
    }
    UNRECOVERABLE_IF(nullptr == this->kernel);
    kernel->incRefInternal();
}

CommandComputeKernel::~CommandComputeKernel() {
    kernel->decRefInternal();
}

CompletionStamp &CommandComputeKernel::submit(uint32_t taskLevel, bool terminated) {
    if (terminated) {
        for (auto surface : surfaces) {
            delete surface;
        }
        surfaces.clear();
        return completionStamp;
    }

    auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver();
    bool executionModelKernel = kernel->isParentKernel;
    auto devQueue = commandQueue.getContext().getDefaultDeviceQueue();

    auto commandStreamReceiverOwnership = commandStreamReceiver.obtainUniqueOwnership();

    bool isCcsUsed = EngineHelpers::isCcs(commandQueue.getGpgpuEngine().osContext->getEngineType());

    if (executionModelKernel) {
        while (!devQueue->isEMCriticalSectionFree())
            ;
        devQueue->resetDeviceQueue();
        devQueue->acquireEMCriticalSection();
    }

    IndirectHeap *dsh = kernelOperation->dsh.get();
    IndirectHeap *ioh = kernelOperation->ioh.get();
    IndirectHeap *ssh = kernelOperation->ssh.get();

    auto requiresCoherency = false;
    auto anyUncacheableArgs = false;
    for (auto &surface : surfaces) {
        DEBUG_BREAK_IF(!surface);
        surface->makeResident(commandStreamReceiver);
        requiresCoherency |= surface->IsCoherent;
        if (!surface->allowsL3Caching()) {
            anyUncacheableArgs = true;
        }
    }

    if (printfHandler) {
        printfHandler.get()->makeResident(commandStreamReceiver);
    }

    makeTimestampPacketsResident(commandStreamReceiver);

    if (executionModelKernel) {
        uint32_t taskCount = commandStreamReceiver.peekTaskCount() + 1;
        devQueue->setupExecutionModelDispatch(*ssh, *dsh, kernel, kernelCount,
                                              commandStreamReceiver.getTagAllocation()->getGpuAddress(),
                                              taskCount, timestamp, isCcsUsed);

        SchedulerKernel &scheduler = commandQueue.getContext().getSchedulerKernel();

        scheduler.setArgs(devQueue->getQueueBuffer(),
                          devQueue->getStackBuffer(),
                          devQueue->getEventPoolBuffer(),
                          devQueue->getSlbBuffer(),
                          dsh->getGraphicsAllocation(),
                          kernel->getKernelReflectionSurface(),
                          devQueue->getQueueStorageBuffer(),
                          ssh->getGraphicsAllocation(),
                          devQueue->getDebugQueue());

        devQueue->dispatchScheduler(
            *kernelOperation->commandStream,
            scheduler,
            preemptionMode,
            ssh,
            dsh,
            isCcsUsed);

        scheduler.makeResident(commandStreamReceiver);

        // Update SLM usage
        slmUsed |= scheduler.slmTotalSize > 0;

        this->kernel->getProgram()->getBlockKernelManager()->makeInternalAllocationsResident(commandStreamReceiver);
    }

    if (kernelOperation->blitPropertiesContainer.size() > 0) {
        auto &bcsCsr = *commandQueue.getBcsCommandStreamReceiver();
        CsrDependencies csrDeps;
        eventsRequest.fillCsrDependencies(csrDeps, bcsCsr, CsrDependencies::DependenciesType::All);
BlitProperties::setupDependenciesForAuxTranslation(kernelOperation->blitPropertiesContainer,
                                                           *timestampPacketDependencies,
                                                           *currentTimestampPacketNodes, csrDeps,
                                                           commandQueue.getGpgpuCommandStreamReceiver(), bcsCsr);

        auto bcsTaskCount = bcsCsr.blitBuffer(kernelOperation->blitPropertiesContainer, false);
        commandQueue.updateBcsTaskCount(bcsTaskCount);
    }

    DispatchFlags dispatchFlags(
        {},                                                                          //csrDependencies
        nullptr,                                                                     //barrierTimestampPacketNodes
        {false, kernel->isVmeKernel()},                                              //pipelineSelectArgs
        commandQueue.flushStamp->getStampReference(),                                //flushStampReference
        commandQueue.getThrottle(),                                                  //throttle
        preemptionMode,                                                              //preemptionMode
        kernel->getKernelInfo().patchInfo.executionEnvironment->NumGRFRequired,      //numGrfRequired
        L3CachingSettings::l3CacheOn,                                                //l3CacheSettings
        kernel->getThreadArbitrationPolicy(),                                        //threadArbitrationPolicy
        commandQueue.getSliceCount(),                                                //sliceCount
        true,                                                                        //blocking
        flushDC,                                                                     //dcFlush
        slmUsed,                                                                     //useSLM
        true,                                                                        //guardCommandBufferWithPipeControl
        NDRangeKernel,                                                               //GSBA32BitRequired
        requiresCoherency,                                                           //requiresCoherency
        commandQueue.getPriority() == QueuePriority::LOW,                            //lowPriority
        false,                                                                       //implicitFlush
        commandQueue.getGpgpuCommandStreamReceiver().isNTo1SubmissionModelEnabled(), //outOfOrderExecutionAllowed
        false,                                                                       //epilogueRequired
        kernel->requiresPerDssBackedBuffer()                                         //usePerDssBackedBuffer
    );

    if (timestampPacketDependencies) {
        eventsRequest.fillCsrDependencies(dispatchFlags.csrDependencies, commandStreamReceiver, CsrDependencies::DependenciesType::OutOfCsr);
        dispatchFlags.barrierTimestampPacketNodes = &timestampPacketDependencies->barrierNodes;
    }
    dispatchFlags.pipelineSelectArgs.specialPipelineSelectMode = kernel->requiresSpecialPipelineSelectMode();
    if (anyUncacheableArgs) {
        dispatchFlags.l3CacheSettings = L3CachingSettings::l3CacheOff;
    } else if (!kernel->areStatelessWritesUsed()) {
        dispatchFlags.l3CacheSettings = L3CachingSettings::l3AndL1On;
    }

    if (commandQueue.dispatchHints != 0) {
        dispatchFlags.engineHints = commandQueue.dispatchHints;
        dispatchFlags.epilogueRequired = true;
    }

    DEBUG_BREAK_IF(taskLevel >= CompletionStamp::levelNotReady);

    gtpinNotifyPreFlushTask(&commandQueue);

    completionStamp = commandStreamReceiver.flushTask(*kernelOperation->commandStream,
                                                      0,
                                                      *dsh,
                                                      *ioh,
                                                      *ssh,
                                                      taskLevel,
                                                      dispatchFlags,
                                                      commandQueue.getDevice());

    if (gtpinIsGTPinInitialized()) {
        gtpinNotifyFlushTask(completionStamp.taskCount);
    }

    if (printfHandler) {
        commandQueue.waitUntilComplete(completionStamp.taskCount, completionStamp.flushStamp, false);
        printfHandler.get()->printEnqueueOutput();
    }

    for (auto surface : surfaces) {
        delete surface;
    }
    surfaces.clear();

    return completionStamp;
}

void CommandWithoutKernel::dispatchBlitOperation() {
    auto bcsCsr = commandQueue.getBcsCommandStreamReceiver();
    UNRECOVERABLE_IF(bcsCsr == nullptr);

    UNRECOVERABLE_IF(kernelOperation->blitPropertiesContainer.size() != 1);
    auto &blitProperties = *kernelOperation->blitPropertiesContainer.begin();
    eventsRequest.fillCsrDependencies(blitProperties.csrDependencies, *bcsCsr, CsrDependencies::DependenciesType::All);
    blitProperties.csrDependencies.push_back(&timestampPacketDependencies->cacheFlushNodes);
    blitProperties.csrDependencies.push_back(&timestampPacketDependencies->previousEnqueueNodes);
    blitProperties.csrDependencies.push_back(&timestampPacketDependencies->barrierNodes);
    blitProperties.outputTimestampPacket = currentTimestampPacketNodes->peekNodes()[0];

    auto bcsTaskCount = bcsCsr->blitBuffer(kernelOperation->blitPropertiesContainer, false);

    commandQueue.updateBcsTaskCount(bcsTaskCount);
}

CompletionStamp
&CommandWithoutKernel::submit(uint32_t taskLevel, bool terminated) {
    if (terminated) {
        return completionStamp;
    }

    auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver();

    if (!kernelOperation) {
        completionStamp.taskCount = commandStreamReceiver.peekTaskCount();
        completionStamp.taskLevel = commandStreamReceiver.peekTaskLevel();
        completionStamp.flushStamp = commandStreamReceiver.obtainCurrentFlushStamp();
        return completionStamp;
    }

    auto lockCSR = commandStreamReceiver.obtainUniqueOwnership();

    if (kernelOperation->blitEnqueue) {
        if (commandStreamReceiver.isStallingPipeControlOnNextFlushRequired()) {
            timestampPacketDependencies->barrierNodes.add(commandStreamReceiver.getTimestampPacketAllocator()->getTag());
        }
        dispatchBlitOperation();
    }

    DispatchFlags dispatchFlags(
        {},                                                    //csrDependencies
        &timestampPacketDependencies->barrierNodes,            //barrierTimestampPacketNodes
        {},                                                    //pipelineSelectArgs
        commandQueue.flushStamp->getStampReference(),          //flushStampReference
        commandQueue.getThrottle(),                            //throttle
        commandQueue.getDevice().getPreemptionMode(),          //preemptionMode
        GrfConfig::DefaultGrfNumber,                           //numGrfRequired
        L3CachingSettings::l3CacheOn,                          //l3CacheSettings
        ThreadArbitrationPolicy::NotPresent,                   //threadArbitrationPolicy
        commandQueue.getSliceCount(),                          //sliceCount
        true,                                                  //blocking
        false,                                                 //dcFlush
        false,                                                 //useSLM
        true,                                                  //guardCommandBufferWithPipeControl
        false,                                                 //GSBA32BitRequired
        false,                                                 //requiresCoherency
        commandQueue.getPriority() == QueuePriority::LOW,      //lowPriority
        false,                                                 //implicitFlush
        commandStreamReceiver.isNTo1SubmissionModelEnabled(),  //outOfOrderExecutionAllowed
        false,                                                 //epilogueRequired
        false                                                  //usePerDssBackedBuffer
    );

    UNRECOVERABLE_IF(!commandStreamReceiver.peekTimestampPacketWriteEnabled());

    eventsRequest.fillCsrDependencies(dispatchFlags.csrDependencies, commandStreamReceiver, CsrDependencies::DependenciesType::OutOfCsr);

    makeTimestampPacketsResident(commandStreamReceiver);

    gtpinNotifyPreFlushTask(&commandQueue);

    completionStamp = commandStreamReceiver.flushTask(*kernelOperation->commandStream,
                                                      0,
                                                      commandQueue.getIndirectHeap(IndirectHeap::DYNAMIC_STATE, 0u),
                                                      commandQueue.getIndirectHeap(IndirectHeap::INDIRECT_OBJECT, 0u),
                                                      commandQueue.getIndirectHeap(IndirectHeap::SURFACE_STATE, 0u),
                                                      taskLevel,
                                                      dispatchFlags,
                                                      commandQueue.getDevice());

    return completionStamp;
}

void Command::setEventsRequest(EventsRequest &eventsRequest) {
    this->eventsRequest = eventsRequest;
    if (eventsRequest.numEventsInWaitList > 0) {
        eventsWaitlist.resize(eventsRequest.numEventsInWaitList);
        auto size = eventsRequest.numEventsInWaitList * sizeof(cl_event);
        memcpy_s(&eventsWaitlist[0], size, eventsRequest.eventWaitList, size);
        this->eventsRequest.eventWaitList = &eventsWaitlist[0];
    }
}

void Command::setTimestampPacketNode(TimestampPacketContainer &current, TimestampPacketDependencies &&dependencies) {
    currentTimestampPacketNodes = std::make_unique<TimestampPacketContainer>();
    currentTimestampPacketNodes->assignAndIncrementNodesRefCounts(current);

    timestampPacketDependencies = std::make_unique<TimestampPacketDependencies>();
    *timestampPacketDependencies = std::move(dependencies);
}

Command::~Command() {
    auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver();
    if (commandStreamReceiver.peekTimestampPacketWriteEnabled()) {
        for (cl_event &eventFromWaitList : eventsWaitlist) {
            auto event = castToObjectOrAbort<Event>(eventFromWaitList);
            event->decRefInternal();
        }
    }
}

void Command::makeTimestampPacketsResident(CommandStreamReceiver &commandStreamReceiver) {
    if
(commandStreamReceiver.peekTimestampPacketWriteEnabled()) {
        for (cl_event &eventFromWaitList : eventsWaitlist) {
            auto event = castToObjectOrAbort<Event>(eventFromWaitList);
            if (event->getTimestampPacketNodes()) {
                event->getTimestampPacketNodes()->makeResident(commandStreamReceiver);
            }
        }
    }

    if (currentTimestampPacketNodes) {
        currentTimestampPacketNodes->makeResident(commandStreamReceiver);
    }

    if (timestampPacketDependencies) {
        timestampPacketDependencies->cacheFlushNodes.makeResident(commandStreamReceiver);
        timestampPacketDependencies->previousEnqueueNodes.makeResident(commandStreamReceiver);
    }
}

Command::Command(CommandQueue &commandQueue) : commandQueue(commandQueue) {}

Command::Command(CommandQueue &commandQueue, std::unique_ptr<KernelOperation> &kernelOperation)
    : commandQueue(commandQueue), kernelOperation(std::move(kernelOperation)) {}
} // namespace NEO
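// Reading note (an interpretation, not from the original source): the three
// Command subclasses above -- CommandMapUnmap, CommandComputeKernel and
// CommandWithoutKernel -- capture everything an enqueue needs so that it can be
// replayed later through submit(taskLevel, terminated), which appears to be how
// enqueues blocked on user events are handled; submit(..., true) is the abort
// path that only drops references (memObj, surfaces) without flushing anything
// to the command stream receiver.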
51.047962
155
0.584864
8tab