Dataset columns:

  url         stringlengths   12-221
  text        stringlengths   176-1.03M
  encoding    stringclasses   16 values
  confidence  float64         0.7-1
  license     stringlengths   0-347
  copyright   stringlengths   3-31.8k
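Each record below pairs a source file's url and text with its per-record metadata (encoding, confidence, license, copyright). As a minimal sketch of how rows with this schema could be filtered, assuming a hypothetical JSON Lines export named records.jsonl with exactly these six fields (the filename and export format are not part of this dump):

import json

# Hypothetical export: one JSON object per dataset row, carrying the six
# columns listed above (url, text, encoding, confidence, license, copyright).
def iter_rows(path="records.jsonl"):
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

# Keep rows with a known license and high extraction confidence.
kept = [row for row in iter_rows()
        if row["license"] != "unknown" and row["confidence"] >= 0.9]
for row in kept:
    print(row["url"], row["license"])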
llvm-toolchain-12-12.0.1/clang/test/Driver/msan.c
// REQUIRES: x86-registered-target
// RUN: %clang -fno-experimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN
// RUN: %clang -O1 -fno-experimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN
// RUN: %clang -O2 -fno-experimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN
// RUN: %clang -O3 -fno-experimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN
// RUN: %clang -fno-experimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-KMSAN
// RUN: %clang -O1 -fno-experimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-KMSAN
// RUN: %clang -O2 -fno-experimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-KMSAN
// RUN: %clang -O3 -fno-experimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-KMSAN
// RUN: %clang -target mips64-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN
// RUN: %clang -target mips64el-unknown-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN
// RUN: %clang -target powerpc64-unknown-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN
// RUN: %clang -target powerpc64le-unknown-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN

// Verify that -fsanitize=memory and -fsanitize=kernel-memory invoke MSan/KMSAN
// instrumentation.

// Also check that this works with the new pass manager with and without
// optimization.
// RUN: %clang -fexperimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN
// RUN: %clang -O1 -fexperimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN
// RUN: %clang -O2 -fexperimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN
// RUN: %clang -O3 -fexperimental-new-pass-manager -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-MSAN

int foo(int *a) { return *a; }

// CHECK-MSAN: __msan_init
// CHECK-KMSAN: __msan_get_context_state
encoding: utf-8
confidence: 1
license: unknown
copyright: unknown
thunderbird-91.6.0/toolkit/components/typeaheadfind/nsTypeAheadFind.cpp
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "nsCOMPtr.h" #include "nsDocShell.h" #include "nsMemory.h" #include "mozilla/ErrorResult.h" #include "mozilla/ModuleUtils.h" #include "mozilla/PresShell.h" #include "mozilla/Services.h" #include "nsCURILoader.h" #include "nsCycleCollectionParticipant.h" #include "nsNetUtil.h" #include "nsIURL.h" #include "nsIURI.h" #include "nsIDocShell.h" #include "nsISimpleEnumerator.h" #include "nsPIDOMWindow.h" #include "nsIPrefBranch.h" #include "nsString.h" #include "nsCRT.h" #include "nsGenericHTMLElement.h" #include "nsIFrame.h" #include "nsContainerFrame.h" #include "nsFrameTraversal.h" #include "mozilla/dom/Document.h" #include "nsIContent.h" #include "nsTextFragment.h" #include "nsIEditor.h" #include "nsIDocShellTreeItem.h" #include "nsIInterfaceRequestor.h" #include "nsIInterfaceRequestorUtils.h" #include "nsContentCID.h" #include "nsLayoutCID.h" #include "nsWidgetsCID.h" #include "nsIFormControl.h" #include "nsNameSpaceManager.h" #include "nsIObserverService.h" #include "nsFocusManager.h" #include "mozilla/dom/Element.h" #include "mozilla/dom/HTMLInputElement.h" #include "mozilla/dom/HTMLTextAreaElement.h" #include "mozilla/dom/Link.h" #include "mozilla/dom/RangeBinding.h" #include "mozilla/dom/Selection.h" #include "nsLayoutUtils.h" #include "nsRange.h" #include "nsTypeAheadFind.h" using namespace mozilla; using namespace mozilla::dom; NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(nsTypeAheadFind) NS_INTERFACE_MAP_ENTRY(nsITypeAheadFind) NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsITypeAheadFind) NS_INTERFACE_MAP_ENTRY(nsISupportsWeakReference) NS_INTERFACE_MAP_ENTRY(nsIObserver) NS_INTERFACE_MAP_END NS_IMPL_CYCLE_COLLECTING_ADDREF(nsTypeAheadFind) NS_IMPL_CYCLE_COLLECTING_RELEASE(nsTypeAheadFind) NS_IMPL_CYCLE_COLLECTION_WEAK(nsTypeAheadFind, mFoundLink, mFoundEditable, mCurrentWindow, mStartFindRange, mSearchRange, mStartPointRange, mEndPointRange, mSoundInterface, mFind, mFoundRange) #define NS_FIND_CONTRACTID "@mozilla.org/embedcomp/rangefind;1" nsTypeAheadFind::nsTypeAheadFind() : mStartLinksOnlyPref(false), mCaretBrowsingOn(false), mDidAddObservers(false), mLastFindLength(0), mIsSoundInitialized(false), mCaseSensitive(false), mEntireWord(false), mMatchDiacritics(false) {} nsTypeAheadFind::~nsTypeAheadFind() { nsCOMPtr<nsIPrefBranch> prefInternal( do_GetService(NS_PREFSERVICE_CONTRACTID)); if (prefInternal) { prefInternal->RemoveObserver("accessibility.typeaheadfind", this); prefInternal->RemoveObserver("accessibility.browsewithcaret", this); } } nsresult nsTypeAheadFind::Init(nsIDocShell* aDocShell) { nsCOMPtr<nsIPrefBranch> prefInternal( do_GetService(NS_PREFSERVICE_CONTRACTID)); mSearchRange = nullptr; mStartPointRange = nullptr; mEndPointRange = nullptr; if (!prefInternal || !EnsureFind()) return NS_ERROR_FAILURE; SetDocShell(aDocShell); if (!mDidAddObservers) { mDidAddObservers = true; // ----------- Listen to prefs ------------------ nsresult rv = prefInternal->AddObserver("accessibility.browsewithcaret", this, true); NS_ENSURE_SUCCESS(rv, rv); rv = prefInternal->AddObserver("accessibility.typeaheadfind", this, true); NS_ENSURE_SUCCESS(rv, rv); // ----------- Get initial preferences ---------- PrefsReset(); nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService(); if (os) { 
os->AddObserver(this, DOM_WINDOW_DESTROYED_TOPIC, true); } } if (!mIsSoundInitialized && !mNotFoundSoundURL.IsEmpty()) { // This makes sure system sound library is loaded so that // there's no lag before the first sound is played // by waiting for the first keystroke, we still get the startup time // benefits. mIsSoundInitialized = true; mSoundInterface = do_CreateInstance("@mozilla.org/sound;1"); if (mSoundInterface && !mNotFoundSoundURL.EqualsLiteral("beep")) { mSoundInterface->Init(); } } return NS_OK; } nsresult nsTypeAheadFind::PrefsReset() { nsCOMPtr<nsIPrefBranch> prefBranch(do_GetService(NS_PREFSERVICE_CONTRACTID)); NS_ENSURE_TRUE(prefBranch, NS_ERROR_FAILURE); prefBranch->GetBoolPref("accessibility.typeaheadfind.startlinksonly", &mStartLinksOnlyPref); bool isSoundEnabled = true; prefBranch->GetBoolPref("accessibility.typeaheadfind.enablesound", &isSoundEnabled); nsAutoCString soundStr; if (isSoundEnabled) prefBranch->GetCharPref("accessibility.typeaheadfind.soundURL", soundStr); mNotFoundSoundURL = soundStr; if (!mNotFoundSoundURL.IsEmpty() && !mNotFoundSoundURL.EqualsLiteral("beep")) { if (!mSoundInterface) { mSoundInterface = do_CreateInstance("@mozilla.org/sound;1"); } // Init to load the system sound library if the lib is not ready if (mSoundInterface) { mIsSoundInitialized = true; mSoundInterface->Init(); } } prefBranch->GetBoolPref("accessibility.browsewithcaret", &mCaretBrowsingOn); return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::SetCaseSensitive(bool isCaseSensitive) { mCaseSensitive = isCaseSensitive; if (mFind) { mFind->SetCaseSensitive(mCaseSensitive); } return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::GetCaseSensitive(bool* isCaseSensitive) { *isCaseSensitive = mCaseSensitive; return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::SetEntireWord(bool isEntireWord) { mEntireWord = isEntireWord; if (mFind) { mFind->SetEntireWord(mEntireWord); } return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::GetEntireWord(bool* isEntireWord) { *isEntireWord = mEntireWord; return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::SetMatchDiacritics(bool matchDiacritics) { mMatchDiacritics = matchDiacritics; if (mFind) { mFind->SetMatchDiacritics(mMatchDiacritics); } return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::GetMatchDiacritics(bool* matchDiacritics) { *matchDiacritics = mMatchDiacritics; return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::SetDocShell(nsIDocShell* aDocShell) { mDocShell = do_GetWeakReference(aDocShell); mWebBrowserFind = do_GetInterface(aDocShell); NS_ENSURE_TRUE(mWebBrowserFind, NS_ERROR_FAILURE); mDocument = do_GetWeakReference(aDocShell->GetExtantDocument()); ReleaseStrongMemberVariables(); return NS_OK; } void nsTypeAheadFind::ReleaseStrongMemberVariables() { mStartFindRange = nullptr; mStartPointRange = nullptr; mSearchRange = nullptr; mEndPointRange = nullptr; mFoundLink = nullptr; mFoundEditable = nullptr; mFoundRange = nullptr; mCurrentWindow = nullptr; mSelectionController = nullptr; mFind = nullptr; } NS_IMETHODIMP nsTypeAheadFind::SetSelectionModeAndRepaint(int16_t aToggle) { nsCOMPtr<nsISelectionController> selectionController = do_QueryReferent(mSelectionController); if (!selectionController) { return NS_OK; } selectionController->SetDisplaySelection(aToggle); selectionController->RepaintSelection( nsISelectionController::SELECTION_NORMAL); return NS_OK; } MOZ_CAN_RUN_SCRIPT_BOUNDARY NS_IMETHODIMP nsTypeAheadFind::CollapseSelection() { nsCOMPtr<nsISelectionController> selectionController = do_QueryReferent(mSelectionController); if (!selectionController) { return NS_OK; } 
RefPtr<Selection> selection = selectionController->GetSelection( nsISelectionController::SELECTION_NORMAL); if (selection) { selection->CollapseToStart(IgnoreErrors()); } return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::Observe(nsISupports* aSubject, const char* aTopic, const char16_t* aData) { if (!nsCRT::strcmp(aTopic, NS_PREFBRANCH_PREFCHANGE_TOPIC_ID)) { return PrefsReset(); } if (!nsCRT::strcmp(aTopic, DOM_WINDOW_DESTROYED_TOPIC) && SameCOMIdentity(aSubject, mCurrentWindow)) { ReleaseStrongMemberVariables(); } return NS_OK; } void nsTypeAheadFind::SaveFind() { if (mWebBrowserFind) mWebBrowserFind->SetSearchString(mTypeAheadBuffer); // save the length of this find for "not found" sound mLastFindLength = mTypeAheadBuffer.Length(); } void nsTypeAheadFind::PlayNotFoundSound() { if (mNotFoundSoundURL.IsEmpty()) // no sound return; if (!mSoundInterface) mSoundInterface = do_CreateInstance("@mozilla.org/sound;1"); if (mSoundInterface) { mIsSoundInitialized = true; if (mNotFoundSoundURL.EqualsLiteral("beep")) { mSoundInterface->Beep(); return; } nsCOMPtr<nsIURI> soundURI; if (mNotFoundSoundURL.EqualsLiteral("default")) NS_NewURI(getter_AddRefs(soundURI), nsLiteralCString(TYPEAHEADFIND_NOTFOUND_WAV_URL)); else NS_NewURI(getter_AddRefs(soundURI), mNotFoundSoundURL); nsCOMPtr<nsIURL> soundURL(do_QueryInterface(soundURI)); if (soundURL) mSoundInterface->Play(soundURL); } } nsresult nsTypeAheadFind::FindItNow(uint32_t aMode, bool aIsLinksOnly, bool aIsFirstVisiblePreferred, bool aDontIterateFrames, uint16_t* aResult) { *aResult = FIND_NOTFOUND; mFoundLink = nullptr; mFoundEditable = nullptr; mFoundRange = nullptr; mCurrentWindow = nullptr; RefPtr<Document> startingDocument = GetDocument(); NS_ENSURE_TRUE(startingDocument, NS_ERROR_FAILURE); // There could be unflushed notifications which hide textareas or other // elements that we don't want to find text in. startingDocument->FlushPendingNotifications(mozilla::FlushType::Layout); RefPtr<PresShell> presShell = startingDocument->GetPresShell(); NS_ENSURE_TRUE(presShell, NS_ERROR_FAILURE); RefPtr<nsPresContext> presContext = presShell->GetPresContext(); NS_ENSURE_TRUE(presContext, NS_ERROR_FAILURE); RefPtr<Selection> selection; nsCOMPtr<nsISelectionController> selectionController = do_QueryReferent(mSelectionController); if (!selectionController) { GetSelection(presShell, getter_AddRefs(selectionController), getter_AddRefs(selection)); // cache for reuse mSelectionController = do_GetWeakReference(selectionController); } else { selection = selectionController->GetSelection( nsISelectionController::SELECTION_NORMAL); } nsCOMPtr<nsIDocShell> startingDocShell(presContext->GetDocShell()); NS_ASSERTION( startingDocShell, "Bug 175321 Crashes with Type Ahead Find [@ nsTypeAheadFind::FindItNow]"); if (!startingDocShell) return NS_ERROR_FAILURE; nsCOMPtr<nsIDocShell> currentDocShell; nsCOMPtr<nsISupports> currentContainer; nsCOMPtr<nsIDocShellTreeItem> rootContentTreeItem; nsCOMPtr<nsIDocShell> rootContentDocShell; typedef nsTArray<RefPtr<nsIDocShell>> DocShells; DocShells docShells; DocShells::const_iterator it, it_end; if (!aDontIterateFrames) { // The use of GetInProcessSameTypeRootTreeItem (and later in this method) is // OK here as out-of-process frames are handled externally by // FinderParent.jsm, which will end up only calling this method with // aDontIterateFrames set to true. 
startingDocShell->GetInProcessSameTypeRootTreeItem( getter_AddRefs(rootContentTreeItem)); rootContentDocShell = do_QueryInterface(rootContentTreeItem); if (!rootContentDocShell) return NS_ERROR_FAILURE; rootContentDocShell->GetAllDocShellsInSubtree( nsIDocShellTreeItem::typeContent, nsIDocShell::ENUMERATE_FORWARDS, docShells); // Default: can start at the current document currentContainer = do_QueryInterface(rootContentDocShell); // Iterate up to current shell, if there's more than 1 that we're // dealing with for (it = docShells.begin(), it_end = docShells.end(); it != it_end; ++it) { currentDocShell = *it; if (!currentDocShell || currentDocShell == startingDocShell || aIsFirstVisiblePreferred) break; } } else { currentContainer = currentDocShell = startingDocShell; } bool findPrev = (aMode == FIND_PREVIOUS || aMode == FIND_LAST); // ------------ Get ranges ready ---------------- bool useSelection = (aMode != FIND_FIRST && aMode != FIND_LAST) && (!aIsFirstVisiblePreferred || mStartFindRange); RefPtr<nsRange> returnRange; if (NS_FAILED(GetSearchContainers( currentContainer, useSelection ? selectionController.get() : nullptr, aIsFirstVisiblePreferred, findPrev, getter_AddRefs(presShell), getter_AddRefs(presContext)))) { return NS_ERROR_FAILURE; } if (!mStartPointRange) { mStartPointRange = nsRange::Create(presShell->GetDocument()); } // XXXbz Should this really be ignoring errors? int16_t rangeCompareResult = mStartPointRange->CompareBoundaryPoints( Range_Binding::START_TO_START, *mSearchRange, IgnoreErrors()); // No need to wrap find in doc if starting at beginning bool hasWrapped = (rangeCompareResult < 0); if (mTypeAheadBuffer.IsEmpty() || !EnsureFind()) return NS_ERROR_FAILURE; mFind->SetFindBackwards(findPrev); while (true) { // ----- Outer while loop: go through all docs ----- while (true) { // === Inner while loop: go through a single doc === mFind->Find(mTypeAheadBuffer, mSearchRange, mStartPointRange, mEndPointRange, getter_AddRefs(returnRange)); if (!returnRange) { break; // Nothing found in this doc, go to outer loop (try next doc) } // ------- Test resulting found range for success conditions ------ bool isInsideLink = false, isStartingLink = false; if (aIsLinksOnly) { // Don't check if inside link when searching all text RangeStartsInsideLink(returnRange, &isInsideLink, &isStartingLink); } bool usesIndependentSelection = false; // Check actual visibility of the range, and generate some // side effects (like updating mStartPointRange and // setting usesIndependentSelection) that we'll need whether // or not the range is visible. bool canSeeRange = IsRangeVisible(returnRange, aIsFirstVisiblePreferred, false, &usesIndependentSelection); mStartPointRange = returnRange->CloneRange(); // If we can't see the range, we still might be able to scroll // it into view if usesIndependentSelection is true. If both are // false, then we treat it as a failure condition. if ((!canSeeRange && !usesIndependentSelection) || (aIsLinksOnly && !isInsideLink) || (mStartLinksOnlyPref && aIsLinksOnly && !isStartingLink)) { // We want to jump over this range, so collapse to the start if we're // finding backwards and vice versa. mStartPointRange->Collapse(findPrev); continue; } mFoundRange = returnRange; // ------ Success! 
------- // Hide old selection (new one may be on a different controller) if (selection) { selection->CollapseToStart(IgnoreErrors()); SetSelectionModeAndRepaint(nsISelectionController::SELECTION_ON); } RefPtr<Document> document = presShell->GetDocument(); NS_ASSERTION(document, "Wow, presShell doesn't have document!"); if (!document) { return NS_ERROR_UNEXPECTED; } // Make sure new document is selected if (document != startingDocument) { // We are in a new document (because of frames/iframes) mDocument = do_GetWeakReference(document); } nsCOMPtr<nsPIDOMWindowInner> window = document->GetInnerWindow(); NS_ASSERTION(window, "document has no window"); if (!window) return NS_ERROR_UNEXPECTED; RefPtr<nsFocusManager> fm = nsFocusManager::GetFocusManager(); if (usesIndependentSelection) { /* If a search result is found inside an editable element, we'll focus * the element only if focus is in our content window, i.e. * |if (focusedWindow.top == ourWindow.top)| */ bool shouldFocusEditableElement = false; if (fm) { nsCOMPtr<mozIDOMWindowProxy> focusedWindow; nsresult rv = fm->GetFocusedWindow(getter_AddRefs(focusedWindow)); if (NS_SUCCEEDED(rv) && focusedWindow) { auto* fwPI = nsPIDOMWindowOuter::From(focusedWindow); nsCOMPtr<nsIDocShellTreeItem> fwTreeItem(fwPI->GetDocShell()); if (NS_SUCCEEDED(rv)) { nsCOMPtr<nsIDocShellTreeItem> fwRootTreeItem; rv = fwTreeItem->GetInProcessSameTypeRootTreeItem( getter_AddRefs(fwRootTreeItem)); if (NS_SUCCEEDED(rv) && fwRootTreeItem == rootContentTreeItem) shouldFocusEditableElement = true; } } } // We may be inside an editable element, and therefore the selection // may be controlled by a different selection controller. Walk up the // chain of parent nodes to see if we find one. nsINode* node = returnRange->GetStartContainer(); while (node) { nsCOMPtr<nsIEditor> editor; if (RefPtr<HTMLInputElement> input = HTMLInputElement::FromNode(node)) { editor = input->GetTextEditor(); } else if (RefPtr<HTMLTextAreaElement> textarea = HTMLTextAreaElement::FromNode(node)) { editor = textarea->GetTextEditor(); } else { node = node->GetParentNode(); continue; } // Inside an editable element. Get the correct selection // controller and selection. NS_ASSERTION(editor, "Editable element has no editor!"); if (!editor) { break; } editor->GetSelectionController(getter_AddRefs(selectionController)); if (selectionController) { selection = selectionController->GetSelection( nsISelectionController::SELECTION_NORMAL); } mFoundEditable = node->AsElement(); if (!shouldFocusEditableElement) { break; } // Otherwise move focus/caret to editable element if (fm) { nsCOMPtr<Element> newFocusElement = mFoundEditable; fm->SetFocus(newFocusElement, 0); } break; } // If we reach here without setting mFoundEditable, then something // besides editable elements gave us an independent selection // controller. List controls with multiple visible elements can do // this (nsAreaSelectsFrame), and possibly others. We fall back to // grabbing the document's selection controller in this case. } if (!mFoundEditable) { // Not using a separate selection controller, so just get the // document's controller and selection. 
GetSelection(presShell, getter_AddRefs(selectionController), getter_AddRefs(selection)); } mSelectionController = do_GetWeakReference(selectionController); // Select the found text if (selection) { selection->RemoveAllRanges(IgnoreErrors()); selection->AddRangeAndSelectFramesAndNotifyListeners(*returnRange, IgnoreErrors()); } if (!mFoundEditable && fm) { fm->MoveFocus(window->GetOuterWindow(), nullptr, nsIFocusManager::MOVEFOCUS_CARET, nsIFocusManager::FLAG_NOSCROLL | nsIFocusManager::FLAG_NOSWITCHFRAME, getter_AddRefs(mFoundLink)); } // Change selection color to ATTENTION and scroll to it. Careful: we // must wait until after we goof with focus above before changing to // ATTENTION, or when we MoveFocus() and the selection is not on a // link, we'll blur, which will lose the ATTENTION. if (selectionController) { // Beware! This may flush notifications via synchronous // ScrollSelectionIntoView. SetSelectionModeAndRepaint(nsISelectionController::SELECTION_ATTENTION); selectionController->ScrollSelectionIntoView( nsISelectionController::SELECTION_NORMAL, nsISelectionController::SELECTION_WHOLE_SELECTION, nsISelectionController::SCROLL_CENTER_VERTICALLY | nsISelectionController::SCROLL_SYNCHRONOUS); } mCurrentWindow = window; *aResult = hasWrapped ? FIND_WRAPPED : FIND_FOUND; return NS_OK; } // ======= end-inner-while (go through a single document) ========== if (aDontIterateFrames) { return NS_OK; } // ---------- Nothing found yet, try next document ------------- bool hasTriedFirstDoc = false; do { // ==== Second inner loop - get another while ==== if (it != it_end) { currentContainer = *it; ++it; NS_ASSERTION(currentContainer, "We're not at the end yet!"); currentDocShell = do_QueryInterface(currentContainer); if (currentDocShell) break; } else if (hasTriedFirstDoc) // Avoid potential infinite loop return NS_ERROR_FAILURE; // No content doc shells // Reached last doc shell, loop around back to first doc shell rootContentDocShell->GetAllDocShellsInSubtree( nsIDocShellTreeItem::typeContent, nsIDocShell::ENUMERATE_FORWARDS, docShells); it = docShells.begin(); it_end = docShells.end(); hasTriedFirstDoc = true; } while (it != it_end); // ==== end second inner while === bool continueLoop = false; if (currentDocShell != startingDocShell) continueLoop = true; // Try next document else if (!hasWrapped || aIsFirstVisiblePreferred) { // Finished searching through docshells: // If aFirstVisiblePreferred == true, we may need to go through all // docshells twice -once to look for visible matches, the second time // for any match aIsFirstVisiblePreferred = false; hasWrapped = true; continueLoop = true; // Go through all docs again } if (continueLoop) { if (NS_FAILED(GetSearchContainers( currentContainer, nullptr, aIsFirstVisiblePreferred, findPrev, getter_AddRefs(presShell), getter_AddRefs(presContext)))) { continue; } if (findPrev) { // Reverse mode: swap start and end points, so that we start // at end of document and go to beginning RefPtr<nsRange> tempRange = mStartPointRange->CloneRange(); if (!mEndPointRange) { mEndPointRange = nsRange::Create(presShell->GetDocument()); } mStartPointRange = mEndPointRange; mEndPointRange = tempRange; } continue; } // ------------- Failed -------------- break; } // end-outer-while: go through all docs return NS_ERROR_FAILURE; } NS_IMETHODIMP nsTypeAheadFind::GetSearchString(nsAString& aSearchString) { aSearchString = mTypeAheadBuffer; return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::GetFoundLink(Element** aFoundLink) { NS_ENSURE_ARG_POINTER(aFoundLink); *aFoundLink = 
mFoundLink; NS_IF_ADDREF(*aFoundLink); return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::GetFoundEditable(Element** aFoundEditable) { NS_ENSURE_ARG_POINTER(aFoundEditable); *aFoundEditable = mFoundEditable; NS_IF_ADDREF(*aFoundEditable); return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::GetCurrentWindow(mozIDOMWindow** aCurrentWindow) { NS_ENSURE_ARG_POINTER(aCurrentWindow); *aCurrentWindow = mCurrentWindow; NS_IF_ADDREF(*aCurrentWindow); return NS_OK; } nsresult nsTypeAheadFind::GetSearchContainers( nsISupports* aContainer, nsISelectionController* aSelectionController, bool aIsFirstVisiblePreferred, bool aFindPrev, PresShell** aPresShell, nsPresContext** aPresContext) { NS_ENSURE_ARG_POINTER(aContainer); NS_ENSURE_ARG_POINTER(aPresShell); NS_ENSURE_ARG_POINTER(aPresContext); *aPresShell = nullptr; *aPresContext = nullptr; nsCOMPtr<nsIDocShell> docShell(do_QueryInterface(aContainer)); if (!docShell) return NS_ERROR_FAILURE; RefPtr<PresShell> presShell = docShell->GetPresShell(); RefPtr<nsPresContext> presContext = docShell->GetPresContext(); if (!presShell || !presContext) return NS_ERROR_FAILURE; Document* doc = presShell->GetDocument(); if (!doc) return NS_ERROR_FAILURE; nsCOMPtr<nsIContent> rootContent; if (doc->IsHTMLOrXHTML()) { rootContent = doc->GetBody(); } if (!rootContent) { rootContent = doc->GetRootElement(); if (!rootContent) { return NS_ERROR_FAILURE; } } if (!mSearchRange) { mSearchRange = nsRange::Create(doc); } nsCOMPtr<nsINode> searchRootNode(rootContent); mSearchRange->SelectNodeContents(*searchRootNode, IgnoreErrors()); if (!mStartPointRange) { mStartPointRange = nsRange::Create(doc); } mStartPointRange->SetStartAndEnd(searchRootNode, 0, searchRootNode, 0); if (!mEndPointRange) { mEndPointRange = nsRange::Create(doc); } mEndPointRange->SetStartAndEnd(searchRootNode, searchRootNode->Length(), searchRootNode, searchRootNode->Length()); // Consider current selection as null if // it's not in the currently focused document RefPtr<const nsRange> currentSelectionRange; RefPtr<Document> selectionDocument = GetDocument(); if (aSelectionController && selectionDocument && selectionDocument == doc) { RefPtr<Selection> selection = aSelectionController->GetSelection( nsISelectionController::SELECTION_NORMAL); if (selection) { currentSelectionRange = selection->GetRangeAt(0); } } if (!currentSelectionRange) { mStartPointRange = mSearchRange->CloneRange(); // We want to search in the visible selection range. That means that the // start point needs to be the end if we're looking backwards, or vice // versa. 
mStartPointRange->Collapse(!aFindPrev); } else { uint32_t startOffset; nsCOMPtr<nsINode> startNode; if (aFindPrev) { startNode = currentSelectionRange->GetStartContainer(); startOffset = currentSelectionRange->StartOffset(); } else { startNode = currentSelectionRange->GetEndContainer(); startOffset = currentSelectionRange->EndOffset(); } if (!startNode) { startNode = rootContent; } // We need to set the start point this way, other methods haven't worked mStartPointRange->SelectNode(*startNode, IgnoreErrors()); mStartPointRange->SetStart(*startNode, startOffset, IgnoreErrors()); mStartPointRange->Collapse(true); // collapse to start } presShell.forget(aPresShell); presContext.forget(aPresContext); return NS_OK; } void nsTypeAheadFind::RangeStartsInsideLink(nsRange* aRange, bool* aIsInsideLink, bool* aIsStartingLink) { *aIsInsideLink = false; *aIsStartingLink = true; // ------- Get nsIContent to test ------- uint32_t startOffset = aRange->StartOffset(); nsCOMPtr<nsIContent> startContent = nsIContent::FromNodeOrNull(aRange->GetStartContainer()); if (!startContent) { MOZ_ASSERT_UNREACHABLE("startContent should never be null"); return; } nsCOMPtr<nsIContent> origContent = startContent; if (startContent->IsElement()) { nsIContent* childContent = aRange->GetChildAtStartOffset(); if (childContent) { startContent = childContent; } } else if (startOffset > 0) { const nsTextFragment* textFrag = startContent->GetText(); if (textFrag) { // look for non whitespace character before start offset for (uint32_t index = 0; index < startOffset; index++) { // FIXME: take content language into account when deciding whitespace. if (!mozilla::dom::IsSpaceCharacter( textFrag->CharAt(static_cast<int32_t>(index)))) { *aIsStartingLink = false; // not at start of a node break; } } } } // ------- Check to see if inside link --------- // We now have the correct start node for the range // Search for links, starting with startNode, and going up parent chain while (true) { // Keep testing while startContent is equal to something, // eventually we'll run out of ancestors if (startContent->IsHTMLElement()) { nsCOMPtr<mozilla::dom::Link> link(do_QueryInterface(startContent)); if (link) { // Check to see if inside HTML link *aIsInsideLink = startContent->AsElement()->HasAttr(kNameSpaceID_None, nsGkAtoms::href); return; } } else { // Any xml element can be an xlink *aIsInsideLink = startContent->IsElement() && startContent->AsElement()->HasAttr( kNameSpaceID_XLink, nsGkAtoms::href); if (*aIsInsideLink) { if (!startContent->AsElement()->AttrValueIs( kNameSpaceID_XLink, nsGkAtoms::type, u"simple"_ns, eCaseMatters)) { *aIsInsideLink = false; // Xlink must be type="simple" } return; } } // Get the parent nsCOMPtr<nsIContent> parent = startContent->GetParent(); if (!parent) break; nsIContent* parentsFirstChild = parent->GetFirstChild(); // We don't want to look at a whitespace-only first child if (parentsFirstChild && parentsFirstChild->TextIsOnlyWhitespace()) { parentsFirstChild = parentsFirstChild->GetNextSibling(); } if (parentsFirstChild != startContent) { // startContent wasn't a first child, so we conclude that // if this is inside a link, it's not at the beginning of it *aIsStartingLink = false; } startContent = parent; } *aIsStartingLink = false; } MOZ_CAN_RUN_SCRIPT_BOUNDARY NS_IMETHODIMP nsTypeAheadFind::Find( const nsAString& aSearchString, bool aLinksOnly, uint32_t aMode, bool aDontIterateFrames, uint16_t* aResult) { if (aMode == nsITypeAheadFind::FIND_PREVIOUS || aMode == nsITypeAheadFind::FIND_NEXT) { if 
(mTypeAheadBuffer.IsEmpty()) { *aResult = FIND_NOTFOUND; } else { FindItNow(aMode, aLinksOnly, false, aDontIterateFrames, aResult); } return NS_OK; } // Find again ignores error return values, so do so here as well. nsresult rv = FindInternal(aMode, aSearchString, aLinksOnly, aDontIterateFrames, aResult); return (aMode == nsITypeAheadFind::FIND_INITIAL) ? rv : NS_OK; } nsresult nsTypeAheadFind::FindInternal(uint32_t aMode, const nsAString& aSearchString, bool aLinksOnly, bool aDontIterateFrames, uint16_t* aResult) { *aResult = FIND_NOTFOUND; RefPtr<Document> doc = GetDocument(); NS_ENSURE_TRUE(doc, NS_ERROR_FAILURE); RefPtr<PresShell> presShell = doc->GetPresShell(); NS_ENSURE_TRUE(presShell, NS_ERROR_FAILURE); RefPtr<Selection> selection; nsCOMPtr<nsISelectionController> selectionController = do_QueryReferent(mSelectionController); if (!selectionController) { GetSelection(presShell, getter_AddRefs(selectionController), getter_AddRefs(selection)); // cache for reuse mSelectionController = do_GetWeakReference(selectionController); } else { selection = selectionController->GetSelection( nsISelectionController::SELECTION_NORMAL); } if (selection) { selection->CollapseToStart(IgnoreErrors()); } if (aSearchString.IsEmpty()) { mTypeAheadBuffer.Truncate(); // These will be initialized to their true values after the first character // is typed mStartFindRange = nullptr; mSelectionController = nullptr; *aResult = FIND_FOUND; return NS_OK; } bool atEnd = false; bool isInitial = aMode == nsITypeAheadFind::FIND_INITIAL; if (isInitial) { if (mTypeAheadBuffer.Length()) { const nsAString& oldStr = Substring(mTypeAheadBuffer, 0, mTypeAheadBuffer.Length()); const nsAString& newStr = Substring(aSearchString, 0, mTypeAheadBuffer.Length()); if (oldStr.Equals(newStr)) atEnd = true; const nsAString& newStr2 = Substring(aSearchString, 0, aSearchString.Length()); const nsAString& oldStr2 = Substring(mTypeAheadBuffer, 0, aSearchString.Length()); if (oldStr2.Equals(newStr2)) atEnd = true; if (!atEnd) mStartFindRange = nullptr; } } int32_t bufferLength = mTypeAheadBuffer.Length(); mTypeAheadBuffer = aSearchString; bool isFirstVisiblePreferred = false; // --------- Initialize find if 1st char ---------- if (bufferLength == 0 && isInitial) { // If you can see the selection (not collapsed or thru caret browsing), // or if already focused on a page element, start there. // Otherwise we're going to start at the first visible element bool isSelectionCollapsed = !selection || selection->IsCollapsed(); // If true, we will scan from top left of visible area // If false, we will scan from start of selection isFirstVisiblePreferred = !atEnd && !mCaretBrowsingOn && isSelectionCollapsed; if (isFirstVisiblePreferred) { // Get the focused content. If there is a focused node, ensure the // selection is at that point. Otherwise, we will just want to start // from the caret position or the beginning of the document. nsPresContext* presContext = presShell->GetPresContext(); NS_ENSURE_TRUE(presContext, NS_OK); nsCOMPtr<Document> document = presShell->GetDocument(); if (!document) return NS_ERROR_UNEXPECTED; nsFocusManager* fm = nsFocusManager::GetFocusManager(); if (fm) { nsPIDOMWindowOuter* window = document->GetWindow(); RefPtr<Element> focusedElement; nsCOMPtr<mozIDOMWindowProxy> focusedWindow; fm->GetFocusedElementForWindow(window, false, getter_AddRefs(focusedWindow), getter_AddRefs(focusedElement)); // If the root element is focused, then it's actually the document // that has the focus, so ignore this. 
if (focusedElement && focusedElement != document->GetRootElement()) { fm->MoveCaretToFocus(window); isFirstVisiblePreferred = false; } } } } // ----------- Find the text! --------------------- // Beware! This may flush notifications via synchronous // ScrollSelectionIntoView. nsresult rv = FindItNow(aMode, aLinksOnly, isFirstVisiblePreferred, aDontIterateFrames, aResult); // ---------Handle success or failure --------------- if (NS_SUCCEEDED(rv)) { if (mTypeAheadBuffer.Length() == 1) { // If first letter, store where the first find succeeded // (mStartFindRange) mStartFindRange = nullptr; if (selection) { RefPtr<const nsRange> startFindRange = selection->GetRangeAt(0); if (startFindRange) { mStartFindRange = startFindRange->CloneRange(); } } } } else if (isInitial) { // Error sound, except when whole word matching is ON. if (!mEntireWord && mTypeAheadBuffer.Length() > mLastFindLength) PlayNotFoundSound(); } SaveFind(); return NS_OK; } void nsTypeAheadFind::GetSelection(PresShell* aPresShell, nsISelectionController** aSelCon, Selection** aDOMSel) { if (!aPresShell) return; // if aCurrentNode is nullptr, get selection for document *aDOMSel = nullptr; nsPresContext* presContext = aPresShell->GetPresContext(); nsIFrame* frame = aPresShell->GetRootFrame(); if (presContext && frame) { frame->GetSelectionController(presContext, aSelCon); if (*aSelCon) { RefPtr<Selection> sel = (*aSelCon)->GetSelection(nsISelectionController::SELECTION_NORMAL); sel.forget(aDOMSel); } } } NS_IMETHODIMP nsTypeAheadFind::GetFoundRange(nsRange** aFoundRange) { NS_ENSURE_ARG_POINTER(aFoundRange); if (mFoundRange == nullptr) { *aFoundRange = nullptr; return NS_OK; } *aFoundRange = mFoundRange->CloneRange().take(); return NS_OK; } NS_IMETHODIMP nsTypeAheadFind::IsRangeVisible(nsRange* aRange, bool aMustBeInViewPort, bool* aResult) { *aResult = IsRangeVisible(aRange, aMustBeInViewPort, false, nullptr); return NS_OK; } bool nsTypeAheadFind::IsRangeVisible(nsRange* aRange, bool aMustBeInViewPort, bool aGetTopVisibleLeaf, bool* aUsesIndependentSelection) { // We need to know if the range start is visible. // Otherwise, return the first visible range start in aFirstVisibleRange nsCOMPtr<nsIContent> content = nsIContent::FromNodeOrNull(aRange->GetStartContainer()); if (!content) { return false; } nsIFrame* frame = content->GetPrimaryFrame(); if (!frame) { return false; // No frame! Not visible then. } if (!frame->StyleVisibility()->IsVisible()) { return false; } // Detect if we are _inside_ a text control, or something else with its own // selection controller. if (aUsesIndependentSelection) { *aUsesIndependentSelection = (frame->GetStateBits() & NS_FRAME_INDEPENDENT_SELECTION); } return aMustBeInViewPort ? IsRangeRendered(aRange) : true; } NS_IMETHODIMP nsTypeAheadFind::IsRangeRendered(nsRange* aRange, bool* aResult) { *aResult = IsRangeRendered(aRange); return NS_OK; } bool nsTypeAheadFind::IsRangeRendered(nsRange* aRange) { using FrameForPointOption = nsLayoutUtils::FrameForPointOption; nsCOMPtr<nsIContent> content = nsIContent::FromNodeOrNull(aRange->GetClosestCommonInclusiveAncestor()); if (!content) { return false; } nsIFrame* frame = content->GetPrimaryFrame(); if (!frame) { return false; // No frame! Not visible then. } if (!frame->StyleVisibility()->IsVisible()) { return false; } // Having a primary frame doesn't mean that the range is visible inside the // viewport. Do a hit-test to determine that quickly and properly. 
AutoTArray<nsIFrame*, 8> frames; nsIFrame* rootFrame = frame->PresShell()->GetRootFrame(); RefPtr<nsRange> range = static_cast<nsRange*>(aRange); // NOTE(emilio): This used to flush layout, _after_ checking style above. // Instead, don't flush. RefPtr<mozilla::dom::DOMRectList> rects = range->GetClientRects(true, /* aFlushLayout = */ false); for (uint32_t i = 0; i < rects->Length(); ++i) { RefPtr<mozilla::dom::DOMRect> rect = rects->Item(i); nsRect r(nsPresContext::CSSPixelsToAppUnits((float)rect->X()), nsPresContext::CSSPixelsToAppUnits((float)rect->Y()), nsPresContext::CSSPixelsToAppUnits((float)rect->Width()), nsPresContext::CSSPixelsToAppUnits((float)rect->Height())); // Append visible frames to frames array. nsLayoutUtils::GetFramesForArea( RelativeTo{rootFrame}, r, frames, {{FrameForPointOption::IgnorePaintSuppression, FrameForPointOption::IgnoreRootScrollFrame, FrameForPointOption::OnlyVisible}}); // See if any of the frames contain the content. If they do, then the range // is visible. We search for the content rather than the original frame, // because nsTextContinuation frames might be returned instead of the // original frame. for (const auto& f : frames) { if (f->GetContent() == content) { return true; } } frames.ClearAndRetainStorage(); } return false; } already_AddRefed<Document> nsTypeAheadFind::GetDocument() { // Try the last document we found and ensure it's sane. RefPtr<Document> doc = do_QueryReferent(mDocument); if (doc && doc->GetPresShell() && doc->GetDocShell()) { return doc.forget(); } // Otherwise fall back to the document from which we were initialized (the one // from mDocShell). mDocument = nullptr; nsCOMPtr<nsIDocShell> ds = do_QueryReferent(mDocShell); if (!ds) { return nullptr; } doc = ds->GetExtantDocument(); mDocument = do_GetWeakReference(doc); return doc.forget(); }
encoding: utf-8
confidence: 1
license: MPL-2.0 or GPL-2 or LGPL-2.1
copyright: 1998-2016, Mozilla Project
sptk-3.9/.pc/1011_fix_spelling_errors.patch/bin/mgcep/_mgcep.c
/* ----------------------------------------------------------------- */
/*             The Speech Signal Processing Toolkit (SPTK)           */
/*             developed by SPTK Working Group                       */
/*             http://sp-tk.sourceforge.net/                         */
/* ----------------------------------------------------------------- */
/*                                                                   */
/*  Copyright (c) 1984-2007  Tokyo Institute of Technology           */
/*                           Interdisciplinary Graduate School of    */
/*                           Science and Engineering                 */
/*                                                                   */
/*                1996-2015  Nagoya Institute of Technology          */
/*                           Department of Computer Science          */
/*                                                                   */
/* All rights reserved.                                              */
/*                                                                   */
/* Redistribution and use in source and binary forms, with or        */
/* without modification, are permitted provided that the following   */
/* conditions are met:                                               */
/*                                                                   */
/* - Redistributions of source code must retain the above copyright  */
/*   notice, this list of conditions and the following disclaimer.   */
/* - Redistributions in binary form must reproduce the above         */
/*   copyright notice, this list of conditions and the following     */
/*   disclaimer in the documentation and/or other materials provided */
/*   with the distribution.                                          */
/* - Neither the name of the SPTK working group nor the names of its */
/*   contributors may be used to endorse or promote products derived */
/*   from this software without specific prior written permission.   */
/*                                                                   */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND            */
/* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,       */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF          */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE          */
/* DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR             */
/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,      */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT  */
/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF  */
/* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED   */
/* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT       */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE   */
/* POSSIBILITY OF SUCH DAMAGE.                                       */
/* ----------------------------------------------------------------- */

/****************************************************************

    $Id: _mgcep.c,v 1.31 2015/12/14 01:14:19 uratec Exp $

    Mel-Generalized Cepstral Analysis

        int mgcep(xw, flng, b, m, a, g, n, itr1, itr2, dd, etype, e, f, itype);

        double   *xw   : input sequence
        int      flng  : frame length
        double   *b    : coefficient b'(m)
        int      m     : order of mel cepstrum
        double   a     : alpha
        double   g     : gamma
        int      n     : order of recursions
        int      itr1  : minimum number of iteration
        int      itr2  : maximum number of iteration
        double   dd    : end condition
        int      etype :    0 -> e is not used
                            1 -> e is initial value for log-periodogram
                            2 -> e is floor periodogram in db
        double   e     : value for log-periodogram
                         or floor periodogram in db
        double   f     : mimimum value of the determinant
                         of the normal matrix
        int      itype : input data type
        return   value :    0 -> completed by end condition
                            -1-> completed by maximum iteration

*****************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#if defined(WIN32)
#include "SPTK.h"
#else
#include <SPTK.h>
#endif

/* gain(epsilon) calculation */
static double gain(double *er, double *c, int m, double g)
{
   int i;
   double t;

   if (g != 0.0) {
      for (t = 0.0, i = 1; i <= m; i++)
         t += er[i] * c[i];
      return (er[0] + g * t);
   } else
      return (er[0]);
}

/* b'(m) to c(m) */
static void b2c(double *b, int m1, double *c, int m2, double a)
{
   int i, j;
   static double *d = NULL, *g;
   static int size;
   double k;

   if (d == NULL) {
      size = m2;
      d = dgetmem(size + size + 2);
      g = d + size + 1;
   }

   if (m2 > size) {
      free(d);
      size = m2;
      d = dgetmem(size + size + 2);
      g = d + size + 1;
   }

   k = 1 - a * a;

   fillz(g, sizeof(*g), m2 + 1);

   for (i = -m1; i <= 0; i++) {
      d[0] = g[0];
      g[0] = b[-i];

      if (1 <= m2)
         g[1] = k * d[0] + a * (d[1] = g[1]);

      for (j = 2; j <= m2; j++)
         g[j] = d[j - 1] + a * ((d[j] = g[j]) - g[j - 1]);
   }

   movem(g, c, sizeof(*g), m2 + 1);

   return;
}

/* recursion for p(m) */
static void ptrans(double *p, int m, double a)
{
   double d, o;

   d = p[m];
   for (m--; m > 0; m--) {
      o = p[m] + a * d;
      d = p[m];
      p[m] = o;
   }

   o = a * d;
   p[m] = (1. - a * a) * p[m] + o + o;

   return;
}

/* recursion for q(m) */
static void qtrans(double *q, int m, double a)
{
   int i;
   double d, o;

   m += m;
   i = 1;
   d = q[i];
   for (i++; i <= m; i++) {
      o = q[i] + a * d;
      d = q[i];
      q[i] = o;
   }

   return;
}

int mgcep(double *xw, int flng, double *b, const int m, const double a,
          const double g, const int n, const int itr1, const int itr2,
          const double dd, const int etype, const double e, const double f,
          const int itype)
{
   int i, j, flag = 0;
   static double *x = NULL, *y, *d;
   static int size_x, size_c;
   double ep, epo, eps = 0.0, min, max;

   if (etype == 1 && e < 0.0) {
      fprintf(stderr, "mgcep : value of e must be e>=0!\n");
      exit(1);
   }

   if (etype == 2 && e >= 0.0) {
      fprintf(stderr, "mgcep : value of E must be E<0!\n");
      exit(1);
   }

   if (etype == 1) {
      eps = e;
   }

   if (x == NULL) {
      x = dgetmem(flng + flng);
      y = x + flng;
      size_x = flng;
      d = dgetmem(m + 1);
      size_c = m;
   }
   if (flng > size_x) {
      free(x);
      x = dgetmem(flng + flng);
      y = x + flng;
      size_x = flng;
   }
   if (m > size_c) {
      free(d);
      d = dgetmem(m + 1);
      size_c = m;
   }

   movem(xw, x, sizeof(*x), flng);

   switch (itype) {
   case 0:                     /* windowed data sequence */
      fftr(x, y, flng);
      for (i = 0; i < flng; i++) {
         x[i] = x[i] * x[i] + y[i] * y[i] + eps;        /* periodogram */
      }
      break;
   case 1:                     /* dB */
      for (i = 0; i <= flng / 2; i++) {
         x[i] = exp((x[i] / 20.0) * log(10.0)); /* dB -> amplitude spectrum */
         x[i] = x[i] * x[i] + eps;      /* amplitude -> periodogram */
      }
      break;
   case 2:                     /* log */
      for (i = 0; i <= flng / 2; i++) {
         x[i] = exp(x[i]); /* log -> amplitude spectrum */
         x[i] = x[i] * x[i] + eps;      /* amplitude -> periodogram */
      }
      break;
   case 3:                     /* amplitude */
      for (i = 0; i <= flng / 2; i++) {
         x[i] = x[i] * x[i] + eps;      /* amplitude -> periodogram */
      }
      break;
   case 4:                     /* periodogram */
      for (i = 0; i <= flng / 2; i++) {
         x[i] = x[i] + eps;
      }
      break;
   default:
      fprintf(stderr, "mgcep : Input type %d is not supported!\n", itype);
      exit(1);
   }
   if (itype > 0) {
      for (i = 1; i < flng / 2; i++)
         x[flng - i] = x[i];
   }

   if (etype == 2 && e < 0.0) {
      max = x[0];
      for (i = 1; i < flng; i++) {
         if (max < x[i])
            max = x[i];
      }
      max = sqrt(max);
      min = max * pow(10.0, e / 20.0);  /* floor is 20*log10(min/max) */
      min = min * min;
      for (i = 0; i < flng; i++) {
         if (x[i] < min)
            x[i] = min;
      }
   }

   /* initial value */
   fillz(b, sizeof(*b), m + 1);
   ep = newton(x, flng, b, m, a, -1.0, n, 0, f);

   if (g != -1.0) {
      if (a != 0.0) {
         ignorm(b, b, m, -1.0); /* K, b'r(m)    -> br(m)        */
         b2mc(b, b, m, a);      /* br(m)        -> c~r(m)       */
         gnorm(b, d, m, -1.0);  /* c~r(m)       -> K~, c~'r(m)  */
      } else
         movem(b, d, sizeof(*b), m + 1);

      gc2gc(d, m, -1.0, b, m, g);       /* K~, c~'r(m)  -> K~, c~'r'(m) */

      if (a != 0.0) {
         ignorm(b, b, m, g);    /* K~, c~'r'(m) -> c~r(m)       */
         mc2b(b, b, m, a);      /* c~r(m)       -> br(m)        */
         gnorm(b, b, m, g);     /* br(m)        -> K, b'r'(m)   */
      }
   }

   /* Newton-Raphson method */
   if (g != -1.0) {
      for (j = 1; j <= itr2; j++) {
         epo = ep;
         ep = newton(x, flng, b, m, a, g, n, j, f);

         if (j >= itr1)
            if (fabs((epo - ep) / ep) < dd) {
               flag = 1;
               break;
            }
      }
   }

   if (flag)
      return (0);
   else
      return (-1);
}

double newton(double *x, const int flng, double *c, const int m,
              const double a, const double g, const int n, const int j,
              const double f)
{
   int i, m2;
   double t = 0, s, tr, ti, trr, tii;
   static double *cr = NULL, *ci, *pr, *qr, *qi, *rr, *ri, *b;
   static int size_cr, size_b;

   if (cr == NULL) {
      cr = dgetmem(7 * flng);
      ci = cr + flng;
      pr = ci + flng;
      qr = pr + flng;
      qi = qr + flng;
      rr = qi + flng;
      ri = rr + flng;
      size_cr = flng;

      b = dgetmem(m + 1);
      size_b = m;
   }
   if (flng > size_cr) {
      free(cr);
      cr = dgetmem(7 * flng);
      ci = cr + flng;
      pr = ci + flng;
      qr = pr + flng;
      qi = qr + flng;
      rr = qi + flng;
      ri = rr + flng;
      size_cr = flng;
   }
   if (m > size_b) {
      free(b);
      b = dgetmem(m + 1);
      size_b = m;
   }

   m2 = m + m;

   fillz(cr, sizeof(*cr), flng);
   movem(&c[1], &cr[1], sizeof(*c), m);

   if (a != 0.0)
      b2c(cr, m, cr, n, -a);

   fftr(cr, ci, flng);          /* cr +j ci : FFT[c] */

   if (g == -1.0)
      movem(x, pr, sizeof(*x), flng);
   else if (g == 0.0)
      for (i = 0; i < flng; i++)
         pr[i] = x[i] / exp(cr[i] + cr[i]);
   else
      for (i = 0; i < flng; i++) {
         tr = 1 + g * cr[i];
         ti = g * ci[i];
         s = (trr = tr * tr) + (tii = ti * ti);
         t = x[i] * pow(s, -1.0 / g);
         pr[i] = (t /= s);
         rr[i] = tr * t;
         ri[i] = ti * t;
         t /= s;
         qr[i] = (trr - tii) * t;
         s = tr * ti * t;
         qi[i] = s + s;
      }

   ifftr(pr, ci, flng);

   if (a != 0.0)
      b2c(pr, n, pr, m2, a);

   if (g == 0.0 || g == -1.0) {
      movem(pr, qr, sizeof(*pr), m2 + 1);
      movem(pr, rr, sizeof(*pr), m + 1);
   } else {
      ifft(qr, qi, flng);
      ifft(rr, ri, flng);

      if (a != 0.0) {
         b2c(qr, n, qr, n, a);
         b2c(rr, n, rr, m, a);
      }
   }

   if (a != 0.0) {
      ptrans(pr, m, a);
      qtrans(qr, m, a);
   }

   /* c[0] : gain, t : epsilon */
   if (g != -1.0)
      c[0] = sqrt(t = gain(rr, c, m, g));

   if (g == -1.0)
      fillz(qr, sizeof(*qr), m2 + 1);
   else if (g != 0.0)
      for (i = 2; i <= m2; i++)
         qr[i] *= 1.0 + g;

   if (theq(pr, &qr[2], &b[1], &rr[1], m, f)) {
      fprintf(stderr, "mgcep : Error in theq() at %dth iteration!\n", j);
      exit(1);
   }

   for (i = 1; i <= m; i++)
      c[i] += b[i];

   /* c[0] : gain, t : epsilon */
   if (g == -1.0)
      c[0] = sqrt(t = gain(rr, c, m, g));

   return (log(t));
}
encoding: utf-8
confidence: 1
license: BSD-3-clause
copyright: 1984-2007 Tokyo Institute of Technology, Interdisciplinary Graduate School of Science and Engineering; 1996-2015 Nagoya Institute of Technology, Department of Computer Science
sundials-5.8.0+dfsg/src/idas/idas_impl.h
/* ----------------------------------------------------------------- * Programmer(s): Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file (private version) for the main IDAS solver. * ----------------------------------------------------------------- */ #ifndef _IDAS_IMPL_H #define _IDAS_IMPL_H #include <stdarg.h> #include "idas/idas.h" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ================================================================= * M A I N I N T E G R A T O R M E M O R Y B L O C K * ================================================================= */ /* Basic IDA constants */ #define HMAX_INV_DEFAULT RCONST(0.0) /* hmax_inv default value */ #define MAXORD_DEFAULT 5 /* maxord default value */ #define MXORDP1 6 /* max. number of N_Vectors in phi */ #define MXSTEP_DEFAULT 500 /* mxstep default value */ /* Return values for lower level routines used by IDASolve and functions provided to the nonlinear solver */ #define IDA_RES_RECVR +1 #define IDA_LSETUP_RECVR +2 #define IDA_LSOLVE_RECVR +3 #define IDA_CONSTR_RECVR +5 #define IDA_NLS_SETUP_RECVR +6 #define IDA_QRHS_RECVR +10 #define IDA_SRES_RECVR +11 #define IDA_QSRHS_RECVR +12 /* itol */ #define IDA_NN 0 #define IDA_SS 1 #define IDA_SV 2 #define IDA_WF 3 #define IDA_EE 4 /* * ---------------------------------------------------------------- * Types: struct IDAMemRec, IDAMem * ---------------------------------------------------------------- * The type IDAMem is type pointer to struct IDAMemRec. * This structure contains fields to keep track of problem state. 
* ---------------------------------------------------------------- */ typedef struct IDAMemRec { realtype ida_uround; /* machine unit roundoff */ /*-------------------------- Problem Specification Data --------------------------*/ IDAResFn ida_res; /* F(t,y(t),y'(t))=0; the function F */ void *ida_user_data; /* user pointer passed to res */ int ida_itol; /* itol = IDA_SS, IDA_SV, IDA_WF, IDA_NN */ realtype ida_rtol; /* relative tolerance */ realtype ida_Satol; /* scalar absolute tolerance */ N_Vector ida_Vatol; /* vector absolute tolerance */ booleantype ida_atolmin0; /* flag indicating that min(atol) = 0 */ booleantype ida_user_efun; /* SUNTRUE if user provides efun */ IDAEwtFn ida_efun; /* function to set ewt */ void *ida_edata; /* user pointer passed to efun */ booleantype ida_constraintsSet; /* constraints vector present: do constraints calc */ booleantype ida_suppressalg; /* SUNTRUE means suppress algebraic vars in local error tests */ /*----------------------- Quadrature Related Data -----------------------*/ booleantype ida_quadr; IDAQuadRhsFn ida_rhsQ; void *ida_user_dataQ; booleantype ida_errconQ; int ida_itolQ; realtype ida_rtolQ; realtype ida_SatolQ; /* scalar absolute tolerance for quadratures */ N_Vector ida_VatolQ; /* vector absolute tolerance for quadratures */ booleantype ida_atolQmin0; /* flag indicating that min(atolQ) = 0 */ /*------------------------ Sensitivity Related Data ------------------------*/ booleantype ida_sensi; int ida_Ns; int ida_ism; IDASensResFn ida_resS; void *ida_user_dataS; booleantype ida_resSDQ; realtype *ida_p; realtype *ida_pbar; int *ida_plist; int ida_DQtype; realtype ida_DQrhomax; booleantype ida_errconS; /* SUNTRUE if sensitivities in err. control */ int ida_itolS; realtype ida_rtolS; /* relative tolerance for sensitivities */ realtype *ida_SatolS; /* scalar absolute tolerances for sensi. */ N_Vector *ida_VatolS; /* vector absolute tolerances for sensi. */ booleantype *ida_atolSmin0; /* flag indicating that min(atolS[is]) = 0 */ /*----------------------------------- Quadrature Sensitivity Related Data -----------------------------------*/ booleantype ida_quadr_sensi; /* SUNTRUE if computing sensitivities of quadrs. */ IDAQuadSensRhsFn ida_rhsQS; /* fQS = (dfQ/dy)*yS + (dfQ/dp) */ void *ida_user_dataQS; /* data pointer passed to fQS */ booleantype ida_rhsQSDQ; /* SUNTRUE if using internal DQ functions */ booleantype ida_errconQS; /* SUNTRUE if yQS are considered in err. con. 
*/ int ida_itolQS; realtype ida_rtolQS; /* relative tolerance for yQS */ realtype *ida_SatolQS; /* scalar absolute tolerances for yQS */ N_Vector *ida_VatolQS; /* vector absolute tolerances for yQS */ booleantype *ida_atolQSmin0; /* flag indicating that min(atolQS[is]) = 0 */ /*----------------------------------------------- Divided differences array and associated arrays -----------------------------------------------*/ N_Vector ida_phi[MXORDP1]; /* phi = (maxord+1) arrays of divided differences */ realtype ida_psi[MXORDP1]; /* differences in t (sums of recent step sizes) */ realtype ida_alpha[MXORDP1]; /* ratios of current stepsize to psi values */ realtype ida_beta[MXORDP1]; /* ratios of current to previous product of psi's */ realtype ida_sigma[MXORDP1]; /* product successive alpha values and factorial */ realtype ida_gamma[MXORDP1]; /* sum of reciprocals of psi values */ /*------------------------- N_Vectors for integration -------------------------*/ N_Vector ida_ewt; /* error weight vector */ N_Vector ida_yy; /* work space for y vector (= user's yret) */ N_Vector ida_yp; /* work space for y' vector (= user's ypret) */ N_Vector ida_yypredict; /* predicted y vector */ N_Vector ida_yppredict; /* predicted y' vector */ N_Vector ida_delta; /* residual vector */ N_Vector ida_id; /* bit vector for diff./algebraic components */ N_Vector ida_constraints; /* vector of inequality constraint options */ N_Vector ida_savres; /* saved residual vector */ N_Vector ida_ee; /* accumulated corrections to y vector, but set equal to estimated local errors upon successful return */ N_Vector ida_tempv1; /* work space vector */ N_Vector ida_tempv2; /* work space vector */ N_Vector ida_tempv3; /* work space vector */ N_Vector ida_ynew; /* work vector for y in IDACalcIC (= tempv2) */ N_Vector ida_ypnew; /* work vector for yp in IDACalcIC (= ee) */ N_Vector ida_delnew; /* work vector for delta in IDACalcIC (= phi[2]) */ N_Vector ida_dtemp; /* work vector in IDACalcIC (= phi[3]) */ /*---------------------------- Quadrature Related N_Vectors ----------------------------*/ N_Vector ida_phiQ[MXORDP1]; N_Vector ida_yyQ; N_Vector ida_ypQ; N_Vector ida_ewtQ; N_Vector ida_eeQ; /*--------------------------- Sensitivity Related Vectors ---------------------------*/ N_Vector *ida_phiS[MXORDP1]; N_Vector *ida_ewtS; N_Vector *ida_eeS; /* cumulative sensitivity corrections */ N_Vector *ida_yyS; /* allocated and used for: */ N_Vector *ida_ypS; /* ism = SIMULTANEOUS */ N_Vector *ida_yySpredict; /* ism = STAGGERED */ N_Vector *ida_ypSpredict; N_Vector *ida_deltaS; N_Vector ida_tmpS1; /* work space vectors | tmpS1 = tempv1 */ N_Vector ida_tmpS2; /* for resS | tmpS2 = tempv2 */ N_Vector ida_tmpS3; /* | tmpS3 = allocated */ N_Vector *ida_savresS; /* work vector in IDACalcIC for stg (= phiS[2]) */ N_Vector *ida_delnewS; /* work vector in IDACalcIC for stg (= phiS[3]) */ N_Vector *ida_yyS0; /* initial yS, ypS vectors allocated and */ N_Vector *ida_ypS0; /* deallocated in IDACalcIC function */ N_Vector *ida_yyS0new; /* work vector in IDASensLineSrch (= phiS[4]) */ N_Vector *ida_ypS0new; /* work vector in IDASensLineSrch (= eeS) */ /*-------------------------------------- Quadrature Sensitivity Related Vectors --------------------------------------*/ N_Vector *ida_phiQS[MXORDP1];/* Mod. div. diffs. for quadr. 
sensitivities */ N_Vector *ida_ewtQS; /* error weight vectors for sensitivities */ N_Vector *ida_eeQS; /* cumulative quadr.sensi.corrections */ N_Vector *ida_yyQS; /* Unlike yS, yQS is not allocated by the user */ N_Vector *ida_tempvQS; /* temporary storage vector (~ tempv) */ N_Vector ida_savrhsQ; /* saved quadr. rhs (needed for rhsQS calls) */ /*------------------------------ Variables for use by IDACalcIC ------------------------------*/ realtype ida_t0; /* initial t */ N_Vector ida_yy0; /* initial y vector (user-supplied). */ N_Vector ida_yp0; /* initial y' vector (user-supplied). */ int ida_icopt; /* IC calculation user option */ booleantype ida_lsoff; /* IC calculation linesearch turnoff option */ int ida_maxnh; /* max. number of h tries in IC calculation */ int ida_maxnj; /* max. number of J tries in IC calculation */ int ida_maxnit; /* max. number of Netwon iterations in IC calc. */ int ida_nbacktr; /* number of IC linesearch backtrack operations */ int ida_sysindex; /* computed system index (0 or 1) */ int ida_maxbacks; /* max backtracks per Newton step */ realtype ida_epiccon; /* IC nonlinear convergence test constant */ realtype ida_steptol; /* minimum Newton step size in IC calculation */ realtype ida_tscale; /* time scale factor = abs(tout1 - t0) */ /* Tstop information */ booleantype ida_tstopset; realtype ida_tstop; /* Step Data */ int ida_kk; /* current BDF method order */ int ida_kused; /* method order used on last successful step */ int ida_knew; /* order for next step from order decrease decision */ int ida_phase; /* flag to trigger step doubling in first few steps */ int ida_ns; /* counts steps at fixed stepsize and order */ realtype ida_hin; /* initial step */ realtype ida_h0u; /* actual initial stepsize */ realtype ida_hh; /* current step size h */ realtype ida_hused; /* step size used on last successful step */ realtype ida_rr; /* rr = hnext / hused */ realtype ida_tn; /* current internal value of t */ realtype ida_tretlast; /* value of tret previously returned by IDASolve */ realtype ida_cj; /* current value of scalar (-alphas/hh) in Jacobian */ realtype ida_cjlast; /* cj value saved from last successful step */ realtype ida_cjold; /* cj value saved from last call to lsetup */ realtype ida_cjratio; /* ratio of cj values: cj/cjold */ realtype ida_ss; /* scalar used in Newton iteration convergence test */ realtype ida_oldnrm; /* norm of previous nonlinear solver update */ realtype ida_epsNewt; /* test constant in Newton convergence test */ realtype ida_epcon; /* coeficient of the Newton covergence test */ realtype ida_toldel; /* tolerance in direct test on Newton corrections */ realtype ida_ssS; /* scalar ss for staggered sensitivities */ /*------ Limits ------*/ int ida_maxncf; /* max numer of convergence failures */ int ida_maxnef; /* max number of error test failures */ int ida_maxord; /* max value of method order k: */ int ida_maxord_alloc; /* value of maxord used when allocating memory */ long int ida_mxstep; /* max number of internal steps for one user call */ realtype ida_hmax_inv; /* inverse of max. 
step size hmax (default = 0.0) */ /*-------- Counters --------*/ long int ida_nst; /* number of internal steps taken */ long int ida_nre; /* number of function (res) calls */ long int ida_nrQe; long int ida_nrSe; long int ida_nrQSe; /* number of fQS calls */ long int ida_nreS; long int ida_nrQeS; /* number of fQ calls from sensi DQ */ long int ida_ncfn; /* number of corrector convergence failures */ long int ida_ncfnQ; long int ida_ncfnS; long int ida_netf; /* number of error test failures */ long int ida_netfQ; long int ida_netfS; long int ida_netfQS; /* number of quadr. sensi. error test failures */ long int ida_nni; /* number of Newton iterations performed */ long int ida_nniS; long int ida_nsetups; /* number of lsetup calls */ long int ida_nsetupsS; /*------------------ Space requirements ------------------*/ sunindextype ida_lrw1; /* no. of realtype words in 1 N_Vector */ sunindextype ida_liw1; /* no. of integer words in 1 N_Vector */ sunindextype ida_lrw1Q; sunindextype ida_liw1Q; long int ida_lrw; /* number of realtype words in IDA work vectors */ long int ida_liw; /* no. of integer words in IDA work vectors */ realtype ida_tolsf; /* tolerance scale factor (saved value) */ /*------------------------------------------- Error handler function and error output file -------------------------------------------*/ IDAErrHandlerFn ida_ehfun; /* Error messages are handled by ehfun */ void *ida_eh_data; /* data pointer passed to ehfun */ FILE *ida_errfp; /* IDA error messages are sent to errfp */ /* Flags to verify correct calling sequence */ booleantype ida_SetupDone; /* set to SUNFALSE by IDAMalloc and IDAReInit set to SUNTRUE by IDACalcIC or IDASolve */ booleantype ida_VatolMallocDone; booleantype ida_constraintsMallocDone; booleantype ida_idMallocDone; booleantype ida_MallocDone; /* set to SUNFALSE by IDACreate set to SUNTRUE by IDAMalloc tested by IDAReInit and IDASolve */ booleantype ida_VatolQMallocDone; booleantype ida_quadMallocDone; booleantype ida_VatolSMallocDone; booleantype ida_SatolSMallocDone; booleantype ida_sensMallocDone; booleantype ida_VatolQSMallocDone; booleantype ida_SatolQSMallocDone; booleantype ida_quadSensMallocDone; /*--------------------- Nonlinear Solver Data ---------------------*/ SUNNonlinearSolver NLS; /* nonlinear solver object */ booleantype ownNLS; /* flag indicating NLS ownership */ SUNNonlinearSolver NLSsim; /* nonlinear solver object for DAE+Sens solves with the simultaneous corrector option */ booleantype ownNLSsim; /* flag indicating NLS ownership */ SUNNonlinearSolver NLSstg; /* nonlinear solver object for DAE+Sens solves with the staggered corrector option */ booleantype ownNLSstg; /* flag indicating NLS ownership */ /* The following vectors are NVector wrappers for use with the simultaneous and staggered corrector methods: Simult: ypredictSim = [ida_delta, ida_deltaS] ycorSim = [ida_ee, ida_eeS] ewtSim = [ida_ewt, ida_ewtS] Stagger: ypredictStg = ida_deltaS ycorStg = ida_eeS ewtStg = ida_ewtS */ N_Vector ypredictSim, ycorSim, ewtSim; N_Vector ypredictStg, ycorStg, ewtStg; /* flags indicating if vector wrappers for the simultaneous and staggered correctors have been allocated */ booleantype simMallocDone; booleantype stgMallocDone; IDAResFn nls_res; /* F(t,y(t),y'(t))=0; used in the nonlinear solver */ /*------------------ Linear Solver Data ------------------*/ /* Linear Solver functions to be called */ int (*ida_linit)(struct IDAMemRec *idamem); int (*ida_lsetup)(struct IDAMemRec *idamem, N_Vector yyp, N_Vector ypp, N_Vector resp, N_Vector tempv1,
N_Vector tempv2, N_Vector tempv3); int (*ida_lsolve)(struct IDAMemRec *idamem, N_Vector b, N_Vector weight, N_Vector ycur, N_Vector ypcur, N_Vector rescur); int (*ida_lperf)(struct IDAMemRec *idamem, int perftask); int (*ida_lfree)(struct IDAMemRec *idamem); /* Linear Solver specific memory */ void *ida_lmem; /* Flag to request a call to the setup routine */ booleantype ida_forceSetup; /* Flag to indicate successful ida_linit call */ booleantype ida_linitOK; /*---------------- Rootfinding Data ----------------*/ IDARootFn ida_gfun; /* Function g for roots sought */ int ida_nrtfn; /* number of components of g */ int *ida_iroots; /* array for root information */ int *ida_rootdir; /* array specifying direction of zero-crossing */ realtype ida_tlo; /* nearest endpoint of interval in root search */ realtype ida_thi; /* farthest endpoint of interval in root search */ realtype ida_trout; /* t return value from rootfinder routine */ realtype *ida_glo; /* saved array of g values at t = tlo */ realtype *ida_ghi; /* saved array of g values at t = thi */ realtype *ida_grout; /* array of g values at t = trout */ realtype ida_toutc; /* copy of tout (if NORMAL mode) */ realtype ida_ttol; /* tolerance on root location */ int ida_taskc; /* copy of parameter itask */ int ida_irfnd; /* flag showing whether last step had a root */ long int ida_nge; /* counter for g evaluations */ booleantype *ida_gactive; /* array with active/inactive event functions */ int ida_mxgnull; /* number of warning messages about possible g==0 */ /* Arrays for Fused Vector Operations */ /* scalar arrays */ realtype* ida_cvals; realtype ida_dvals[MAXORD_DEFAULT]; /* vector arrays */ N_Vector* ida_Xvecs; N_Vector* ida_Zvecs; /*------------------------ Adjoint sensitivity data ------------------------*/ booleantype ida_adj; /* SUNTRUE if performing ASA */ struct IDAadjMemRec *ida_adj_mem; /* Pointer to adjoint memory structure */ booleantype ida_adjMallocDone; } *IDAMem; /* * ================================================================= * A D J O I N T M O D U L E M E M O R Y B L O C K * ================================================================= */ /* * ----------------------------------------------------------------- * Forward references for pointers to various structures * ----------------------------------------------------------------- */ typedef struct IDAadjMemRec *IDAadjMem; typedef struct CkpntMemRec *CkpntMem; typedef struct DtpntMemRec *DtpntMem; typedef struct IDABMemRec *IDABMem; /* * ----------------------------------------------------------------- * Types for functions provided by an interpolation module * ----------------------------------------------------------------- * IDAAMMallocFn: Type for a function that initializes the content * field of the structures in the dt array * IDAAMFreeFn: Type for a function that deallocates the content * field of the structures in the dt array * IDAAGetYFn: Function type for a function that returns the * interpolated forward solution. 
* IDAAStorePnt: Function type for a function that stores a new * point in the structure d * ----------------------------------------------------------------- */ typedef booleantype (*IDAAMMallocFn)(IDAMem IDA_mem); typedef void (*IDAAMFreeFn)(IDAMem IDA_mem); typedef int (*IDAAGetYFn)(IDAMem IDA_mem, realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS); typedef int (*IDAAStorePntFn)(IDAMem IDA_mem, DtpntMem d); /* * ----------------------------------------------------------------- * Types : struct CkpntMemRec, CkpntMem * ----------------------------------------------------------------- * The type CkpntMem is type pointer to struct CkpntMemRec. * This structure contains fields to store all information at a * check point that is needed to 'hot' start IDAS. * ----------------------------------------------------------------- */ struct CkpntMemRec { /* Integration limits */ realtype ck_t0; realtype ck_t1; /* Modified divided difference array */ N_Vector ck_phi[MXORDP1]; /* Do we need to carry quadratures? */ booleantype ck_quadr; /* Modified divided difference array for quadratures */ N_Vector ck_phiQ[MXORDP1]; /* Do we need to carry sensitivities? */ booleantype ck_sensi; /* number of sensitivities */ int ck_Ns; /* Modified divided difference array for sensitivities */ N_Vector *ck_phiS[MXORDP1]; /* Do we need to carry quadrature sensitivities? */ booleantype ck_quadr_sensi; /* Modified divided difference array for quadrature sensitivities */ N_Vector *ck_phiQS[MXORDP1]; /* Step data */ long int ck_nst; realtype ck_tretlast; int ck_ns; int ck_kk; int ck_kused; int ck_knew; int ck_phase; realtype ck_hh; realtype ck_hused; realtype ck_rr; realtype ck_cj; realtype ck_cjlast; realtype ck_cjold; realtype ck_cjratio; realtype ck_ss; realtype ck_ssS; realtype ck_psi[MXORDP1]; realtype ck_alpha[MXORDP1]; realtype ck_beta[MXORDP1]; realtype ck_sigma[MXORDP1]; realtype ck_gamma[MXORDP1]; /* How many phi, phiS, phiQ and phiQS were allocated? */ int ck_phi_alloc; /* Pointer to next structure in list */ struct CkpntMemRec *ck_next; }; /* * ----------------------------------------------------------------- * Type : struct DtpntMemRec * ----------------------------------------------------------------- * This structure contains fields to store all information at a * data point that is needed to interpolate solution of forward * simulations. Its content field is interpType-dependent. * ----------------------------------------------------------------- */ struct DtpntMemRec { realtype t; /* time */ void *content; /* interpType-dependent content */ }; /* Data for cubic Hermite interpolation */ typedef struct HermiteDataMemRec { N_Vector y; N_Vector yd; N_Vector *yS; N_Vector *ySd; } *HermiteDataMem; /* Data for polynomial interpolation */ typedef struct PolynomialDataMemRec { N_Vector y; N_Vector *yS; /* yd and ySd store the derivative(s) only for the first dt point. NULL otherwise. */ N_Vector yd; N_Vector *ySd; int order; } *PolynomialDataMem; /* * ----------------------------------------------------------------- * Type : struct IDABMemRec * ----------------------------------------------------------------- * The type IDABMemRec is a pointer to a structure which stores all * information for ONE backward problem. * The IDAadjMem struct contains a linked list of IDABMem pointers * ----------------------------------------------------------------- */ struct IDABMemRec { /* Index of this backward problem */ int ida_index; /* Time at which the backward problem is initialized. 
*/ realtype ida_t0; /* Memory for this backward problem */ IDAMem IDA_mem; /* Flags to indicate that this backward problem's RHS or quad RHS * require forward sensitivities */ booleantype ida_res_withSensi; booleantype ida_rhsQ_withSensi; /* Residual function for backward run */ IDAResFnB ida_res; IDAResFnBS ida_resS; /* Right hand side quadrature function (fQB) for backward run */ IDAQuadRhsFnB ida_rhsQ; IDAQuadRhsFnBS ida_rhsQS; /* User user_data */ void *ida_user_data; /* Linear solver's data and functions */ /* Memory block for a linear solver's interface to IDAA */ void *ida_lmem; /* Function to free any memory allocated by the linear solver */ int (*ida_lfree)(IDABMem IDAB_mem); /* Memory block for a preconditioner's module interface to IDAA */ void *ida_pmem; /* Function to free any memory allocated by the preconditioner module */ int (*ida_pfree)(IDABMem IDAB_mem); /* Time at which to extract solution / quadratures */ realtype ida_tout; /* Workspace Nvectors */ N_Vector ida_yy; N_Vector ida_yp; /* Link to next structure in list. */ struct IDABMemRec *ida_next; }; /* * ----------------------------------------------------------------- * Type : struct IDAadjMemRec * ----------------------------------------------------------------- * The type IDAadjMem is type pointer to struct IDAadjMemRec. * This structure contains fields to store all information * necessary for adjoint sensitivity analysis. * ----------------------------------------------------------------- */ struct IDAadjMemRec { /* -------------------- * Forward problem data * -------------------- */ /* Integration interval */ realtype ia_tinitial, ia_tfinal; /* Flag for first call to IDASolveF */ booleantype ia_firstIDAFcall; /* Flag if IDASolveF was called with TSTOP */ booleantype ia_tstopIDAFcall; realtype ia_tstopIDAF; /* Flag if IDASolveF was called in IDA_NORMAL_MODE and encountered a root after tout */ booleantype ia_rootret; realtype ia_troot; /* ---------------------- * Backward problems data * ---------------------- */ /* Storage for backward problems */ struct IDABMemRec *IDAB_mem; /* Number of backward problems. */ int ia_nbckpbs; /* Address of current backward problem (iterator). */ struct IDABMemRec *ia_bckpbCrt; /* Flag for first call to IDASolveB */ booleantype ia_firstIDABcall; /* ---------------- * Check point data * ---------------- */ /* Storage for check point information */ struct CkpntMemRec *ck_mem; /* address of the check point structure for which data is available */ struct CkpntMemRec *ia_ckpntData; /* Number of checkpoints. */ int ia_nckpnts; /* ------------------ * Interpolation data * ------------------ */ /* Number of steps between 2 check points */ long int ia_nsteps; /* Last index used in IDAAfindIndex */ long int ia_ilast; /* Storage for data from forward runs */ struct DtpntMemRec **dt_mem; /* Actual number of data points saved in current dt_mem */ /* Commonly, np = nsteps+1 */ long int ia_np; /* Interpolation type */ int ia_interpType; /* Functions set by the interpolation module */ IDAAStorePntFn ia_storePnt; /* store a new interpolation point */ IDAAGetYFn ia_getY; /* interpolate forward solution */ IDAAMMallocFn ia_malloc; /* allocate new data point */ IDAAMFreeFn ia_free; /* destroys data point */ /* Flags controlling the interpolation module */ booleantype ia_mallocDone; /* IM initialized? */ booleantype ia_newData; /* new data available in dt_mem? */ booleantype ia_storeSensi; /* store sensitivities? */ booleantype ia_interpSensi; /* interpolate sensitivities?
*/ booleantype ia_noInterp; /* interpolations are temporarily */ /* disabled ( IDACalcICB ) */ /* Workspace for polynomial interpolation */ N_Vector ia_Y[MXORDP1]; /* pointers phi[i] */ N_Vector *ia_YS[MXORDP1]; /* pointers phiS[i] */ realtype ia_T[MXORDP1]; /* Workspace for wrapper functions */ N_Vector ia_yyTmp, ia_ypTmp; N_Vector *ia_yySTmp, *ia_ypSTmp; }; /* * ================================================================= * I N T E R F A C E T O L I N E A R S O L V E R S * ================================================================= */ /* * ----------------------------------------------------------------- * int (*ida_linit)(IDAMem IDA_mem); * ----------------------------------------------------------------- * The purpose of ida_linit is to allocate memory for the * solver-specific fields in the structure *(idamem->ida_lmem) and * perform any needed initializations of solver-specific memory, * such as counters/statistics. An (*ida_linit) should return * 0 if it has successfully initialized the IDA linear solver and * a non-zero value otherwise. If an error does occur, an * appropriate message should be sent to the error handler function. * ---------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * int (*ida_lsetup)(IDAMem IDA_mem, N_Vector yyp, N_Vector ypp, * N_Vector resp, N_Vector tempv1, * N_Vector tempv2, N_Vector tempv3); * ----------------------------------------------------------------- * The job of ida_lsetup is to prepare the linear solver for * subsequent calls to ida_lsolve. Its parameters are as follows: * * idamem - problem memory pointer of type IDAMem. See the big * typedef earlier in this file. * * yyp - the predicted y vector for the current IDA internal * step. * * ypp - the predicted y' vector for the current IDA internal * step. * * resp - F(tn, yyp, ypp). * * tempv1, tempv2, tempv3 - temporary N_Vectors provided for use * by ida_lsetup. * * The ida_lsetup routine should return 0 if successful, * a positive value for a recoverable error, and a negative value * for an unrecoverable error. * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * int (*ida_lsolve)(IDAMem IDA_mem, N_Vector b, N_Vector weight, * N_Vector ycur, N_Vector ypcur, N_Vector rescur); * ----------------------------------------------------------------- * ida_lsolve must solve the linear equation P x = b, where * P is some approximation to the system Jacobian * J = (dF/dy) + cj (dF/dy') * evaluated at (tn,ycur,ypcur) and the RHS vector b is input. * The N-vector ycur contains the solver's current approximation * to y(tn), ypcur contains that for y'(tn), and the vector rescur * contains the N-vector residual F(tn,ycur,ypcur). * The solution is to be returned in the vector b. * * The ida_lsolve routine should return 0 if successful, * a positive value for a recoverable error, and a negative value * for an unrecoverable error. * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * int (*ida_lperf)(IDAMem IDA_mem, int perftask); * ----------------------------------------------------------------- * ida_lperf is called in two places in IDAS where linear solver * performance data is required by IDAS. For perftask = 0, an * initialization of performance variables is performed, while for * perftask = 1, the performance is evaluated.
* ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * int (*ida_lfree)(IDAMem IDA_mem); * ----------------------------------------------------------------- * ida_lfree should free up any memory allocated by the linear * solver. This routine is called once a problem has been * completed and the linear solver is no longer needed. It should * return 0 upon success, nonzero on failure. * ----------------------------------------------------------------- */ /* * ================================================================= * I N T E R N A L F U N C T I O N S * ================================================================= */ /* Prototype of internal ewtSet function */ int IDAEwtSet(N_Vector ycur, N_Vector weight, void *data); /* High level error handler */ void IDAProcessError(IDAMem IDA_mem, int error_code, const char *module, const char *fname, const char *msgfmt, ...); /* Prototype of internal errHandler function */ void IDAErrHandler(int error_code, const char *module, const char *function, char *msg, void *data); /* Norm functions. Also used for IC, so they are global.*/ realtype IDAWrmsNorm(IDAMem IDA_mem, N_Vector x, N_Vector w, booleantype mask); realtype IDASensWrmsNorm(IDAMem IDA_mem, N_Vector *xS, N_Vector *wS, booleantype mask); realtype IDASensWrmsNormUpdate(IDAMem IDA_mem, realtype old_nrm, N_Vector *xS, N_Vector *wS, booleantype mask); /* Nonlinear solver initialization */ int idaNlsInit(IDAMem IDA_mem); int idaNlsInitSensSim(IDAMem IDA_mem); int idaNlsInitSensStg(IDAMem IDA_mem); /* Prototype for internal sensitivity residual DQ function */ int IDASensResDQ(int Ns, realtype t, N_Vector yy, N_Vector yp, N_Vector resval, N_Vector *yyS, N_Vector *ypS, N_Vector *resvalS, void *user_dataS, N_Vector ytemp, N_Vector yptemp, N_Vector restemp); /* * ================================================================= * E R R O R M E S S A G E S * ================================================================= */ #if defined(SUNDIALS_EXTENDED_PRECISION) #define MSG_TIME "t = %Lg, " #define MSG_TIME_H "t = %Lg and h = %Lg, " #define MSG_TIME_INT "t = %Lg is not between tcur - hu = %Lg and tcur = %Lg." #define MSG_TIME_TOUT "tout = %Lg" #define MSG_TIME_TSTOP "tstop = %Lg" #elif defined(SUNDIALS_DOUBLE_PRECISION) #define MSG_TIME "t = %lg, " #define MSG_TIME_H "t = %lg and h = %lg, " #define MSG_TIME_INT "t = %lg is not between tcur - hu = %lg and tcur = %lg." #define MSG_TIME_TOUT "tout = %lg" #define MSG_TIME_TSTOP "tstop = %lg" #else #define MSG_TIME "t = %g, " #define MSG_TIME_H "t = %g and h = %g, " #define MSG_TIME_INT "t = %g is not between tcur - hu = %g and tcur = %g." #define MSG_TIME_TOUT "tout = %g" #define MSG_TIME_TSTOP "tstop = %g" #endif /* General errors */ #define MSG_MEM_FAIL "A memory request failed." #define MSG_NO_MEM "ida_mem = NULL illegal." #define MSG_NO_MALLOC "Attempt to call before IDAMalloc." #define MSG_BAD_NVECTOR "A required vector operation is not implemented." /* Initialization errors */ #define MSG_Y0_NULL "y0 = NULL illegal." #define MSG_YP0_NULL "yp0 = NULL illegal." #define MSG_BAD_ITOL "Illegal value for itol. The legal values are IDA_SS, IDA_SV, and IDA_WF." #define MSG_RES_NULL "res = NULL illegal." #define MSG_BAD_RTOL "rtol < 0 illegal." #define MSG_ATOL_NULL "atol = NULL illegal." #define MSG_BAD_ATOL "Some atol component < 0.0 illegal." #define MSG_ROOT_FUNC_NULL "g = NULL illegal." #define MSG_MISSING_ID "id = NULL but suppressalg option on." 
#define MSG_NO_TOLS "No integration tolerances have been specified." #define MSG_FAIL_EWT "The user-provided EwtSet function failed." #define MSG_BAD_EWT "Some initial ewt component = 0.0 illegal." #define MSG_Y0_FAIL_CONSTR "y0 fails to satisfy constraints." #define MSG_BAD_ISM_CONSTR "Constraints can not be enforced while forward sensitivity is used with simultaneous method." #define MSG_LSOLVE_NULL "The linear solver's solve routine is NULL." #define MSG_LINIT_FAIL "The linear solver's init routine failed." #define MSG_NLS_INIT_FAIL "The nonlinear solver's init routine failed." #define MSG_NO_QUAD "Illegal attempt to call before calling IDAQuadInit." #define MSG_BAD_EWTQ "Initial ewtQ has component(s) equal to zero (illegal)." #define MSG_BAD_ITOLQ "Illegal value for itolQ. The legal values are IDA_SS and IDA_SV." #define MSG_NO_TOLQ "No integration tolerances for quadrature variables have been specified." #define MSG_NULL_ATOLQ "atolQ = NULL illegal." #define MSG_BAD_RTOLQ "rtolQ < 0 illegal." #define MSG_BAD_ATOLQ "atolQ has negative component(s) (illegal)." #define MSG_NO_SENSI "Illegal attempt to call before calling IDASensInit." #define MSG_BAD_EWTS "Initial ewtS has component(s) equal to zero (illegal)." #define MSG_BAD_ITOLS "Illegal value for itolS. The legal values are IDA_SS, IDA_SV, and IDA_EE." #define MSG_NULL_ATOLS "atolS = NULL illegal." #define MSG_BAD_RTOLS "rtolS < 0 illegal." #define MSG_BAD_ATOLS "atolS has negative component(s) (illegal)." #define MSG_BAD_PBAR "pbar has zero component(s) (illegal)." #define MSG_BAD_PLIST "plist has negative component(s) (illegal)." #define MSG_BAD_NS "NS <= 0 illegal." #define MSG_NULL_YYS0 "yyS0 = NULL illegal." #define MSG_NULL_YPS0 "ypS0 = NULL illegal." #define MSG_BAD_ISM "Illegal value for ism. Legal values are: IDA_SIMULTANEOUS and IDA_STAGGERED." #define MSG_BAD_IS "Illegal value for is." #define MSG_NULL_DKYA "dkyA = NULL illegal." #define MSG_BAD_DQTYPE "Illegal value for DQtype. Legal values are: IDA_CENTERED and IDA_FORWARD." #define MSG_BAD_DQRHO "DQrhomax < 0 illegal." #define MSG_NULL_ABSTOLQS "abstolQS = NULL illegal parameter." #define MSG_BAD_RELTOLQS "reltolQS < 0 illegal parameter." #define MSG_BAD_ABSTOLQS "abstolQS has negative component(s) (illegal)." #define MSG_NO_QUADSENSI "Forward sensitivity analysis for quadrature variables was not activated." #define MSG_NULL_YQS0 "yQS0 = NULL illegal parameter." /* IDACalcIC error messages */ #define MSG_IC_BAD_ICOPT "icopt has an illegal value." #define MSG_IC_BAD_MAXBACKS "maxbacks <= 0 illegal." #define MSG_IC_MISSING_ID "id = NULL conflicts with icopt." #define MSG_IC_TOO_CLOSE "tout1 too close to t0 to attempt initial condition calculation." #define MSG_IC_BAD_ID "id has illegal values." #define MSG_IC_BAD_EWT "Some initial ewt component = 0.0 illegal." #define MSG_IC_RES_NONREC "The residual function failed unrecoverably. " #define MSG_IC_RES_FAIL "The residual function failed at the first call. " #define MSG_IC_SETUP_FAIL "The linear solver setup failed unrecoverably." #define MSG_IC_SOLVE_FAIL "The linear solver solve failed unrecoverably." #define MSG_IC_NO_RECOVERY "The residual routine or the linear setup or solve routine had a recoverable error, but IDACalcIC was unable to recover." #define MSG_IC_FAIL_CONSTR "Unable to satisfy the inequality constraints." #define MSG_IC_FAILED_LINS "The linesearch algorithm failed: step too small or too many backtracks." #define MSG_IC_CONV_FAILED "Newton/Linesearch algorithm failed to converge."
/* IDASolve error messages */ #define MSG_YRET_NULL "yret = NULL illegal." #define MSG_YPRET_NULL "ypret = NULL illegal." #define MSG_TRET_NULL "tret = NULL illegal." #define MSG_BAD_ITASK "itask has an illegal value." #define MSG_TOO_CLOSE "tout too close to t0 to start integration." #define MSG_BAD_HINIT "Initial step is not towards tout." #define MSG_BAD_TSTOP "The value " MSG_TIME_TSTOP " is behind current " MSG_TIME "in the direction of integration." #define MSG_CLOSE_ROOTS "Root found at and very near " MSG_TIME "." #define MSG_MAX_STEPS "At " MSG_TIME ", mxstep steps taken before reaching tout." #define MSG_EWT_NOW_FAIL "At " MSG_TIME "the user-provided EwtSet function failed." #define MSG_EWT_NOW_BAD "At " MSG_TIME "some ewt component has become <= 0.0." #define MSG_TOO_MUCH_ACC "At " MSG_TIME "too much accuracy requested." #define MSG_BAD_T "Illegal value for t. " MSG_TIME_INT #define MSG_BAD_TOUT "Trouble interpolating at " MSG_TIME_TOUT ". tout too far back in direction of integration." #define MSG_BAD_K "Illegal value for k." #define MSG_NULL_DKY "dky = NULL illegal." #define MSG_NULL_DKYP "dkyp = NULL illegal." #define MSG_ERR_FAILS "At " MSG_TIME_H "the error test failed repeatedly or with |h| = hmin." #define MSG_CONV_FAILS "At " MSG_TIME_H "the corrector convergence failed repeatedly or with |h| = hmin." #define MSG_SETUP_FAILED "At " MSG_TIME "the linear solver setup failed unrecoverably." #define MSG_SOLVE_FAILED "At " MSG_TIME "the linear solver solve failed unrecoverably." #define MSG_REP_RES_ERR "At " MSG_TIME "repeated recoverable residual errors." #define MSG_RES_NONRECOV "At " MSG_TIME "the residual function failed unrecoverably." #define MSG_FAILED_CONSTR "At " MSG_TIME "unable to satisfy inequality constraints." #define MSG_RTFUNC_FAILED "At " MSG_TIME ", the rootfinding routine failed in an unrecoverable manner." #define MSG_NO_ROOT "Rootfinding was not initialized." #define MSG_INACTIVE_ROOTS "At the end of the first step, there are still some root functions identically 0. This warning will not be issued again." #define MSG_NLS_INPUT_NULL "At " MSG_TIME ", the nonlinear solver was passed a NULL input." #define MSG_NLS_SETUP_FAILED "At " MSG_TIME ", the nonlinear solver setup failed unrecoverably." #define MSG_NLS_FAIL "At " MSG_TIME ", the nonlinear solver failed in an unrecoverable manner." #define MSG_EWTQ_NOW_BAD "At " MSG_TIME ", a component of ewtQ has become <= 0." #define MSG_QRHSFUNC_FAILED "At " MSG_TIME ", the quadrature right-hand side routine failed in an unrecoverable manner." #define MSG_QRHSFUNC_UNREC "At " MSG_TIME ", the quadrature right-hand side failed in a recoverable manner, but no recovery is possible." #define MSG_QRHSFUNC_REPTD "At " MSG_TIME "repeated recoverable quadrature right-hand side function errors." #define MSG_QRHSFUNC_FIRST "The quadrature right-hand side routine failed at the first call." #define MSG_NULL_P "p = NULL when using internal DQ for sensitivity residual is illegal." #define MSG_EWTS_NOW_BAD "At " MSG_TIME ", a component of ewtS has become <= 0." #define MSG_SRES_FAILED "At " MSG_TIME ", the sensitivity residual routine failed in an unrecoverable manner." #define MSG_SRES_UNREC "At " MSG_TIME ", the sensitivity residual failed in a recoverable manner, but no recovery is possible." #define MSG_SRES_REPTD "At " MSG_TIME "repeated recoverable sensitivity residual function errors." #define MSG_NO_TOLQS "No integration tolerances for quadrature sensitivity variables have been specified."
#define MSG_NULL_RHSQ "IDAS is expected to use DQ to evaluate the RHS of quad. sensi., but quadratures were not initialized." #define MSG_BAD_EWTQS "Initial ewtQS has component(s) equal to zero (illegal)." #define MSG_EWTQS_NOW_BAD "At " MSG_TIME ", a component of ewtQS has become <= 0." #define MSG_QSRHSFUNC_FAILED "At " MSG_TIME ", the sensitivity quadrature right-hand side routine failed in an unrecoverable manner." #define MSG_QSRHSFUNC_REPTD "At " MSG_TIME "repeated recoverable sensitivity quadrature right-hand side function errors." #define MSG_QSRHSFUNC_FIRST "The quadrature right-hand side routine failed at the first call." /* IDASet* / IDAGet* error messages */ #define MSG_NEG_MAXORD "maxord<=0 illegal." #define MSG_BAD_MAXORD "Illegal attempt to increase maximum order." #define MSG_NEG_HMAX "hmax < 0 illegal." #define MSG_NEG_EPCON "epcon <= 0.0 illegal." #define MSG_BAD_CONSTR "Illegal values in constraints vector." #define MSG_BAD_EPICCON "epiccon <= 0.0 illegal." #define MSG_BAD_MAXNH "maxnh <= 0 illegal." #define MSG_BAD_MAXNJ "maxnj <= 0 illegal." #define MSG_BAD_MAXNIT "maxnit <= 0 illegal." #define MSG_BAD_STEPTOL "steptol <= 0.0 illegal." #define MSG_TOO_LATE "IDAGetConsistentIC can only be called before IDASolve." /* * ================================================================= * I D A A E R R O R M E S S A G E S * ================================================================= */ #define MSGAM_NULL_IDAMEM "ida_mem = NULL illegal." #define MSGAM_NO_ADJ "Illegal attempt to call before calling IDAadjInit." #define MSGAM_BAD_INTERP "Illegal value for interp." #define MSGAM_BAD_STEPS "Steps nonpositive illegal." #define MSGAM_BAD_WHICH "Illegal value for which." #define MSGAM_NO_BCK "No backward problems have been defined yet." #define MSGAM_NO_FWD "Illegal attempt to call before calling IDASolveF." #define MSGAM_BAD_TB0 "The initial time tB0 is outside the interval over which the forward problem was solved." #define MSGAM_BAD_SENSI "At least one backward problem requires sensitivities, but they were not stored for interpolation." #define MSGAM_BAD_ITASKB "Illegal value for itaskB. Legal values are IDA_NORMAL and IDA_ONE_STEP." #define MSGAM_BAD_TBOUT "The final time tBout is outside the interval over which the forward problem was solved." #define MSGAM_BACK_ERROR "Error occurred while integrating backward problem # %d" #define MSGAM_BAD_TINTERP "Bad t = %g for interpolation." #define MSGAM_BAD_T "Bad t for interpolation." #define MSGAM_WRONG_INTERP "This function cannot be called for the specified interp type." #define MSGAM_MEM_FAIL "A memory request failed." #define MSGAM_NO_INITBS "Illegal attempt to call before calling IDAInitBS." #ifdef __cplusplus } #endif #endif
utf-8
1
unknown
unknown
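The idas_impl.h header above declares IDAWrmsNorm and IDAEwtSet without spelling out the formulas they implement. The following standalone C++ sketch (hypothetical names; plain arrays stand in for N_Vectors) illustrates the weighted root-mean-square norm and the reciprocal error weights those routines are built around: wrms(x, w) = sqrt((1/N) * sum((x[i]*w[i])^2)) with w[i] = 1/(rtol*|y[i]| + atol) for scalar tolerances.

#include <cmath>
#include <cstddef>
#include <cstdio>

// Weighted RMS norm: sqrt((1/N) * sum((x[i]*w[i])^2)).
static double wrms_norm(const double* x, const double* w, std::size_t n) {
    double sum = 0.0;
    for (std::size_t i = 0; i < n; ++i) {
        const double p = x[i] * w[i];
        sum += p * p;
    }
    return std::sqrt(sum / static_cast<double>(n));
}

int main() {
    // Reciprocal error weights of the form 1/(rtol*|y| + atol).
    const double y[3] = {1.0, -2.0, 0.5};
    const double err[3] = {1e-5, -2e-5, 5e-6};
    const double rtol = 1e-4, atol = 1e-6;
    double w[3];
    for (int i = 0; i < 3; ++i)
        w[i] = 1.0 / (rtol * std::fabs(y[i]) + atol);
    std::printf("wrms = %g\n", wrms_norm(err, w, 3));
    return 0;
}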
gcl-2.6.12/gmp4/mpn/generic/mul_1.c
/* mpn_mul_1 -- Multiply a limb vector with a single limb and store the product in a second limb vector. Copyright 1991-1994, 1996, 2000-2002 Free Software Foundation, Inc. This file is part of the GNU MP Library. The GNU MP Library is free software; you can redistribute it and/or modify it under the terms of either: * the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. or * the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. or both in parallel, as here. The GNU MP Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received copies of the GNU General Public License and the GNU Lesser General Public License along with the GNU MP Library. If not, see https://www.gnu.org/licenses/. */ #include "gmp.h" #include "gmp-impl.h" #include "longlong.h" #if GMP_NAIL_BITS == 0 mp_limb_t mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) { mp_limb_t ul, cl, hpl, lpl; ASSERT (n >= 1); ASSERT (MPN_SAME_OR_INCR_P (rp, up, n)); cl = 0; do { ul = *up++; umul_ppmm (hpl, lpl, ul, vl); lpl += cl; cl = (lpl < cl) + hpl; *rp++ = lpl; } while (--n != 0); return cl; } #endif #if GMP_NAIL_BITS >= 1 mp_limb_t mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl) { mp_limb_t shifted_vl, ul, lpl, hpl, prev_hpl, xw, cl, xl; ASSERT (n >= 1); ASSERT (MPN_SAME_OR_INCR_P (rp, up, n)); ASSERT_MPN (up, n); ASSERT_LIMB (vl); shifted_vl = vl << GMP_NAIL_BITS; cl = 0; prev_hpl = 0; do { ul = *up++; umul_ppmm (hpl, lpl, ul, shifted_vl); lpl >>= GMP_NAIL_BITS; xw = prev_hpl + lpl + cl; cl = xw >> GMP_NUMB_BITS; xl = xw & GMP_NUMB_MASK; *rp++ = xl; prev_hpl = hpl; } while (--n != 0); return prev_hpl + cl; } #endif
utf-8
1
unknown
unknown
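A minimal caller-side sketch for mpn_mul_1 as declared in gmp.h, assuming a GMP installation linked with -lgmp; the operand values are illustrative only.

#include <gmp.h>
#include <cstdio>

int main() {
    // Two-limb operand {low limb, high limb}.
    // rp[] = up[] * 3; the high limb of the product is returned as the carry.
    const mp_limb_t up[2] = {~(mp_limb_t)0, 1};
    mp_limb_t rp[2];
    mp_limb_t carry = mpn_mul_1(rp, up, 2, 3);
    std::printf("carry-out limb = %llu\n", (unsigned long long)carry);
    return 0;
}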
firefox-esr-91.6.0esr/nsprpub/pr/src/md/unix/solaris.c
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "primpl.h" extern PRBool suspendAllOn; extern PRThread *suspendAllThread; extern void _MD_SET_PRIORITY(_MDThread *md, PRThreadPriority newPri); PRIntervalTime _MD_Solaris_TicksPerSecond(void) { /* * Ticks have a 10-microsecond resolution. So there are * 100000 ticks per second. */ return 100000UL; } /* Interval timers, implemented using gethrtime() */ PRIntervalTime _MD_Solaris_GetInterval(void) { union { hrtime_t hrt; /* hrtime_t is a 64-bit (long long) integer */ PRInt64 pr64; } time; PRInt64 resolution; PRIntervalTime ticks; time.hrt = gethrtime(); /* in nanoseconds */ /* * Convert from nanoseconds to ticks. A tick's resolution is * 10 microseconds, or 10000 nanoseconds. */ LL_I2L(resolution, 10000); LL_DIV(time.pr64, time.pr64, resolution); LL_L2UI(ticks, time.pr64); return ticks; } #ifdef _PR_PTHREADS void _MD_EarlyInit(void) { } PRWord *_MD_HomeGCRegisters(PRThread *t, PRIntn isCurrent, PRIntn *np) { *np = 0; return NULL; } #endif /* _PR_PTHREADS */ #if defined(_PR_LOCAL_THREADS_ONLY) void _MD_EarlyInit(void) { } void _MD_SolarisInit() { _PR_UnixInit(); } void _MD_SET_PRIORITY(_MDThread *thread, PRThreadPriority newPri) { return; } PRStatus _MD_InitializeThread(PRThread *thread) { return PR_SUCCESS; } PRStatus _MD_WAIT(PRThread *thread, PRIntervalTime ticks) { PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE)); _PR_MD_SWITCH_CONTEXT(thread); return PR_SUCCESS; } PRStatus _MD_WAKEUP_WAITER(PRThread *thread) { PR_ASSERT((thread == NULL) || (!(thread->flags & _PR_GLOBAL_SCOPE))); return PR_SUCCESS; } /* These functions should not be called for Solaris */ void _MD_YIELD(void) { PR_NOT_REACHED("_MD_YIELD should not be called for Solaris"); } PRStatus _MD_CREATE_THREAD( PRThread *thread, void (*start) (void *), PRThreadPriority priority, PRThreadScope scope, PRThreadState state, PRUint32 stackSize) { PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Solaris"); return(PR_FAILURE); } #ifdef USE_SETJMP PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np) { if (isCurrent) { (void) setjmp(CONTEXT(t)); } *np = sizeof(CONTEXT(t)) / sizeof(PRWord); return (PRWord *) CONTEXT(t); } #else PRWord *_MD_HomeGCRegisters(PRThread *t, PRIntn isCurrent, PRIntn *np) { if (isCurrent) { (void) getcontext(CONTEXT(t)); } *np = NGREG; return (PRWord*) &t->md.context.uc_mcontext.gregs[0]; } #endif /* USE_SETJMP */ #endif /* _PR_LOCAL_THREADS_ONLY */ #ifndef _PR_PTHREADS #if defined(i386) && defined(SOLARIS2_4) /* * Because clock_gettime() on Solaris/x86 2.4 always generates a * segmentation fault, we use an emulated version _pr_solx86_clock_gettime(), * which is implemented using gettimeofday(). */ int _pr_solx86_clock_gettime(clockid_t clock_id, struct timespec *tp) { struct timeval tv; if (clock_id != CLOCK_REALTIME) { errno = EINVAL; return -1; } gettimeofday(&tv, NULL); tp->tv_sec = tv.tv_sec; tp->tv_nsec = tv.tv_usec * 1000; return 0; } #endif /* i386 && SOLARIS2_4 */ #endif /* _PR_PTHREADS */
utf-8
1
unknown
unknown
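The interval-timer code above reduces gethrtime() nanoseconds to 10-microsecond ticks; here is a tiny sketch of that conversion on plain integers (the sample reading is made up):

#include <cstdint>
#include <cstdio>

int main() {
    const std::uint64_t ns = 1234567890ULL;  // a gethrtime()-style reading
    const std::uint64_t ticks = ns / 10000;  // 10 us per tick, 100000 ticks/s
    std::printf("%llu ns -> %llu ticks\n",
                (unsigned long long)ns, (unsigned long long)ticks);
    return 0;
}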
kproperty-3.2.0/src/KPropertyLineStyleItemDelegate_p.cpp
/* This file is part of the KDE project * Copyright (C) 2007 Jan Hambrecht <jaham@gmx.net> * Copyright (C) 2015 Jarosław Staniek <staniek@kde.org> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. */ #include "KPropertyLineStyleItemDelegate_p.h" #include "KPropertyCoreUtils_p.h" #include "KPropertyUtils_p.h" #include <QPen> KPropertyLineStyleItemDelegate::KPropertyLineStyleItemDelegate(QObject * parent) : QAbstractItemDelegate(parent) { } KPropertyLineStyleItemDelegate::~KPropertyLineStyleItemDelegate() { } class PenStyleData : public QHash<Qt::PenStyle, QString> { public: PenStyleData() { insert(Qt::NoPen, QObject::tr("None", "No Line")); insert(Qt::SolidLine, QObject::tr("Solid Line")); insert(Qt::DashLine, QObject::tr("Dash Line")); insert(Qt::DotLine, QObject::tr("Dot Line")); insert(Qt::DashDotLine, QObject::tr("Dash-Dot Line")); insert(Qt::DashDotDotLine, QObject::tr("Dash-Dot-Dot Line")); insert(Qt::CustomDashLine, QObject::tr("Custom Dash Line")); } }; Q_GLOBAL_STATIC(PenStyleData, g_penStyleData) //static QString KPropertyLineStyleItemDelegate::styleName(Qt::PenStyle style, const QLocale &locale) { if (locale.language() == QLocale::C) { return KPropertyUtils::keyForEnumValue("PenStyle", style); } return g_penStyleData->value(style); } //static void KPropertyLineStyleItemDelegate::paintItem(QPainter *painter, const QPen &pen_, const QRect &rect, const QStyleOption &option) { const KPropertyUtilsPrivate::PainterSaver saver(painter); QPen pen(pen_); pen.setBrush(option.state & QStyle::State_Selected ? option.palette.highlightedText() : option.palette.text()); if (pen.style() == Qt::NoPen) { pen.setWidth(0); pen.setStyle(Qt::SolidLine); painter->setPen(pen); painter->drawText(rect, Qt::AlignLeft | Qt::AlignVCenter, g_penStyleData->value(Qt::NoPen)); } else { pen.setWidth(3); painter->setPen(pen); painter->drawLine(rect.left(), rect.center().y(), rect.right(), rect.center().y()); } } void KPropertyLineStyleItemDelegate::paint(QPainter *painter, const QStyleOptionViewItem &option, const QModelIndex &index) const { const KPropertyUtilsPrivate::PainterSaver saver(painter); if (option.state & QStyle::State_Selected) { painter->fillRect(option.rect, option.palette.highlight()); } QPen pen = index.data(Qt::DecorationRole).value<QPen>(); paintItem(painter, pen, option.rect, option); } QSize KPropertyLineStyleItemDelegate::sizeHint(const QStyleOptionViewItem &option, const QModelIndex &index) const { Q_UNUSED(option); Q_UNUSED(index); return QSize(100, 15); }
utf-8
1
LGPL-2+
1998-1999, Reginald Stadlbauer <reggie@kde.org> 1998-1999, Torben Weis <weis@kde.org> 2001, David Faure <faure@kde.org> 2004, Alexander Dymo <cloudtemple@mskat.net> 2004-2005, Cedric Pasteur <cedric.pasteur@free.fr> 2004, Nicolas GOUTTE <goutte@kde.org> 2004-2017, Jarosław Staniek <staniek@kde.org> 2007, Jan Hambrecht <jaham@gmx.net> 2010, Adam Pigg <adam@piggz.co.uk> 2010, Thomas Zander <zander@kde.org> 2012, Friedrich W. H. Kossebau <kossebau@kde.org> 2015, Laurent Montel <montel@kde.org>
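A hypothetical wiring sketch for the delegate above: its paint() reads a QPen from Qt::DecorationRole, so a combo box model is populated accordingly. The setup function name is invented, and the included header is private to KProperty.

#include <QComboBox>
#include <QStandardItemModel>
#include <QPen>
#include <QVariant>
#include "KPropertyLineStyleItemDelegate_p.h"

void setupLineStyleCombo(QComboBox* combo) {
    auto* model = new QStandardItemModel(combo);
    for (int s = Qt::NoPen; s <= Qt::DashDotDotLine; ++s) {
        auto* item = new QStandardItem;
        // paint() expects the pen under Qt::DecorationRole.
        item->setData(QVariant::fromValue(QPen(static_cast<Qt::PenStyle>(s))),
                      Qt::DecorationRole);
        model->appendRow(item);
    }
    combo->setModel(model);
    combo->setItemDelegate(new KPropertyLineStyleItemDelegate(combo));
}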
libreoffice-7.3.1~rc1/drawinglayer/source/primitive2d/pointarrayprimitive2d.cxx
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* * This file is part of the LibreOffice project. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * This file incorporates work covered by the following license notice: * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright * ownership. The ASF licenses this file to you under the Apache * License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.apache.org/licenses/LICENSE-2.0 . */ #include <drawinglayer/primitive2d/pointarrayprimitive2d.hxx> #include <drawinglayer/primitive2d/drawinglayer_primitivetypes2d.hxx> using namespace com::sun::star; namespace drawinglayer::primitive2d { PointArrayPrimitive2D::PointArrayPrimitive2D( std::vector< basegfx::B2DPoint >&& rPositions, const basegfx::BColor& rRGBColor) : maPositions(std::move(rPositions)), maRGBColor(rRGBColor) { } bool PointArrayPrimitive2D::operator==(const BasePrimitive2D& rPrimitive) const { if(BasePrimitive2D::operator==(rPrimitive)) { const PointArrayPrimitive2D& rCompare = static_cast<const PointArrayPrimitive2D&>(rPrimitive); return (getPositions() == rCompare.getPositions() && getRGBColor() == rCompare.getRGBColor()); } return false; } basegfx::B2DRange PointArrayPrimitive2D::getB2DRange(const geometry::ViewInformation2D& /*rViewInformation*/) const { if(maB2DRange.isEmpty()) { basegfx::B2DRange aNewRange; // get the basic range from the position vector for (auto const& pos : getPositions()) { aNewRange.expand(pos); } // assign to buffered value const_cast< PointArrayPrimitive2D* >(this)->maB2DRange = aNewRange; } return maB2DRange; } // provide unique ID sal_uInt32 PointArrayPrimitive2D::getPrimitive2DID() const { return PRIMITIVE2D_ID_POINTARRAYPRIMITIVE2D; } } // end of namespace /* vim:set shiftwidth=4 softtabstop=4 expandtab: */
utf-8
1
MPL-2.0
Copyright 2000, 2010 Oracle and/or its affiliates. Copyright (c) 2000, 2010 LibreOffice contributors and/or their affiliates.
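getB2DRange() above buffers its bounding range on first use, writing through const_cast; below is a generic sketch of the same compute-once pattern, using mutable instead (all names hypothetical, and the point vector is assumed non-empty).

#include <algorithm>
#include <optional>
#include <vector>

struct Point { double x, y; };
struct Range { double minX, minY, maxX, maxY; };

class PointArray {
public:
    explicit PointArray(std::vector<Point> p) : points_(std::move(p)) {}

    // First call expands over all points; later calls return the buffer.
    const Range& bounds() const {
        if (!cached_) {
            Range r{points_.front().x, points_.front().y,
                    points_.front().x, points_.front().y};  // assumes non-empty
            for (const Point& p : points_) {
                r.minX = std::min(r.minX, p.x); r.maxX = std::max(r.maxX, p.x);
                r.minY = std::min(r.minY, p.y); r.maxY = std::max(r.maxY, p.y);
            }
            cached_ = r;
        }
        return *cached_;
    }

private:
    std::vector<Point> points_;
    mutable std::optional<Range> cached_;  // lazily filled bounding range
};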
wireshark-3.6.2/epan/dissectors/packet-ipsec-tcp.c
/* * Routines for the disassembly of the proprietary Cisco IPSEC in * TCP encapsulation protocol * * Copyright 2007 Joerg Mayer (see AUTHORS file) * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * SPDX-License-Identifier: GPL-2.0-or-later */ /* TODO: * - Find out the meaning of the (unknown) trailer * - UDP checksum is wrong * - Currently doesn't handle AH (lack of sample trace) */ #include "config.h" #include <epan/packet.h> #include "packet-ndmp.h" void proto_register_tcpencap(void); void proto_reg_handoff_tcpencap(void); static int hf_tcpencap_unknown = -1; static int hf_tcpencap_zero = -1; static int hf_tcpencap_seq = -1; static int hf_tcpencap_ike_direction = -1; static int hf_tcpencap_esp_zero = -1; static int hf_tcpencap_magic = -1; static int hf_tcpencap_proto = -1; static int hf_tcpencap_magic2 = -1; static int proto_tcpencap = -1; static gint ett_tcpencap = -1; static gint ett_tcpencap_unknown = -1; static const value_string tcpencap_ikedir_vals[] = { { 0x0000, "Server to client" }, { 0x4000, "Client to server" }, { 0, NULL } }; static const value_string tcpencap_proto_vals[] = { { 0x11, "ISAKMP" }, { 0x32, "ESP" }, { 0, NULL } }; #define TRAILERLENGTH 16 #define TCP_CISCO_IPSEC 10000 static dissector_handle_t esp_handle; static dissector_handle_t udp_handle; #define TCP_ENCAP_P_ESP 1 #define TCP_ENCAP_P_UDP 2 static int packet_is_tcpencap(tvbuff_t *tvb, packet_info *pinfo, guint32 offset) { if ( /* Must be zero */ tvb_get_ntohl(tvb, offset + 0) != 0 || /* Lower 12 bits must be zero */ (tvb_get_ntohs(tvb, offset + 6) & 0xfff) != 0 || /* Protocol must be UDP or ESP */ (tvb_get_guint8(tvb, offset + 13) != 17 && tvb_get_guint8(tvb, offset + 13) != 50) ) { return FALSE; } if(check_if_ndmp(tvb, pinfo)){ return FALSE; } return TRUE; } /* * TCP Encapsulation of IPsec Packets * as supported by the cisco vpn3000 concentrator series */ static int dissect_tcpencap(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_) { proto_tree *tcpencap_tree = NULL; proto_tree *tcpencap_unknown_tree = NULL; proto_item *tree_item = NULL; proto_item *unknown_item = NULL; tvbuff_t *next_tvb; guint32 reported_length = tvb_reported_length(tvb); guint32 offset; guint8 protocol; col_set_str(pinfo->cinfo, COL_PROTOCOL, "TCPENCAP"); col_clear(pinfo->cinfo, COL_INFO); /* If the first 4 bytes are 0x01f401f4 (udp src and dst port = 500) we most likely have UDP (isakmp) traffic */ if (tvb_get_ntohl(tvb, 0) == 0x01f401f4) { /* UDP means ISAKMP */ protocol = TCP_ENCAP_P_UDP; } else { /* Hopefully ESP */ protocol = TCP_ENCAP_P_ESP; } if (tree) { tree_item = proto_tree_add_item(tree, proto_tcpencap, tvb, 0, -1, ENC_NA); tcpencap_tree = proto_item_add_subtree(tree_item, ett_tcpencap); /* Dissect the trailer following the encapsulated IPSEC/ISAKMP packet */ offset = reported_length - TRAILERLENGTH; unknown_item = proto_tree_add_item(tcpencap_tree, hf_tcpencap_unknown, tvb, offset, TRAILERLENGTH, ENC_NA); /* Try to guess the contents of the trailer */ tcpencap_unknown_tree = proto_item_add_subtree(unknown_item, ett_tcpencap_unknown); proto_tree_add_item(tcpencap_unknown_tree, hf_tcpencap_zero, tvb, offset + 0, 4, ENC_NA); proto_tree_add_item(tcpencap_unknown_tree, hf_tcpencap_seq, tvb, offset + 4, 2, ENC_BIG_ENDIAN); if (protocol == TCP_ENCAP_P_UDP) { proto_tree_add_item(tcpencap_unknown_tree, hf_tcpencap_ike_direction, tvb, offset + 6, 2, ENC_BIG_ENDIAN); } else { proto_tree_add_item(tcpencap_unknown_tree, hf_tcpencap_esp_zero, tvb, offset + 
6, 2, ENC_BIG_ENDIAN); } proto_tree_add_item(tcpencap_unknown_tree, hf_tcpencap_magic, tvb, offset + 8, 5, ENC_NA); proto_tree_add_item(tcpencap_unknown_tree, hf_tcpencap_proto, tvb, offset + 13, 1, ENC_BIG_ENDIAN); proto_tree_add_item(tcpencap_unknown_tree, hf_tcpencap_magic2, tvb, offset + 14, 2, ENC_NA); } /* Create the tvbuffer for the next dissector */ next_tvb = tvb_new_subset_length_caplen(tvb, 0, reported_length - TRAILERLENGTH , -1); if (protocol == TCP_ENCAP_P_UDP) { call_dissector(udp_handle, next_tvb, pinfo, tree); } else { /* Hopefully ESP */ call_dissector(esp_handle, next_tvb, pinfo, tree); } return tvb_captured_length(tvb); } static gboolean dissect_tcpencap_heur(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data) { guint32 reported_length = tvb_reported_length(tvb); guint32 captured_length = tvb_captured_length(tvb); if (reported_length <= TRAILERLENGTH + 8 || /* Ensure we have enough bytes for packet_is_tcpencap analysis */ (reported_length - captured_length) > (TRAILERLENGTH - 13) || !packet_is_tcpencap(tvb, pinfo, reported_length - TRAILERLENGTH) ) { return FALSE; } dissect_tcpencap(tvb, pinfo, tree, data); return TRUE; } void proto_register_tcpencap(void) { static hf_register_info hf[] = { { &hf_tcpencap_unknown, { "Unknown trailer", "tcpencap.unknown", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }}, { &hf_tcpencap_zero, { "All zero", "tcpencap.zero", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }}, { &hf_tcpencap_seq, { "Sequence number", "tcpencap.seq", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, { &hf_tcpencap_esp_zero, { "ESP zero", "tcpencap.espzero", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }}, { &hf_tcpencap_ike_direction, { "ISAKMP traffic direction", "tcpencap.ikedirection", FT_UINT16, BASE_HEX, VALS(tcpencap_ikedir_vals), 0x0, NULL, HFILL }}, { &hf_tcpencap_magic, { "Magic number", "tcpencap.magic", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }}, { &hf_tcpencap_proto, { "Protocol", "tcpencap.proto", FT_UINT8, BASE_HEX, VALS(tcpencap_proto_vals), 0x0, NULL, HFILL }}, { &hf_tcpencap_magic2, { "Magic 2", "tcpencap.magic2", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }}, }; static gint *ett[] = { &ett_tcpencap, &ett_tcpencap_unknown, }; proto_tcpencap = proto_register_protocol("TCP Encapsulation of IPsec Packets", "TCPENCAP", "tcpencap"); proto_register_field_array(proto_tcpencap, hf, array_length(hf)); proto_register_subtree_array(ett, array_length(ett)); } void proto_reg_handoff_tcpencap(void) { dissector_handle_t tcpencap_handle; tcpencap_handle = create_dissector_handle(dissect_tcpencap, proto_tcpencap); esp_handle = find_dissector_add_dependency("esp", proto_tcpencap); udp_handle = find_dissector_add_dependency("udp", proto_tcpencap); heur_dissector_add("tcp", dissect_tcpencap_heur, "TCP Encapsulation of IPsec Packets", "ipsec_tcp", proto_tcpencap, HEURISTIC_ENABLE); /* Register TCP port for dissection */ dissector_add_for_decode_as_with_preference("tcp.port", tcpencap_handle); } /* * Editor modelines - https://www.wireshark.org/tools/modelines.html * * Local variables: * c-basic-offset: 8 * tab-width: 8 * indent-tabs-mode: t * End: * * vi: set shiftwidth=8 tabstop=8 noexpandtab: * :indentSize=8:tabSize=8:noTabs=false: */
utf-8
1
GPL-2+
Gerald Combs <gerald@wireshark.org> and contributors
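The heuristic in packet_is_tcpencap() boils down to three byte-level tests on the 16-byte trailer; here is a standalone sketch over a raw buffer, with no Wireshark APIs (function names invented):

#include <cstdint>

static std::uint32_t be32(const std::uint8_t* p) {
    return (std::uint32_t(p[0]) << 24) | (std::uint32_t(p[1]) << 16) |
           (std::uint32_t(p[2]) << 8)  |  std::uint32_t(p[3]);
}

static std::uint16_t be16(const std::uint8_t* p) {
    return std::uint16_t((p[0] << 8) | p[1]);
}

// t points at the 16-byte trailer following the encapsulated packet.
bool looks_like_tcpencap_trailer(const std::uint8_t* t) {
    if (be32(t + 0) != 0) return false;             // first 4 bytes all zero
    if ((be16(t + 6) & 0x0fff) != 0) return false;  // lower 12 bits zero
    return t[13] == 17 || t[13] == 50;              // UDP (ISAKMP) or ESP
}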
qepcad-1.74+ds/extensions/sfext/pcadst/CCADCON.c
/*====================================================================== CCADCON(n,P,C;Ps,Cs) Coarse CAD construction. Inputs n : The dimension of the CAD. P : The qepcad data structure for the projection factors. C : The partial CAD. Outputs Ps : The qepcad data structure for the pruned projection factors. Cs : The pruned partial CAD. ======================================================================*/ #include "qepcad.h" #include "coarsecad.h" void CCADCON(Word n, Word P, Word C, Word *Ps_, Word *Cs_) { Word dummy; CCADCONEXT(n,P,C,Ps_,Cs_,&dummy); }
utf-8
1
ISC
1990, 2008 Hoon Hong & Chris Brown <wcbrown@usna.edu>
liboqs-0.7.1/src/kem/classic_mceliece/pqclean_mceliece348864f_avx/vec.c
#include "vec.h" #include "params.h" extern void PQCLEAN_MCELIECE348864F_AVX_vec_mul_asm(uint64_t *, const uint64_t *, const uint64_t *); extern void PQCLEAN_MCELIECE348864F_AVX_vec_mul_sp_asm(uint64_t *, const uint64_t *, const uint64_t *); void PQCLEAN_MCELIECE348864F_AVX_vec_mul(uint64_t *h, const uint64_t *f, const uint64_t *g) { PQCLEAN_MCELIECE348864F_AVX_vec_mul_asm(h, f, g); } void PQCLEAN_MCELIECE348864F_AVX_vec_mul_sp(uint64_t *h, const uint64_t *f, const uint64_t *g) { PQCLEAN_MCELIECE348864F_AVX_vec_mul_sp_asm(h, f, g); } void PQCLEAN_MCELIECE348864F_AVX_vec_add(uint64_t *h, const uint64_t *f, const uint64_t *g) { int b; for (b = 0; b < GFBITS; b++) { h[b] = f[b] ^ g[b]; } }
utf-8
1
Expat
2016-2021, Open Quantum Safe project
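PQCLEAN_MCELIECE348864F_AVX_vec_add above is bitsliced arithmetic: addition in GF(2) is XOR, applied word-wise across the bit-planes. A self-contained sketch follows; kGfBits = 12 is taken from the mceliece348864 parameter set and should be treated as an assumption.

#include <cstdint>

constexpr int kGfBits = 12;  // bits per field element in this parameter set

// h = f + g over GF(2^m), elements stored bitsliced: word b holds the b-th
// bit of 64 field elements, so one XOR adds 64 elements at a time.
void vec_add(std::uint64_t* h, const std::uint64_t* f, const std::uint64_t* g) {
    for (int b = 0; b < kGfBits; ++b)
        h[b] = f[b] ^ g[b];
}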
llvm-toolchain-12-12.0.1/libcxx/src/thread.cpp
//===------------------------- thread.cpp----------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "__config" #ifndef _LIBCPP_HAS_NO_THREADS #include "thread" #include "exception" #include "vector" #include "future" #include "limits" #if __has_include(<unistd.h>) # include <unistd.h> // for sysconf #endif #if defined(__NetBSD__) #pragma weak pthread_create // Do not create libpthread dependency #endif #if defined(_LIBCPP_WIN32API) #include <windows.h> #endif #if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB) #pragma comment(lib, "pthread") #endif _LIBCPP_BEGIN_NAMESPACE_STD thread::~thread() { if (!__libcpp_thread_isnull(&__t_)) terminate(); } void thread::join() { int ec = EINVAL; if (!__libcpp_thread_isnull(&__t_)) { ec = __libcpp_thread_join(&__t_); if (ec == 0) __t_ = _LIBCPP_NULL_THREAD; } if (ec) __throw_system_error(ec, "thread::join failed"); } void thread::detach() { int ec = EINVAL; if (!__libcpp_thread_isnull(&__t_)) { ec = __libcpp_thread_detach(&__t_); if (ec == 0) __t_ = _LIBCPP_NULL_THREAD; } if (ec) __throw_system_error(ec, "thread::detach failed"); } unsigned thread::hardware_concurrency() _NOEXCEPT { #if defined(_SC_NPROCESSORS_ONLN) long result = sysconf(_SC_NPROCESSORS_ONLN); // sysconf returns -1 if the name is invalid, the option does not exist or // does not have a definite limit. // if sysconf returns some other negative number, we have no idea // what is going on. Default to something safe. if (result < 0) return 0; return static_cast<unsigned>(result); #elif defined(_LIBCPP_WIN32API) SYSTEM_INFO info; GetSystemInfo(&info); return info.dwNumberOfProcessors; #else // defined(CTL_HW) && defined(HW_NCPU) // TODO: grovel through /proc or check cpuid on x86 and similar // instructions on other architectures. 
# if defined(_LIBCPP_WARNING) _LIBCPP_WARNING("hardware_concurrency not yet implemented") # else # warning hardware_concurrency not yet implemented # endif return 0; // Means not computable [thread.thread.static] #endif // defined(CTL_HW) && defined(HW_NCPU) } namespace this_thread { void sleep_for(const chrono::nanoseconds& ns) { if (ns > chrono::nanoseconds::zero()) { __libcpp_thread_sleep_for(ns); } } } // this_thread __thread_specific_ptr<__thread_struct>& __thread_local_data() { static __thread_specific_ptr<__thread_struct> __p; return __p; } // __thread_struct_imp template <class T> class _LIBCPP_HIDDEN __hidden_allocator { public: typedef T value_type; T* allocate(size_t __n) {return static_cast<T*>(::operator new(__n * sizeof(T)));} void deallocate(T* __p, size_t) {::operator delete(static_cast<void*>(__p));} size_t max_size() const {return size_t(~0) / sizeof(T);} }; class _LIBCPP_HIDDEN __thread_struct_imp { typedef vector<__assoc_sub_state*, __hidden_allocator<__assoc_sub_state*> > _AsyncStates; typedef vector<pair<condition_variable*, mutex*>, __hidden_allocator<pair<condition_variable*, mutex*> > > _Notify; _AsyncStates async_states_; _Notify notify_; __thread_struct_imp(const __thread_struct_imp&); __thread_struct_imp& operator=(const __thread_struct_imp&); public: __thread_struct_imp() {} ~__thread_struct_imp(); void notify_all_at_thread_exit(condition_variable* cv, mutex* m); void __make_ready_at_thread_exit(__assoc_sub_state* __s); }; __thread_struct_imp::~__thread_struct_imp() { for (_Notify::iterator i = notify_.begin(), e = notify_.end(); i != e; ++i) { i->second->unlock(); i->first->notify_all(); } for (_AsyncStates::iterator i = async_states_.begin(), e = async_states_.end(); i != e; ++i) { (*i)->__make_ready(); (*i)->__release_shared(); } } void __thread_struct_imp::notify_all_at_thread_exit(condition_variable* cv, mutex* m) { notify_.push_back(pair<condition_variable*, mutex*>(cv, m)); } void __thread_struct_imp::__make_ready_at_thread_exit(__assoc_sub_state* __s) { async_states_.push_back(__s); __s->__add_shared(); } // __thread_struct __thread_struct::__thread_struct() : __p_(new __thread_struct_imp) { } __thread_struct::~__thread_struct() { delete __p_; } void __thread_struct::notify_all_at_thread_exit(condition_variable* cv, mutex* m) { __p_->notify_all_at_thread_exit(cv, m); } void __thread_struct::__make_ready_at_thread_exit(__assoc_sub_state* __s) { __p_->__make_ready_at_thread_exit(__s); } _LIBCPP_END_NAMESPACE_STD #endif // !_LIBCPP_HAS_NO_THREADS
utf-8
1
unknown
unknown
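Caller-side usage for the hardware_concurrency() implementation shown above; per [thread.thread.static], a return of 0 means the value is not computable, so portable code supplies a fallback.

#include <thread>
#include <cstdio>

int main() {
    unsigned n = std::thread::hardware_concurrency();
    if (n == 0) n = 1;  // not computable on this platform/build
    std::printf("using %u worker thread(s)\n", n);
    return 0;
}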
codelite-14.0+dfsg/wxcrafter/myxh_searchctrl.h
///////////////////////////////////////////////////////////////////////////// // Name: wx/xrc/xh_srchctl.h // Purpose: XRC resource handler for wxSearchCtrl // Author: Sander Berents // Created: 2007/07/12 // RCS-ID: $Id: xh_srchctrl.h 48140 2007-08-16 21:10:14Z VZ $ // Copyright: (c) 2007 Sander Berents // Licence: wxWindows licence ///////////////////////////////////////////////////////////////////////////// #ifndef _WX_MYXH_SRCH_H_ #define _WX_MYXH_SRCH_H_ #include <wx/xrc/xmlres.h> #if wxUSE_XRC && wxUSE_SEARCHCTRL class MyWxSearchCtrlXmlHandler : public wxXmlResourceHandler { public: MyWxSearchCtrlXmlHandler(); virtual wxObject* DoCreateResource(); virtual bool CanHandle(wxXmlNode* node); }; #endif // wxUSE_XRC && wxUSE_SEARCHCTRL #endif // _WX_XH_SRCH_H_
utf-8
1
CodeLite
2007-2014 Eran Ifrah <eran.ifrah@gmail.com> 2011-2014 David Hart <david@codelite.co.uk> 2014-2016 The CodeLite Team
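A sketch of how a custom handler like the one above is typically registered with XRC before loading resources; it assumes a wxWidgets build with wxUSE_XRC and wxUSE_SEARCHCTRL enabled, and the registration function name is invented.

#include <wx/xrc/xmlres.h>
#include "myxh_searchctrl.h"

void RegisterCustomXrcHandlers() {
    wxXmlResource::Get()->InitAllHandlers();
    // XRC takes ownership of the handler object.
    wxXmlResource::Get()->AddHandler(new MyWxSearchCtrlXmlHandler());
}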
zimg-3.0.3+ds1/src/zimg/depth/arm/dither_arm.h
#pragma once #ifdef ZIMG_ARM #ifndef ZIMG_DEPTH_ARM_DITHER_ARM_H_ #define ZIMG_DEPTH_ARM_DITHER_ARM_H_ #include <memory> #include "depth/dither.h" namespace zimg { namespace graph { class ImageFilter; } // namespace graph namespace depth { #define DECLARE_ORDERED_DITHER(x, cpu) \ void ordered_dither_##x##_##cpu(const float *dither, unsigned dither_offset, unsigned dither_mask, \ const void *src, void *dst, float scale, float offset, unsigned bits, unsigned left, unsigned right) DECLARE_ORDERED_DITHER(b2b, neon); DECLARE_ORDERED_DITHER(b2w, neon); DECLARE_ORDERED_DITHER(w2b, neon); DECLARE_ORDERED_DITHER(w2w, neon); DECLARE_ORDERED_DITHER(h2b, neon); DECLARE_ORDERED_DITHER(h2w, neon); DECLARE_ORDERED_DITHER(f2b, neon); DECLARE_ORDERED_DITHER(f2w, neon); #undef DECLARE_ORDERED_DITHER dither_convert_func select_ordered_dither_func_arm(const PixelFormat &pixel_in, const PixelFormat &pixel_out, CPUClass cpu); dither_f16c_func select_dither_f16c_func_arm(CPUClass cpu); bool needs_dither_f16c_func_arm(CPUClass cpu); } // namespace depth } // namespace zimg #endif // ZIMG_DEPTH_ARM_DITHER_ARM_H_ #endif // ZIMG_ARM
utf-8
1
WTFPL-2
2014-2020, sekrit-twc
scipy-1.7.3/scipy/_lib/boost/boost/smart_ptr/owner_equal_to.hpp
#ifndef BOOST_SMART_PTR_OWNER_EQUAL_TO_HPP_INCLUDED #define BOOST_SMART_PTR_OWNER_EQUAL_TO_HPP_INCLUDED // Copyright 2020 Peter Dimov // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #include <boost/config.hpp> namespace boost { template<class T = void> struct owner_equal_to { typedef bool result_type; typedef T first_argument_type; typedef T second_argument_type; template<class U, class V> bool operator()( U const & u, V const & v ) const BOOST_NOEXCEPT { return u.owner_equals( v ); } }; } // namespace boost #endif // #ifndef BOOST_SMART_PTR_OWNER_EQUAL_TO_HPP_INCLUDED
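/* Usage sketch (an added illustration, not part of the Boost sources): since the call
 * operator forwards to the pointers' owner_equals() members, owner_equal_to can compare
 * shared_ptr/weak_ptr by ownership, e.g. as the equality predicate of an unordered
 * container together with an ownership-based hash (boost::owner_hash is assumed here):
 *
 *   std::unordered_set< boost::weak_ptr<int>,
 *                       boost::owner_hash< boost::weak_ptr<int> >,
 *                       boost::owner_equal_to< boost::weak_ptr<int> > > s;
 */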
utf-8
1
BSD-3-clause
2003-2019 SciPy Developers 1999-2005 Travis Oliphant 2001-2002 Enthought, Inc. 2002 Eric Jones 2002 Patrick J. Miller 2002-2003 Jochen Kuepper 2002-2004 Pearu Peterson 2002-2005 Jean-Sebastien Roy 2003-2005 Peter J. Verveer 2003-2006 Ed Schofield 2003-2012 SciPy Developers. 2004 David M. Cooke 2006 Bart Vandereycken 2006 BasSw 2006 Johannes Loehnert 2007 Andrew D Straw 2007 John Travers, Robert Hetland 2007-2008 Damian Eads 2008 Tiziano Zito Gary Strangman 2010 Pauli Virtanen 2010, 2011 Pim Schellart 2009 Yosef Meller
pcl-1.12.1+dfsg/apps/in_hand_scanner/include/pcl/apps/in_hand_scanner/mesh_processing.h
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2009-2012, Willow Garage, Inc. * Copyright (c) 2012-, Open Perception, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the copyright holder(s) nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $Id$ * */ #pragma once #include <pcl/apps/in_hand_scanner/common_types.h> namespace pcl { namespace ihs { /** \brief Contains methods that take advantage of the connectivity information in the mesh. * \author Martin Saelzle * \ingroup apps */ class MeshProcessing { public: using Mesh = pcl::ihs::Mesh; using HalfEdgeIndices = Mesh::HalfEdgeIndices; static_assert (Mesh::IsManifold::value, "MeshProcessing currently works only on the manifold mesh."); /** \brief Constructor. */ MeshProcessing (); /** \brief Inserts triangles into jagged boundaries, removes isolated triangles and closes triangular holes. * \param[in,out] mesh The mesh which should be processed. * \param[in] boundary_collection Collection of boundary half-edges. * \param[in] cleanup Calls mesh.cleanup () if true. */ void processBoundary (Mesh& mesh, const std::vector <HalfEdgeIndices>& boundary_collection, const bool cleanup=true) const; }; } // End namespace ihs } // End namespace pcl
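/* Usage sketch (an added illustration, not from the PCL sources): given a mesh and the
 * boundary loops gathered elsewhere in the in-hand scanner pipeline, the call is simply
 *
 *   pcl::ihs::MeshProcessing mesh_processing;
 *   mesh_processing.processBoundary (mesh, boundary_collection); // cleanup defaults to true
 */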
utf-8
1
BSD-3-clause
2001, Addison-Wesley 2012, Aitor Aldoma, Federico Tombari 2011, Alexandru-Eugen Ichim 2001, Andrei Alexandrescu 2007-2012, Ares Lagae 2008, Ben Gurion University of the Negev, Beer Sheva, Israel 1999-2007, Brian Paul 2014, Centrum Wiskunde Informatica 2000-2012, Chih-Chung Chang and Chih-Jen Lin 2011, Dirk Holz, University of Bonn 2018, Fizyr BV. - https://fizyr.com 2006, Frederic Heem <frederic.heem@telsey.it> 2010, Gael Guennebaud <gael.guennebaud@inria.fr> 2011, Geoffrey Biggs 2015, Google, Inc 2009, Hauke Heibel <hauke.heibel@gmail.com> 2000-2008, Intel Corporation 2013, Intelligent Robotics Lab, DLUT 2012, Jeremie Papon 2012, Jochen Sprickerhof 2012, KU Leuven 1993-2008, Ken Martin, Will Schroeder, Bill Lorensen 2012, Keven Ring 1997-2002, Makoto Matsumoto and Takuji Nishimura 2003-2010, Mark Borgerding 2013, Martin Szarski 2015, Michael 'v4hn' Goerner 2009-2010, NVIDIA Corporation 2012-2021, Open Perception, Inc 2014, RadiantBlue Technologies, Inc 2011 Suat Gedikli <gedikli@willowgarage.com> 2004, Sylvain Paris and Francois Sillion 2012, Texas A&M University 2011, The Autonomous Systems Lab (ASL), ETH Zurich, Stefan Leutenegger, Simon Lynen and Margarita Chli 2011-2015, The MITRE Corporation 2009-2012, Urban Robotics, Inc 2016, Voxar Labs, CIn-UFPE / DEINFO-UFRPE 2009-2014, Willow Garage, Inc 2012, Yani Ioannou <yani.ioannou@gmail.com> 2001, softSurfer (www.softsurfer.com) 2011, wwww.pointclouds.org 2020, ysuzuki19
cppad-2021.00.00.8/test_more/general/log10.cpp
/* -------------------------------------------------------------------------- CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-17 Bradley M. Bell CppAD is distributed under the terms of the Eclipse Public License Version 2.0. This Source Code may also be made available under the following Secondary License when the conditions for such availability set forth in the Eclipse Public License, Version 2.0 are satisfied: GNU General Public License, Version 2.0 or later. ---------------------------------------------------------------------------- */ /* Old example now used just for validation testing. */ # include <cppad/cppad.hpp> bool log10(void) { bool ok = true; using CppAD::log10; using CppAD::log; using namespace CppAD; double eps99 = 99.0 * std::numeric_limits<double>::epsilon(); // independent variable vector, indices, values, and declaration CPPAD_TESTVECTOR(AD<double>) U(1); size_t s = 0; U[s] = 10.; Independent(U); // dependent variable vector, indices, and values CPPAD_TESTVECTOR(AD<double>) Z(2); size_t x = 0; size_t y = 1; Z[x] = log10(U[s]); Z[y] = log10(Z[x]); // define f : U -> Z and vectors for derivative calculations ADFun<double> f(U, Z); CPPAD_TESTVECTOR(double) v( f.Domain() ); CPPAD_TESTVECTOR(double) w( f.Range() ); // check values ok &= NearEqual(Z[x] , 1., eps99 , eps99); ok &= NearEqual(Z[y] , 0., eps99 , eps99); // forward computation of partials w.r.t. s double l10 = log(10.); v[s] = 1.; w = f.Forward(1, v); ok &= NearEqual(w[x], 1./(U[s]*l10) , eps99 , eps99); // dx/ds ok &= NearEqual(w[y], 1./(U[s]*Z[x]*l10*l10), eps99 , eps99); // dy/ds // reverse computation of partials of y w[x] = 0.; w[y] = 1.; v = f.Reverse(1,w); ok &= NearEqual(v[s], 1./(U[s]*Z[x]*l10*l10), eps99 , eps99); // dy/ds return ok; }
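/* Added check of the hand-coded derivative targets above (a sketch, not part of the
 * CppAD test): with u = U[s] = 10, x = log10(u) = ln(u)/ln(10) and y = log10(x),
 *   dx/du = 1/(u*ln(10)),
 *   dy/du = (dy/dx)*(dx/du) = 1/(x*ln(10)) * 1/(u*ln(10)) = 1/(u*x*ln(10)^2),
 * which are exactly the values compared by NearEqual in both Forward and Reverse mode. */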
utf-8
1
GPL-3.0_or_EPL-1.0
2003-14 Bradley M. Bell <bradbell@seanet.com>
plink-1.07+dfsg/epi.cpp
////////////////////////////////////////////////////////////////// // // // PLINK (c) 2005-2006 Shaun Purcell // // // // This file is distributed under the GNU General Public // // License, Version 2. Please see the file COPYING for more // // details // // // ////////////////////////////////////////////////////////////////// #include <iostream> #include <fstream> #include <sstream> #include <iomanip> #include <cmath> #include <algorithm> #include <map> #include "plink.h" #include "options.h" #include "helper.h" #include "crandom.h" #include "linear.h" #include "logistic.h" #include "stats.h" extern ofstream LOG; using namespace std; //////////////////////////////////////// // Epistasis tests (no permutation) void Plink::calcEpistasis() { /////////////////////////////////////////// // SNP major mode or individual major mode? if (par::fast_epistasis) { if ( ! par::SNP_major ) Ind2SNP(); } else { if ( par::SNP_major ) SNP2Ind(); } ////////////////////////////////////////////// // Set up results files ofstream EPI; string f = par::output_file_name; if (par::qt) f += ".epi.qt"; else { if (par::epi_caseonly) f += ".epi.co"; else f += ".epi.cc"; } EPI.open(f.c_str(),ios::out); printLOG("Writing epistasis pairwise results to [ " + f + " ] \n"); EPI.precision(4); if ( !par::epi_quickscan ) { EPI << setw(4) << "CHR1" << " " << setw(par::pp_maxsnp) << "SNP1" << " " << setw(4) << "CHR2" << " " << setw(par::pp_maxsnp) << "SNP2" << " "; if (!par::fast_epistasis) { if (par::bt) EPI << setw(12) << "OR_INT" << " "; else EPI << setw(12) << "BETA_INT" << " "; } EPI << setw(12) << "STAT" << " " << setw(12) << "P" << " " << "\n"; } else EPI << setw(4) << "CHR1" << " " << setw(par::pp_maxsnp) << "SNP1" << " " << setw(4) << "CHR2" << " " << setw(par::pp_maxsnp) << "SNP2" << " " << setw(12) << "CHISQ" << " " << "\n"; //////////////////////////////////////////////////////////////////// // epi1 and epi2 thresholds were given in terms of 0.01 (two-sided) // calculate appropriate absolute Z scores printLOG("Threshold for displaying epistatic result (--epi1) : p <= "+dbl2str(par::epi_alpha1)+"\n"); printLOG("Threshold for counting epistatic result (--epi2) : p <= "+dbl2str(par::epi_alpha2)+"\n"); par::epi_alpha1 = fabs(ltqnorm(par::epi_alpha1 / 2)); par::epi_alpha2 = fabs(ltqnorm(par::epi_alpha2 / 2)); // Fast epistasis: case-only or case/control // Regression based test: case/control or quantitative trait // Take a list of SNPs, or all SNPs (vector<bool> epi1) // Test these against either themselves, or all SNPs (vector<bool> epi2) // A B // ALL x ALL skip e1>e2 // SET1 x ALL // SET1 x SET1 skip e1>e2 // SET1 x SET2 bool skip_symm = false; // Only output epistatic tests that have p < par::epi_alpha1; // Do not even attempt to save any epistatic results -- go straight to STDOUT // Also present summary results for all epi1 SNPs // (i.e. average / proportion of significant epistatic tests // at a certain alpha level, par::epi_alpha2) vector<bool> sA(nl_all,false); vector<bool> sB(nl_all,false); // Are we using a test set? If so, construct now if (par::set_test) { if (snpset.size()>2) error("Can only specify one or two SETs when testing for epistasis"); if (snpset.size()==0) error("There are no valid sets specified"); for (int e=0;e<snpset[0].size();e++) sA[snpset[0][e]] = true; // Has a second set been specified? if (snpset.size()==2) { printLOG("SET1 x SET2 epistasis mode\n"); for (int e=0;e<snpset[1].size();e++) sB[snpset[1][e]] = true; } else if (par::set_by_set) // Otherwise, has SET x SET flag been given?
{ printLOG("SET1 x SET1 epistasis mode\n"); skip_symm = true; for (int e=0;e<snpset[0].size();e++) sB[snpset[0][e]] = true; } else // All SNPs in second set { printLOG("SET1 x ALL epistasis mode\n"); for (int e=0;e<nl_all;e++) sB[e] = true; } } else { printLOG("ALL x ALL epistasis mode\n"); skip_symm = true; for (int e=0;e<nl_all;e++) { sA[e] = true; sB[e] = true; } } // Use fast aff coding if (par::bt) affCoding(*this); // Count how many items in the SET1 int epc = 0; for (vector<bool>::iterator e1 = sA.begin(); e1 != sA.end(); e1++) if (*e1) epc++; int epcc = 0; // Keep track of how many epistatic tests actually performed long int nepi = 0; vector<int> summary_sig(nl_all,0); vector<int> summary_good(nl_all,0); vector<double> best_score(nl_all,0); vector<int> best_partner(nl_all); ////////////////////////////////////////// // Begin iterating over pairs : SET x SET for (int e1=0;e1<nl_all;e1++) { if (sA[e1]) { if (!par::silent) { cout << "Performing tests of epistasis: group " << ++epcc << " of " << epc << " \r"; cout.flush(); } for (int e2=0;e2<nl_all;e2++) { /////////////////////////////////////////// // Skip this test under certain conditions // The SNP not in the set if (!sB[e2]) { cout << "skipping...\n"; continue; } // We've already performed this test if (e1>=e2 && skip_symm) continue; // Same SNP if (e1==e2) continue; // Skip X chromosome for now if (par::chr_sex[locus[e1]->chr] || par::chr_sex[locus[e2]->chr] || par::chr_haploid[locus[e1]->chr] || par::chr_haploid[locus[e2]->chr]) continue; // SNPs too close (case-only analysis) if (par::epi_caseonly) if ( locus[e1]->chr == locus[e2]->chr) if ( fabs((double)(locus[e1]->bp - locus[e2]->bp)) < par::epi_caseonly_kb_gap*1000 ) continue; ////////////////////////////////// // Perform test of epistasis here if (par::bt && par::fast_epistasis) { double z; // statistic from either method // Odds ratio test // make two 2x2 tables int a11, a12, a21, a22; int u11, u12, u21, u22; a11=a12=a21=a22=0; u11=u12=u21=u22=0; vector<bool>::iterator a1 = SNP[e1]->one.begin(); vector<bool>::iterator a2 = SNP[e1]->two.begin(); vector<bool>::iterator b1 = SNP[e2]->one.begin(); vector<bool>::iterator b2 = SNP[e2]->two.begin(); vector<Individual*>::iterator person = sample.begin(); while ( person != sample.end() ) { if( (*person)->missing ) { // Next person a1++; a2++; b1++; b2++; person++; continue; } if ((*person)->aff) // if affected { if ( ! *b1 ) { if ( ! *b2 ) // ??x00 { if ( ! *a1 ) { if ( ! *a2 ) a11+=4; // 00 x 00 else { a11+=2; a21+=2; } // 01 x 00 } else if ( *a2 ) a21+=4; // 11 x 00 } else // ??x01 { if ( ! *a1 ) { if ( ! *a2 ) { a11+=2; a12+=2; } // 00 x 01 else { a11++; a21++; a12++; a22++; } // 01x01 } else if ( *a2 ) { a21+=2; a22+=2; } // 11 x 01 } } else if ( *b2 ) // ?? x 11 { if ( ! *a1 ) { if ( ! *a2 ) a12+=4; // 00 x 01 else { a12+=2; a22+=2; } // 01 x 01 } else if ( *a2 ) a22+=4; // 11 x 01 } } // Unaffecteds? else if ( !par::epi_caseonly ) // unaffected { if ( ! *b1 ) { if ( ! *b2 ) // ??x00 { if ( ! *a1 ) { if ( ! *a2 ) u11+=4; // 00 x 00 else { u11+=2; u21+=2; } // 01 x 00 } else if ( *a2 ) u21+=4; // 11 x 00 } else // ??x01 { if ( ! *a1 ) { if ( ! *a2 ) { u11+=2; u12+=2; } // 00 x 01 else { u11++; u21++; u12++; u22++; } // 01x01 } else if ( *a2 ) { u21+=2; u22+=2; } // 11 x 01 } } else if ( *b2 ) // ?? x 11 { if ( ! *a1 ) { if ( !
*a2 ) u12+=4; // 00 x 01 else { u12+=2; u22+=2; } // 01 x 01 } else if ( *a2 ) u22+=4; // 11 x 01 } } // Next person a1++; a2++; b1++; b2++; person++; } // Calculate log(OR) and SEs double or_aff, v_aff, or_unf, v_unf; or_aff = log( (double)(a11*a22)/ (double)(a12*a21) ); v_aff = 1/(double)a11 + 1/(double)a12 + 1/(double)a21 + 1/(double)a22; // Case-only z-score (if requested) if (par::epi_caseonly) z = fabs( or_aff / sqrt(v_aff) ); else // Standard case-control analysis { or_unf = log( (double)(u11*u22)/ (double)(u12*u21) ); v_unf = 1/(double)u11 + 1/(double)u12 + 1/(double)u21 + 1/(double)u22; z = fabs( (or_aff - or_unf) / sqrt ( v_aff + v_unf ) ); } ////////////////////////////// // --nop option in effect // Just output z score, if valid & above threshold if (par::epi_quickscan) { // Is this worth recording? if ( realnum(z) ) { nepi++; if (z >= par::epi_alpha1) EPI << setw(4) << locus[e1]->chr << " " << setw(par::pp_maxsnp) << locus[e1]->name << " " << setw(4) << locus[e2]->chr << " " << setw(par::pp_maxsnp) << locus[e2]->name << " " << setw(12) << z*z << "\n"; EPI.flush(); continue; } } ///////////////////////////////// // More full parsing of results double zero = 0; // Check this is a proper result if ( par::epi_filter && realnum(z) ) { // One more test performed nepi++; // Count as a good result summary_good[e1]++; if (sA[e2]) summary_good[e2]++; // Do we want to record this as part of the summary for the first set? if (z >= par::epi_alpha2) { // first variable will always be in A set summary_sig[e1]++; // but the second may also be in A set if (sA[e2]) summary_sig[e2]++; } // Is this result the best score yet for marker in set A? if (z > best_score[e1]) { best_score[e1] = z; best_partner[e1] = e2; } // The second marker might also be in set A if (sA[e2]) { if (z > best_score[e2]) { best_score[e2] = z; best_partner[e2] = e1; } } // Is this worth recording? if (z >= par::epi_alpha1) { EPI << setw(4) << locus[e1]->chr << " " << setw(par::pp_maxsnp) << locus[e1]->name << " " << setw(4) << locus[e2]->chr << " " << setw(par::pp_maxsnp) << locus[e2]->name << " " << setw(12) << z*z << " " << setw(12) << normdist(-z) * 2 << " " << "\n"; EPI.flush(); } else continue; // skip to next pair (skip logistic test) } else if (!par::epi_filter) { // Record all results here, whether NA or otherwise EPI << setw(4) << locus[e1]->chr << " " << setw(par::pp_maxsnp) << locus[e1]->name << " " << setw(4) << locus[e2]->chr << " " << setw(par::pp_maxsnp) << locus[e2]->name << " " << setw(12) << z*z << " " << setw(12) << normdist(-z) * 2 << " " << "\n"; EPI.flush(); } else continue; // if bad statistic for this test, do not try logistic } // End of binary OR test /////////////////////////////////////////////// // Logistic or linear regression epistasis test if ( !par::fast_epistasis ) { Model * lm; if (par::bt) { LogisticModel * m = new LogisticModel(this); lm = m; } else { LinearModel * m = new LinearModel(this); lm = m; } // Set missing data lm->setMissing(); // Main effect of SNP 1 lm->addAdditiveSNP(e1); lm->label.push_back("ADD1"); // Main effect of SNP 2 lm->addAdditiveSNP(e2); lm->label.push_back("ADD2"); // Epistasis lm->addInteraction(1,2); lm->label.push_back("EPI"); // Build design matrix lm->buildDesignMatrix(); // Prune out any remaining missing individuals // No longer needed // lm->pruneY(); // Fit linear model lm->fitLM(); // Did model fit okay?
lm->validParameters(); // Obtain estimates and statistic lm->testParameter = 3; // interaction vector_t b = lm->getCoefs(); double chisq = lm->getStatistic(); double pvalue = chiprobP(chisq,1); double z = sqrt(chisq); // Is this result worth displaying? if (lm->isValid()) { // One more valid test performed nepi++; // Count as a good result summary_good[e1]++; if (sA[e2]) summary_good[e2]++; // Do we want to record this as part of the summary for the first set? if ( z >= par::epi_alpha2) { // first variable will always be in A set summary_sig[e1]++; // but the second may also be in A set if (sA[e2]) summary_sig[e2]++; } // Is this result the best score yet for marker in set A? if (z > best_score[e1]) { best_score[e1] = z; best_partner[e1] = e2; } // The second marker might also be in set A if (sA[e2]) { if (z > best_score[e2]) { best_score[e2] = z; best_partner[e2] = e1; } } } // Is this result worth displaying? if ( z >= par::epi_alpha1 ) { EPI << setw(4) << locus[e1]->chr << " " << setw(par::pp_maxsnp) << locus[e1]->name << " " << setw(4) << locus[e2]->chr << " " << setw(par::pp_maxsnp) << locus[e2]->name << " "; if (lm->isValid()) { if ( par::bt) EPI << setw(12) << exp(b[3]) << " " << setw(12) << chisq << " " << setw(12) << pvalue << " " << "\n"; else EPI << setw(12) << b[3] << " " << setw(12) << chisq << " " << setw(12) << pvalue << " " << "\n"; } else EPI << setw(12) << "NA" << " " << setw(12) << "NA" << " " << setw(12) << "NA" << " " << "\n"; EPI.flush(); } // Clean up delete lm; } } // Next pair of SNPs } } if (!par::silent) cout << "\n"; EPI.close(); ////////////////////// // Summary of results // Skip this for now if (true) { f += ".summary"; EPI.open(f.c_str(),ios::out); EPI.clear(); printLOG("Performed a total of "+int2str(nepi)+" valid SNPxSNP tests\n"); printLOG("Writing epistasis summary results to [ " + f + " ] \n"); EPI.precision(4); EPI << setw(4) << "CHR" << " " << setw(par::pp_maxsnp) << "SNP" << " " << setw(12) << "N_SIG" << " " << setw(12) << "N_TOT" << " " << setw(12) << "PROP" << " " << setw(12) << "BEST_CHISQ" << " " << setw(4) << "BEST_CHR" << " " << setw(par::pp_maxsnp) << "BEST_SNP" << " " << "\n"; int c=0; for (int e1=0;e1<nl_all;e1++) { if (sA[e1]) { EPI << setw(4) << locus[e1]->chr << " " << setw(par::pp_maxsnp) << locus[e1]->name << " " << setw(12) << summary_sig[e1] << " " << setw(12) << summary_good[e1] << " " << setw(12) << (double)summary_sig[e1] / (double)summary_good[e1] << " " << setw(12) << best_score[e1] * best_score[e1] << " " << setw(4) << locus[best_partner[e1]]->chr << " " << setw(par::pp_maxsnp) << locus[best_partner[e1]]->name << " " << "\n"; } } EPI.close(); } }
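/* Added note (a sketch, not part of the PLINK sources): the fast case/control statistic
 * computed above is z = |ln(OR_aff) - ln(OR_unf)| / sqrt(v_aff + v_unf), where each
 * genotype pair is tallied into a 2x2 allele-count table (hence the increments of 4, 2
 * and 1) and v = 1/a11 + 1/a12 + 1/a21 + 1/a22 is the usual (Woolf) variance of a log
 * odds ratio; the reported CHISQ is z^2 on 1 df, and the case-only test keeps only the
 * affected term. The regression branch instead tests the SNP1 x SNP2 interaction
 * coefficient in a logistic (case/control) or linear (quantitative trait) model that
 * includes both main effects. */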
utf-8
1
GPL-2
2005-2009 Shaun Purcell <plink@chgr.mgh.harvard.edu>
grass-7.8.6/lib/vector/Vlib/ascii.c
/*! \file lib/vector/Vlib/ascii.c \brief Vector library - GRASS ASCII vector format Higher level functions for reading/writing/manipulating vectors. (C) 2001-2015 by the GRASS Development Team This program is free software under the GNU General Public License (>=v2). Read the file COPYING that comes with GRASS for details. \author Original author CERL \author Updated for GRASS 7 (SF support) by Martin Landa <landa.martin gmail.com> */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <grass/vector.h> #include <grass/dbmi.h> #include <grass/glocale.h> #define BUFFSIZE 128 static int srch(const void *, const void *); static int get_cat(const struct line_cats *, const struct cat_list *, const int *, int, int, int *); static void free_col_arrays(int *, char *, char **); /*! \brief Read data in GRASS ASCII vector format \param ascii pointer to the input ASCII file \param[out] Map pointer to the output Map_info structure \return number of read features \return -1 on error */ int Vect_read_ascii(FILE *ascii, struct Map_info *Map) { char ctype; char buff[BUFFSIZE]; char east_str[256], north_str[256]; double *xarray; double *yarray; double *zarray; double *x, *y, *z; int i, n_points, n_coors, n_cats, n_lines; int type, with_z, skip_feat, nskipped_3d; int alloc_points; struct line_pnts *Points; struct line_cats *Cats; int catn, cat; /* Must always use this to create an initialized line_pnts structure */ Points = Vect_new_line_struct(); Cats = Vect_new_cats_struct(); /*alloc_points = 1000 ; */ alloc_points = 1; xarray = (double *)G_calloc(alloc_points, sizeof(double)); yarray = (double *)G_calloc(alloc_points, sizeof(double)); zarray = (double *)G_calloc(alloc_points, sizeof(double)); n_lines = nskipped_3d = 0; with_z = Vect_is_3d(Map); while (G_getl2(buff, BUFFSIZE - 1, ascii) != 0) { n_cats = 0; skip_feat = FALSE; if (buff[0] == '\0') { G_debug(3, "a2b: skipping blank line"); continue; } if (sscanf(buff, "%1c%d%d", &ctype, &n_coors, &n_cats) < 2 || n_coors < 0 || n_cats < 0) { if (ctype == '#') { G_debug(2, "a2b: skipping commented line"); continue; } G_warning(_("Error reading ASCII file: (bad type) [%s]"), buff); return -1; } if (ctype == '#') { G_debug(2, "a2b: Skipping commented line"); continue; } switch (ctype) { case 'A': type = GV_BOUNDARY; break; case 'B': type = GV_BOUNDARY; break; case 'C': type = GV_CENTROID; break; case 'L': type = GV_LINE; break; case 'P': type = GV_POINT; break; case 'F': type = GV_FACE; break; case 'K': type = GV_KERNEL; break; case 'a': case 'b': case 'c': case 'l': case 'p': type = 0; /* dead -> ignore */ break; default: { G_warning(_("Error reading ASCII file: (unknown type) [%s]"), buff); return -1; } } G_debug(5, "feature type = %d", type); if ((type & (GV_FACE | GV_KERNEL)) && !with_z) { skip_feat = TRUE; nskipped_3d++; } n_points = 0; x = xarray; y = yarray; z = zarray; /* Collect the points */ for (i = 0; i < n_coors; i++) { if (G_getl2(buff, BUFFSIZE - 1, ascii) == 0) { G_warning(_("End of ASCII file reached before end of coordinates")); return -1; } if (buff[0] == '\0') { G_debug(3, "a2b: skipping blank line while reading vertices"); i--; continue; } *z = 0; if (sscanf(buff, "%lf%lf%lf", x, y, z) < 2) { if (sscanf(buff, " %s %s %lf", east_str, north_str, z) < 2) { G_warning(_("Error reading ASCII file: (bad point) [%s]"), buff); return -1; } else { if (!G_scan_easting(east_str, x, G_projection())) { G_warning(_("Unparsable longitude value: [%s]"), east_str); return -1; } if (!G_scan_northing(north_str, y, G_projection())) { 
G_warning(_("Unparsable latitude value: [%s]"), north_str); return -1; } } } G_debug(5, "coor in: %s -> x = %f y = %f z = %f", G_chop(buff), *x, *y, *z); n_points++; x++; y++; z++; if (n_points >= alloc_points) { alloc_points = n_points + 1000; xarray = (double *)G_realloc((void *)xarray, alloc_points * sizeof(double)); yarray = (double *)G_realloc((void *)yarray, alloc_points * sizeof(double)); zarray = (double *)G_realloc((void *)zarray, alloc_points * sizeof(double)); x = xarray + n_points; y = yarray + n_points; z = zarray + n_points; } } /* Collect the cats */ Vect_reset_cats(Cats); for (i = 0; i < n_cats; i++) { if (G_getl2(buff, BUFFSIZE - 1, ascii) == 0) { G_warning(_("End of ASCII file reached before end of categories")); return -1; } if (buff[0] == '\0') { G_debug(3, "a2b: skipping blank line while reading category info"); i--; continue; } if (sscanf(buff, "%u%u", &catn, &cat) != 2) { G_warning(_("Error reading categories: [%s]"), buff); return -1; } Vect_cat_set(Cats, catn, cat); } if (skip_feat) continue; /* Allocation is handled for line_pnts */ if (0 > Vect_copy_xyz_to_pnts(Points, xarray, yarray, zarray, n_points)) { G_warning(_("Unable to copy points")); return -1; } if (type > 0) { if (-1 == Vect_write_line(Map, type, Points, Cats)) { return -1; } n_lines++; } } if (nskipped_3d > 0) G_warning(_("Vector map <%s> is 2D. %d 3D features (faces or kernels) skipped."), Vect_get_name(Map), nskipped_3d); Vect_destroy_line_struct(Points); Vect_destroy_cats_struct(Cats); return n_lines; } /*! \brief Read header of GRASS ASCII vector format \param dascii pointer to the ASCII file \param Map pointer to Map_info structure \return 0 on success \return -1 on error */ int Vect_read_ascii_head(FILE *dascii, struct Map_info *Map) { char buff[1024]; char *ptr; for (;;) { if (0 == G_getl2(buff, sizeof(buff) - 1, dascii)) return (0); /* Last line of header */ if (strncmp(buff, "VERTI:", 6) == 0) return (0); if (!(ptr = strchr(buff, ':'))) { G_warning(_("Unexpected data in vector header:\n[%s]"), buff); return -1; } ptr++; /* Search for the start of text */ while (*ptr == ' ') ptr++; if (strncmp(buff, "ORGANIZATION:", 13) == 0) Vect_set_organization(Map, ptr); else if (strncmp(buff, "DIGIT DATE:", 11) == 0) Vect_set_date(Map, ptr); else if (strncmp(buff, "DIGIT NAME:", 11) == 0) Vect_set_person(Map, ptr); else if (strncmp(buff, "MAP NAME:", 9) == 0) Vect_set_map_name(Map, ptr); else if (strncmp(buff, "MAP DATE:", 9) == 0) Vect_set_map_date(Map, ptr); else if (strncmp(buff, "MAP SCALE:", 10) == 0) Vect_set_scale(Map, atoi(ptr)); else if (strncmp(buff, "OTHER INFO:", 11) == 0) Vect_set_comment(Map, ptr); else if (strncmp(buff, "ZONE:", 5) == 0 || strncmp(buff, "UTM ZONE:", 9) == 0) Vect_set_zone(Map, atoi(ptr)); else if (strncmp(buff, "WEST EDGE:", 10) == 0) { } else if (strncmp(buff, "EAST EDGE:", 10) == 0) { } else if (strncmp(buff, "SOUTH EDGE:", 11) == 0) { } else if (strncmp(buff, "NORTH EDGE:", 11) == 0) { } else if (strncmp(buff, "MAP THRESH:", 11) == 0) Vect_set_thresh(Map, atof(ptr)); else { G_warning(_("Unknown keyword <%s> in vector head"), buff); } } /* NOTREACHED */ } /*! \brief Write data to GRASS ASCII vector format Prints message if some features without category are skipped. 
\param[out] ascii pointer to the output ASCII file \param[out] att att file (< version 5 only) \param Map pointer to Map_info structure \param ver version number 4 or 5 \param format format GV_ASCII_FORMAT_POINT or GV_ASCII_FORMAT_STD \param dp number of significant digits \param fs field separator \param region_flag check region \param type feature type filter \param field field number \param Clist list of categories to filter features or NULL \param where SQL select where statement to filter features or NULL \param column_names array of columns to be included to the output or NULL "*" as the first item in the array indicates all columns \param header TRUE to print also header \return number of written features \return -1 on error */ int Vect_write_ascii(FILE *ascii, FILE *att, struct Map_info *Map, int ver, int format, int dp, char *fs, int region_flag, int type, int field, const struct cat_list *Clist, const char* where, const char **column_names, int header) { int ltype, ctype, i, cat, line, left, right, found; double *xptr, *yptr, *zptr, x, y; static struct line_pnts *Points; struct line_cats *Cats, *ACats; char *xstring, *ystring, *zstring; size_t xsize, ysize, zsize; struct Cell_head window; struct ilist *fcats; int count, n_skipped; /* where || columns */ struct field_info *Fi; dbDriver *driver; dbValue value; dbHandle handle; int *cats, ncats, more; dbTable *Table; dbString dbstring; dbColumn *Column; dbValue *Value; char *buf; size_t bufsize; dbCursor cursor; /* columns */ char **columns; int *coltypes; char *all_columns; Fi = NULL; driver = NULL; columns = NULL; coltypes = NULL; all_columns = NULL; G_zero(&value, sizeof(dbValue)); db_init_string(&dbstring); xstring = NULL; ystring = NULL; zstring = NULL; xsize = 0; ysize = 0; zsize = 0; buf = NULL; bufsize = 0; /* get the region */ G_get_window(&window); count = ncats = 0; xstring = ystring = zstring = NULL; cats = NULL; if (field > 0 && (where || column_names)) { Fi = Vect_get_field(Map, field); if (!Fi) { G_fatal_error(_("Database connection not defined for layer %d"), field); } driver = db_start_driver(Fi->driver); if (!driver) G_fatal_error(_("Unable to start driver <%s>"), Fi->driver); db_init_handle(&handle); db_set_handle(&handle, Fi->database, NULL); if (db_open_database(driver, &handle) != DB_OK) G_fatal_error(_("Unable to open database <%s> by driver <%s>"), Fi->database, Fi->driver); /* select cats (sorted array) */ ncats = db_select_int(driver, Fi->table, Fi->key, where, &cats); G_debug(3, "%d categories selected from table <%s>", ncats, Fi->table); if (!column_names) { db_close_database(driver); db_shutdown_driver(driver); } else { int icol, ncols; const char *col_name; int len_all = 0; db_set_string(&dbstring, Fi->table); if (db_describe_table(driver, &dbstring, &Table) != DB_OK) { G_warning(_("Unable to describe table <%s>"), Fi->table); return -1; } ncols = db_get_table_number_of_columns(Table); columns = (char **) G_malloc((ncols + 1) * sizeof(char *)); if (column_names[0] && strcmp(column_names[0], "*") == 0) { /* all columns */ icol = 0; for (i = 0; i < ncols; i++) { col_name = db_get_column_name(db_get_table_column(Table, i)); /* key column skipped */ if (strcmp(Fi->key, col_name) != 0) columns[icol++] = G_store(col_name); } columns[icol] = NULL; } else { int j; icol = 0; i = 0; while (column_names[i]) { /* key column skipped */ if (strcmp(Fi->key, column_names[i]) != 0) { found = 0; for (j = 0; j < ncols; j++) { col_name = db_get_column_name(db_get_table_column(Table, j)); if (strcmp(col_name, 
column_names[i]) == 0) { columns[icol++] = G_store(col_name); found = 1; break; } } if (!found) { G_warning(_("Column <%s> does not exist"), column_names[i]); G_important_message(_("Available columns:")); for (j = 0; j < ncols; j++) { col_name = db_get_column_name(db_get_table_column(Table, j)); G_important_message("%s", col_name); } G_warning(_("Export cancelled")); db_close_database(driver); db_shutdown_driver(driver); return -1; } } i++; } columns[icol] = NULL; } db_zero_string(&dbstring); db_free_table(Table); Table = NULL; if (columns[0]) { /* selected columns only */ i = 0; while (columns[i]) len_all += strlen(columns[i++]); coltypes = G_malloc(i * sizeof(int)); all_columns = G_malloc(len_all + i + 2); i = 0; strcpy(all_columns, columns[0]); while (columns[i]) { /* get column types */ coltypes[i] = db_column_Ctype(driver, Fi->table, columns[i]); if (coltypes[i] < 0) { db_close_database(driver); db_shutdown_driver(driver); G_warning(_("Unknown type of column <%s>, export cancelled"), columns[i]); return -1; } if (i > 0) { strcat(all_columns, ","); strcat(all_columns, columns[i]); } i++; } } else { /* no column or only key column selected */ G_free(columns); columns = NULL; db_close_database(driver); db_shutdown_driver(driver); } } } if (format == GV_ASCII_FORMAT_POINT && header) { /* print header */ if (Map->head.with_z) fprintf(ascii, "east%snorth%sheight%scat", fs, fs, fs); else fprintf(ascii, "east%snorth%scat", fs, fs); if (columns) { for (i = 0; columns[i]; i++) { if (db_select_value (driver, Fi->table, Fi->key, cat, columns[i], &value) < 0) G_fatal_error(_("Unable to select record from table <%s> (key %s, column %s)"), Fi->table, Fi->key, columns[i]); if (columns[i]) fprintf(ascii, "%s%s", fs, columns[i]); else fprintf(ascii, "%s", columns[i]); /* can not happen */ } } fprintf(ascii, "%s", HOST_NEWLINE); } Points = Vect_new_line_struct(); Cats = Vect_new_cats_struct(); ACats = Vect_new_cats_struct(); fcats = Vect_new_list(); /* by default, read_next_line will NOT read Dead lines */ /* but we can override that (in Level I only) by specifying */ /* the type -1, which means match all line types */ Vect_rewind(Map); count = n_skipped = line = 0; while (TRUE) { ltype = Vect_read_next_line(Map, Points, Cats); if (ltype == -1 ) { /* failure */ if (columns) { db_close_database(driver); db_shutdown_driver(driver); free_col_arrays(coltypes, all_columns, column_names && strcmp(column_names[0], "*") == 0 ? columns : NULL); } return -1; } if (ltype == -2) { /* EOF */ if (columns) { db_close_database(driver); db_shutdown_driver(driver); free_col_arrays(coltypes, all_columns, column_names && strcmp(column_names[0], "*") == 0 ? 
columns : NULL); } break; } line++; if (!(ltype & type)) continue; if (format == GV_ASCII_FORMAT_POINT && !(ltype & GV_POINTS)) continue; found = get_cat(Cats, Clist, cats, ncats, field, &cat); if (!found && field > 0 && ltype == GV_BOUNDARY && type & GV_AREA && Vect_level(Map) > 1) { Vect_get_line_areas(Map, line, &left, &right); if (left < 0) left = Vect_get_isle_area(Map, abs(left)); if (left > 0) { Vect_get_area_cats(Map, left, ACats); found = get_cat(ACats, Clist, cats, ncats, field, &cat); } if (right < 0) right = Vect_get_isle_area(Map, abs(right)); if (!found && right > 0) { Vect_get_area_cats(Map, right, ACats); found = get_cat(ACats, Clist, cats, ncats, field, &cat); } } if (!found) { if (Cats->n_cats < 1) n_skipped++; continue; } if (ver < 5) { Vect_cat_get(Cats, 1, &cat); } switch (ltype) { case GV_BOUNDARY: if (ver == 5) ctype = 'B'; else ctype = 'A'; break; case GV_CENTROID: if (ver < 5) { if (att != NULL) { if (cat > 0) { G_rasprintf(&xstring, &xsize, "%.*f", dp, Points->x[0]); G_trim_decimal(xstring); G_rasprintf(&ystring, &ysize, "%.*f", dp, Points->y[0]); G_trim_decimal(ystring); fprintf(att, "A %s %s %d%s", xstring, ystring, cat, HOST_NEWLINE); } } continue; } ctype = 'C'; break; case GV_LINE: ctype = 'L'; break; case GV_POINT: ctype = 'P'; break; case GV_FACE: ctype = 'F'; break; case GV_KERNEL: ctype = 'K'; break; default: ctype = 'X'; G_warning(_("Unknown feature type %d"), (int)ltype); break; } if (format == GV_ASCII_FORMAT_POINT) { if (region_flag) { if ((window.east < Points->x[0]) || (window.west > Points->x[0])) continue; } G_rasprintf(&xstring, &xsize, "%.*f", dp, Points->x[0]); G_trim_decimal(xstring); if (region_flag) { if ((window.north < Points->y[0]) || (window.south > Points->y[0])) continue; } G_rasprintf(&ystring, &ysize, "%.*f", dp, Points->y[0]); G_trim_decimal(ystring); Vect_field_cat_get(Cats, field, fcats); if (Map->head.with_z && ver == 5) { if (region_flag) { if ((window.top < Points->z[0]) || (window.bottom > Points->z[0])) continue; } G_rasprintf(&zstring, &zsize, "%.*f", dp, Points->z[0]); G_trim_decimal(zstring); fprintf(ascii, "%s%s%s%s%s", xstring, fs, ystring, fs, zstring); } else { fprintf(ascii, "%s%s%s", xstring, fs, ystring); } if (fcats->n_values > 0 && cat > -1) { if (fcats->n_values > 1) { G_warning(_("Feature has more categories. 
Only one category (%d) " "is exported."), cat); } fprintf(ascii, "%s%d", fs, cat); /* print attributes */ if (columns) { G_rasprintf(&buf, &bufsize, "SELECT %s FROM %s WHERE %s = %d", all_columns, Fi->table, Fi->key, cat); G_debug(2, "SQL: %s", buf); db_set_string(&dbstring, buf); if (db_open_select_cursor (driver, &dbstring, &cursor, DB_SEQUENTIAL) != DB_OK) { db_close_database(driver); db_shutdown_driver(driver); G_fatal_error(_("Cannot select attributes for cat = %d"), cat); } if (db_fetch(&cursor, DB_NEXT, &more) != DB_OK) { db_close_database(driver); db_shutdown_driver(driver); G_fatal_error(_("Unable to fetch data from table")); } Table = db_get_cursor_table(&cursor); for (i = 0; columns[i]; i++) { Column = db_get_table_column(Table, i); Value = db_get_column_value(Column); if (db_test_value_isnull(Value)) { fprintf(ascii, "%s", fs); } else { switch(coltypes[i]) { case DB_C_TYPE_INT: { fprintf(ascii, "%s%d", fs, db_get_value_int(Value)); break; } case DB_C_TYPE_DOUBLE: { fprintf(ascii, "%s%.*f", fs, dp, db_get_value_double(Value)); break; } case DB_C_TYPE_STRING: { fprintf(ascii, "%s%s", fs, db_get_value_string(Value)); break; } case DB_C_TYPE_DATETIME: { break; } case -1: G_fatal_error(_("Column <%s> not found in table <%s>"), columns[i], Fi->table); default: G_fatal_error(_("Column <%s>: unsupported data type"), columns[i]); } } } db_close_cursor(&cursor); } } fprintf(ascii, "%s", HOST_NEWLINE); } else if (format == GV_ASCII_FORMAT_STD) { /* FORMAT_STANDARD */ if (ver == 5 && Cats->n_cats > 0) fprintf(ascii, "%c %d %d%s", ctype, Points->n_points, Cats->n_cats, HOST_NEWLINE); else fprintf(ascii, "%c %d%s", ctype, Points->n_points, HOST_NEWLINE); xptr = Points->x; yptr = Points->y; zptr = Points->z; while (Points->n_points--) { G_rasprintf(&xstring, &xsize, "%.*f", dp, *xptr++); G_trim_decimal(xstring); G_rasprintf(&ystring, &ysize, "%.*f", dp, *yptr++); G_trim_decimal(ystring); if (ver == 5) { if (Map->head.with_z) { G_rasprintf(&zstring, &zsize, "%.*f", dp, *zptr++); G_trim_decimal(zstring); fprintf(ascii, " %-12s %-12s %-12s%s", xstring, ystring, zstring, HOST_NEWLINE); } else { fprintf(ascii, " %-12s %-12s%s", xstring, ystring, HOST_NEWLINE); } } /*Version 4 */ else { fprintf(ascii, " %-12s %-12s%s", ystring, xstring, HOST_NEWLINE); } } if (ver == 5) { for (i = 0; i < Cats->n_cats; i++) { fprintf(ascii, " %-5d %-10d%s", Cats->field[i], Cats->cat[i], HOST_NEWLINE); } } else { if (cat > -1) { if (ltype == GV_POINT) { G_rasprintf(&xstring, &xsize, "%.*f", dp, Points->x[0]); G_trim_decimal(xstring); G_rasprintf(&ystring, &ysize, "%.*f", dp, Points->y[0]); G_trim_decimal(ystring); fprintf(att, "P %s %s %d%s", xstring, ystring, cat, HOST_NEWLINE); } else { x = (Points->x[1] + Points->x[0]) / 2; y = (Points->y[1] + Points->y[0]) / 2; G_rasprintf(&xstring, &xsize, "%.*f", dp, x); G_trim_decimal(xstring); G_rasprintf(&ystring, &ysize, "%.*f", dp, y); G_trim_decimal(ystring); fprintf(att, "L %s %s %d%s", xstring, ystring, cat, HOST_NEWLINE); } } } } else if (format == GV_ASCII_FORMAT_WKT) { if (ltype & (GV_BOUNDARY | GV_CENTROID | GV_FACE | GV_KERNEL)) continue; /* Well-Known Text */ Vect_sfa_line_astext(Points, ltype, Vect_is_3d(Map), dp, ascii); count++; } else { G_fatal_error(_("Unknown format")); } count++; } if (format == GV_ASCII_FORMAT_WKT) { /* process areas - topology required */ int i, area, nareas, isle, nisles; if (Vect_level(Map) < 2) { G_warning(_("Topology not available, unable to process areas")); nareas = 0; } else { nareas = Vect_get_num_areas(Map); } for (area = 1; area 
<= nareas; area++) { if (!Vect_area_alive(Map, area)) /* skip dead areas */ continue; if (Vect_get_area_cat(Map, area, field) < 0) continue; /* get boundary -> linearring */ if (Vect_get_area_points(Map, area, Points) < 0) { G_warning(_("Unable to get boundary of area id %d"), area); continue; } fprintf(ascii, "POLYGON("); /* write outer ring */ Vect_sfa_line_astext(Points, GV_BOUNDARY, 0, dp, ascii); /* boundary is always 2D */ /* get isles (holes) -> inner rings */ nisles = Vect_get_area_num_isles(Map, area); for (i = 0; i < nisles; i++) { /* get isle boundary -> linearring */ isle = Vect_get_area_isle(Map, area, i); if (Vect_get_isle_points(Map, isle, Points) < 0) { G_warning(_("Unable to get boundary of isle id %d (area id %d)"), isle, area); continue; } fprintf(ascii, ", "); /* write inner ring */ Vect_sfa_line_astext(Points, GV_BOUNDARY, 0, dp, ascii); /* boundary is always 2D */ } fprintf(ascii, ")%s", HOST_NEWLINE); count++; } } if (n_skipped > 0) G_important_message(_("%d features without category skipped. To export also " "features without category use '%s=-1'."), n_skipped, "layer"); Vect_destroy_line_struct(Points); Vect_destroy_cats_struct(Cats); Vect_destroy_cats_struct(ACats); return count; } int srch(const void *pa, const void *pb) { int *p1 = (int *)pa; int *p2 = (int *)pb; if (*p1 < *p2) return -1; if (*p1 > *p2) return 1; return 0; } /*! \brief Write data to GRASS ASCII vector format \param[out] dascii pointer to the output ASCII file \param Map pointer to Map_info structure */ void Vect_write_ascii_head(FILE *dascii, struct Map_info *Map) { fprintf(dascii, "ORGANIZATION: %s%s", Vect_get_organization(Map), HOST_NEWLINE); fprintf(dascii, "DIGIT DATE: %s%s", Vect_get_date(Map), HOST_NEWLINE); fprintf(dascii, "DIGIT NAME: %s%s", Vect_get_person(Map), HOST_NEWLINE); fprintf(dascii, "MAP NAME: %s%s", Vect_get_map_name(Map), HOST_NEWLINE); fprintf(dascii, "MAP DATE: %s%s", Vect_get_map_date(Map), HOST_NEWLINE); fprintf(dascii, "MAP SCALE: %d%s", Vect_get_scale(Map), HOST_NEWLINE); fprintf(dascii, "OTHER INFO: %s%s", Vect_get_comment(Map), HOST_NEWLINE); fprintf(dascii, "ZONE: %d%s", Vect_get_zone(Map), HOST_NEWLINE); fprintf(dascii, "MAP THRESH: %f%s", Vect_get_thresh(Map), HOST_NEWLINE); } /* check category */ int get_cat(const struct line_cats *Cats, const struct cat_list *Clist, const int *cats, int ncats, int field, int *cat) { int i; *cat = -1; if (field < 1) return TRUE; if (Clist && Clist->field == field) { for (i = 0; i < Cats->n_cats; i++) { if (Cats->field[i] == field && Vect_cat_in_cat_list(Cats->cat[i], Clist)) { *cat = Cats->cat[i]; return TRUE; } } return FALSE; } if (cats) { int *found; for (i = 0; i < Cats->n_cats; i++) { if (Cats->field[i] == field) { found = (int *)bsearch((void *) &(Cats->cat[i]), cats, ncats, sizeof(int), srch); if (found) { /* found */ *cat = *found; return TRUE; } } } return FALSE; } if (!Clist && !cats && field > 0) { Vect_cat_get(Cats, field, cat); if (*cat > -1) return TRUE; } return FALSE; } /* free column arrays, see Vect_write_ascii() */ void free_col_arrays(int *coltypes, char *all_columns, char **columns) { G_free(coltypes); G_free(all_columns); if (columns) { int i = 0; while (columns[i]) G_free(columns[i++]); G_free(columns); } }
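/* Added sketch (not part of the GRASS sources): the standard-format input consumed by
 * Vect_read_ascii() above is, per feature, a header line "<type> <n_coords> [<n_cats>]",
 * then n_coords coordinate lines and n_cats "<layer> <category>" lines; e.g. a point
 * with category 42 in layer 1 (coordinates illustrative):
 *
 *   P 1 1
 *    634308.6 223710.2
 *    1 42
 */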
utf-8
1
GPL-2+
1989-2021, GRASS Development Team 2003-2021, Markus Neteler 2003-2021, Glynn Clements 2003-2021, Luca Delucchi 2006-2021, Martin Landa 2011-2021, Vaclav Petras 2012-2021, Stepan Turek 2011-2020, Markus Metz 2017-2018, Supreet Singh 2014-2018, Tereza Fiedlerova 2018, Shubham Sharma 1992-2017, Helena Mitasova 2011-2017, Soeren Gebbert 2017, Maris Nartiss 2017, Sunveer Singh 2012-2016, Anna Petrasova 2016, Adam Laza 2016, Zofie Cimburova 2014-2015, Matej Krejci 2015, Jachym Ceppicky 2015, pietro 2015, Stephanie Wendel 1993-2014, James Darrell McCauley <darrell@mccauley-usa.com> 2003-2014, Per Henrik Johansen 2006-2014, Hamish Bowman 2007-2014, Lars Ahlzen 2011-2013, Anna Kratochvilova 2013, GKX Associates Inc. 2009-2010, 2012, Daniel Bundala 2012, Eric Momsen 2001-2011, Frank Warmerdam 2011, Tom Kralidis 2007-2010, E. Jorge Tizado 2007, 2010, Laura Toma 2006-2009, Cedric Shoc 2009, Gabor Grothendieck 2007-2008, Martin Schroeder 2008, Marcus D. Hanwell <marcus@cryos.org> 2005-2006, Politecnico di Milano 2004-2005, GDF Hannover 2002-2003, University of Sannio (BN) - Italy 2003, Christo Zietsman 1989, 1993-2000, 2002, Lubos Mitas 1995, 2002, J. Hofierka 1998-2002, Free Software Foundation, Inc 2002, Jaro Hofierka 2002, Roberto Micarelli 1996-2000, Brian Gough 2000, David D.Gray <ddgray@armadce.demon.co.uk> 1995, Bill Brown <brown@gis.uiuc.edu> & Michael Shapiro 1995, M. Ruesink 1995, J. Caplan, 1995, M. Zlocha 1993, 1995, D. McCauley 1992-1993, 1995, D. Gerdes 1992-1993, 1995, I. Kosinovsky 1994, Bill Brown, USACERL 1993, RMIT 1992, USA-CERL
zfs-linux-2.1.2/module/zfs/arc.c
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, Joyent, Inc. * Copyright (c) 2011, 2020, Delphix. All rights reserved. * Copyright (c) 2014, Saso Kiselkov. All rights reserved. * Copyright (c) 2017, Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved. * Copyright (c) 2020, George Amanakis. All rights reserved. * Copyright (c) 2019, Klara Inc. * Copyright (c) 2019, Allan Jude * Copyright (c) 2020, The FreeBSD Foundation [1] * * [1] Portions of this software were developed by Allan Jude * under sponsorship from the FreeBSD Foundation. */ /* * DVA-based Adjustable Replacement Cache * * While much of the theory of operation used here is * based on the self-tuning, low overhead replacement cache * presented by Megiddo and Modha at FAST 2003, there are some * significant differences: * * 1. The Megiddo and Modha model assumes any page is evictable. * Pages in its cache cannot be "locked" into memory. This makes * the eviction algorithm simple: evict the last page in the list. * This also makes the performance characteristics easy to reason * about. Our cache is not so simple. At any given moment, some * subset of the blocks in the cache are un-evictable because we * have handed out a reference to them. Blocks are only evictable * when there are no external references active. This makes * eviction far more problematic: we choose to evict the evictable * blocks that are the "lowest" in the list. * * There are times when it is not possible to evict the requested * space. In these circumstances we are unable to adjust the cache * size. To prevent the cache growing unbounded at these times we * implement a "cache throttle" that slows the flow of new data * into the cache until we can make space available. * * 2. The Megiddo and Modha model assumes a fixed cache size. * Pages are evicted when the cache is full and there is a cache * miss. Our model has a variable sized cache. It grows with * high use, but also tries to react to memory pressure from the * operating system: decreasing its size when system memory is * tight. * * 3. The Megiddo and Modha model assumes a fixed page size. All * elements of the cache are therefore exactly the same size. So * when adjusting the cache size following a cache miss, it's simply * a matter of choosing a single page to evict. In our model, we * have variable sized cache blocks (ranging from 512 bytes to * 128K bytes). We therefore choose a set of blocks to evict to make * space for a cache miss that approximates as closely as possible * the space used by the new block. * * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache" * by N. Megiddo & D.
Modha, FAST 2003 */ /* * The locking model: * * A new reference to a cache buffer can be obtained in two * ways: 1) via a hash table lookup using the DVA as a key, * or 2) via one of the ARC lists. The arc_read() interface * uses method 1, while the internal ARC algorithms for * adjusting the cache use method 2. We therefore provide two * types of locks: 1) the hash table lock array, and 2) the * ARC list locks. * * Buffers do not have their own mutexes, rather they rely on the * hash table mutexes for the bulk of their protection (i.e. most * fields in the arc_buf_hdr_t are protected by these mutexes). * * buf_hash_find() returns the appropriate mutex (held) when it * locates the requested buffer in the hash table. It returns * NULL for the mutex if the buffer was not in the table. * * buf_hash_remove() expects the appropriate hash mutex to be * already held before it is invoked. * * Each ARC state also has a mutex which is used to protect the * buffer list associated with the state. When attempting to * obtain a hash table lock while holding an ARC list lock you * must use: mutex_tryenter() to avoid deadlock. Also note that * the active state mutex must be held before the ghost state mutex. * * It is also possible to register a callback which is run when the * arc_meta_limit is reached and no buffers can be safely evicted. In * this case the arc user should drop a reference on some arc buffers so * they can be reclaimed and the arc_meta_limit honored. For example, * when using the ZPL each dentry holds a reference on a znode. These * dentries must be pruned before the arc buffer holding the znode can * be safely evicted. * * Note that the majority of the performance stats are manipulated * with atomic operations. * * The L2ARC uses the l2ad_mtx on each vdev for the following: * * - L2ARC buflist creation * - L2ARC buflist eviction * - L2ARC write completion, which walks L2ARC buflists * - ARC header destruction, as it removes from L2ARC buflists * - ARC header release, as it removes from L2ARC buflists */ /* * ARC operation: * * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure. * This structure can point either to a block that is still in the cache or to * one that is only accessible in an L2 ARC device, or it can provide * information about a block that was recently evicted. If a block is * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough * information to retrieve it from the L2ARC device. This information is * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block * that is in this state cannot access the data directly. * * Blocks that are actively being referenced or have not been evicted * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within * the arc_buf_hdr_t that will point to the data block in memory. A block can * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd). * * The L1ARC's data pointer may or may not be uncompressed. The ARC has the * ability to store the physical data (b_pabd) associated with the DVA of the * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block, * it will match its on-disk compression characteristics. This behavior can be * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE.
When the * compressed ARC functionality is disabled, the b_pabd will point to an * uncompressed version of the on-disk data. * * Data in the L1ARC is not accessed by consumers of the ARC directly. Each * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it. * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC * consumer. The ARC will provide references to this data and will keep it * cached until it is no longer in use. The ARC caches only the L1ARC's physical * data block and will evict any arc_buf_t that is no longer referenced. The * amount of memory consumed by the arc_buf_ts' data buffers can be seen via the * "overhead_size" kstat. * * Depending on the consumer, an arc_buf_t can be requested in uncompressed or * compressed form. The typical case is that consumers will want uncompressed * data, and when that happens a new data buffer is allocated where the data is * decompressed for them to use. Currently the only consumer who wants * compressed arc_buf_t's is "zfs send", when it streams data exactly as it * exists on disk. When this happens, the arc_buf_t's data buffer is shared * with the arc_buf_hdr_t. * * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The * first one is owned by a compressed send consumer (and therefore references * the same compressed data buffer as the arc_buf_hdr_t) and the second could be * used by any other consumer (and has its own uncompressed copy of the data * buffer). * * arc_buf_hdr_t * +-----------+ * | fields | * | common to | * | L1- and | * | L2ARC | * +-----------+ * | l2arc_buf_hdr_t * | | * +-----------+ * | l1arc_buf_hdr_t * | | arc_buf_t * | b_buf +------------>+-----------+ arc_buf_t * | b_pabd +-+ |b_next +---->+-----------+ * +-----------+ | |-----------| |b_next +-->NULL * | |b_comp = T | +-----------+ * | |b_data +-+ |b_comp = F | * | +-----------+ | |b_data +-+ * +->+------+ | +-----------+ | * compressed | | | | * data | |<--------------+ | uncompressed * +------+ compressed, | data * shared +-->+------+ * data | | * | | * +------+ * * When a consumer reads a block, the ARC must first look to see if the * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new * arc_buf_t and either copies uncompressed data into a new data buffer from an * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the * hdr is compressed and the desired compression characteristics of the * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be * the last buffer in the hdr's b_buf list, however a shared compressed buf can * be anywhere in the hdr's list. 
* * The diagram below shows an example of an uncompressed ARC hdr that is * sharing its data with an arc_buf_t (note that the shared uncompressed buf is * the last element in the buf list): * * arc_buf_hdr_t * +-----------+ * | | * | | * | | * +-----------+ * l2arc_buf_hdr_t| | * | | * +-----------+ * l1arc_buf_hdr_t| | * | | arc_buf_t (shared) * | b_buf +------------>+---------+ arc_buf_t * | | |b_next +---->+---------+ * | b_pabd +-+ |---------| |b_next +-->NULL * +-----------+ | | | +---------+ * | |b_data +-+ | | * | +---------+ | |b_data +-+ * +->+------+ | +---------+ | * | | | | * uncompressed | | | | * data +------+ | | * ^ +->+------+ | * | uncompressed | | | * | data | | | * | +------+ | * +---------------------------------+ * * Writing to the ARC requires that the ARC first discard the hdr's b_pabd * since the physical block is about to be rewritten. The new data contents * will be contained in the arc_buf_t. As the I/O pipeline performs the write, * it may compress the data before writing it to disk. The ARC will be called * with the transformed data and will bcopy the transformed on-disk block into * a newly allocated b_pabd. Writes are always done into buffers which have * either been loaned (and hence are new and don't have other readers) or * buffers which have been released (and hence have their own hdr, if there * were originally other readers of the buf's original hdr). This ensures that * the ARC only needs to update a single buf and its hdr after a write occurs. * * When the L2ARC is in use, it will also take advantage of the b_pabd. The * L2ARC will always write the contents of b_pabd to the L2ARC. This means * that when compressed ARC is enabled that the L2ARC blocks are identical * to the on-disk block in the main data pool. This provides a significant * advantage since the ARC can leverage the bp's checksum when reading from the * L2ARC to determine if the contents are valid. However, if the compressed * ARC is disabled, then the L2ARC's block must be transformed to look * like the physical block in the main data pool before comparing the * checksum and determining its validity. * * The L1ARC has a slightly different system for storing encrypted data. * Raw (encrypted + possibly compressed) data has a few subtle differences from * data that is just compressed. The biggest difference is that it is not * possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded. * The other difference is that encryption cannot be treated as a suggestion. * If a caller would prefer compressed data, but they actually wind up with * uncompressed data the worst thing that could happen is there might be a * performance hit. If the caller requests encrypted data, however, we must be * sure they actually get it or else secret information could be leaked. Raw * data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore, * may have both an encrypted version and a decrypted version of its data at * once. When a caller needs a raw arc_buf_t, it is allocated and the data is * copied out of this header. To avoid complications with b_pabd, raw buffers * cannot be shared. 
*/ #include <sys/spa.h> #include <sys/zio.h> #include <sys/spa_impl.h> #include <sys/zio_compress.h> #include <sys/zio_checksum.h> #include <sys/zfs_context.h> #include <sys/arc.h> #include <sys/zfs_refcount.h> #include <sys/vdev.h> #include <sys/vdev_impl.h> #include <sys/dsl_pool.h> #include <sys/multilist.h> #include <sys/abd.h> #include <sys/zil.h> #include <sys/fm/fs/zfs.h> #include <sys/callb.h> #include <sys/kstat.h> #include <sys/zthr.h> #include <zfs_fletcher.h> #include <sys/arc_impl.h> #include <sys/trace_zfs.h> #include <sys/aggsum.h> #include <sys/wmsum.h> #include <cityhash.h> #include <sys/vdev_trim.h> #include <sys/zfs_racct.h> #include <sys/zstd/zstd.h> #ifndef _KERNEL /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */ boolean_t arc_watch = B_FALSE; #endif /* * This thread's job is to keep enough free memory in the system, by * calling arc_kmem_reap_soon() plus arc_reduce_target_size(), which improves * arc_available_memory(). */ static zthr_t *arc_reap_zthr; /* * This thread's job is to keep arc_size under arc_c, by calling * arc_evict(), which improves arc_is_overflowing(). */ static zthr_t *arc_evict_zthr; static kmutex_t arc_evict_lock; static boolean_t arc_evict_needed = B_FALSE; /* * Count of bytes evicted since boot. */ static uint64_t arc_evict_count; /* * List of arc_evict_waiter_t's, representing threads waiting for the * arc_evict_count to reach specific values. */ static list_t arc_evict_waiters; /* * When arc_is_overflowing(), arc_get_data_impl() waits for this percent of * the requested amount of data to be evicted. For example, by default for * every 2KB that's evicted, 1KB of it may be "reused" by a new allocation. * Since this is above 100%, it ensures that progress is made towards getting * arc_size under arc_c. Since this is finite, it ensures that allocations * can still happen, even during the potentially long time that arc_size is * more than arc_c. */ int zfs_arc_eviction_pct = 200; /* * The number of headers to evict in arc_evict_state_impl() before * dropping the sublist lock and evicting from another sublist. A lower * value means we're more likely to evict the "correct" header (i.e. the * oldest header in the arc state), but comes with higher overhead * (i.e. more invocations of arc_evict_state_impl()). */ int zfs_arc_evict_batch_limit = 10; /* number of seconds before growing cache again */ int arc_grow_retry = 5; /* * Minimum time between calls to arc_kmem_reap_soon(). */ int arc_kmem_cache_reap_retry_ms = 1000; /* shift of arc_c for calculating overflow limit in arc_get_data_impl */ int zfs_arc_overflow_shift = 8; /* shift of arc_c for calculating both min and max arc_p */ int arc_p_min_shift = 4; /* log2(fraction of arc to reclaim) */ int arc_shrink_shift = 7; /* percent of pagecache to reclaim arc to */ #ifdef _KERNEL uint_t zfs_arc_pc_percent = 0; #endif /* * log2(fraction of ARC which must be free to allow growing). * I.e. If there is less than arc_c >> arc_no_grow_shift free memory, * when reading a new block into the ARC, we will evict an equal-sized block * from the ARC. * * This must be less than arc_shrink_shift, so that when we shrink the ARC, * we will still not allow it to grow. */ int arc_no_grow_shift = 5; /* * minimum lifespan of a prefetch block in clock ticks * (initialized in arc_init()) */ static int arc_min_prefetch_ms; static int arc_min_prescient_prefetch_ms; /* * If this percent of memory is free, don't throttle. 
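 *
 * Worked defaults (illustrative, on a 16 GiB machine): reclaim is not
 * throttled while at least 1.6 GiB (10%) is free; with
 * zfs_arc_eviction_pct = 200, a thread allocating 1 MiB while the ARC is
 * overflowing waits until 2 MiB have been evicted; arc_no_grow_shift = 5
 * permits growth only while free memory exceeds arc_c / 32; and
 * arc_shrink_shift = 7 makes each shrink request target arc_c / 128.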
*/ int arc_lotsfree_percent = 10; /* * The arc has filled available memory and has now warmed up. */ boolean_t arc_warm; /* * These tunables are for performance analysis. */ unsigned long zfs_arc_max = 0; unsigned long zfs_arc_min = 0; unsigned long zfs_arc_meta_limit = 0; unsigned long zfs_arc_meta_min = 0; unsigned long zfs_arc_dnode_limit = 0; unsigned long zfs_arc_dnode_reduce_percent = 10; int zfs_arc_grow_retry = 0; int zfs_arc_shrink_shift = 0; int zfs_arc_p_min_shift = 0; int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */ /* * ARC dirty data constraints for arc_tempreserve_space() throttle. */ unsigned long zfs_arc_dirty_limit_percent = 50; /* total dirty data limit */ unsigned long zfs_arc_anon_limit_percent = 25; /* anon block dirty limit */ unsigned long zfs_arc_pool_dirty_percent = 20; /* each pool's anon allowance */ /* * Enable or disable compressed arc buffers. */ int zfs_compressed_arc_enabled = B_TRUE; /* * ARC will evict meta buffers that exceed arc_meta_limit. This * tunable make arc_meta_limit adjustable for different workloads. */ unsigned long zfs_arc_meta_limit_percent = 75; /* * Percentage that can be consumed by dnodes of ARC meta buffers. */ unsigned long zfs_arc_dnode_limit_percent = 10; /* * These tunables are Linux specific */ unsigned long zfs_arc_sys_free = 0; int zfs_arc_min_prefetch_ms = 0; int zfs_arc_min_prescient_prefetch_ms = 0; int zfs_arc_p_dampener_disable = 1; int zfs_arc_meta_prune = 10000; int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED; int zfs_arc_meta_adjust_restarts = 4096; int zfs_arc_lotsfree_percent = 10; /* The 6 states: */ arc_state_t ARC_anon; arc_state_t ARC_mru; arc_state_t ARC_mru_ghost; arc_state_t ARC_mfu; arc_state_t ARC_mfu_ghost; arc_state_t ARC_l2c_only; arc_stats_t arc_stats = { { "hits", KSTAT_DATA_UINT64 }, { "misses", KSTAT_DATA_UINT64 }, { "demand_data_hits", KSTAT_DATA_UINT64 }, { "demand_data_misses", KSTAT_DATA_UINT64 }, { "demand_metadata_hits", KSTAT_DATA_UINT64 }, { "demand_metadata_misses", KSTAT_DATA_UINT64 }, { "prefetch_data_hits", KSTAT_DATA_UINT64 }, { "prefetch_data_misses", KSTAT_DATA_UINT64 }, { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, { "mru_hits", KSTAT_DATA_UINT64 }, { "mru_ghost_hits", KSTAT_DATA_UINT64 }, { "mfu_hits", KSTAT_DATA_UINT64 }, { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, { "deleted", KSTAT_DATA_UINT64 }, { "mutex_miss", KSTAT_DATA_UINT64 }, { "access_skip", KSTAT_DATA_UINT64 }, { "evict_skip", KSTAT_DATA_UINT64 }, { "evict_not_enough", KSTAT_DATA_UINT64 }, { "evict_l2_cached", KSTAT_DATA_UINT64 }, { "evict_l2_eligible", KSTAT_DATA_UINT64 }, { "evict_l2_eligible_mfu", KSTAT_DATA_UINT64 }, { "evict_l2_eligible_mru", KSTAT_DATA_UINT64 }, { "evict_l2_ineligible", KSTAT_DATA_UINT64 }, { "evict_l2_skip", KSTAT_DATA_UINT64 }, { "hash_elements", KSTAT_DATA_UINT64 }, { "hash_elements_max", KSTAT_DATA_UINT64 }, { "hash_collisions", KSTAT_DATA_UINT64 }, { "hash_chains", KSTAT_DATA_UINT64 }, { "hash_chain_max", KSTAT_DATA_UINT64 }, { "p", KSTAT_DATA_UINT64 }, { "c", KSTAT_DATA_UINT64 }, { "c_min", KSTAT_DATA_UINT64 }, { "c_max", KSTAT_DATA_UINT64 }, { "size", KSTAT_DATA_UINT64 }, { "compressed_size", KSTAT_DATA_UINT64 }, { "uncompressed_size", KSTAT_DATA_UINT64 }, { "overhead_size", KSTAT_DATA_UINT64 }, { "hdr_size", KSTAT_DATA_UINT64 }, { "data_size", KSTAT_DATA_UINT64 }, { "metadata_size", KSTAT_DATA_UINT64 }, { "dbuf_size", KSTAT_DATA_UINT64 }, { "dnode_size", KSTAT_DATA_UINT64 }, { "bonus_size", KSTAT_DATA_UINT64 }, #if 
defined(COMPAT_FREEBSD11) { "other_size", KSTAT_DATA_UINT64 }, #endif { "anon_size", KSTAT_DATA_UINT64 }, { "anon_evictable_data", KSTAT_DATA_UINT64 }, { "anon_evictable_metadata", KSTAT_DATA_UINT64 }, { "mru_size", KSTAT_DATA_UINT64 }, { "mru_evictable_data", KSTAT_DATA_UINT64 }, { "mru_evictable_metadata", KSTAT_DATA_UINT64 }, { "mru_ghost_size", KSTAT_DATA_UINT64 }, { "mru_ghost_evictable_data", KSTAT_DATA_UINT64 }, { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, { "mfu_size", KSTAT_DATA_UINT64 }, { "mfu_evictable_data", KSTAT_DATA_UINT64 }, { "mfu_evictable_metadata", KSTAT_DATA_UINT64 }, { "mfu_ghost_size", KSTAT_DATA_UINT64 }, { "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 }, { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, { "l2_hits", KSTAT_DATA_UINT64 }, { "l2_misses", KSTAT_DATA_UINT64 }, { "l2_prefetch_asize", KSTAT_DATA_UINT64 }, { "l2_mru_asize", KSTAT_DATA_UINT64 }, { "l2_mfu_asize", KSTAT_DATA_UINT64 }, { "l2_bufc_data_asize", KSTAT_DATA_UINT64 }, { "l2_bufc_metadata_asize", KSTAT_DATA_UINT64 }, { "l2_feeds", KSTAT_DATA_UINT64 }, { "l2_rw_clash", KSTAT_DATA_UINT64 }, { "l2_read_bytes", KSTAT_DATA_UINT64 }, { "l2_write_bytes", KSTAT_DATA_UINT64 }, { "l2_writes_sent", KSTAT_DATA_UINT64 }, { "l2_writes_done", KSTAT_DATA_UINT64 }, { "l2_writes_error", KSTAT_DATA_UINT64 }, { "l2_writes_lock_retry", KSTAT_DATA_UINT64 }, { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, { "l2_evict_reading", KSTAT_DATA_UINT64 }, { "l2_evict_l1cached", KSTAT_DATA_UINT64 }, { "l2_free_on_write", KSTAT_DATA_UINT64 }, { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, { "l2_cksum_bad", KSTAT_DATA_UINT64 }, { "l2_io_error", KSTAT_DATA_UINT64 }, { "l2_size", KSTAT_DATA_UINT64 }, { "l2_asize", KSTAT_DATA_UINT64 }, { "l2_hdr_size", KSTAT_DATA_UINT64 }, { "l2_log_blk_writes", KSTAT_DATA_UINT64 }, { "l2_log_blk_avg_asize", KSTAT_DATA_UINT64 }, { "l2_log_blk_asize", KSTAT_DATA_UINT64 }, { "l2_log_blk_count", KSTAT_DATA_UINT64 }, { "l2_data_to_meta_ratio", KSTAT_DATA_UINT64 }, { "l2_rebuild_success", KSTAT_DATA_UINT64 }, { "l2_rebuild_unsupported", KSTAT_DATA_UINT64 }, { "l2_rebuild_io_errors", KSTAT_DATA_UINT64 }, { "l2_rebuild_dh_errors", KSTAT_DATA_UINT64 }, { "l2_rebuild_cksum_lb_errors", KSTAT_DATA_UINT64 }, { "l2_rebuild_lowmem", KSTAT_DATA_UINT64 }, { "l2_rebuild_size", KSTAT_DATA_UINT64 }, { "l2_rebuild_asize", KSTAT_DATA_UINT64 }, { "l2_rebuild_bufs", KSTAT_DATA_UINT64 }, { "l2_rebuild_bufs_precached", KSTAT_DATA_UINT64 }, { "l2_rebuild_log_blks", KSTAT_DATA_UINT64 }, { "memory_throttle_count", KSTAT_DATA_UINT64 }, { "memory_direct_count", KSTAT_DATA_UINT64 }, { "memory_indirect_count", KSTAT_DATA_UINT64 }, { "memory_all_bytes", KSTAT_DATA_UINT64 }, { "memory_free_bytes", KSTAT_DATA_UINT64 }, { "memory_available_bytes", KSTAT_DATA_INT64 }, { "arc_no_grow", KSTAT_DATA_UINT64 }, { "arc_tempreserve", KSTAT_DATA_UINT64 }, { "arc_loaned_bytes", KSTAT_DATA_UINT64 }, { "arc_prune", KSTAT_DATA_UINT64 }, { "arc_meta_used", KSTAT_DATA_UINT64 }, { "arc_meta_limit", KSTAT_DATA_UINT64 }, { "arc_dnode_limit", KSTAT_DATA_UINT64 }, { "arc_meta_max", KSTAT_DATA_UINT64 }, { "arc_meta_min", KSTAT_DATA_UINT64 }, { "async_upgrade_sync", KSTAT_DATA_UINT64 }, { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 }, { "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 }, { "arc_need_free", KSTAT_DATA_UINT64 }, { "arc_sys_free", KSTAT_DATA_UINT64 }, { "arc_raw_size", KSTAT_DATA_UINT64 }, { "cached_only_in_progress", KSTAT_DATA_UINT64 }, { "abd_chunk_waste_size", KSTAT_DATA_UINT64 }, }; arc_sums_t arc_sums; #define 
ARCSTAT_MAX(stat, val) {					\
	uint64_t m;						\
	while ((val) > (m = arc_stats.stat.value.ui64) &&	\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
		continue;					\
}

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {						\
		if (cond2) {					\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {					\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}						\
	} else {						\
		if (cond2) {					\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {					\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}						\
	}

/*
 * This macro allows us to use kstats as floating averages. Each time we
 * update this kstat, we first factor it and the update value by
 * ARCSTAT_F_AVG_FACTOR to shrink the new value's contribution to the overall
 * average. This macro assumes that integer loads and stores are atomic, but
 * is not safe for multiple writers updating the kstat in parallel (only the
 * last writer's update will remain).
 */
#define	ARCSTAT_F_AVG_FACTOR	3
#define	ARCSTAT_F_AVG(stat, value) \
	do { \
		uint64_t x = ARCSTAT(stat); \
		x = x - x / ARCSTAT_F_AVG_FACTOR + \
		    (value) / ARCSTAT_F_AVG_FACTOR; \
		ARCSTAT(stat) = x; \
		_NOTE(CONSTCOND) \
	} while (0)

kstat_t			*arc_ksp;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
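 *
 * For example (illustrative, relying on ARCSTAT() expanding to the kstat's
 * value field, as its other uses in this file imply): a read of
 * arc_tempreserve below is a read of the exported kstat itself, so no
 * separate copy-out step is needed when the kstat is consumed. Similarly,
 * for ARCSTAT_F_AVG above with factor 3, updating a stat currently at 900
 * with a new value of 300 stores 900 - 900/3 + 300/3 = 700.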
*/ #define arc_tempreserve ARCSTAT(arcstat_tempreserve) #define arc_loaned_bytes ARCSTAT(arcstat_loaned_bytes) #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */ /* max size for dnodes */ #define arc_dnode_size_limit ARCSTAT(arcstat_dnode_limit) #define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */ #define arc_need_free ARCSTAT(arcstat_need_free) /* waiting to be evicted */ hrtime_t arc_growtime; list_t arc_prune_list; kmutex_t arc_prune_mtx; taskq_t *arc_prune_taskq; #define GHOST_STATE(state) \ ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ (state) == arc_l2c_only) #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE) #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR) #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH) #define HDR_PRESCIENT_PREFETCH(hdr) \ ((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) #define HDR_COMPRESSION_ENABLED(hdr) \ ((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC) #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE) #define HDR_L2_READING(hdr) \ (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \ ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)) #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING) #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED) #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD) #define HDR_PROTECTED(hdr) ((hdr)->b_flags & ARC_FLAG_PROTECTED) #define HDR_NOAUTH(hdr) ((hdr)->b_flags & ARC_FLAG_NOAUTH) #define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA) #define HDR_ISTYPE_METADATA(hdr) \ ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA) #define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr)) #define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR) #define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR) #define HDR_HAS_RABD(hdr) \ (HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) && \ (hdr)->b_crypt_hdr.b_rabd != NULL) #define HDR_ENCRYPTED(hdr) \ (HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot)) #define HDR_AUTHENTICATED(hdr) \ (HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot)) /* For storing compression mode in b_flags */ #define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1) #define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \ HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS)) #define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \ HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp)); #define ARC_BUF_LAST(buf) ((buf)->b_next == NULL) #define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED) #define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED) #define ARC_BUF_ENCRYPTED(buf) ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED) /* * Other sizes */ #define HDR_FULL_CRYPT_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) #define HDR_FULL_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr)) #define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr)) /* * Hash table routines */ #define BUF_LOCKS 2048 typedef struct buf_hash_table { uint64_t ht_mask; arc_buf_hdr_t **ht_table; kmutex_t ht_locks[BUF_LOCKS] ____cacheline_aligned; } buf_hash_table_t; static buf_hash_table_t buf_hash_table; #define BUF_HASH_INDEX(spa, dva, birth) \ (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) #define BUF_HASH_LOCK(idx) (&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) #define HDR_LOCK(hdr) \ (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, 
hdr->b_birth))) uint64_t zfs_crc64_table[256]; /* * Level 2 ARC */ #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ #define L2ARC_HEADROOM 2 /* num of writes */ /* * If we discover during ARC scan any buffers to be compressed, we boost * our headroom for the next scanning cycle by this percentage multiple. */ #define L2ARC_HEADROOM_BOOST 200 #define L2ARC_FEED_SECS 1 /* caching interval secs */ #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ /* * We can feed L2ARC from two states of ARC buffers, mru and mfu, * and each of the state has two types: data and metadata. */ #define L2ARC_FEED_TYPES 4 /* L2ARC Performance Tunables */ unsigned long l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */ unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */ unsigned long l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */ unsigned long l2arc_headroom_boost = L2ARC_HEADROOM_BOOST; unsigned long l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */ int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ int l2arc_feed_again = B_TRUE; /* turbo warmup */ int l2arc_norw = B_FALSE; /* no reads during writes */ int l2arc_meta_percent = 33; /* limit on headers size */ /* * L2ARC Internals */ static list_t L2ARC_dev_list; /* device list */ static list_t *l2arc_dev_list; /* device list pointer */ static kmutex_t l2arc_dev_mtx; /* device list mutex */ static l2arc_dev_t *l2arc_dev_last; /* last device used */ static list_t L2ARC_free_on_write; /* free after write buf list */ static list_t *l2arc_free_on_write; /* free after write list ptr */ static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ static uint64_t l2arc_ndev; /* number of devices */ typedef struct l2arc_read_callback { arc_buf_hdr_t *l2rcb_hdr; /* read header */ blkptr_t l2rcb_bp; /* original blkptr */ zbookmark_phys_t l2rcb_zb; /* original bookmark */ int l2rcb_flags; /* original flags */ abd_t *l2rcb_abd; /* temporary buffer */ } l2arc_read_callback_t; typedef struct l2arc_data_free { /* protected by l2arc_free_on_write_mtx */ abd_t *l2df_abd; size_t l2df_size; arc_buf_contents_t l2df_type; list_node_t l2df_list_node; } l2arc_data_free_t; typedef enum arc_fill_flags { ARC_FILL_LOCKED = 1 << 0, /* hdr lock is held */ ARC_FILL_COMPRESSED = 1 << 1, /* fill with compressed data */ ARC_FILL_ENCRYPTED = 1 << 2, /* fill with encrypted data */ ARC_FILL_NOAUTH = 1 << 3, /* don't attempt to authenticate */ ARC_FILL_IN_PLACE = 1 << 4 /* fill in place (special case) */ } arc_fill_flags_t; typedef enum arc_ovf_level { ARC_OVF_NONE, /* ARC within target size. */ ARC_OVF_SOME, /* ARC is slightly overflowed. */ ARC_OVF_SEVERE /* ARC is severely overflowed. 
*/ } arc_ovf_level_t; static kmutex_t l2arc_feed_thr_lock; static kcondvar_t l2arc_feed_thr_cv; static uint8_t l2arc_thread_exit; static kmutex_t l2arc_rebuild_thr_lock; static kcondvar_t l2arc_rebuild_thr_cv; enum arc_hdr_alloc_flags { ARC_HDR_ALLOC_RDATA = 0x1, ARC_HDR_DO_ADAPT = 0x2, ARC_HDR_USE_RESERVE = 0x4, }; static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *, int); static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *); static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *, int); static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *); static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *); static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag); static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t); static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int); static void arc_access(arc_buf_hdr_t *, kmutex_t *); static void arc_buf_watch(arc_buf_t *); static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *); static uint32_t arc_bufc_to_flags(arc_buf_contents_t); static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags); static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags); static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *); static void l2arc_read_done(zio_t *); static void l2arc_do_free_on_write(void); static void l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr, boolean_t state_only); #define l2arc_hdr_arcstats_increment(hdr) \ l2arc_hdr_arcstats_update((hdr), B_TRUE, B_FALSE) #define l2arc_hdr_arcstats_decrement(hdr) \ l2arc_hdr_arcstats_update((hdr), B_FALSE, B_FALSE) #define l2arc_hdr_arcstats_increment_state(hdr) \ l2arc_hdr_arcstats_update((hdr), B_TRUE, B_TRUE) #define l2arc_hdr_arcstats_decrement_state(hdr) \ l2arc_hdr_arcstats_update((hdr), B_FALSE, B_TRUE) /* * l2arc_mfuonly : A ZFS module parameter that controls whether only MFU * metadata and data are cached from ARC into L2ARC. */ int l2arc_mfuonly = 0; /* * L2ARC TRIM * l2arc_trim_ahead : A ZFS module parameter that controls how much ahead of * the current write size (l2arc_write_max) we should TRIM if we * have filled the device. It is defined as a percentage of the * write size. If set to 100 we trim twice the space required to * accommodate upcoming writes. A minimum of 64MB will be trimmed. * It also enables TRIM of the whole L2ARC device upon creation or * addition to an existing pool or if the header of the device is * invalid upon importing a pool or onlining a cache device. The * default is 0, which disables TRIM on L2ARC altogether as it can * put significant stress on the underlying storage devices. This * will vary depending of how well the specific device handles * these commands. */ unsigned long l2arc_trim_ahead = 0; /* * Performance tuning of L2ARC persistence: * * l2arc_rebuild_enabled : A ZFS module parameter that controls whether adding * an L2ARC device (either at pool import or later) will attempt * to rebuild L2ARC buffer contents. * l2arc_rebuild_blocks_min_l2size : A ZFS module parameter that controls * whether log blocks are written to the L2ARC device. If the L2ARC * device is less than 1GB, the amount of data l2arc_evict() * evicts is significant compared to the amount of restored L2ARC * data. In this case do not write log blocks in L2ARC in order * not to waste space. */ int l2arc_rebuild_enabled = B_TRUE; unsigned long l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024; /* L2ARC persistence rebuild control routines. 
*/ void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen); static void l2arc_dev_rebuild_thread(void *arg); static int l2arc_rebuild(l2arc_dev_t *dev); /* L2ARC persistence read I/O routines. */ static int l2arc_dev_hdr_read(l2arc_dev_t *dev); static int l2arc_log_blk_read(l2arc_dev_t *dev, const l2arc_log_blkptr_t *this_lp, const l2arc_log_blkptr_t *next_lp, l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb, zio_t *this_io, zio_t **next_io); static zio_t *l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lp, l2arc_log_blk_phys_t *lb); static void l2arc_log_blk_fetch_abort(zio_t *zio); /* L2ARC persistence block restoration routines. */ static void l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb, uint64_t lb_asize); static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev); /* L2ARC persistence write I/O routines. */ static void l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb); /* L2ARC persistence auxiliary routines. */ boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp); static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *ab); boolean_t l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check); static void l2arc_blk_fetch_done(zio_t *zio); static inline uint64_t l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev); /* * We use Cityhash for this. It's fast, and has good hash properties without * requiring any large static buffers. */ static uint64_t buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth) { return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth)); } #define HDR_EMPTY(hdr) \ ((hdr)->b_dva.dva_word[0] == 0 && \ (hdr)->b_dva.dva_word[1] == 0) #define HDR_EMPTY_OR_LOCKED(hdr) \ (HDR_EMPTY(hdr) || MUTEX_HELD(HDR_LOCK(hdr))) #define HDR_EQUAL(spa, dva, birth, hdr) \ ((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa) static void buf_discard_identity(arc_buf_hdr_t *hdr) { hdr->b_dva.dva_word[0] = 0; hdr->b_dva.dva_word[1] = 0; hdr->b_birth = 0; } static arc_buf_hdr_t * buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp) { const dva_t *dva = BP_IDENTITY(bp); uint64_t birth = BP_PHYSICAL_BIRTH(bp); uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); kmutex_t *hash_lock = BUF_HASH_LOCK(idx); arc_buf_hdr_t *hdr; mutex_enter(hash_lock); for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL; hdr = hdr->b_hash_next) { if (HDR_EQUAL(spa, dva, birth, hdr)) { *lockp = hash_lock; return (hdr); } } mutex_exit(hash_lock); *lockp = NULL; return (NULL); } /* * Insert an entry into the hash table. If there is already an element * equal to elem in the hash table, then the already existing element * will be returned and the new element will not be inserted. * Otherwise returns NULL. * If lockp == NULL, the caller is assumed to already hold the hash lock. 
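 *
 * Illustrative lookup-then-insert sketch (a hypothetical caller, not a real
 * call site in this file):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(spa, bp, &hash_lock);
 *	if (hdr == NULL) {
 *		// on the miss, *lockp was set to NULL and the lock dropped,
 *		// so another thread may have raced us; buf_hash_insert()
 *		// re-takes the lock and returns any hdr that got there first
 *		arc_buf_hdr_t *exists = buf_hash_insert(new_hdr, &hash_lock);
 *		// ... use exists if non-NULL, otherwise new_hdr is inserted
 *	}
 *	// the bucket lock is held here on both paths
 *	mutex_exit(hash_lock);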
*/ static arc_buf_hdr_t * buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp) { uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); kmutex_t *hash_lock = BUF_HASH_LOCK(idx); arc_buf_hdr_t *fhdr; uint32_t i; ASSERT(!DVA_IS_EMPTY(&hdr->b_dva)); ASSERT(hdr->b_birth != 0); ASSERT(!HDR_IN_HASH_TABLE(hdr)); if (lockp != NULL) { *lockp = hash_lock; mutex_enter(hash_lock); } else { ASSERT(MUTEX_HELD(hash_lock)); } for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL; fhdr = fhdr->b_hash_next, i++) { if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr)) return (fhdr); } hdr->b_hash_next = buf_hash_table.ht_table[idx]; buf_hash_table.ht_table[idx] = hdr; arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE); /* collect some hash table performance data */ if (i > 0) { ARCSTAT_BUMP(arcstat_hash_collisions); if (i == 1) ARCSTAT_BUMP(arcstat_hash_chains); ARCSTAT_MAX(arcstat_hash_chain_max, i); } uint64_t he = atomic_inc_64_nv( &arc_stats.arcstat_hash_elements.value.ui64); ARCSTAT_MAX(arcstat_hash_elements_max, he); return (NULL); } static void buf_hash_remove(arc_buf_hdr_t *hdr) { arc_buf_hdr_t *fhdr, **hdrp; uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); ASSERT(HDR_IN_HASH_TABLE(hdr)); hdrp = &buf_hash_table.ht_table[idx]; while ((fhdr = *hdrp) != hdr) { ASSERT3P(fhdr, !=, NULL); hdrp = &fhdr->b_hash_next; } *hdrp = hdr->b_hash_next; hdr->b_hash_next = NULL; arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE); /* collect some hash table performance data */ atomic_dec_64(&arc_stats.arcstat_hash_elements.value.ui64); if (buf_hash_table.ht_table[idx] && buf_hash_table.ht_table[idx]->b_hash_next == NULL) ARCSTAT_BUMPDOWN(arcstat_hash_chains); } /* * Global data structures and functions for the buf kmem cache. */ static kmem_cache_t *hdr_full_cache; static kmem_cache_t *hdr_full_crypt_cache; static kmem_cache_t *hdr_l2only_cache; static kmem_cache_t *buf_cache; static void buf_fini(void) { int i; #if defined(_KERNEL) /* * Large allocations which do not require contiguous pages * should be using vmem_free() in the linux kernel\ */ vmem_free(buf_hash_table.ht_table, (buf_hash_table.ht_mask + 1) * sizeof (void *)); #else kmem_free(buf_hash_table.ht_table, (buf_hash_table.ht_mask + 1) * sizeof (void *)); #endif for (i = 0; i < BUF_LOCKS; i++) mutex_destroy(BUF_HASH_LOCK(i)); kmem_cache_destroy(hdr_full_cache); kmem_cache_destroy(hdr_full_crypt_cache); kmem_cache_destroy(hdr_l2only_cache); kmem_cache_destroy(buf_cache); } /* * Constructor callback - called when the cache is empty * and a new buf is requested. 
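 *
 * Note (per the usual slab-allocator contract, an assumption about the
 * kmem_cache implementation rather than anything specific to this file):
 * the constructor runs when the cache creates a new backing object, not on
 * every kmem_cache_alloc(), so the mutexes, condvars, and refcounts
 * initialized below survive free/alloc cycles until the destructor runs.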
*/ /* ARGSUSED */ static int hdr_full_cons(void *vbuf, void *unused, int kmflag) { arc_buf_hdr_t *hdr = vbuf; bzero(hdr, HDR_FULL_SIZE); hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL); zfs_refcount_create(&hdr->b_l1hdr.b_refcnt); mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); list_link_init(&hdr->b_l1hdr.b_arc_node); list_link_init(&hdr->b_l2hdr.b_l2node); multilist_link_init(&hdr->b_l1hdr.b_arc_node); arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS); return (0); } /* ARGSUSED */ static int hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag) { arc_buf_hdr_t *hdr = vbuf; hdr_full_cons(vbuf, unused, kmflag); bzero(&hdr->b_crypt_hdr, sizeof (hdr->b_crypt_hdr)); arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS); return (0); } /* ARGSUSED */ static int hdr_l2only_cons(void *vbuf, void *unused, int kmflag) { arc_buf_hdr_t *hdr = vbuf; bzero(hdr, HDR_L2ONLY_SIZE); arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); return (0); } /* ARGSUSED */ static int buf_cons(void *vbuf, void *unused, int kmflag) { arc_buf_t *buf = vbuf; bzero(buf, sizeof (arc_buf_t)); mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL); arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); return (0); } /* * Destructor callback - called when a cached buf is * no longer required. */ /* ARGSUSED */ static void hdr_full_dest(void *vbuf, void *unused) { arc_buf_hdr_t *hdr = vbuf; ASSERT(HDR_EMPTY(hdr)); cv_destroy(&hdr->b_l1hdr.b_cv); zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt); mutex_destroy(&hdr->b_l1hdr.b_freeze_lock); ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS); } /* ARGSUSED */ static void hdr_full_crypt_dest(void *vbuf, void *unused) { arc_buf_hdr_t *hdr = vbuf; hdr_full_dest(vbuf, unused); arc_space_return(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS); } /* ARGSUSED */ static void hdr_l2only_dest(void *vbuf, void *unused) { arc_buf_hdr_t *hdr __maybe_unused = vbuf; ASSERT(HDR_EMPTY(hdr)); arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); } /* ARGSUSED */ static void buf_dest(void *vbuf, void *unused) { arc_buf_t *buf = vbuf; mutex_destroy(&buf->b_evict_lock); arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); } static void buf_init(void) { uint64_t *ct = NULL; uint64_t hsize = 1ULL << 12; int i, j; /* * The hash table is big enough to fill all of physical memory * with an average block size of zfs_arc_average_blocksize (default 8K). * By default, the table will take up * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers). 
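 *
 * Illustrative arithmetic: with 16 GiB of physical memory and the default
 * 8 KiB average block size, the loop below stops at hsize = 2^21
 * (2M buckets), since 2^21 * 8 KiB = 16 GiB. With 8-byte pointers the
 * table then occupies 2^21 * 8 bytes = 16 MiB, i.e. the 1 MiB per GiB
 * noted above.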
*/ while (hsize * zfs_arc_average_blocksize < arc_all_memory()) hsize <<= 1; retry: buf_hash_table.ht_mask = hsize - 1; #if defined(_KERNEL) /* * Large allocations which do not require contiguous pages * should be using vmem_alloc() in the linux kernel */ buf_hash_table.ht_table = vmem_zalloc(hsize * sizeof (void*), KM_SLEEP); #else buf_hash_table.ht_table = kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); #endif if (buf_hash_table.ht_table == NULL) { ASSERT(hsize > (1ULL << 8)); hsize >>= 1; goto retry; } hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE, 0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, 0); hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt", HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest, NULL, NULL, NULL, 0); hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only", HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, NULL, NULL, NULL, 0); buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); for (i = 0; i < 256; i++) for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); for (i = 0; i < BUF_LOCKS; i++) mutex_init(BUF_HASH_LOCK(i), NULL, MUTEX_DEFAULT, NULL); } #define ARC_MINTIME (hz>>4) /* 62 ms */ /* * This is the size that the buf occupies in memory. If the buf is compressed, * it will correspond to the compressed size. You should use this method of * getting the buf size unless you explicitly need the logical size. */ uint64_t arc_buf_size(arc_buf_t *buf) { return (ARC_BUF_COMPRESSED(buf) ? HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr)); } uint64_t arc_buf_lsize(arc_buf_t *buf) { return (HDR_GET_LSIZE(buf->b_hdr)); } /* * This function will return B_TRUE if the buffer is encrypted in memory. * This buffer can be decrypted by calling arc_untransform(). */ boolean_t arc_is_encrypted(arc_buf_t *buf) { return (ARC_BUF_ENCRYPTED(buf) != 0); } /* * Returns B_TRUE if the buffer represents data that has not had its MAC * verified yet. */ boolean_t arc_is_unauthenticated(arc_buf_t *buf) { return (HDR_NOAUTH(buf->b_hdr) != 0); } void arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt, uint8_t *iv, uint8_t *mac) { arc_buf_hdr_t *hdr = buf->b_hdr; ASSERT(HDR_PROTECTED(hdr)); bcopy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN); bcopy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN); bcopy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN); *byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ? ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER; } /* * Indicates how this buffer is compressed in memory. If it is not compressed * the value will be ZIO_COMPRESS_OFF. It can be made normally readable with * arc_untransform() as long as it is also unencrypted. */ enum zio_compress arc_get_compression(arc_buf_t *buf) { return (ARC_BUF_COMPRESSED(buf) ? HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF); } /* * Return the compression algorithm used to store this data in the ARC. If ARC * compression is enabled or this is an encrypted block, this will be the same * as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF. */ static inline enum zio_compress arc_hdr_get_compress(arc_buf_hdr_t *hdr) { return (HDR_COMPRESSION_ENABLED(hdr) ? 
HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF); } uint8_t arc_get_complevel(arc_buf_t *buf) { return (buf->b_hdr->b_complevel); } static inline boolean_t arc_buf_is_shared(arc_buf_t *buf) { boolean_t shared = (buf->b_data != NULL && buf->b_hdr->b_l1hdr.b_pabd != NULL && abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) && buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd)); IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr)); IMPLY(shared, ARC_BUF_SHARED(buf)); IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf)); /* * It would be nice to assert arc_can_share() too, but the "hdr isn't * already being shared" requirement prevents us from doing that. */ return (shared); } /* * Free the checksum associated with this header. If there is no checksum, this * is a no-op. */ static inline void arc_cksum_free(arc_buf_hdr_t *hdr) { ASSERT(HDR_HAS_L1HDR(hdr)); mutex_enter(&hdr->b_l1hdr.b_freeze_lock); if (hdr->b_l1hdr.b_freeze_cksum != NULL) { kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t)); hdr->b_l1hdr.b_freeze_cksum = NULL; } mutex_exit(&hdr->b_l1hdr.b_freeze_lock); } /* * Return true iff at least one of the bufs on hdr is not compressed. * Encrypted buffers count as compressed. */ static boolean_t arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr) { ASSERT(hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY_OR_LOCKED(hdr)); for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) { if (!ARC_BUF_COMPRESSED(b)) { return (B_TRUE); } } return (B_FALSE); } /* * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data * matches the checksum that is stored in the hdr. If there is no checksum, * or if the buf is compressed, this is a no-op. */ static void arc_cksum_verify(arc_buf_t *buf) { arc_buf_hdr_t *hdr = buf->b_hdr; zio_cksum_t zc; if (!(zfs_flags & ZFS_DEBUG_MODIFY)) return; if (ARC_BUF_COMPRESSED(buf)) return; ASSERT(HDR_HAS_L1HDR(hdr)); mutex_enter(&hdr->b_l1hdr.b_freeze_lock); if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) { mutex_exit(&hdr->b_l1hdr.b_freeze_lock); return; } fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc); if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc)) panic("buffer modified while frozen!"); mutex_exit(&hdr->b_l1hdr.b_freeze_lock); } /* * This function makes the assumption that data stored in the L2ARC * will be transformed exactly as it is in the main pool. Because of * this we can verify the checksum against the reading process's bp. */ static boolean_t arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio) { ASSERT(!BP_IS_EMBEDDED(zio->io_bp)); VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr)); /* * Block pointers always store the checksum for the logical data. * If the block pointer has the gang bit set, then the checksum * it represents is for the reconstituted data and not for an * individual gang member. The zio pipeline, however, must be able to * determine the checksum of each of the gang constituents so it * treats the checksum comparison differently than what we need * for l2arc blocks. This prevents us from using the * zio_checksum_error() interface directly. Instead we must call the * zio_checksum_error_impl() so that we can ensure the checksum is * generated using the correct checksum algorithm and accounts for the * logical I/O size and not just a gang fragment. 
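 *
 * For example (illustrative sizes): if 128 KiB of logical data was
 * reconstituted from several smaller gang members, the bp's checksum covers
 * the full 128 KiB, so an L2ARC read must be verified against the whole
 * logical buffer rather than against any individual fragment.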
*/ return (zio_checksum_error_impl(zio->io_spa, zio->io_bp, BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size, zio->io_offset, NULL) == 0); } /* * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a * checksum and attaches it to the buf's hdr so that we can ensure that the buf * isn't modified later on. If buf is compressed or there is already a checksum * on the hdr, this is a no-op (we only checksum uncompressed bufs). */ static void arc_cksum_compute(arc_buf_t *buf) { arc_buf_hdr_t *hdr = buf->b_hdr; if (!(zfs_flags & ZFS_DEBUG_MODIFY)) return; ASSERT(HDR_HAS_L1HDR(hdr)); mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); if (hdr->b_l1hdr.b_freeze_cksum != NULL || ARC_BUF_COMPRESSED(buf)) { mutex_exit(&hdr->b_l1hdr.b_freeze_lock); return; } ASSERT(!ARC_BUF_ENCRYPTED(buf)); ASSERT(!ARC_BUF_COMPRESSED(buf)); hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, hdr->b_l1hdr.b_freeze_cksum); mutex_exit(&hdr->b_l1hdr.b_freeze_lock); arc_buf_watch(buf); } #ifndef _KERNEL void arc_buf_sigsegv(int sig, siginfo_t *si, void *unused) { panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr); } #endif /* ARGSUSED */ static void arc_buf_unwatch(arc_buf_t *buf) { #ifndef _KERNEL if (arc_watch) { ASSERT0(mprotect(buf->b_data, arc_buf_size(buf), PROT_READ | PROT_WRITE)); } #endif } /* ARGSUSED */ static void arc_buf_watch(arc_buf_t *buf) { #ifndef _KERNEL if (arc_watch) ASSERT0(mprotect(buf->b_data, arc_buf_size(buf), PROT_READ)); #endif } static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *hdr) { arc_buf_contents_t type; if (HDR_ISTYPE_METADATA(hdr)) { type = ARC_BUFC_METADATA; } else { type = ARC_BUFC_DATA; } VERIFY3U(hdr->b_type, ==, type); return (type); } boolean_t arc_is_metadata(arc_buf_t *buf) { return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0); } static uint32_t arc_bufc_to_flags(arc_buf_contents_t type) { switch (type) { case ARC_BUFC_DATA: /* metadata field is 0 if buffer contains normal data */ return (0); case ARC_BUFC_METADATA: return (ARC_FLAG_BUFC_METADATA); default: break; } panic("undefined ARC buffer type!"); return ((uint32_t)-1); } void arc_buf_thaw(arc_buf_t *buf) { arc_buf_hdr_t *hdr = buf->b_hdr; ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); ASSERT(!HDR_IO_IN_PROGRESS(hdr)); arc_cksum_verify(buf); /* * Compressed buffers do not manipulate the b_freeze_cksum. */ if (ARC_BUF_COMPRESSED(buf)) return; ASSERT(HDR_HAS_L1HDR(hdr)); arc_cksum_free(hdr); arc_buf_unwatch(buf); } void arc_buf_freeze(arc_buf_t *buf) { if (!(zfs_flags & ZFS_DEBUG_MODIFY)) return; if (ARC_BUF_COMPRESSED(buf)) return; ASSERT(HDR_HAS_L1HDR(buf->b_hdr)); arc_cksum_compute(buf); } /* * The arc_buf_hdr_t's b_flags should never be modified directly. Instead, * the following functions should be used to ensure that the flags are * updated in a thread-safe way. When manipulating the flags either * the hash_lock must be held or the hdr must be undiscoverable. This * ensures that we're not racing with any other threads when updating * the flags. */ static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) { ASSERT(HDR_EMPTY_OR_LOCKED(hdr)); hdr->b_flags |= flags; } static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) { ASSERT(HDR_EMPTY_OR_LOCKED(hdr)); hdr->b_flags &= ~flags; } /* * Setting the compression bits in the arc_buf_hdr_t's b_flags is * done in a special way since we have to clear and set bits * at the same time. 
Consumers that wish to set the compression bits * must use this function to ensure that the flags are updated in * thread-safe manner. */ static void arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp) { ASSERT(HDR_EMPTY_OR_LOCKED(hdr)); /* * Holes and embedded blocks will always have a psize = 0 so * we ignore the compression of the blkptr and set the * want to uncompress them. Mark them as uncompressed. */ if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) { arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC); ASSERT(!HDR_COMPRESSION_ENABLED(hdr)); } else { arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC); ASSERT(HDR_COMPRESSION_ENABLED(hdr)); } HDR_SET_COMPRESS(hdr, cmp); ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp); } /* * Looks for another buf on the same hdr which has the data decompressed, copies * from it, and returns true. If no such buf exists, returns false. */ static boolean_t arc_buf_try_copy_decompressed_data(arc_buf_t *buf) { arc_buf_hdr_t *hdr = buf->b_hdr; boolean_t copied = B_FALSE; ASSERT(HDR_HAS_L1HDR(hdr)); ASSERT3P(buf->b_data, !=, NULL); ASSERT(!ARC_BUF_COMPRESSED(buf)); for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL; from = from->b_next) { /* can't use our own data buffer */ if (from == buf) { continue; } if (!ARC_BUF_COMPRESSED(from)) { bcopy(from->b_data, buf->b_data, arc_buf_size(buf)); copied = B_TRUE; break; } } /* * There were no decompressed bufs, so there should not be a * checksum on the hdr either. */ if (zfs_flags & ZFS_DEBUG_MODIFY) EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL); return (copied); } /* * Allocates an ARC buf header that's in an evicted & L2-cached state. * This is used during l2arc reconstruction to make empty ARC buffers * which circumvent the regular disk->arc->l2arc path and instead come * into being in the reverse order, i.e. l2arc->arc. */ static arc_buf_hdr_t * arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev, dva_t dva, uint64_t daddr, int32_t psize, uint64_t birth, enum zio_compress compress, uint8_t complevel, boolean_t protected, boolean_t prefetch, arc_state_type_t arcs_state) { arc_buf_hdr_t *hdr; ASSERT(size != 0); hdr = kmem_cache_alloc(hdr_l2only_cache, KM_SLEEP); hdr->b_birth = birth; hdr->b_type = type; hdr->b_flags = 0; arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR); HDR_SET_LSIZE(hdr, size); HDR_SET_PSIZE(hdr, psize); arc_hdr_set_compress(hdr, compress); hdr->b_complevel = complevel; if (protected) arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED); if (prefetch) arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa); hdr->b_dva = dva; hdr->b_l2hdr.b_dev = dev; hdr->b_l2hdr.b_daddr = daddr; hdr->b_l2hdr.b_arcs_state = arcs_state; return (hdr); } /* * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t. */ static uint64_t arc_hdr_size(arc_buf_hdr_t *hdr) { uint64_t size; if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF && HDR_GET_PSIZE(hdr) > 0) { size = HDR_GET_PSIZE(hdr); } else { ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0); size = HDR_GET_LSIZE(hdr); } return (size); } static int arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj) { int ret; uint64_t csize; uint64_t lsize = HDR_GET_LSIZE(hdr); uint64_t psize = HDR_GET_PSIZE(hdr); void *tmpbuf = NULL; abd_t *abd = hdr->b_l1hdr.b_pabd; ASSERT(HDR_EMPTY_OR_LOCKED(hdr)); ASSERT(HDR_AUTHENTICATED(hdr)); ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); /* * The MAC is calculated on the compressed data that is stored on disk. 
* However, if compressed arc is disabled we will only have the * decompressed data available to us now. Compress it into a temporary * abd so we can verify the MAC. The performance overhead of this will * be relatively low, since most objects in an encrypted objset will * be encrypted (instead of authenticated) anyway. */ if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) { tmpbuf = zio_buf_alloc(lsize); abd = abd_get_from_buf(tmpbuf, lsize); abd_take_ownership_of_buf(abd, B_TRUE); csize = zio_compress_data(HDR_GET_COMPRESS(hdr), hdr->b_l1hdr.b_pabd, tmpbuf, lsize, hdr->b_complevel); ASSERT3U(csize, <=, psize); abd_zero_off(abd, csize, psize - csize); } /* * Authentication is best effort. We authenticate whenever the key is * available. If we succeed we clear ARC_FLAG_NOAUTH. */ if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) { ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); ASSERT3U(lsize, ==, psize); ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd, psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); } else { ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize, hdr->b_crypt_hdr.b_mac); } if (ret == 0) arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH); else if (ret != ENOENT) goto error; if (tmpbuf != NULL) abd_free(abd); return (0); error: if (tmpbuf != NULL) abd_free(abd); return (ret); } /* * This function will take a header that only has raw encrypted data in * b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in * b_l1hdr.b_pabd. If designated in the header flags, this function will * also decompress the data. */ static int arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb) { int ret; abd_t *cabd = NULL; void *tmp = NULL; boolean_t no_crypt = B_FALSE; boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); ASSERT(HDR_EMPTY_OR_LOCKED(hdr)); ASSERT(HDR_ENCRYPTED(hdr)); arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT); ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot, B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv, hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd, &no_crypt); if (ret != 0) goto error; if (no_crypt) { abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd, HDR_GET_PSIZE(hdr)); } /* * If this header has disabled arc compression but the b_pabd is * compressed after decrypting it, we need to decompress the newly * decrypted data. */ if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) { /* * We want to make sure that we are correctly honoring the * zfs_abd_scatter_enabled setting, so we allocate an abd here * and then loan a buffer from it, rather than allocating a * linear buffer and wrapping it in an abd later. */ cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, ARC_HDR_DO_ADAPT); tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr)); ret = zio_decompress_data(HDR_GET_COMPRESS(hdr), hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr), &hdr->b_complevel); if (ret != 0) { abd_return_buf(cabd, tmp, arc_hdr_size(hdr)); goto error; } abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr)); arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, arc_hdr_size(hdr), hdr); hdr->b_l1hdr.b_pabd = cabd; } return (0); error: arc_hdr_free_abd(hdr, B_FALSE); if (cabd != NULL) arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr); return (ret); } /* * This function is called during arc_buf_fill() to prepare the header's * abd plaintext pointer for use. 
This involves authenticated protected * data and decrypting encrypted data into the plaintext abd. */ static int arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa, const zbookmark_phys_t *zb, boolean_t noauth) { int ret; ASSERT(HDR_PROTECTED(hdr)); if (hash_lock != NULL) mutex_enter(hash_lock); if (HDR_NOAUTH(hdr) && !noauth) { /* * The caller requested authenticated data but our data has * not been authenticated yet. Verify the MAC now if we can. */ ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset); if (ret != 0) goto error; } else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) { /* * If we only have the encrypted version of the data, but the * unencrypted version was requested we take this opportunity * to store the decrypted version in the header for future use. */ ret = arc_hdr_decrypt(hdr, spa, zb); if (ret != 0) goto error; } ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); if (hash_lock != NULL) mutex_exit(hash_lock); return (0); error: if (hash_lock != NULL) mutex_exit(hash_lock); return (ret); } /* * This function is used by the dbuf code to decrypt bonus buffers in place. * The dbuf code itself doesn't have any locking for decrypting a shared dnode * block, so we use the hash lock here to protect against concurrent calls to * arc_buf_fill(). */ static void arc_buf_untransform_in_place(arc_buf_t *buf, kmutex_t *hash_lock) { arc_buf_hdr_t *hdr = buf->b_hdr; ASSERT(HDR_ENCRYPTED(hdr)); ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE); ASSERT(HDR_EMPTY_OR_LOCKED(hdr)); ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data, arc_buf_size(buf)); buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED; buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; hdr->b_crypt_hdr.b_ebufcnt -= 1; } /* * Given a buf that has a data buffer attached to it, this function will * efficiently fill the buf with data of the specified compression setting from * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr * are already sharing a data buf, no copy is performed. * * If the buf is marked as compressed but uncompressed data was requested, this * will allocate a new data buffer for the buf, remove that flag, and fill the * buf with uncompressed data. You can't request a compressed buf on a hdr with * uncompressed data, and (since we haven't added support for it yet) if you * want compressed data your buf must already be marked as compressed and have * the correct-sized data buffer. */ static int arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb, arc_fill_flags_t flags) { int error = 0; arc_buf_hdr_t *hdr = buf->b_hdr; boolean_t hdr_compressed = (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF); boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0; boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0; dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap; kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr); ASSERT3P(buf->b_data, !=, NULL); IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf)); IMPLY(compressed, ARC_BUF_COMPRESSED(buf)); IMPLY(encrypted, HDR_ENCRYPTED(hdr)); IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf)); IMPLY(encrypted, ARC_BUF_COMPRESSED(buf)); IMPLY(encrypted, !ARC_BUF_SHARED(buf)); /* * If the caller wanted encrypted data we just need to copy it from * b_rabd and potentially byteswap it. We won't be able to do any * further transforms on it. 
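 *
 * As a sketch, the fill cases handled below are:
 *	encrypted requested	-> copy b_rabd into b_data, then byteswap
 *	compression matches	-> copy b_pabd (unless already shared)
 *	hdr compressed but
 *	uncompressed requested	-> unshare/reallocate b_data, then copy from
 *				   another uncompressed buf or decompress
 *				   b_pabd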
*/ if (encrypted) { ASSERT(HDR_HAS_RABD(hdr)); abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd, HDR_GET_PSIZE(hdr)); goto byteswap; } /* * Adjust encrypted and authenticated headers to accommodate * the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are * allowed to fail decryption due to keys not being loaded * without being marked as an IO error. */ if (HDR_PROTECTED(hdr)) { error = arc_fill_hdr_crypt(hdr, hash_lock, spa, zb, !!(flags & ARC_FILL_NOAUTH)); if (error == EACCES && (flags & ARC_FILL_IN_PLACE) != 0) { return (error); } else if (error != 0) { if (hash_lock != NULL) mutex_enter(hash_lock); arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR); if (hash_lock != NULL) mutex_exit(hash_lock); return (error); } } /* * There is a special case here for dnode blocks which are * decrypting their bonus buffers. These blocks may request to * be decrypted in-place. This is necessary because there may * be many dnodes pointing into this buffer and there is * currently no method to synchronize replacing the backing * b_data buffer and updating all of the pointers. Here we use * the hash lock to ensure there are no races. If the need * arises for other types to be decrypted in-place, they must * add handling here as well. */ if ((flags & ARC_FILL_IN_PLACE) != 0) { ASSERT(!hdr_compressed); ASSERT(!compressed); ASSERT(!encrypted); if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) { ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE); if (hash_lock != NULL) mutex_enter(hash_lock); arc_buf_untransform_in_place(buf, hash_lock); if (hash_lock != NULL) mutex_exit(hash_lock); /* Compute the hdr's checksum if necessary */ arc_cksum_compute(buf); } return (0); } if (hdr_compressed == compressed) { if (!arc_buf_is_shared(buf)) { abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd, arc_buf_size(buf)); } } else { ASSERT(hdr_compressed); ASSERT(!compressed); /* * If the buf is sharing its data with the hdr, unlink it and * allocate a new data buffer for the buf. */ if (arc_buf_is_shared(buf)) { ASSERT(ARC_BUF_COMPRESSED(buf)); /* We need to give the buf its own b_data */ buf->b_flags &= ~ARC_BUF_FLAG_SHARED; buf->b_data = arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); /* Previously overhead was 0; just add new overhead */ ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr)); } else if (ARC_BUF_COMPRESSED(buf)) { /* We need to reallocate the buf's b_data */ arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr), buf); buf->b_data = arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); /* We increased the size of b_data; update overhead */ ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr)); } /* * Regardless of the buf's previous compression settings, it * should not be compressed at the end of this function. */ buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; /* * Try copying the data from another buf which already has a * decompressed version. If that's not possible, it's time to * bite the bullet and decompress the data from the hdr. */ if (arc_buf_try_copy_decompressed_data(buf)) { /* Skip byteswapping and checksumming (already done) */ return (0); } else { error = zio_decompress_data(HDR_GET_COMPRESS(hdr), hdr->b_l1hdr.b_pabd, buf->b_data, HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr), &hdr->b_complevel); /* * Absent hardware errors or software bugs, this should * be impossible, but log it anyway so we can debug it. 
*/ if (error != 0) { zfs_dbgmsg( "hdr %px, compress %d, psize %d, lsize %d", hdr, arc_hdr_get_compress(hdr), HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr)); if (hash_lock != NULL) mutex_enter(hash_lock); arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR); if (hash_lock != NULL) mutex_exit(hash_lock); return (SET_ERROR(EIO)); } } } byteswap: /* Byteswap the buf's data if necessary */ if (bswap != DMU_BSWAP_NUMFUNCS) { ASSERT(!HDR_SHARED_DATA(hdr)); ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS); dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr)); } /* Compute the hdr's checksum if necessary */ arc_cksum_compute(buf); return (0); } /* * If this function is being called to decrypt an encrypted buffer or verify an * authenticated one, the key must be loaded and a mapping must be made * available in the keystore via spa_keystore_create_mapping() or one of its * callers. */ int arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb, boolean_t in_place) { int ret; arc_fill_flags_t flags = 0; if (in_place) flags |= ARC_FILL_IN_PLACE; ret = arc_buf_fill(buf, spa, zb, flags); if (ret == ECKSUM) { /* * Convert authentication and decryption errors to EIO * (and generate an ereport) before leaving the ARC. */ ret = SET_ERROR(EIO); spa_log_error(spa, zb); (void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION, spa, NULL, zb, NULL, 0); } return (ret); } /* * Increment the amount of evictable space in the arc_state_t's refcount. * We account for the space used by the hdr and the arc buf individually * so that we can add and remove them from the refcount individually. */ static void arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state) { arc_buf_contents_t type = arc_buf_type(hdr); ASSERT(HDR_HAS_L1HDR(hdr)); if (GHOST_STATE(state)) { ASSERT0(hdr->b_l1hdr.b_bufcnt); ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); ASSERT(!HDR_HAS_RABD(hdr)); (void) zfs_refcount_add_many(&state->arcs_esize[type], HDR_GET_LSIZE(hdr), hdr); return; } if (hdr->b_l1hdr.b_pabd != NULL) { (void) zfs_refcount_add_many(&state->arcs_esize[type], arc_hdr_size(hdr), hdr); } if (HDR_HAS_RABD(hdr)) { (void) zfs_refcount_add_many(&state->arcs_esize[type], HDR_GET_PSIZE(hdr), hdr); } for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) { if (arc_buf_is_shared(buf)) continue; (void) zfs_refcount_add_many(&state->arcs_esize[type], arc_buf_size(buf), buf); } } /* * Decrement the amount of evictable space in the arc_state_t's refcount. * We account for the space used by the hdr and the arc buf individually * so that we can add and remove them from the refcount individually. 
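 *
 * Illustrative example: a non-ghost hdr with a 32 KiB compressed b_pabd,
 * a shared compressed buf, and a private uncompressed 128 KiB buf
 * contributes 32 KiB + 128 KiB of evictable space; the shared buf is
 * skipped because its memory is the hdr's b_pabd.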
 */
static void
arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
{
	arc_buf_contents_t type = arc_buf_type(hdr);

	ASSERT(HDR_HAS_L1HDR(hdr));

	if (GHOST_STATE(state)) {
		ASSERT0(hdr->b_l1hdr.b_bufcnt);
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		ASSERT(!HDR_HAS_RABD(hdr));
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		    HDR_GET_LSIZE(hdr), hdr);
		return;
	}

	if (hdr->b_l1hdr.b_pabd != NULL) {
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		    arc_hdr_size(hdr), hdr);
	}
	if (HDR_HAS_RABD(hdr)) {
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		    HDR_GET_PSIZE(hdr), hdr);
	}

	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
	    buf = buf->b_next) {
		if (arc_buf_is_shared(buf))
			continue;
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		    arc_buf_size(buf), buf);
	}
}

/*
 * Add a reference to this hdr indicating that someone is actively
 * referencing that memory. When the refcount transitions from 0 to 1,
 * we remove it from the respective arc_state_t list to indicate that
 * it is not evictable.
 */
static void
add_reference(arc_buf_hdr_t *hdr, void *tag)
{
	arc_state_t *state;

	ASSERT(HDR_HAS_L1HDR(hdr));
	if (!HDR_EMPTY(hdr) && !MUTEX_HELD(HDR_LOCK(hdr))) {
		ASSERT(hdr->b_l1hdr.b_state == arc_anon);
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
	}

	state = hdr->b_l1hdr.b_state;

	if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
	    (state != arc_anon)) {
		/* We don't use the L2-only state list. */
		if (state != arc_l2c_only) {
			multilist_remove(&state->arcs_list[arc_buf_type(hdr)],
			    hdr);
			arc_evictable_space_decrement(hdr, state);
		}
		/* remove the prefetch flag if we get a reference */
		if (HDR_HAS_L2HDR(hdr))
			l2arc_hdr_arcstats_decrement_state(hdr);
		arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
		if (HDR_HAS_L2HDR(hdr))
			l2arc_hdr_arcstats_increment_state(hdr);
	}
}

/*
 * Remove a reference from this hdr. When the reference transitions from
 * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
 * list making it eligible for eviction.
 */
static int
remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = hdr->b_l1hdr.b_state;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	/*
	 * arc_l2c_only counts as a ghost state so we don't need to explicitly
	 * check to prevent usage of the arc_l2c_only list.
	 */
	if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
		ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
		arc_evictable_space_increment(hdr, state);
	}
	return (cnt);
}

/*
 * Returns detailed information about a specific arc buffer. When the
 * state_index argument is set the function will calculate the arc header
 * list position for its arc state. Since this requires a linear traversal
 * callers are strongly encouraged not to do this. However, it can be helpful
 * for targeted analysis so the functionality is provided.
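 *
 * A minimal illustrative call (the names "ab" and "abi" are placeholders
 * for a caller's buffer pointer and info struct):
 *
 *	arc_buf_info_t abi;
 *	arc_buf_info(ab, &abi, 0);	(state_index of 0 requests no
 *	    list-position calculation)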
 */
void
arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
{
	arc_buf_hdr_t *hdr = ab->b_hdr;
	l1arc_buf_hdr_t *l1hdr = NULL;
	l2arc_buf_hdr_t *l2hdr = NULL;
	arc_state_t *state = NULL;

	memset(abi, 0, sizeof (arc_buf_info_t));

	if (hdr == NULL)
		return;

	abi->abi_flags = hdr->b_flags;

	if (HDR_HAS_L1HDR(hdr)) {
		l1hdr = &hdr->b_l1hdr;
		state = l1hdr->b_state;
	}
	if (HDR_HAS_L2HDR(hdr))
		l2hdr = &hdr->b_l2hdr;

	if (l1hdr) {
		abi->abi_bufcnt = l1hdr->b_bufcnt;
		abi->abi_access = l1hdr->b_arc_access;
		abi->abi_mru_hits = l1hdr->b_mru_hits;
		abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
		abi->abi_mfu_hits = l1hdr->b_mfu_hits;
		abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
		abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
	}

	if (l2hdr) {
		abi->abi_l2arc_dattr = l2hdr->b_daddr;
		abi->abi_l2arc_hits = l2hdr->b_hits;
	}

	abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
	abi->abi_state_contents = arc_buf_type(hdr);
	abi->abi_size = arc_hdr_size(hdr);
}

/*
 * Move the supplied buffer to the indicated state. The hash lock
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
    kmutex_t *hash_lock)
{
	arc_state_t *old_state;
	int64_t refcnt;
	uint32_t bufcnt;
	boolean_t update_old, update_new;
	arc_buf_contents_t buftype = arc_buf_type(hdr);

	/*
	 * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
	 * in arc_read() when bringing a buffer out of the L2ARC. However, the
	 * L1 hdr doesn't always exist when we change state to arc_anon before
	 * destroying a header, in which case reallocating to add the L1 hdr is
	 * pointless.
	 */
	if (HDR_HAS_L1HDR(hdr)) {
		old_state = hdr->b_l1hdr.b_state;
		refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
		bufcnt = hdr->b_l1hdr.b_bufcnt;
		update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL ||
		    HDR_HAS_RABD(hdr));
	} else {
		old_state = arc_l2c_only;
		refcnt = 0;
		bufcnt = 0;
		update_old = B_FALSE;
	}
	update_new = update_old;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT3P(new_state, !=, old_state);
	ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
	ASSERT(old_state != arc_anon || bufcnt <= 1);

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon && old_state != arc_l2c_only) {
			ASSERT(HDR_HAS_L1HDR(hdr));
			multilist_remove(&old_state->arcs_list[buftype], hdr);

			if (GHOST_STATE(old_state)) {
				ASSERT0(bufcnt);
				ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
				update_old = B_TRUE;
			}
			arc_evictable_space_decrement(hdr, old_state);
		}
		if (new_state != arc_anon && new_state != arc_l2c_only) {
			/*
			 * An L1 header always exists here, since if we're
			 * moving to some L1-cached state (i.e. not l2c_only or
			 * anonymous), we realloc the header to add an L1hdr
			 * beforehand.
			 */
			ASSERT(HDR_HAS_L1HDR(hdr));
			multilist_insert(&new_state->arcs_list[buftype], hdr);

			if (GHOST_STATE(new_state)) {
				ASSERT0(bufcnt);
				ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
				update_new = B_TRUE;
			}
			arc_evictable_space_increment(hdr, new_state);
		}
	}

	ASSERT(!HDR_EMPTY(hdr));
	if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
		buf_hash_remove(hdr);

	/* adjust state sizes (ignore arc_l2c_only) */

	if (update_new && new_state != arc_l2c_only) {
		ASSERT(HDR_HAS_L1HDR(hdr));
		if (GHOST_STATE(new_state)) {
			ASSERT0(bufcnt);

			/*
			 * When moving a header to a ghost state, we first
			 * remove all arc buffers. Thus, we'll have a
			 * bufcnt of zero, and no arc buffer to use for
			 * the reference. As a result, we use the arc
			 * header pointer for the reference.
			 */
			(void) zfs_refcount_add_many(&new_state->arcs_size,
			    HDR_GET_LSIZE(hdr), hdr);
			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
			ASSERT(!HDR_HAS_RABD(hdr));
		} else {
			uint32_t buffers = 0;

			/*
			 * Each individual buffer holds a unique reference,
			 * thus we must remove each of these references one
			 * at a time.
			 */
			for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
			    buf = buf->b_next) {
				ASSERT3U(bufcnt, !=, 0);
				buffers++;

				/*
				 * When the arc_buf_t is sharing the data
				 * block with the hdr, the owner of the
				 * reference belongs to the hdr. Only
				 * add to the refcount if the arc_buf_t is
				 * not shared.
				 */
				if (arc_buf_is_shared(buf))
					continue;

				(void) zfs_refcount_add_many(
				    &new_state->arcs_size,
				    arc_buf_size(buf), buf);
			}
			ASSERT3U(bufcnt, ==, buffers);

			if (hdr->b_l1hdr.b_pabd != NULL) {
				(void) zfs_refcount_add_many(
				    &new_state->arcs_size,
				    arc_hdr_size(hdr), hdr);
			}

			if (HDR_HAS_RABD(hdr)) {
				(void) zfs_refcount_add_many(
				    &new_state->arcs_size,
				    HDR_GET_PSIZE(hdr), hdr);
			}
		}
	}

	if (update_old && old_state != arc_l2c_only) {
		ASSERT(HDR_HAS_L1HDR(hdr));
		if (GHOST_STATE(old_state)) {
			ASSERT0(bufcnt);
			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
			ASSERT(!HDR_HAS_RABD(hdr));

			/*
			 * When moving a header off of a ghost state,
			 * the header will not contain any arc buffers.
			 * We use the arc header pointer for the reference
			 * which is exactly what we did when we put the
			 * header on the ghost state.
			 */
			(void) zfs_refcount_remove_many(&old_state->arcs_size,
			    HDR_GET_LSIZE(hdr), hdr);
		} else {
			uint32_t buffers = 0;

			/*
			 * Each individual buffer holds a unique reference,
			 * thus we must remove each of these references one
			 * at a time.
			 */
			for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
			    buf = buf->b_next) {
				ASSERT3U(bufcnt, !=, 0);
				buffers++;

				/*
				 * When the arc_buf_t is sharing the data
				 * block with the hdr, the owner of the
				 * reference belongs to the hdr. Only
				 * add to the refcount if the arc_buf_t is
				 * not shared.
				 */
				if (arc_buf_is_shared(buf))
					continue;

				(void) zfs_refcount_remove_many(
				    &old_state->arcs_size,
				    arc_buf_size(buf), buf);
			}
			ASSERT3U(bufcnt, ==, buffers);
			ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
			    HDR_HAS_RABD(hdr));

			if (hdr->b_l1hdr.b_pabd != NULL) {
				(void) zfs_refcount_remove_many(
				    &old_state->arcs_size,
				    arc_hdr_size(hdr), hdr);
			}

			if (HDR_HAS_RABD(hdr)) {
				(void) zfs_refcount_remove_many(
				    &old_state->arcs_size,
				    HDR_GET_PSIZE(hdr), hdr);
			}
		}
	}

	if (HDR_HAS_L1HDR(hdr)) {
		hdr->b_l1hdr.b_state = new_state;

		if (HDR_HAS_L2HDR(hdr) && new_state != arc_l2c_only) {
			l2arc_hdr_arcstats_decrement_state(hdr);
			hdr->b_l2hdr.b_arcs_state = new_state->arcs_state;
			l2arc_hdr_arcstats_increment_state(hdr);
		}
	}
}

void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	default:
		break;
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_META:
		ARCSTAT_INCR(arcstat_metadata_size, space);
		break;
	case ARC_SPACE_BONUS:
		ARCSTAT_INCR(arcstat_bonus_size, space);
		break;
	case ARC_SPACE_DNODE:
		aggsum_add(&arc_sums.arcstat_dnode_size, space);
		break;
	case ARC_SPACE_DBUF:
		ARCSTAT_INCR(arcstat_dbuf_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		aggsum_add(&arc_sums.arcstat_l2_hdr_size, space);
		break;
	case ARC_SPACE_ABD_CHUNK_WASTE:
		/*
		 * Note: this includes space wasted by all scatter ABD's, not
		 * just those allocated by the ARC. But the vast majority of
		 * scatter ABD's come from the ARC, because other users are
		 * very short-lived.
		 */
		ARCSTAT_INCR(arcstat_abd_chunk_waste_size, space);
		break;
	}

	if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
		aggsum_add(&arc_sums.arcstat_meta_used, space);

	aggsum_add(&arc_sums.arcstat_size, space);
}

void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	default:
		break;
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_META:
		ARCSTAT_INCR(arcstat_metadata_size, -space);
		break;
	case ARC_SPACE_BONUS:
		ARCSTAT_INCR(arcstat_bonus_size, -space);
		break;
	case ARC_SPACE_DNODE:
		aggsum_add(&arc_sums.arcstat_dnode_size, -space);
		break;
	case ARC_SPACE_DBUF:
		ARCSTAT_INCR(arcstat_dbuf_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
		break;
	case ARC_SPACE_ABD_CHUNK_WASTE:
		ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);
		break;
	}

	if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE) {
		ASSERT(aggsum_compare(&arc_sums.arcstat_meta_used,
		    space) >= 0);
		ARCSTAT_MAX(arcstat_meta_max,
		    aggsum_upper_bound(&arc_sums.arcstat_meta_used));
		aggsum_add(&arc_sums.arcstat_meta_used, -space);
	}

	ASSERT(aggsum_compare(&arc_sums.arcstat_size, space) >= 0);
	aggsum_add(&arc_sums.arcstat_size, -space);
}

/*
 * Given a hdr and a buf, returns whether that buf can share its b_data buffer
 * with the hdr's b_pabd.
 */
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	/*
	 * The criteria for sharing a hdr's data are:
	 * 1. the buffer is not encrypted
	 * 2. the hdr's compression matches the buf's compression
	 * 3. the hdr doesn't need to be byteswapped
	 * 4. the hdr isn't already being shared
	 * 5. the buf is either compressed or it is the last buf in the hdr list
	 *
	 * Criterion #5 maintains the invariant that shared uncompressed
	 * bufs must be the final buf in the hdr's b_buf list. Reading this, you
	 * might ask, "if a compressed buf is allocated first, won't that be the
	 * last thing in the list?", but in that case it's impossible to create
	 * a shared uncompressed buf anyway (because the hdr must be compressed
	 * to have the compressed buf). You might also think that #3 is
	 * sufficient to make this guarantee, however it's possible
	 * (specifically in the rare L2ARC write race mentioned in
	 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
	 * is shareable, but wasn't at the time of its allocation. Rather than
	 * allow a new shared uncompressed buf to be created and then shuffle
	 * the list around to make it the last element, this simply disallows
	 * sharing if the new buf isn't the first to be added.
	 */
	ASSERT3P(buf->b_hdr, ==, hdr);
	boolean_t hdr_compressed =
	    arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
	boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
	return (!ARC_BUF_ENCRYPTED(buf) &&
	    buf_compressed == hdr_compressed &&
	    hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
	    !HDR_SHARED_DATA(hdr) &&
	    (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}

/*
 * Allocate a buf for this hdr. If you care about the data that's in the hdr,
 * or if you want a compressed buffer, pass those flags in. Returns 0 if the
 * copy was made successfully, or an error code otherwise.
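 *
 * For typical invocations see arc_alloc_buf() and arc_alloc_compressed_buf()
 * below, both of which pass fill = B_FALSE and VERIFY that the allocation
 * succeeds.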
 */
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
    void *tag, boolean_t encrypted, boolean_t compressed, boolean_t noauth,
    boolean_t fill, arc_buf_t **ret)
{
	arc_buf_t *buf;
	arc_fill_flags_t flags = ARC_FILL_LOCKED;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
	VERIFY(hdr->b_type == ARC_BUFC_DATA ||
	    hdr->b_type == ARC_BUFC_METADATA);
	ASSERT3P(ret, !=, NULL);
	ASSERT3P(*ret, ==, NULL);
	IMPLY(encrypted, compressed);

	buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_next = hdr->b_l1hdr.b_buf;
	buf->b_flags = 0;

	add_reference(hdr, tag);

	/*
	 * We're about to change the hdr's b_flags. We must either
	 * hold the hash_lock or be undiscoverable.
	 */
	ASSERT(HDR_EMPTY_OR_LOCKED(hdr));

	/*
	 * Only honor requests for compressed bufs if the hdr is actually
	 * compressed. This must be overridden if the buffer is encrypted since
	 * encrypted buffers cannot be decompressed.
	 */
	if (encrypted) {
		buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
		buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
		flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
	} else if (compressed &&
	    arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
		buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
		flags |= ARC_FILL_COMPRESSED;
	}

	if (noauth) {
		ASSERT0(encrypted);
		flags |= ARC_FILL_NOAUTH;
	}

	/*
	 * If the hdr's data can be shared then we share the data buffer and
	 * set the appropriate bit in the hdr's b_flags to indicate the hdr is
	 * sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
	 * buffer to store the buf's data.
	 *
	 * There are two additional restrictions here because we're sharing
	 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
	 * actively involved in an L2ARC write, because if this buf is used by
	 * an arc_write() then the hdr's data buffer will be released when the
	 * write completes, even though the L2ARC write might still be using it.
	 * Second, the hdr's ABD must be linear so that the buf's user doesn't
	 * need to be ABD-aware. It must be allocated via
	 * zio_[data_]buf_alloc(), not as a page, because we need to be able
	 * to abd_release_ownership_of_buf(), which isn't allowed on "linear
	 * page" buffers because the ABD code needs to handle freeing them
	 * specially.
	 */
	boolean_t can_share = arc_can_share(hdr, buf) &&
	    !HDR_L2_WRITING(hdr) &&
	    hdr->b_l1hdr.b_pabd != NULL &&
	    abd_is_linear(hdr->b_l1hdr.b_pabd) &&
	    !abd_is_linear_page(hdr->b_l1hdr.b_pabd);

	/* Set up b_data and sharing */
	if (can_share) {
		buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
		buf->b_flags |= ARC_BUF_FLAG_SHARED;
		arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
	} else {
		buf->b_data =
		    arc_get_data_buf(hdr, arc_buf_size(buf), buf);
		ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
	}
	VERIFY3P(buf->b_data, !=, NULL);

	hdr->b_l1hdr.b_buf = buf;
	hdr->b_l1hdr.b_bufcnt += 1;
	if (encrypted)
		hdr->b_crypt_hdr.b_ebufcnt += 1;

	/*
	 * If the user wants the data from the hdr, we need to either copy or
	 * decompress the data.
	 */
	if (fill) {
		ASSERT3P(zb, !=, NULL);
		return (arc_buf_fill(buf, spa, zb, flags));
	}

	return (0);
}

static char *arc_onloan_tag = "onloan";

static inline void
arc_loaned_bytes_update(int64_t delta)
{
	atomic_add_64(&arc_loaned_bytes, delta);

	/* assert that it did not wrap around */
	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}

/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned".
 * Loaned buffers must be returned to the arc before they can be used by the
 * DMU or freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
	arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
	    is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);

	arc_loaned_bytes_update(arc_buf_size(buf));

	return (buf);
}

arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
    enum zio_compress compression_type, uint8_t complevel)
{
	arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
	    psize, lsize, compression_type, complevel);

	arc_loaned_bytes_update(arc_buf_size(buf));

	return (buf);
}

arc_buf_t *
arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
    const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
    dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
    enum zio_compress compression_type, uint8_t complevel)
{
	arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
	    byteorder, salt, iv, mac, ot, psize, lsize, compression_type,
	    complevel);

	atomic_add_64(&arc_loaned_bytes, psize);
	return (buf);
}

/*
 * Return a loaned arc buffer to the arc.
 */
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(HDR_HAS_L1HDR(hdr));
	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
	(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);

	arc_loaned_bytes_update(-arc_buf_size(buf));
}

/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(HDR_HAS_L1HDR(hdr));
	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
	(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);

	arc_loaned_bytes_update(arc_buf_size(buf));
}

static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
	l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);

	df->l2df_abd = abd;
	df->l2df_size = size;
	df->l2df_type = type;
	mutex_enter(&l2arc_free_on_write_mtx);
	list_insert_head(l2arc_free_on_write, df);
	mutex_exit(&l2arc_free_on_write_mtx);
}

static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
	arc_state_t *state = hdr->b_l1hdr.b_state;
	arc_buf_contents_t type = arc_buf_type(hdr);
	uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);

	/* protected by hash lock, if in the hash table */
	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT(state != arc_anon && state != arc_l2c_only);

		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		    size, hdr);
	}
	(void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
	if (type == ARC_BUFC_METADATA) {
		arc_space_return(size, ARC_SPACE_META);
	} else {
		ASSERT(type == ARC_BUFC_DATA);
		arc_space_return(size, ARC_SPACE_DATA);
	}

	if (free_rdata) {
		l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
	} else {
		l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
	}
}

/*
 * Share the arc_buf_t's data with the hdr. Whenever we are sharing the
 * data buffer, we transfer the refcount ownership to the hdr and update
 * the appropriate kstats.
 */
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	ASSERT(arc_can_share(hdr, buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	ASSERT(!ARC_BUF_ENCRYPTED(buf));
	ASSERT(HDR_EMPTY_OR_LOCKED(hdr));

	/*
	 * Start sharing the data buffer. We transfer the
	 * refcount ownership to the hdr since it always owns
	 * the refcount whenever an arc_buf_t is shared.
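	 * The inverse transfer is performed by arc_unshare_buf() below.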
	 */
	zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size,
	    arc_hdr_size(hdr), buf, hdr);
	hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
	abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
	    HDR_ISTYPE_METADATA(hdr));
	arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
	buf->b_flags |= ARC_BUF_FLAG_SHARED;

	/*
	 * Since we've transferred ownership to the hdr we need
	 * to increment its compressed and uncompressed kstats and
	 * decrement the overhead size.
	 */
	ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
	ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}

static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	ASSERT(arc_buf_is_shared(buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
	ASSERT(HDR_EMPTY_OR_LOCKED(hdr));

	/*
	 * We are no longer sharing this buffer so we need
	 * to transfer its ownership to the rightful owner.
	 */
	zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size,
	    arc_hdr_size(hdr), hdr, buf);
	arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
	abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
	abd_free(hdr->b_l1hdr.b_pabd);
	hdr->b_l1hdr.b_pabd = NULL;
	buf->b_flags &= ~ARC_BUF_FLAG_SHARED;

	/*
	 * Since the buffer is no longer shared between
	 * the arc buf and the hdr, count it as overhead.
	 */
	ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
	ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}

/*
 * Remove an arc_buf_t from the hdr's buf list and return the last
 * arc_buf_t on the list. If no buffers remain on the list then return
 * NULL.
 */
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(HDR_EMPTY_OR_LOCKED(hdr));

	arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
	arc_buf_t *lastbuf = NULL;

	/*
	 * Remove the buf from the hdr list and locate the last
	 * remaining buffer on the list.
	 */
	while (*bufp != NULL) {
		if (*bufp == buf)
			*bufp = buf->b_next;

		/*
		 * If we've removed a buffer in the middle of
		 * the list then update the lastbuf and update
		 * bufp.
		 */
		if (*bufp != NULL) {
			lastbuf = *bufp;
			bufp = &(*bufp)->b_next;
		}
	}
	buf->b_next = NULL;
	ASSERT3P(lastbuf, !=, buf);
	IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
	IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
	IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));

	return (lastbuf);
}

/*
 * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
 * list and free it.
 */
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	/*
	 * Free up the data associated with the buf but only if we're not
	 * sharing this with the hdr. If we are sharing it with the hdr, the
	 * hdr is responsible for doing the free.
	 */
	if (buf->b_data != NULL) {
		/*
		 * We're about to change the hdr's b_flags. We must either
		 * hold the hash_lock or be undiscoverable.
		 */
		ASSERT(HDR_EMPTY_OR_LOCKED(hdr));

		arc_cksum_verify(buf);
		arc_buf_unwatch(buf);

		if (arc_buf_is_shared(buf)) {
			arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
		} else {
			uint64_t size = arc_buf_size(buf);
			arc_free_data_buf(hdr, buf->b_data, size, buf);
			ARCSTAT_INCR(arcstat_overhead_size, -size);
		}
		buf->b_data = NULL;

		ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
		hdr->b_l1hdr.b_bufcnt -= 1;

		if (ARC_BUF_ENCRYPTED(buf)) {
			hdr->b_crypt_hdr.b_ebufcnt -= 1;

			/*
			 * If we have no more encrypted buffers and we've
			 * already gotten a copy of the decrypted data we can
			 * free b_rabd to save some space.
			 */
			if (hdr->b_crypt_hdr.b_ebufcnt == 0 &&
			    HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd != NULL &&
			    !HDR_IO_IN_PROGRESS(hdr)) {
				arc_hdr_free_abd(hdr, B_TRUE);
			}
		}
	}

	arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);

	if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
		/*
		 * If the current arc_buf_t is sharing its data buffer with the
		 * hdr, then reassign the hdr's b_pabd to share it with the new
		 * buffer at the end of the list. The shared buffer is always
		 * the last one on the hdr's buffer list.
		 *
		 * There is an equivalent case for compressed bufs, but since
		 * they aren't guaranteed to be the last buf in the list and
		 * that is an exceedingly rare case, we just allow that space be
		 * wasted temporarily. We must also be careful not to share
		 * encrypted buffers, since they cannot be shared.
		 */
		if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
			/* Only one buf can be shared at once */
			VERIFY(!arc_buf_is_shared(lastbuf));
			/* hdr is uncompressed so can't have compressed buf */
			VERIFY(!ARC_BUF_COMPRESSED(lastbuf));

			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
			arc_hdr_free_abd(hdr, B_FALSE);

			/*
			 * We must setup a new shared block between the
			 * last buffer and the hdr. The data would have
			 * been allocated by the arc buf so we need to transfer
			 * ownership to the hdr since it's now being shared.
			 */
			arc_share_buf(hdr, lastbuf);
		}
	} else if (HDR_SHARED_DATA(hdr)) {
		/*
		 * Uncompressed shared buffers are always at the end
		 * of the list. Compressed buffers don't have the
		 * same requirements. This makes it hard to
		 * simply assert that the lastbuf is shared so
		 * we rely on the hdr's compression flags to determine
		 * if we have a compressed, shared buffer.
		 */
		ASSERT3P(lastbuf, !=, NULL);
		ASSERT(arc_buf_is_shared(lastbuf) ||
		    arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
	}

	/*
	 * Free the checksum if we're removing the last uncompressed buf from
	 * this hdr.
	 */
	if (!arc_hdr_has_uncompressed_buf(hdr)) {
		arc_cksum_free(hdr);
	}

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
{
	uint64_t size;
	boolean_t alloc_rdata = ((alloc_flags & ARC_HDR_ALLOC_RDATA) != 0);

	ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
	IMPLY(alloc_rdata, HDR_PROTECTED(hdr));

	if (alloc_rdata) {
		size = HDR_GET_PSIZE(hdr);
		ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
		hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
		    alloc_flags);
		ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
		ARCSTAT_INCR(arcstat_raw_size, size);
	} else {
		size = arc_hdr_size(hdr);
		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
		    alloc_flags);
		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
	}

	ARCSTAT_INCR(arcstat_compressed_size, size);
	ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}

static void
arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
	uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
	IMPLY(free_rdata, HDR_HAS_RABD(hdr));

	/*
	 * If the hdr is currently being written to the l2arc then
	 * we defer freeing the data by adding it to the l2arc_free_on_write
	 * list. The l2arc will free the data once it's finished
	 * writing it to the l2arc device.
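	 * (The deferral itself is handled by arc_hdr_free_on_write() and
	 * l2arc_free_abd_on_write() above.)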
	 */
	if (HDR_L2_WRITING(hdr)) {
		arc_hdr_free_on_write(hdr, free_rdata);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else if (free_rdata) {
		arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
	} else {
		arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
	}

	if (free_rdata) {
		hdr->b_crypt_hdr.b_rabd = NULL;
		ARCSTAT_INCR(arcstat_raw_size, -size);
	} else {
		hdr->b_l1hdr.b_pabd = NULL;
	}

	if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
		hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;

	ARCSTAT_INCR(arcstat_compressed_size, -size);
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}

/*
 * Allocate empty anonymous ARC header. The header will get its identity
 * assigned and buffers attached later as part of read or write operations.
 *
 * In case of read arc_read() assigns header its identity (b_dva + b_birth),
 * inserts it into ARC hash to become globally visible and allocates physical
 * (b_pabd) or raw (b_rabd) ABD buffer to read into from disk. On disk read
 * completion arc_read_done() allocates ARC buffer(s) as needed, potentially
 * sharing one of them with the physical ABD buffer.
 *
 * In case of write arc_alloc_buf() allocates ARC buffer to be filled with
 * data. Then after compression and/or encryption arc_write_ready() allocates
 * and fills (or potentially shares) physical (b_pabd) or raw (b_rabd) ABD
 * buffer. On disk write completion arc_write_done() assigns the header its
 * new identity (b_dva + b_birth) and inserts into ARC hash.
 *
 * In case of partial overwrite the old data is read first as described. Then
 * arc_release() either allocates new anonymous ARC header and moves the ARC
 * buffer to it, or reuses the old ARC header by discarding its identity and
 * removing it from ARC hash. After buffer modification normal write process
 * follows as described.
 */
static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
    boolean_t protected, enum zio_compress compression_type,
    uint8_t complevel, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;

	VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
	if (protected) {
		hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE);
	} else {
		hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
	}

	ASSERT(HDR_EMPTY(hdr));
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
	HDR_SET_PSIZE(hdr, psize);
	HDR_SET_LSIZE(hdr, lsize);
	hdr->b_spa = spa;
	hdr->b_type = type;
	hdr->b_flags = 0;
	arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
	arc_hdr_set_compress(hdr, compression_type);
	hdr->b_complevel = complevel;
	if (protected)
		arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);

	hdr->b_l1hdr.b_state = arc_anon;
	hdr->b_l1hdr.b_arc_access = 0;
	hdr->b_l1hdr.b_mru_hits = 0;
	hdr->b_l1hdr.b_mru_ghost_hits = 0;
	hdr->b_l1hdr.b_mfu_hits = 0;
	hdr->b_l1hdr.b_mfu_ghost_hits = 0;
	hdr->b_l1hdr.b_bufcnt = 0;
	hdr->b_l1hdr.b_buf = NULL;

	ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));

	return (hdr);
}

/*
 * Transition between the two allocation states for the arc_buf_hdr struct.
 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
 * version is used when a cache buffer is only in the L2ARC in order to reduce
 * memory usage.
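 *
 * For example, arc_evict_hdr() below drops an L1+L2 cached header to
 * L2-only with:
 *
 *	hdr = arc_hdr_realloc(hdr, hdr_full_cache, hdr_l2only_cache);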
 */
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
	ASSERT(HDR_HAS_L2HDR(hdr));

	arc_buf_hdr_t *nhdr;
	l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;

	ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
	    (old == hdr_l2only_cache && new == hdr_full_cache));

	/*
	 * if the caller wanted a new full header and the header is to be
	 * encrypted we will actually allocate the header from the full crypt
	 * cache instead. The same applies to freeing from the old cache.
	 */
	if (HDR_PROTECTED(hdr) && new == hdr_full_cache)
		new = hdr_full_crypt_cache;
	if (HDR_PROTECTED(hdr) && old == hdr_full_cache)
		old = hdr_full_crypt_cache;

	nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);

	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
	buf_hash_remove(hdr);

	bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);

	if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
		arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
		/*
		 * arc_access and arc_change_state need to be aware that a
		 * header has just come out of L2ARC, so we set its state to
		 * l2c_only even though it's about to change.
		 */
		nhdr->b_l1hdr.b_state = arc_l2c_only;

		/* Verify previous threads set to NULL before freeing */
		ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
		ASSERT(!HDR_HAS_RABD(hdr));
	} else {
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT0(hdr->b_l1hdr.b_bufcnt);
		ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

		/*
		 * If we've reached here, We must have been called from
		 * arc_evict_hdr(), as such we should have already been
		 * removed from any ghost list we were previously on
		 * (which protects us from racing with arc_evict_state),
		 * thus no locking is needed during this check.
		 */
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));

		/*
		 * A buffer must not be moved into the arc_l2c_only
		 * state if it's not finished being written out to the
		 * l2arc device. Otherwise, the b_l1hdr.b_pabd field
		 * might try to be accessed, even though it was removed.
		 */
		VERIFY(!HDR_L2_WRITING(hdr));
		VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		ASSERT(!HDR_HAS_RABD(hdr));

		arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
	}
	/*
	 * The header has been reallocated so we need to re-insert it into any
	 * lists it was on.
	 */
	(void) buf_hash_insert(nhdr, NULL);

	ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));

	mutex_enter(&dev->l2ad_mtx);

	/*
	 * We must place the realloc'ed header back into the list at
	 * the same spot. Otherwise, if it's placed earlier in the list,
	 * l2arc_write_buffers() could find it during the function's
	 * write phase, and try to write it out to the l2arc.
	 */
	list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
	list_remove(&dev->l2ad_buflist, hdr);

	mutex_exit(&dev->l2ad_mtx);

	/*
	 * Since we're using the pointer address as the tag when
	 * incrementing and decrementing the l2ad_alloc refcount, we
	 * must remove the old pointer (that we're about to destroy) and
	 * add the new pointer to the refcount. Otherwise we'd remove
	 * the wrong pointer address when calling arc_hdr_destroy() later.
	 */
	(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
	    arc_hdr_size(hdr), hdr);
	(void) zfs_refcount_add_many(&dev->l2ad_alloc,
	    arc_hdr_size(nhdr), nhdr);

	buf_discard_identity(hdr);
	kmem_cache_free(old, hdr);

	return (nhdr);
}

/*
 * This function allows an L1 header to be reallocated as a crypt
 * header and vice versa. If we are going to a crypt header, the
 * new fields will be zeroed out.
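 * (arc_convert_to_raw() below is a typical caller.)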
 */
static arc_buf_hdr_t *
arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
{
	arc_buf_hdr_t *nhdr;
	arc_buf_t *buf;
	kmem_cache_t *ncache, *ocache;
	unsigned nsize, osize;

	/*
	 * This function requires that hdr is in the arc_anon state.
	 * Therefore it won't have any L2ARC data for us to worry
	 * about copying.
	 */
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(!HDR_HAS_L2HDR(hdr));
	ASSERT3U(!!HDR_PROTECTED(hdr), !=, need_crypt);
	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
	ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
	ASSERT(!list_link_active(&hdr->b_l2hdr.b_l2node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);

	if (need_crypt) {
		ncache = hdr_full_crypt_cache;
		nsize = sizeof (hdr->b_crypt_hdr);
		ocache = hdr_full_cache;
		osize = HDR_FULL_SIZE;
	} else {
		ncache = hdr_full_cache;
		nsize = HDR_FULL_SIZE;
		ocache = hdr_full_crypt_cache;
		osize = sizeof (hdr->b_crypt_hdr);
	}

	nhdr = kmem_cache_alloc(ncache, KM_PUSHPAGE);

	/*
	 * Copy all members that aren't locks or condvars to the new header.
	 * No lists are pointing to us (as we asserted above), so we don't
	 * need to worry about the list nodes.
	 */
	nhdr->b_dva = hdr->b_dva;
	nhdr->b_birth = hdr->b_birth;
	nhdr->b_type = hdr->b_type;
	nhdr->b_flags = hdr->b_flags;
	nhdr->b_psize = hdr->b_psize;
	nhdr->b_lsize = hdr->b_lsize;
	nhdr->b_spa = hdr->b_spa;
	nhdr->b_l1hdr.b_freeze_cksum = hdr->b_l1hdr.b_freeze_cksum;
	nhdr->b_l1hdr.b_bufcnt = hdr->b_l1hdr.b_bufcnt;
	nhdr->b_l1hdr.b_byteswap = hdr->b_l1hdr.b_byteswap;
	nhdr->b_l1hdr.b_state = hdr->b_l1hdr.b_state;
	nhdr->b_l1hdr.b_arc_access = hdr->b_l1hdr.b_arc_access;
	nhdr->b_l1hdr.b_mru_hits = hdr->b_l1hdr.b_mru_hits;
	nhdr->b_l1hdr.b_mru_ghost_hits = hdr->b_l1hdr.b_mru_ghost_hits;
	nhdr->b_l1hdr.b_mfu_hits = hdr->b_l1hdr.b_mfu_hits;
	nhdr->b_l1hdr.b_mfu_ghost_hits = hdr->b_l1hdr.b_mfu_ghost_hits;
	nhdr->b_l1hdr.b_acb = hdr->b_l1hdr.b_acb;
	nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;

	/*
	 * This zfs_refcount_add() exists only to ensure that the individual
	 * arc buffers always point to a header that is referenced, avoiding
	 * a small race condition that could trigger ASSERTs.
	 */
	(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
	nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;
	for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
		mutex_enter(&buf->b_evict_lock);
		buf->b_hdr = nhdr;
		mutex_exit(&buf->b_evict_lock);
	}

	zfs_refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
	(void) zfs_refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);
	ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));

	if (need_crypt) {
		arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED);
	} else {
		arc_hdr_clear_flags(nhdr, ARC_FLAG_PROTECTED);
	}

	/* unset all members of the original hdr */
	bzero(&hdr->b_dva, sizeof (dva_t));
	hdr->b_birth = 0;
	hdr->b_type = ARC_BUFC_INVALID;
	hdr->b_flags = 0;
	hdr->b_psize = 0;
	hdr->b_lsize = 0;
	hdr->b_spa = 0;
	hdr->b_l1hdr.b_freeze_cksum = NULL;
	hdr->b_l1hdr.b_buf = NULL;
	hdr->b_l1hdr.b_bufcnt = 0;
	hdr->b_l1hdr.b_byteswap = 0;
	hdr->b_l1hdr.b_state = NULL;
	hdr->b_l1hdr.b_arc_access = 0;
	hdr->b_l1hdr.b_mru_hits = 0;
	hdr->b_l1hdr.b_mru_ghost_hits = 0;
	hdr->b_l1hdr.b_mfu_hits = 0;
	hdr->b_l1hdr.b_mfu_ghost_hits = 0;
	hdr->b_l1hdr.b_acb = NULL;
	hdr->b_l1hdr.b_pabd = NULL;

	if (ocache == hdr_full_crypt_cache) {
		ASSERT(!HDR_HAS_RABD(hdr));
		hdr->b_crypt_hdr.b_ot = DMU_OT_NONE;
		hdr->b_crypt_hdr.b_ebufcnt = 0;
		hdr->b_crypt_hdr.b_dsobj = 0;
		bzero(hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
		bzero(hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
		bzero(hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
	}

	buf_discard_identity(hdr);
	kmem_cache_free(ocache, hdr);

	return (nhdr);
}

/*
 * This function is used by the send / receive code to convert a newly
 * allocated arc_buf_t to one that is suitable for a raw encrypted write. It
 * is also used to allow the root objset block to be updated without altering
 * its embedded MACs. Both block types will always be uncompressed so we do not
 * have to worry about compression type or psize.
 */
void
arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
    dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
    const uint8_t *mac)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);

	buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
	if (!HDR_PROTECTED(hdr))
		hdr = arc_hdr_realloc_crypt(hdr, B_TRUE);
	hdr->b_crypt_hdr.b_dsobj = dsobj;
	hdr->b_crypt_hdr.b_ot = ot;
	hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
	    DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
	if (!arc_hdr_has_uncompressed_buf(hdr))
		arc_cksum_free(hdr);

	if (salt != NULL)
		bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
	if (iv != NULL)
		bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
	if (mac != NULL)
		bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
}

/*
 * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
 * The buf is returned thawed since we expect the consumer to modify it.
 */
arc_buf_t *
arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
{
	arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
	    B_FALSE, ZIO_COMPRESS_OFF, 0, type);

	arc_buf_t *buf = NULL;
	VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, B_FALSE,
	    B_FALSE, B_FALSE, &buf));
	arc_buf_thaw(buf);

	return (buf);
}

/*
 * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use
 * this for bufs containing metadata.
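 *
 * A sketch of a call, with illustrative values only (any LZ4-compressed
 * data block would look similar):
 *
 *	buf = arc_alloc_compressed_buf(spa, tag, psize, lsize,
 *	    ZIO_COMPRESS_LZ4, 0);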
 */
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize,
    uint64_t lsize, enum zio_compress compression_type, uint8_t complevel)
{
	ASSERT3U(lsize, >, 0);
	ASSERT3U(lsize, >=, psize);
	ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
	ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);

	arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
	    B_FALSE, compression_type, complevel, ARC_BUFC_DATA);

	arc_buf_t *buf = NULL;
	VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, B_TRUE,
	    B_FALSE, B_FALSE, &buf));
	arc_buf_thaw(buf);
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

	/*
	 * To ensure that the hdr has the correct data in it if we call
	 * arc_untransform() on this buf before it's been written to disk,
	 * it's easiest if we just set up sharing between the buf and the hdr.
	 */
	arc_share_buf(hdr, buf);

	return (buf);
}

arc_buf_t *
arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder,
    const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
    dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
    enum zio_compress compression_type, uint8_t complevel)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
	    ARC_BUFC_METADATA : ARC_BUFC_DATA;

	ASSERT3U(lsize, >, 0);
	ASSERT3U(lsize, >=, psize);
	ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
	ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);

	hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
	    compression_type, complevel, type);

	hdr->b_crypt_hdr.b_dsobj = dsobj;
	hdr->b_crypt_hdr.b_ot = ot;
	hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
	    DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
	bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
	bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
	bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);

	/*
	 * This buffer will be considered encrypted even if the ot is not an
	 * encrypted type. It will become authenticated instead in
	 * arc_write_ready().
	 */
	buf = NULL;
	VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_TRUE, B_TRUE,
	    B_FALSE, B_FALSE, &buf));
	arc_buf_thaw(buf);
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

	return (buf);
}

static void
l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
    boolean_t state_only)
{
	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
	l2arc_dev_t *dev = l2hdr->b_dev;
	uint64_t lsize = HDR_GET_LSIZE(hdr);
	uint64_t psize = HDR_GET_PSIZE(hdr);
	uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
	arc_buf_contents_t type = hdr->b_type;
	int64_t lsize_s;
	int64_t psize_s;
	int64_t asize_s;

	if (incr) {
		lsize_s = lsize;
		psize_s = psize;
		asize_s = asize;
	} else {
		lsize_s = -lsize;
		psize_s = -psize;
		asize_s = -asize;
	}

	/* If the buffer is a prefetch, count it as such. */
	if (HDR_PREFETCH(hdr)) {
		ARCSTAT_INCR(arcstat_l2_prefetch_asize, asize_s);
	} else {
		/*
		 * We use the value stored in the L2 header upon initial
		 * caching in L2ARC. This value will be updated in case
		 * an MRU/MRU_ghost buffer transitions to MFU but the L2ARC
		 * metadata (log entry) cannot currently be updated. Having
		 * the ARC state in the L2 header solves the problem of a
		 * possibly absent L1 header (apparent in buffers restored
		 * from persistent L2ARC).
		 */
		switch (hdr->b_l2hdr.b_arcs_state) {
		case ARC_STATE_MRU_GHOST:
		case ARC_STATE_MRU:
			ARCSTAT_INCR(arcstat_l2_mru_asize, asize_s);
			break;
		case ARC_STATE_MFU_GHOST:
		case ARC_STATE_MFU:
			ARCSTAT_INCR(arcstat_l2_mfu_asize, asize_s);
			break;
		default:
			break;
		}
	}

	if (state_only)
		return;

	ARCSTAT_INCR(arcstat_l2_psize, psize_s);
	ARCSTAT_INCR(arcstat_l2_lsize, lsize_s);

	switch (type) {
	case ARC_BUFC_DATA:
		ARCSTAT_INCR(arcstat_l2_bufc_data_asize, asize_s);
		break;
	case ARC_BUFC_METADATA:
		ARCSTAT_INCR(arcstat_l2_bufc_metadata_asize, asize_s);
		break;
	default:
		break;
	}
}

static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
	l2arc_dev_t *dev = l2hdr->b_dev;
	uint64_t psize = HDR_GET_PSIZE(hdr);
	uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);

	ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
	ASSERT(HDR_HAS_L2HDR(hdr));

	list_remove(&dev->l2ad_buflist, hdr);

	l2arc_hdr_arcstats_decrement(hdr);
	vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);

	(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
	    hdr);
	arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	if (HDR_HAS_L1HDR(hdr)) {
		ASSERT(hdr->b_l1hdr.b_buf == NULL ||
		    hdr->b_l1hdr.b_bufcnt > 0);
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
	}
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT(!HDR_IN_HASH_TABLE(hdr));

	if (HDR_HAS_L2HDR(hdr)) {
		l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
		boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);

		if (!buflist_held)
			mutex_enter(&dev->l2ad_mtx);

		/*
		 * Even though we checked this conditional above, we
		 * need to check this again now that we have the
		 * l2ad_mtx. This is because we could be racing with
		 * another thread calling l2arc_evict() which might have
		 * destroyed this header's L2 portion as we were waiting
		 * to acquire the l2ad_mtx. If that happens, we don't
		 * want to re-destroy the header's L2 portion.
		 */
		if (HDR_HAS_L2HDR(hdr))
			arc_hdr_l2hdr_destroy(hdr);

		if (!buflist_held)
			mutex_exit(&dev->l2ad_mtx);
	}

	/*
	 * The header's identity can only be safely discarded once it is no
	 * longer discoverable. This requires removing it from the hash table
	 * and the l2arc header list. After this point the hash lock cannot
	 * be used to protect the header.
	 */
	if (!HDR_EMPTY(hdr))
		buf_discard_identity(hdr);

	if (HDR_HAS_L1HDR(hdr)) {
		arc_cksum_free(hdr);

		while (hdr->b_l1hdr.b_buf != NULL)
			arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);

		if (hdr->b_l1hdr.b_pabd != NULL)
			arc_hdr_free_abd(hdr, B_FALSE);

		if (HDR_HAS_RABD(hdr))
			arc_hdr_free_abd(hdr, B_TRUE);
	}

	ASSERT3P(hdr->b_hash_next, ==, NULL);
	if (HDR_HAS_L1HDR(hdr)) {
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);

		if (!HDR_PROTECTED(hdr)) {
			kmem_cache_free(hdr_full_cache, hdr);
		} else {
			kmem_cache_free(hdr_full_crypt_cache, hdr);
		}
	} else {
		kmem_cache_free(hdr_l2only_cache, hdr);
	}
}

void
arc_buf_destroy(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	if (hdr->b_l1hdr.b_state == arc_anon) {
		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		VERIFY0(remove_reference(hdr, NULL, tag));
		arc_hdr_destroy(hdr);
		return;
	}

	kmutex_t *hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	ASSERT3P(hdr, ==, buf->b_hdr);
	ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
	ASSERT3P(buf->b_data, !=, NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	arc_buf_destroy_impl(buf);
	mutex_exit(hash_lock);
}

/*
 * Evict the arc_buf_hdr that is provided as a parameter. The resultant
 * state of the header is dependent on its state prior to entering this
 * function. The following transitions are possible:
 *
 *    - arc_mru -> arc_mru_ghost
 *    - arc_mfu -> arc_mfu_ghost
 *    - arc_mru_ghost -> arc_l2c_only
 *    - arc_mru_ghost -> deleted
 *    - arc_mfu_ghost -> arc_l2c_only
 *    - arc_mfu_ghost -> deleted
 *
 * Return total size of evicted data buffers for eviction progress tracking.
 * When evicting from ghost states return logical buffer size to make eviction
 * progress at the same (or at least comparable) rate as from non-ghost states.
 *
 * Return *real_evicted for actual ARC size reduction to wake up threads
 * waiting for it. For non-ghost states it includes size of evicted data
 * buffers (the headers are not freed there). For ghost states it includes
 * only the evicted headers size.
 */
static int64_t
arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, uint64_t *real_evicted)
{
	arc_state_t *evicted_state, *state;
	int64_t bytes_evicted = 0;
	int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
	    arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(HDR_HAS_L1HDR(hdr));

	*real_evicted = 0;
	state = hdr->b_l1hdr.b_state;
	if (GHOST_STATE(state)) {
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);

		/*
		 * l2arc_write_buffers() relies on a header's L1 portion
		 * (i.e. its b_pabd field) during its write phase.
		 * Thus, we cannot push a header onto the arc_l2c_only
		 * state (removing its L1 piece) until the header is
		 * done being written to the l2arc.
		 */
		if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
			ARCSTAT_BUMP(arcstat_evict_l2_skip);
			return (bytes_evicted);
		}

		ARCSTAT_BUMP(arcstat_deleted);
		bytes_evicted += HDR_GET_LSIZE(hdr);

		DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);

		if (HDR_HAS_L2HDR(hdr)) {
			ASSERT(hdr->b_l1hdr.b_pabd == NULL);
			ASSERT(!HDR_HAS_RABD(hdr));
			/*
			 * This buffer is cached on the 2nd Level ARC;
			 * don't destroy the header.
			 */
			arc_change_state(arc_l2c_only, hdr, hash_lock);
			/*
			 * dropping from L1+L2 cached to L2-only,
			 * realloc to remove the L1 header.
			 */
			hdr = arc_hdr_realloc(hdr, hdr_full_cache,
			    hdr_l2only_cache);
			*real_evicted += HDR_FULL_SIZE - HDR_L2ONLY_SIZE;
		} else {
			arc_change_state(arc_anon, hdr, hash_lock);
			arc_hdr_destroy(hdr);
			*real_evicted += HDR_FULL_SIZE;
		}
		return (bytes_evicted);
	}

	ASSERT(state == arc_mru || state == arc_mfu);
	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	/* prefetch buffers have a minimum lifespan */
	if (HDR_IO_IN_PROGRESS(hdr) ||
	    ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
	    ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
	    MSEC_TO_TICK(min_lifetime))) {
		ARCSTAT_BUMP(arcstat_evict_skip);
		return (bytes_evicted);
	}

	ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
	while (hdr->b_l1hdr.b_buf) {
		arc_buf_t *buf = hdr->b_l1hdr.b_buf;
		if (!mutex_tryenter(&buf->b_evict_lock)) {
			ARCSTAT_BUMP(arcstat_mutex_miss);
			break;
		}
		if (buf->b_data != NULL) {
			bytes_evicted += HDR_GET_LSIZE(hdr);
			*real_evicted += HDR_GET_LSIZE(hdr);
		}
		mutex_exit(&buf->b_evict_lock);
		arc_buf_destroy_impl(buf);
	}

	if (HDR_HAS_L2HDR(hdr)) {
		ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
	} else {
		if (l2arc_write_eligible(hdr->b_spa, hdr)) {
			ARCSTAT_INCR(arcstat_evict_l2_eligible,
			    HDR_GET_LSIZE(hdr));

			switch (state->arcs_state) {
			case ARC_STATE_MRU:
				ARCSTAT_INCR(
				    arcstat_evict_l2_eligible_mru,
				    HDR_GET_LSIZE(hdr));
				break;
			case ARC_STATE_MFU:
				ARCSTAT_INCR(
				    arcstat_evict_l2_eligible_mfu,
				    HDR_GET_LSIZE(hdr));
				break;
			default:
				break;
			}
		} else {
			ARCSTAT_INCR(arcstat_evict_l2_ineligible,
			    HDR_GET_LSIZE(hdr));
		}
	}

	if (hdr->b_l1hdr.b_bufcnt == 0) {
		arc_cksum_free(hdr);

		bytes_evicted += arc_hdr_size(hdr);
		*real_evicted += arc_hdr_size(hdr);

		/*
		 * If this hdr is being evicted and has a compressed
		 * buffer then we discard it here before we change states.
		 * This ensures that the accounting is updated correctly
		 * in arc_free_data_impl().
		 */
		if (hdr->b_l1hdr.b_pabd != NULL)
			arc_hdr_free_abd(hdr, B_FALSE);

		if (HDR_HAS_RABD(hdr))
			arc_hdr_free_abd(hdr, B_TRUE);

		arc_change_state(evicted_state, hdr, hash_lock);
		ASSERT(HDR_IN_HASH_TABLE(hdr));
		arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
		DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
	}

	return (bytes_evicted);
}

static void
arc_set_need_free(void)
{
	ASSERT(MUTEX_HELD(&arc_evict_lock));
	int64_t remaining = arc_free_memory() - arc_sys_free / 2;
	arc_evict_waiter_t *aw = list_tail(&arc_evict_waiters);
	if (aw == NULL) {
		arc_need_free = MAX(-remaining, 0);
	} else {
		arc_need_free =
		    MAX(-remaining, (int64_t)(aw->aew_count - arc_evict_count));
	}
}

static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
    uint64_t spa, uint64_t bytes)
{
	multilist_sublist_t *mls;
	uint64_t bytes_evicted = 0, real_evicted = 0;
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	int evict_count = zfs_arc_evict_batch_limit;

	ASSERT3P(marker, !=, NULL);

	mls = multilist_sublist_lock(ml, idx);

	for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL);
	    hdr = multilist_sublist_prev(mls, marker)) {
		if ((evict_count <= 0) || (bytes_evicted >= bytes))
			break;

		/*
		 * To keep our iteration location, move the marker
		 * forward. Since we're not holding hdr's hash lock, we
		 * must be very careful and not remove 'hdr' from the
		 * sublist. Otherwise, other consumers might mistake the
		 * 'hdr' as not being on a sublist when they call the
		 * multilist_link_active() function (they all rely on
		 * the hash lock protecting concurrent insertions and
		 * removals). multilist_sublist_move_forward() was
		 * specifically implemented to ensure this is the case
		 * (only 'marker' will be removed and re-inserted).
		 */
		multilist_sublist_move_forward(mls, marker);

		/*
		 * The only case where the b_spa field should ever be
		 * zero, is the marker headers inserted by
		 * arc_evict_state(). It's possible for multiple threads
		 * to be calling arc_evict_state() concurrently (e.g.
		 * dsl_pool_close() and zio_inject_fault()), so we must
		 * skip any markers we see from these other threads.
		 */
		if (hdr->b_spa == 0)
			continue;

		/* we're only interested in evicting buffers of a certain spa */
		if (spa != 0 && hdr->b_spa != spa) {
			ARCSTAT_BUMP(arcstat_evict_skip);
			continue;
		}

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We aren't calling this function from any code path
		 * that would already be holding a hash lock, so we're
		 * asserting on this assumption to be defensive in case
		 * this ever changes. Without this check, it would be
		 * possible to incorrectly increment arcstat_mutex_miss
		 * below (e.g. if the code changed such that we called
		 * this function with a hash lock held).
		 */
		ASSERT(!MUTEX_HELD(hash_lock));

		if (mutex_tryenter(hash_lock)) {
			uint64_t revicted;
			uint64_t evicted = arc_evict_hdr(hdr, hash_lock,
			    &revicted);
			mutex_exit(hash_lock);

			bytes_evicted += evicted;
			real_evicted += revicted;

			/*
			 * If evicted is zero, arc_evict_hdr() must have
			 * decided to skip this header, don't increment
			 * evict_count in this case.
			 */
			if (evicted != 0)
				evict_count--;
		} else {
			ARCSTAT_BUMP(arcstat_mutex_miss);
		}
	}

	multilist_sublist_unlock(mls);

	/*
	 * Increment the count of evicted bytes, and wake up any threads that
	 * are waiting for the count to reach this value. Since the list is
	 * ordered by ascending aew_count, we pop off the beginning of the
	 * list until we reach the end, or a waiter that's past the current
	 * "count". Doing this outside the loop reduces the number of times
	 * we need to acquire the global arc_evict_lock.
	 *
	 * Only wake when there's sufficient free memory in the system
	 * (specifically, arc_sys_free/2, which by default is a bit more than
	 * 1/64th of RAM). See the comments in arc_wait_for_eviction().
	 */
	mutex_enter(&arc_evict_lock);
	arc_evict_count += real_evicted;

	if (arc_free_memory() > arc_sys_free / 2) {
		arc_evict_waiter_t *aw;
		while ((aw = list_head(&arc_evict_waiters)) != NULL &&
		    aw->aew_count <= arc_evict_count) {
			list_remove(&arc_evict_waiters, aw);
			cv_broadcast(&aw->aew_cv);
		}
	}
	arc_set_need_free();
	mutex_exit(&arc_evict_lock);

	/*
	 * If the ARC size is reduced from arc_c_max to arc_c_min (especially
	 * if the average cached block is small), eviction can be on-CPU for
	 * many seconds. To ensure that other threads that may be bound to
	 * this CPU are able to make progress, make a voluntary preemption
	 * call here.
	 */
	cond_resched();

	return (bytes_evicted);
}

/*
 * Evict buffers from the given arc state, until we've removed the
 * specified number of bytes. Move the removed buffers to the
 * appropriate evict state.
 *
 * This function makes a "best effort". It skips over any buffers
 * it can't get a hash_lock on, and so, may not catch all candidates.
 * It may also return without evicting as much space as requested.
 *
 * If bytes is specified using the special value ARC_EVICT_ALL, this
 * will evict all available (i.e. unlocked and evictable) buffers from
 * the given arc state; which is used by arc_flush().
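 * (arc_flush_state() below passes ARC_EVICT_ALL in exactly this way.)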
 */
static uint64_t
arc_evict_state(arc_state_t *state, uint64_t spa, uint64_t bytes,
    arc_buf_contents_t type)
{
	uint64_t total_evicted = 0;
	multilist_t *ml = &state->arcs_list[type];
	int num_sublists;
	arc_buf_hdr_t **markers;

	num_sublists = multilist_get_num_sublists(ml);

	/*
	 * If we've tried to evict from each sublist, made some
	 * progress, but still have not hit the target number of bytes
	 * to evict, we want to keep trying. The markers allow us to
	 * pick up where we left off for each individual sublist, rather
	 * than starting from the tail each time.
	 */
	markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
	for (int i = 0; i < num_sublists; i++) {
		multilist_sublist_t *mls;

		markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);

		/*
		 * A b_spa of 0 is used to indicate that this header is
		 * a marker. This fact is used in arc_evict_type() and
		 * arc_evict_state_impl().
		 */
		markers[i]->b_spa = 0;

		mls = multilist_sublist_lock(ml, i);
		multilist_sublist_insert_tail(mls, markers[i]);
		multilist_sublist_unlock(mls);
	}

	/*
	 * While we haven't hit our target number of bytes to evict, or
	 * we're evicting all available buffers.
	 */
	while (total_evicted < bytes) {
		int sublist_idx = multilist_get_random_index(ml);
		uint64_t scan_evicted = 0;

		/*
		 * Try to reduce pinned dnodes with a floor of arc_dnode_limit.
		 * Request that 10% of the LRUs be scanned by the superblock
		 * shrinker.
		 */
		if (type == ARC_BUFC_DATA && aggsum_compare(
		    &arc_sums.arcstat_dnode_size, arc_dnode_size_limit) > 0) {
			arc_prune_async((aggsum_upper_bound(
			    &arc_sums.arcstat_dnode_size) -
			    arc_dnode_size_limit) / sizeof (dnode_t) /
			    zfs_arc_dnode_reduce_percent);
		}

		/*
		 * Start eviction using a randomly selected sublist,
		 * this is to try and evenly balance eviction across all
		 * sublists. Always starting at the same sublist
		 * (e.g. index 0) would cause evictions to favor certain
		 * sublists over others.
		 */
		for (int i = 0; i < num_sublists; i++) {
			uint64_t bytes_remaining;
			uint64_t bytes_evicted;

			if (total_evicted < bytes)
				bytes_remaining = bytes - total_evicted;
			else
				break;

			bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
			    markers[sublist_idx], spa, bytes_remaining);

			scan_evicted += bytes_evicted;
			total_evicted += bytes_evicted;

			/* we've reached the end, wrap to the beginning */
			if (++sublist_idx >= num_sublists)
				sublist_idx = 0;
		}

		/*
		 * If we didn't evict anything during this scan, we have
		 * no reason to believe we'll evict more during another
		 * scan, so break the loop.
		 */
		if (scan_evicted == 0) {
			/* This isn't possible, let's make that obvious */
			ASSERT3S(bytes, !=, 0);

			/*
			 * When bytes is ARC_EVICT_ALL, the only way to
			 * break the loop is when scan_evicted is zero.
			 * In that case, we actually have evicted enough,
			 * so we don't want to increment the kstat.
			 */
			if (bytes != ARC_EVICT_ALL) {
				ASSERT3S(total_evicted, <, bytes);
				ARCSTAT_BUMP(arcstat_evict_not_enough);
			}

			break;
		}
	}

	for (int i = 0; i < num_sublists; i++) {
		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
		multilist_sublist_remove(mls, markers[i]);
		multilist_sublist_unlock(mls);

		kmem_cache_free(hdr_full_cache, markers[i]);
	}
	kmem_free(markers, sizeof (*markers) * num_sublists);

	return (total_evicted);
}

/*
 * Flush all "evictable" data of the given type from the arc state
 * specified. This will not evict any "active" buffers (i.e. referenced).
 *
 * When 'retry' is set to B_FALSE, the function will make a single pass
 * over the state and evict any buffers that it can. Since it doesn't
Since it doesn't
 * continually retry the eviction, it might end up leaving some buffers
 * in the ARC due to lock misses.
 *
 * When 'retry' is set to B_TRUE, the function will continually retry the
 * eviction until *all* evictable buffers have been removed from the
 * state. As a result, if concurrent insertions into the state are
 * allowed (e.g. if the ARC isn't shutting down), this function might
 * wind up in an infinite loop, continually trying to evict buffers.
 */
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
    boolean_t retry)
{
    uint64_t evicted = 0;

    while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
        evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);

        if (!retry)
            break;
    }

    return (evicted);
}

/*
 * Evict the specified number of bytes from the state specified,
 * restricting eviction to the spa and type given. This function
 * prevents us from trying to evict more from a state's list than
 * is "evictable", and skips evicting altogether when passed a
 * negative value for "bytes". In contrast, arc_evict_state() will
 * evict everything it can, when passed a negative value for "bytes".
 */
static uint64_t
arc_evict_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
    arc_buf_contents_t type)
{
    uint64_t delta;

    if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
        delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
            bytes);
        return (arc_evict_state(state, spa, delta, type));
    }

    return (0);
}

/*
 * The goal of this function is to evict enough meta data buffers from the
 * ARC in order to enforce the arc_meta_limit. Achieving this is slightly
 * more complicated than it appears because it is common for data buffers
 * to have holds on meta data buffers. In addition, dnode meta data buffers
 * will be held by the dnodes in the block preventing them from being freed.
 * This means we can't simply traverse the ARC and expect to always find
 * enough unheld meta data buffers to release.
 *
 * Therefore, this function has been updated to make alternating passes
 * over the ARC releasing data buffers and then newly unheld meta data
 * buffers. This ensures forward progress is maintained and meta_used
 * will decrease. Normally this is sufficient, but if required the ARC
 * will call the registered prune callbacks, causing dentries and inodes
 * to be dropped from the VFS cache. This will make dnode meta data
 * buffers available for reclaim.
 */
static uint64_t
arc_evict_meta_balanced(uint64_t meta_used)
{
    int64_t delta, prune = 0, adjustmnt;
    uint64_t total_evicted = 0;
    arc_buf_contents_t type = ARC_BUFC_DATA;
    int restarts = MAX(zfs_arc_meta_adjust_restarts, 0);

restart:
    /*
     * This differs slightly from the way we evict from the mru in
     * arc_evict because we don't have a "target" value (i.e. no
     * "meta" arc_p). As a result, I think we can completely
     * cannibalize the metadata in the MRU before we evict the
     * metadata from the MFU. I think we probably need to implement a
     * "metadata arc_p" value to do this properly.
     */
    adjustmnt = meta_used - arc_meta_limit;

    if (adjustmnt > 0 &&
        zfs_refcount_count(&arc_mru->arcs_esize[type]) > 0) {
        delta = MIN(zfs_refcount_count(&arc_mru->arcs_esize[type]),
            adjustmnt);
        total_evicted += arc_evict_impl(arc_mru, 0, delta, type);
        adjustmnt -= delta;
    }

    /*
     * We can't afford to recalculate adjustmnt here. If we do,
     * new metadata buffers can sneak into the MRU or ANON lists,
     * thus penalizing the MFU metadata. Although the fudge factor is
     * small, it has been empirically shown to be significant for
     * certain workloads (e.g.
creating many empty directories). As * such, we use the original calculation for adjustmnt, and * simply decrement the amount of data evicted from the MRU. */ if (adjustmnt > 0 && zfs_refcount_count(&arc_mfu->arcs_esize[type]) > 0) { delta = MIN(zfs_refcount_count(&arc_mfu->arcs_esize[type]), adjustmnt); total_evicted += arc_evict_impl(arc_mfu, 0, delta, type); } adjustmnt = meta_used - arc_meta_limit; if (adjustmnt > 0 && zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) { delta = MIN(adjustmnt, zfs_refcount_count(&arc_mru_ghost->arcs_esize[type])); total_evicted += arc_evict_impl(arc_mru_ghost, 0, delta, type); adjustmnt -= delta; } if (adjustmnt > 0 && zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) { delta = MIN(adjustmnt, zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type])); total_evicted += arc_evict_impl(arc_mfu_ghost, 0, delta, type); } /* * If after attempting to make the requested adjustment to the ARC * the meta limit is still being exceeded then request that the * higher layers drop some cached objects which have holds on ARC * meta buffers. Requests to the upper layers will be made with * increasingly large scan sizes until the ARC is below the limit. */ if (meta_used > arc_meta_limit) { if (type == ARC_BUFC_DATA) { type = ARC_BUFC_METADATA; } else { type = ARC_BUFC_DATA; if (zfs_arc_meta_prune) { prune += zfs_arc_meta_prune; arc_prune_async(prune); } } if (restarts > 0) { restarts--; goto restart; } } return (total_evicted); } /* * Evict metadata buffers from the cache, such that arcstat_meta_used is * capped by the arc_meta_limit tunable. */ static uint64_t arc_evict_meta_only(uint64_t meta_used) { uint64_t total_evicted = 0; int64_t target; /* * If we're over the meta limit, we want to evict enough * metadata to get back under the meta limit. We don't want to * evict so much that we drop the MRU below arc_p, though. If * we're over the meta limit more than we're over arc_p, we * evict some from the MRU here, and some from the MFU below. */ target = MIN((int64_t)(meta_used - arc_meta_limit), (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) + zfs_refcount_count(&arc_mru->arcs_size) - arc_p)); total_evicted += arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA); /* * Similar to the above, we want to evict enough bytes to get us * below the meta limit, but not so much as to drop us below the * space allotted to the MFU (which is defined as arc_c - arc_p). */ target = MIN((int64_t)(meta_used - arc_meta_limit), (int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p))); total_evicted += arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); return (total_evicted); } static uint64_t arc_evict_meta(uint64_t meta_used) { if (zfs_arc_meta_strategy == ARC_STRATEGY_META_ONLY) return (arc_evict_meta_only(meta_used)); else return (arc_evict_meta_balanced(meta_used)); } /* * Return the type of the oldest buffer in the given arc state * * This function will select a random sublist of type ARC_BUFC_DATA and * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist * is compared, and the type which contains the "older" buffer will be * returned. 
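 *
 * "Older" means the smaller b_l1hdr.b_arc_access stamp, i.e. the buffer
 * touched least recently; on a tie, metadata is selected. Marker headers
 * inserted by arc_evict_state() (b_spa == 0) are skipped when locating
 * each tail.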
*/ static arc_buf_contents_t arc_evict_type(arc_state_t *state) { multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA]; multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA]; int data_idx = multilist_get_random_index(data_ml); int meta_idx = multilist_get_random_index(meta_ml); multilist_sublist_t *data_mls; multilist_sublist_t *meta_mls; arc_buf_contents_t type; arc_buf_hdr_t *data_hdr; arc_buf_hdr_t *meta_hdr; /* * We keep the sublist lock until we're finished, to prevent * the headers from being destroyed via arc_evict_state(). */ data_mls = multilist_sublist_lock(data_ml, data_idx); meta_mls = multilist_sublist_lock(meta_ml, meta_idx); /* * These two loops are to ensure we skip any markers that * might be at the tail of the lists due to arc_evict_state(). */ for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL; data_hdr = multilist_sublist_prev(data_mls, data_hdr)) { if (data_hdr->b_spa != 0) break; } for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL; meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) { if (meta_hdr->b_spa != 0) break; } if (data_hdr == NULL && meta_hdr == NULL) { type = ARC_BUFC_DATA; } else if (data_hdr == NULL) { ASSERT3P(meta_hdr, !=, NULL); type = ARC_BUFC_METADATA; } else if (meta_hdr == NULL) { ASSERT3P(data_hdr, !=, NULL); type = ARC_BUFC_DATA; } else { ASSERT3P(data_hdr, !=, NULL); ASSERT3P(meta_hdr, !=, NULL); /* The headers can't be on the sublist without an L1 header */ ASSERT(HDR_HAS_L1HDR(data_hdr)); ASSERT(HDR_HAS_L1HDR(meta_hdr)); if (data_hdr->b_l1hdr.b_arc_access < meta_hdr->b_l1hdr.b_arc_access) { type = ARC_BUFC_DATA; } else { type = ARC_BUFC_METADATA; } } multilist_sublist_unlock(meta_mls); multilist_sublist_unlock(data_mls); return (type); } /* * Evict buffers from the cache, such that arcstat_size is capped by arc_c. */ static uint64_t arc_evict(void) { uint64_t total_evicted = 0; uint64_t bytes; int64_t target; uint64_t asize = aggsum_value(&arc_sums.arcstat_size); uint64_t ameta = aggsum_value(&arc_sums.arcstat_meta_used); /* * If we're over arc_meta_limit, we want to correct that before * potentially evicting data buffers below. */ total_evicted += arc_evict_meta(ameta); /* * Adjust MRU size * * If we're over the target cache size, we want to evict enough * from the list to get back to our target size. We don't want * to evict too much from the MRU, such that it drops below * arc_p. So, if we're over our target cache size more than * the MRU is over arc_p, we'll evict enough to get back to * arc_p here, and then evict more from the MFU below. */ target = MIN((int64_t)(asize - arc_c), (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) + zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p)); /* * If we're below arc_meta_min, always prefer to evict data. * Otherwise, try to satisfy the requested number of bytes to * evict from the type which contains older buffers; in an * effort to keep newer buffers in the cache regardless of their * type. If we cannot satisfy the number of bytes from this * type, spill over into the next type. */ if (arc_evict_type(arc_mru) == ARC_BUFC_METADATA && ameta > arc_meta_min) { bytes = arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA); total_evicted += bytes; /* * If we couldn't evict our target number of bytes from * metadata, we try to get the rest from data. 
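		 *
		 * target is reduced below by what the first
		 * arc_evict_impl() call managed to evict; since
		 * arc_evict_impl() treats a zero or negative request as
		 * nothing to do, the spill-over call is a no-op when the
		 * first pass already satisfied the request.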
     */
        target -= bytes;

        total_evicted +=
            arc_evict_impl(arc_mru, 0, target, ARC_BUFC_DATA);
    } else {
        bytes = arc_evict_impl(arc_mru, 0, target, ARC_BUFC_DATA);
        total_evicted += bytes;

        /*
         * If we couldn't evict our target number of bytes from
         * data, we try to get the rest from metadata.
         */
        target -= bytes;

        total_evicted +=
            arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
    }

    /*
     * Re-sum ARC stats after the first round of evictions.
     */
    asize = aggsum_value(&arc_sums.arcstat_size);
    ameta = aggsum_value(&arc_sums.arcstat_meta_used);

    /*
     * Adjust MFU size
     *
     * Now that we've tried to evict enough from the MRU to get its
     * size back to arc_p, if we're still above the target cache
     * size, we evict the rest from the MFU.
     */
    target = asize - arc_c;

    if (arc_evict_type(arc_mfu) == ARC_BUFC_METADATA &&
        ameta > arc_meta_min) {
        bytes = arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
        total_evicted += bytes;

        /*
         * If we couldn't evict our target number of bytes from
         * metadata, we try to get the rest from data.
         */
        target -= bytes;

        total_evicted +=
            arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
    } else {
        bytes = arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
        total_evicted += bytes;

        /*
         * If we couldn't evict our target number of bytes from
         * data, we try to get the rest from metadata.
         */
        target -= bytes;

        total_evicted +=
            arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
    }

    /*
     * Adjust ghost lists
     *
     * In addition to the above, the ARC also defines target values
     * for the ghost lists. The sum of the mru list and mru ghost
     * list should never exceed the target size of the cache, and
     * the sum of the mru list, mfu list, mru ghost list, and mfu
     * ghost list should never exceed twice the target size of the
     * cache. The following logic enforces these limits on the ghost
     * caches, and evicts from them as needed.
     */
    target = zfs_refcount_count(&arc_mru->arcs_size) +
        zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;

    bytes = arc_evict_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
    total_evicted += bytes;

    target -= bytes;

    total_evicted +=
        arc_evict_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);

    /*
     * We assume the sum of the mru list and mfu list is less than
     * or equal to arc_c (we enforced this above), which means we
     * can use the simpler of the two equations below:
     *
     *	mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
     *		    mru ghost + mfu ghost <= arc_c
     */
    target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
        zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;

    bytes = arc_evict_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
    total_evicted += bytes;

    target -= bytes;

    total_evicted +=
        arc_evict_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);

    return (total_evicted);
}

void
arc_flush(spa_t *spa, boolean_t retry)
{
    uint64_t guid = 0;

    /*
     * If retry is B_TRUE, a spa must not be specified since we have
     * no good way to determine if all of a spa's buffers have been
     * evicted from an arc state.
     */
    ASSERT(!retry || spa == NULL);

    if (spa != NULL)
        guid = spa_load_guid(spa);

    (void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
    (void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);

    (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
    (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);

    (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
    (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);

    (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
    (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
}

void
arc_reduce_target_size(int64_t to_free)
{
    uint64_t asize = aggsum_value(&arc_sums.arcstat_size);

    /*
     * All callers want the ARC to actually evict (at least) this much
     * memory. Therefore we reduce from the lower of the current size and
     * the target size. This way, even if arc_c is much higher than
     * arc_size (as can be the case after many calls to arc_freed()), we
     * will immediately have arc_c < arc_size and therefore the
     * arc_evict_zthr will evict.
     */
    uint64_t c = MIN(arc_c, asize);

    if (c > to_free && c - to_free > arc_c_min) {
        arc_c = c - to_free;
        atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
        if (arc_p > arc_c)
            arc_p = (arc_c >> 1);
        ASSERT(arc_c >= arc_c_min);
        ASSERT((int64_t)arc_p >= 0);
    } else {
        arc_c = arc_c_min;
    }

    if (asize > arc_c) {
        /* See comment in arc_evict_cb_check() on why lock+flag */
        mutex_enter(&arc_evict_lock);
        arc_evict_needed = B_TRUE;
        mutex_exit(&arc_evict_lock);
        zthr_wakeup(arc_evict_zthr);
    }
}

/*
 * Determine if the system is under memory pressure and is asking
 * to reclaim memory. A return value of B_TRUE indicates that the system
 * is under memory pressure and that the arc should adjust accordingly.
 */
boolean_t
arc_reclaim_needed(void)
{
    return (arc_available_memory() < 0);
}

void
arc_kmem_reap_soon(void)
{
    size_t i;
    kmem_cache_t *prev_cache = NULL;
    kmem_cache_t *prev_data_cache = NULL;
    extern kmem_cache_t *zio_buf_cache[];
    extern kmem_cache_t *zio_data_buf_cache[];

#ifdef _KERNEL
    if ((aggsum_compare(&arc_sums.arcstat_meta_used,
        arc_meta_limit) >= 0) && zfs_arc_meta_prune) {
        /*
         * We are exceeding our meta-data cache limit.
         * Prune some entries to release holds on meta-data.
         */
        arc_prune_async(zfs_arc_meta_prune);
    }
#if defined(_ILP32)
    /*
     * Reclaim unused memory from all kmem caches.
     */
    kmem_reap();
#endif
#endif

    for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
#if defined(_ILP32)
        /* reach upper limit of cache size on 32-bit */
        if (zio_buf_cache[i] == NULL)
            break;
#endif
        if (zio_buf_cache[i] != prev_cache) {
            prev_cache = zio_buf_cache[i];
            kmem_cache_reap_now(zio_buf_cache[i]);
        }
        if (zio_data_buf_cache[i] != prev_data_cache) {
            prev_data_cache = zio_data_buf_cache[i];
            kmem_cache_reap_now(zio_data_buf_cache[i]);
        }
    }
    kmem_cache_reap_now(buf_cache);
    kmem_cache_reap_now(hdr_full_cache);
    kmem_cache_reap_now(hdr_l2only_cache);
    kmem_cache_reap_now(zfs_btree_leaf_cache);
    abd_cache_reap_now();
}

/* ARGSUSED */
static boolean_t
arc_evict_cb_check(void *arg, zthr_t *zthr)
{
#ifdef ZFS_DEBUG
    /*
     * This is necessary in order to keep the kstat information
     * up to date for tools that display kstat data such as the
     * mdb ::arc dcmd and the Linux crash utility. These tools
     * typically do not call kstat's update function, but simply
     * dump out stats from the most recent update. Without
     * this call, these commands may show stale stats for the
     * anon, mru, mru_ghost, mfu, and mfu_ghost lists.
Even * with this call, the data might be out of date if the * evict thread hasn't been woken recently; but that should * suffice. The arc_state_t structures can be queried * directly if more accurate information is needed. */ if (arc_ksp != NULL) arc_ksp->ks_update(arc_ksp, KSTAT_READ); #endif /* * We have to rely on arc_wait_for_eviction() to tell us when to * evict, rather than checking if we are overflowing here, so that we * are sure to not leave arc_wait_for_eviction() waiting on aew_cv. * If we have become "not overflowing" since arc_wait_for_eviction() * checked, we need to wake it up. We could broadcast the CV here, * but arc_wait_for_eviction() may have not yet gone to sleep. We * would need to use a mutex to ensure that this function doesn't * broadcast until arc_wait_for_eviction() has gone to sleep (e.g. * the arc_evict_lock). However, the lock ordering of such a lock * would necessarily be incorrect with respect to the zthr_lock, * which is held before this function is called, and is held by * arc_wait_for_eviction() when it calls zthr_wakeup(). */ return (arc_evict_needed); } /* * Keep arc_size under arc_c by running arc_evict which evicts data * from the ARC. */ /* ARGSUSED */ static void arc_evict_cb(void *arg, zthr_t *zthr) { uint64_t evicted = 0; fstrans_cookie_t cookie = spl_fstrans_mark(); /* Evict from cache */ evicted = arc_evict(); /* * If evicted is zero, we couldn't evict anything * via arc_evict(). This could be due to hash lock * collisions, but more likely due to the majority of * arc buffers being unevictable. Therefore, even if * arc_size is above arc_c, another pass is unlikely to * be helpful and could potentially cause us to enter an * infinite loop. Additionally, zthr_iscancelled() is * checked here so that if the arc is shutting down, the * broadcast will wake any remaining arc evict waiters. */ mutex_enter(&arc_evict_lock); arc_evict_needed = !zthr_iscancelled(arc_evict_zthr) && evicted > 0 && aggsum_compare(&arc_sums.arcstat_size, arc_c) > 0; if (!arc_evict_needed) { /* * We're either no longer overflowing, or we * can't evict anything more, so we should wake * arc_get_data_impl() sooner. */ arc_evict_waiter_t *aw; while ((aw = list_remove_head(&arc_evict_waiters)) != NULL) { cv_broadcast(&aw->aew_cv); } arc_set_need_free(); } mutex_exit(&arc_evict_lock); spl_fstrans_unmark(cookie); } /* ARGSUSED */ static boolean_t arc_reap_cb_check(void *arg, zthr_t *zthr) { int64_t free_memory = arc_available_memory(); static int reap_cb_check_counter = 0; /* * If a kmem reap is already active, don't schedule more. We must * check for this because kmem_cache_reap_soon() won't actually * block on the cache being reaped (this is to prevent callers from * becoming implicitly blocked by a system-wide kmem reap -- which, * on a system with many, many full magazines, can take minutes). */ if (!kmem_cache_reap_active() && free_memory < 0) { arc_no_grow = B_TRUE; arc_warm = B_TRUE; /* * Wait at least zfs_grow_retry (default 5) seconds * before considering growing. */ arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry); return (B_TRUE); } else if (free_memory < arc_c >> arc_no_grow_shift) { arc_no_grow = B_TRUE; } else if (gethrtime() >= arc_growtime) { arc_no_grow = B_FALSE; } /* * Called unconditionally every 60 seconds to reclaim unused * zstd compression and decompression context. This is done * here to avoid the need for an independent thread. 
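	 *
	 * (The % 60 below assumes this check callback is polled roughly
	 * once per second; if the zthr wakeup interval changes, the zstd
	 * reap cadence changes with it.)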
*/ if (!((reap_cb_check_counter++) % 60)) zfs_zstd_cache_reap_now(); return (B_FALSE); } /* * Keep enough free memory in the system by reaping the ARC's kmem * caches. To cause more slabs to be reapable, we may reduce the * target size of the cache (arc_c), causing the arc_evict_cb() * to free more buffers. */ /* ARGSUSED */ static void arc_reap_cb(void *arg, zthr_t *zthr) { int64_t free_memory; fstrans_cookie_t cookie = spl_fstrans_mark(); /* * Kick off asynchronous kmem_reap()'s of all our caches. */ arc_kmem_reap_soon(); /* * Wait at least arc_kmem_cache_reap_retry_ms between * arc_kmem_reap_soon() calls. Without this check it is possible to * end up in a situation where we spend lots of time reaping * caches, while we're near arc_c_min. Waiting here also gives the * subsequent free memory check a chance of finding that the * asynchronous reap has already freed enough memory, and we don't * need to call arc_reduce_target_size(). */ delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000); /* * Reduce the target size as needed to maintain the amount of free * memory in the system at a fraction of the arc_size (1/128th by * default). If oversubscribed (free_memory < 0) then reduce the * target arc_size by the deficit amount plus the fractional * amount. If free memory is positive but less than the fractional * amount, reduce by what is needed to hit the fractional amount. */ free_memory = arc_available_memory(); int64_t to_free = (arc_c >> arc_shrink_shift) - free_memory; if (to_free > 0) { arc_reduce_target_size(to_free); } spl_fstrans_unmark(cookie); } #ifdef _KERNEL /* * Determine the amount of memory eligible for eviction contained in the * ARC. All clean data reported by the ghost lists can always be safely * evicted. Due to arc_c_min, the same does not hold for all clean data * contained by the regular mru and mfu lists. * * In the case of the regular mru and mfu lists, we need to report as * much clean data as possible, such that evicting that same reported * data will not bring arc_size below arc_c_min. Thus, in certain * circumstances, the total amount of clean data in the mru and mfu * lists might not actually be evictable. * * The following two distinct cases are accounted for: * * 1. The sum of the amount of dirty data contained by both the mru and * mfu lists, plus the ARC's other accounting (e.g. the anon list), * is greater than or equal to arc_c_min. * (i.e. amount of dirty data >= arc_c_min) * * This is the easy case; all clean data contained by the mru and mfu * lists is evictable. Evicting all clean data can only drop arc_size * to the amount of dirty data, which is greater than arc_c_min. * * 2. The sum of the amount of dirty data contained by both the mru and * mfu lists, plus the ARC's other accounting (e.g. the anon list), * is less than arc_c_min. * (i.e. arc_c_min > amount of dirty data) * * 2.1. arc_size is greater than or equal arc_c_min. * (i.e. arc_size >= arc_c_min > amount of dirty data) * * In this case, not all clean data from the regular mru and mfu * lists is actually evictable; we must leave enough clean data * to keep arc_size above arc_c_min. Thus, the maximum amount of * evictable data from the two lists combined, is exactly the * difference between arc_size and arc_c_min. * * 2.2. arc_size is less than arc_c_min * (i.e. arc_c_min > arc_size > amount of dirty data) * * In this case, none of the data contained in the mru and mfu * lists is evictable, even if it's clean. 
Since arc_size is * already below arc_c_min, evicting any more would only * increase this negative difference. */ #endif /* _KERNEL */ /* * Adapt arc info given the number of bytes we are trying to add and * the state that we are coming from. This function is only called * when we are adding new content to the cache. */ static void arc_adapt(int bytes, arc_state_t *state) { int mult; uint64_t arc_p_min = (arc_c >> arc_p_min_shift); int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size); int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size); ASSERT(bytes > 0); /* * Adapt the target size of the MRU list: * - if we just hit in the MRU ghost list, then increase * the target size of the MRU list. * - if we just hit in the MFU ghost list, then increase * the target size of the MFU list by decreasing the * target size of the MRU list. */ if (state == arc_mru_ghost) { mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size); if (!zfs_arc_p_dampener_disable) mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); } else if (state == arc_mfu_ghost) { uint64_t delta; mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size); if (!zfs_arc_p_dampener_disable) mult = MIN(mult, 10); delta = MIN(bytes * mult, arc_p); arc_p = MAX(arc_p_min, arc_p - delta); } ASSERT((int64_t)arc_p >= 0); /* * Wake reap thread if we do not have any available memory */ if (arc_reclaim_needed()) { zthr_wakeup(arc_reap_zthr); return; } if (arc_no_grow) return; if (arc_c >= arc_c_max) return; /* * If we're within (2 * maxblocksize) bytes of the target * cache size, increment the target cache size */ ASSERT3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT); if (aggsum_upper_bound(&arc_sums.arcstat_size) >= arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { atomic_add_64(&arc_c, (int64_t)bytes); if (arc_c > arc_c_max) arc_c = arc_c_max; else if (state == arc_anon) atomic_add_64(&arc_p, (int64_t)bytes); if (arc_p > arc_c) arc_p = arc_c; } ASSERT((int64_t)arc_p >= 0); } /* * Check if arc_size has grown past our upper threshold, determined by * zfs_arc_overflow_shift. */ static arc_ovf_level_t arc_is_overflowing(boolean_t use_reserve) { /* Always allow at least one block of overflow */ int64_t overflow = MAX(SPA_MAXBLOCKSIZE, arc_c >> zfs_arc_overflow_shift); /* * We just compare the lower bound here for performance reasons. Our * primary goals are to make sure that the arc never grows without * bound, and that it can reach its maximum size. This check * accomplishes both goals. The maximum amount we could run over by is * 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block * in the ARC. In practice, that's in the tens of MB, which is low * enough to be safe. */ int64_t over = aggsum_lower_bound(&arc_sums.arcstat_size) - arc_c - overflow / 2; if (!use_reserve) overflow /= 2; return (over < 0 ? ARC_OVF_NONE : over < overflow ? 
        ARC_OVF_SOME : ARC_OVF_SEVERE);
}

static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
    int alloc_flags)
{
    arc_buf_contents_t type = arc_buf_type(hdr);

    arc_get_data_impl(hdr, size, tag, alloc_flags);
    if (type == ARC_BUFC_METADATA) {
        return (abd_alloc(size, B_TRUE));
    } else {
        ASSERT(type == ARC_BUFC_DATA);
        return (abd_alloc(size, B_FALSE));
    }
}

static void *
arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
    arc_buf_contents_t type = arc_buf_type(hdr);

    arc_get_data_impl(hdr, size, tag, ARC_HDR_DO_ADAPT);
    if (type == ARC_BUFC_METADATA) {
        return (zio_buf_alloc(size));
    } else {
        ASSERT(type == ARC_BUFC_DATA);
        return (zio_data_buf_alloc(size));
    }
}

/*
 * Wait for the specified amount of data (in bytes) to be evicted from the
 * ARC, and for there to be sufficient free memory in the system. Waiting for
 * eviction ensures that the memory used by the ARC decreases. Waiting for
 * free memory ensures that the system won't run out of free pages, regardless
 * of ARC behavior and settings. See arc_lowmem_init().
 */
void
arc_wait_for_eviction(uint64_t amount, boolean_t use_reserve)
{
    switch (arc_is_overflowing(use_reserve)) {
    case ARC_OVF_NONE:
        return;
    case ARC_OVF_SOME:
        /*
         * This is a bit racy without taking arc_evict_lock, but the
         * worst that can happen is we either call zthr_wakeup() an
         * extra time due to a race with another thread here, or the
         * set flag gets cleared by arc_evict_cb(), which is unlikely
         * due to the big hysteresis, but also not important since at
         * this level of overflow the eviction is purely advisory.
         * At the same time, taking the global lock here every time
         * without waiting for the actual eviction would create
         * significant lock contention.
         */
        if (!arc_evict_needed) {
            arc_evict_needed = B_TRUE;
            zthr_wakeup(arc_evict_zthr);
        }
        return;
    case ARC_OVF_SEVERE:
    default:
    {
        arc_evict_waiter_t aw;
        list_link_init(&aw.aew_node);
        cv_init(&aw.aew_cv, NULL, CV_DEFAULT, NULL);

        uint64_t last_count = 0;
        mutex_enter(&arc_evict_lock);
        if (!list_is_empty(&arc_evict_waiters)) {
            arc_evict_waiter_t *last = list_tail(&arc_evict_waiters);
            last_count = last->aew_count;
        } else if (!arc_evict_needed) {
            arc_evict_needed = B_TRUE;
            zthr_wakeup(arc_evict_zthr);
        }
        /*
         * Note, the last waiter's count may be less than
         * arc_evict_count if we are low on memory, in which
         * case arc_evict_state_impl() may have deferred
         * wakeups (but still incremented arc_evict_count).
         */
        aw.aew_count = MAX(last_count, arc_evict_count) + amount;

        list_insert_tail(&arc_evict_waiters, &aw);

        arc_set_need_free();

        DTRACE_PROBE3(arc__wait__for__eviction,
            uint64_t, amount,
            uint64_t, arc_evict_count,
            uint64_t, aw.aew_count);

        /*
         * We will be woken up either when arc_evict_count reaches
         * aew_count, or when the ARC is no longer overflowing and
         * eviction completes.
         * In case of "false" wakeup, we will still be on the list.
         */
        do {
            cv_wait(&aw.aew_cv, &arc_evict_lock);
        } while (list_link_active(&aw.aew_node));
        mutex_exit(&arc_evict_lock);

        cv_destroy(&aw.aew_cv);
    }
    }
}

/*
 * Allocate a block and return it to the caller. If we are hitting the
 * hard limit for the cache size, we must sleep, waiting for the eviction
 * thread to catch up. If we're past the target size but below the hard
 * limit, we'll only signal the reclaim thread and continue on.
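 *
 * As a worked example (assuming the default zfs_arc_eviction_pct of 200),
 * a 128 KiB allocation that finds the ARC severely overflowing waits until
 * 256 KiB have been evicted on its behalf before proceeding.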
 */
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
    int alloc_flags)
{
    arc_state_t *state = hdr->b_l1hdr.b_state;
    arc_buf_contents_t type = arc_buf_type(hdr);

    if (alloc_flags & ARC_HDR_DO_ADAPT)
        arc_adapt(size, state);

    /*
     * If arc_size is currently overflowing, we must be adding data
     * faster than we are evicting. To ensure we don't compound the
     * problem by adding more data and forcing arc_size to grow even
     * further past its target size, we wait for the eviction thread to
     * make some progress. We also wait for there to be sufficient free
     * memory in the system, as measured by arc_free_memory().
     *
     * Specifically, we wait for zfs_arc_eviction_pct percent of the
     * requested size to be evicted. This should be more than 100%, to
     * ensure that progress is also made towards getting arc_size
     * under arc_c. See the comment above zfs_arc_eviction_pct.
     */
    arc_wait_for_eviction(size * zfs_arc_eviction_pct / 100,
        alloc_flags & ARC_HDR_USE_RESERVE);

    VERIFY3U(hdr->b_type, ==, type);
    if (type == ARC_BUFC_METADATA) {
        arc_space_consume(size, ARC_SPACE_META);
    } else {
        arc_space_consume(size, ARC_SPACE_DATA);
    }

    /*
     * Update the state size. Note that ghost states have a
     * "ghost size" and so don't need to be updated.
     */
    if (!GHOST_STATE(state)) {

        (void) zfs_refcount_add_many(&state->arcs_size, size, tag);

        /*
         * If this is reached via arc_read, the link is
         * protected by the hash lock. If reached via
         * arc_buf_alloc, the header should not be accessed by
         * any other thread. And, if reached via arc_read_done,
         * the hash lock will protect it if it's found in the
         * hash table; otherwise no other thread should be
         * trying to [add|remove]_reference it.
         */
        if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
            ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
            (void) zfs_refcount_add_many(&state->arcs_esize[type],
                size, tag);
        }

        /*
         * If we are growing the cache, and we are adding anonymous
         * data, and we have outgrown arc_p, update arc_p
         */
        if (aggsum_upper_bound(&arc_sums.arcstat_size) < arc_c &&
            hdr->b_l1hdr.b_state == arc_anon &&
            (zfs_refcount_count(&arc_anon->arcs_size) +
            zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
            arc_p = MIN(arc_c, arc_p + size);
    }
}

static void
arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag)
{
    arc_free_data_impl(hdr, size, tag);
    abd_free(abd);
}

static void
arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag)
{
    arc_buf_contents_t type = arc_buf_type(hdr);

    arc_free_data_impl(hdr, size, tag);
    if (type == ARC_BUFC_METADATA) {
        zio_buf_free(buf, size);
    } else {
        ASSERT(type == ARC_BUFC_DATA);
        zio_data_buf_free(buf, size);
    }
}

/*
 * Free the arc data buffer.
 */
static void
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
    arc_state_t *state = hdr->b_l1hdr.b_state;
    arc_buf_contents_t type = arc_buf_type(hdr);

    /* protected by hash lock, if in the hash table */
    if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
        ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
        ASSERT(state != arc_anon && state != arc_l2c_only);

        (void) zfs_refcount_remove_many(&state->arcs_esize[type],
            size, tag);
    }
    (void) zfs_refcount_remove_many(&state->arcs_size, size, tag);

    VERIFY3U(hdr->b_type, ==, type);
    if (type == ARC_BUFC_METADATA) {
        arc_space_return(size, ARC_SPACE_META);
    } else {
        ASSERT(type == ARC_BUFC_DATA);
        arc_space_return(size, ARC_SPACE_DATA);
    }
}

/*
 * This routine is called whenever a buffer is accessed.
 * NOTE: the caller must hold the buffer's hash lock; it is not
 * dropped here.
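 *
 * In summary, the state transitions applied below are:
 *
 *	anon       -> mru   (first cache insertion)
 *	mru        -> mfu   (second access, > ARC_MINTIME after the first)
 *	mru ghost  -> mfu   (or mru, if the access is a prefetch)
 *	mfu        -> mfu   (b_arc_access refreshed in place)
 *	mfu ghost  -> mfu   (or mru, if the access is a prefetch)
 *	l2c_only   -> mfu   (block was resident only in the L2ARC)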
*/ static void arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) { clock_t now; ASSERT(MUTEX_HELD(hash_lock)); ASSERT(HDR_HAS_L1HDR(hdr)); if (hdr->b_l1hdr.b_state == arc_anon) { /* * This buffer is not in the cache, and does not * appear in our "ghost" list. Add the new buffer * to the MRU state. */ ASSERT0(hdr->b_l1hdr.b_arc_access); hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); arc_change_state(arc_mru, hdr, hash_lock); } else if (hdr->b_l1hdr.b_state == arc_mru) { now = ddi_get_lbolt(); /* * If this buffer is here because of a prefetch, then either: * - clear the flag if this is a "referencing" read * (any subsequent access will bump this into the MFU state). * or * - move the buffer to the head of the list if this is * another prefetch (to make it less likely to be evicted). */ if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) { if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { /* link protected by hash lock */ ASSERT(multilist_link_active( &hdr->b_l1hdr.b_arc_node)); } else { if (HDR_HAS_L2HDR(hdr)) l2arc_hdr_arcstats_decrement_state(hdr); arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH | ARC_FLAG_PRESCIENT_PREFETCH); hdr->b_l1hdr.b_mru_hits++; ARCSTAT_BUMP(arcstat_mru_hits); if (HDR_HAS_L2HDR(hdr)) l2arc_hdr_arcstats_increment_state(hdr); } hdr->b_l1hdr.b_arc_access = now; return; } /* * This buffer has been "accessed" only once so far, * but it is still in the cache. Move it to the MFU * state. */ if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access + ARC_MINTIME)) { /* * More than 125ms have passed since we * instantiated this buffer. Move it to the * most frequently used state. */ hdr->b_l1hdr.b_arc_access = now; DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); arc_change_state(arc_mfu, hdr, hash_lock); } hdr->b_l1hdr.b_mru_hits++; ARCSTAT_BUMP(arcstat_mru_hits); } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) { arc_state_t *new_state; /* * This buffer has been "accessed" recently, but * was evicted from the cache. Move it to the * MFU state. */ if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) { new_state = arc_mru; if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) { if (HDR_HAS_L2HDR(hdr)) l2arc_hdr_arcstats_decrement_state(hdr); arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH | ARC_FLAG_PRESCIENT_PREFETCH); if (HDR_HAS_L2HDR(hdr)) l2arc_hdr_arcstats_increment_state(hdr); } DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); } else { new_state = arc_mfu; DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); } hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); arc_change_state(new_state, hdr, hash_lock); hdr->b_l1hdr.b_mru_ghost_hits++; ARCSTAT_BUMP(arcstat_mru_ghost_hits); } else if (hdr->b_l1hdr.b_state == arc_mfu) { /* * This buffer has been accessed more than once and is * still in the cache. Keep it in the MFU state. * * NOTE: an add_reference() that occurred when we did * the arc_read() will have kicked this off the list. * If it was a prefetch, we will explicitly move it to * the head of the list now. */ hdr->b_l1hdr.b_mfu_hits++; ARCSTAT_BUMP(arcstat_mfu_hits); hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) { arc_state_t *new_state = arc_mfu; /* * This buffer has been accessed more than once but has * been evicted from the cache. Move it back to the * MFU state. */ if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) { /* * This is a prefetch access... * move this block back to the MRU state. 
*/ new_state = arc_mru; } hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); arc_change_state(new_state, hdr, hash_lock); hdr->b_l1hdr.b_mfu_ghost_hits++; ARCSTAT_BUMP(arcstat_mfu_ghost_hits); } else if (hdr->b_l1hdr.b_state == arc_l2c_only) { /* * This buffer is on the 2nd Level ARC. */ hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); arc_change_state(arc_mfu, hdr, hash_lock); } else { cmn_err(CE_PANIC, "invalid arc state 0x%p", hdr->b_l1hdr.b_state); } } /* * This routine is called by dbuf_hold() to update the arc_access() state * which otherwise would be skipped for entries in the dbuf cache. */ void arc_buf_access(arc_buf_t *buf) { mutex_enter(&buf->b_evict_lock); arc_buf_hdr_t *hdr = buf->b_hdr; /* * Avoid taking the hash_lock when possible as an optimization. * The header must be checked again under the hash_lock in order * to handle the case where it is concurrently being released. */ if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) { mutex_exit(&buf->b_evict_lock); return; } kmutex_t *hash_lock = HDR_LOCK(hdr); mutex_enter(hash_lock); if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) { mutex_exit(hash_lock); mutex_exit(&buf->b_evict_lock); ARCSTAT_BUMP(arcstat_access_skip); return; } mutex_exit(&buf->b_evict_lock); ASSERT(hdr->b_l1hdr.b_state == arc_mru || hdr->b_l1hdr.b_state == arc_mfu); DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); arc_access(hdr, hash_lock); mutex_exit(hash_lock); ARCSTAT_BUMP(arcstat_hits); ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr) && !HDR_PRESCIENT_PREFETCH(hdr), demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits); } /* a generic arc_read_done_func_t which you can use */ /* ARGSUSED */ void arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, arc_buf_t *buf, void *arg) { if (buf == NULL) return; bcopy(buf->b_data, arg, arc_buf_size(buf)); arc_buf_destroy(buf, arg); } /* a generic arc_read_done_func_t */ /* ARGSUSED */ void arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, arc_buf_t *buf, void *arg) { arc_buf_t **bufp = arg; if (buf == NULL) { ASSERT(zio == NULL || zio->io_error != 0); *bufp = NULL; } else { ASSERT(zio == NULL || zio->io_error == 0); *bufp = buf; ASSERT(buf->b_data != NULL); } } static void arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp) { if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) { ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0); ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF); } else { if (HDR_COMPRESSION_ENABLED(hdr)) { ASSERT3U(arc_hdr_get_compress(hdr), ==, BP_GET_COMPRESS(bp)); } ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp)); ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp)); ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp)); } } static void arc_read_done(zio_t *zio) { blkptr_t *bp = zio->io_bp; arc_buf_hdr_t *hdr = zio->io_private; kmutex_t *hash_lock = NULL; arc_callback_t *callback_list; arc_callback_t *acb; boolean_t freeable = B_FALSE; /* * The hdr was inserted into hash-table and removed from lists * prior to starting I/O. We should find this header, since * it's in the hash table, and it should be legit since it's * not possible to evict it during the I/O. The only possible * reason for it not to be found is if we were freed during the * read. 
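	 * (In the freed case, HDR_IN_HASH_TABLE() below is false, no hash
	 * lock is taken, and the situation is recognized after the
	 * callbacks run, when the header is found in the arc_anon state.)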
*/ if (HDR_IN_HASH_TABLE(hdr)) { arc_buf_hdr_t *found; ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp)); ASSERT3U(hdr->b_dva.dva_word[0], ==, BP_IDENTITY(zio->io_bp)->dva_word[0]); ASSERT3U(hdr->b_dva.dva_word[1], ==, BP_IDENTITY(zio->io_bp)->dva_word[1]); found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock); ASSERT((found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || (found == hdr && HDR_L2_READING(hdr))); ASSERT3P(hash_lock, !=, NULL); } if (BP_IS_PROTECTED(bp)) { hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp); hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset; zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv); if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) { void *tmpbuf; tmpbuf = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t)); zio_crypt_decode_mac_zil(tmpbuf, hdr->b_crypt_hdr.b_mac); abd_return_buf(zio->io_abd, tmpbuf, sizeof (zil_chain_t)); } else { zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac); } } if (zio->io_error == 0) { /* byteswap if necessary */ if (BP_SHOULD_BYTESWAP(zio->io_bp)) { if (BP_GET_LEVEL(zio->io_bp) > 0) { hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64; } else { hdr->b_l1hdr.b_byteswap = DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); } } else { hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; } if (!HDR_L2_READING(hdr)) { hdr->b_complevel = zio->io_prop.zp_complevel; } } arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED); if (l2arc_noprefetch && HDR_PREFETCH(hdr)) arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE); callback_list = hdr->b_l1hdr.b_acb; ASSERT3P(callback_list, !=, NULL); if (hash_lock && zio->io_error == 0 && hdr->b_l1hdr.b_state == arc_anon) { /* * Only call arc_access on anonymous buffers. This is because * if we've issued an I/O for an evicted buffer, we've already * called arc_access (to prevent any simultaneous readers from * getting confused). */ arc_access(hdr, hash_lock); } /* * If a read request has a callback (i.e. acb_done is not NULL), then we * make a buf containing the data according to the parameters which were * passed in. The implementation of arc_buf_alloc_impl() ensures that we * aren't needlessly decompressing the data multiple times. */ int callback_cnt = 0; for (acb = callback_list; acb != NULL; acb = acb->acb_next) { if (!acb->acb_done || acb->acb_nobuf) continue; callback_cnt++; if (zio->io_error != 0) continue; int error = arc_buf_alloc_impl(hdr, zio->io_spa, &acb->acb_zb, acb->acb_private, acb->acb_encrypted, acb->acb_compressed, acb->acb_noauth, B_TRUE, &acb->acb_buf); /* * Assert non-speculative zios didn't fail because an * encryption key wasn't loaded */ ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) || error != EACCES); /* * If we failed to decrypt, report an error now (as the zio * layer would have done if it had done the transforms). */ if (error == ECKSUM) { ASSERT(BP_IS_PROTECTED(bp)); error = SET_ERROR(EIO); if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) { spa_log_error(zio->io_spa, &acb->acb_zb); (void) zfs_ereport_post( FM_EREPORT_ZFS_AUTHENTICATION, zio->io_spa, NULL, &acb->acb_zb, zio, 0); } } if (error != 0) { /* * Decompression or decryption failed. Set * io_error so that when we call acb_done * (below), we will indicate that the read * failed. Note that in the unusual case * where one callback is compressed and another * uncompressed, we will mark all of them * as failed, even though the uncompressed * one can't actually fail. 
In this case, * the hdr will not be anonymous, because * if there are multiple callbacks, it's * because multiple threads found the same * arc buf in the hash table. */ zio->io_error = error; } } /* * If there are multiple callbacks, we must have the hash lock, * because the only way for multiple threads to find this hdr is * in the hash table. This ensures that if there are multiple * callbacks, the hdr is not anonymous. If it were anonymous, * we couldn't use arc_buf_destroy() in the error case below. */ ASSERT(callback_cnt < 2 || hash_lock != NULL); hdr->b_l1hdr.b_acb = NULL; arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); if (callback_cnt == 0) ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr)); ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) || callback_list != NULL); if (zio->io_error == 0) { arc_hdr_verify(hdr, zio->io_bp); } else { arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR); if (hdr->b_l1hdr.b_state != arc_anon) arc_change_state(arc_anon, hdr, hash_lock); if (HDR_IN_HASH_TABLE(hdr)) buf_hash_remove(hdr); freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt); } /* * Broadcast before we drop the hash_lock to avoid the possibility * that the hdr (and hence the cv) might be freed before we get to * the cv_broadcast(). */ cv_broadcast(&hdr->b_l1hdr.b_cv); if (hash_lock != NULL) { mutex_exit(hash_lock); } else { /* * This block was freed while we waited for the read to * complete. It has been removed from the hash table and * moved to the anonymous state (so that it won't show up * in the cache). */ ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt); } /* execute each callback and free its structure */ while ((acb = callback_list) != NULL) { if (acb->acb_done != NULL) { if (zio->io_error != 0 && acb->acb_buf != NULL) { /* * If arc_buf_alloc_impl() fails during * decompression, the buf will still be * allocated, and needs to be freed here. */ arc_buf_destroy(acb->acb_buf, acb->acb_private); acb->acb_buf = NULL; } acb->acb_done(zio, &zio->io_bookmark, zio->io_bp, acb->acb_buf, acb->acb_private); } if (acb->acb_zio_dummy != NULL) { acb->acb_zio_dummy->io_error = zio->io_error; zio_nowait(acb->acb_zio_dummy); } callback_list = acb->acb_next; kmem_free(acb, sizeof (arc_callback_t)); } if (freeable) arc_hdr_destroy(hdr); } /* * "Read" the block at the specified DVA (in bp) via the * cache. If the block is found in the cache, invoke the provided * callback immediately and return. Note that the `zio' parameter * in the callback will be NULL in this case, since no IO was * required. If the block is not in the cache pass the read request * on to the spa with a substitute callback function, so that the * requested block will be added to the cache. * * If a read request arrives for a block that has a read in-progress, * either wait for the in-progress read to complete (and return the * results); or, if this is a read with a "done" func, add a record * to the read to invoke the "done" func when the read completes, * and return; or just return. * * arc_read_done() will invoke all the requested "done" functions * for readers of this block. 
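 *
 * A minimal synchronous caller looks roughly like this (an illustrative
 * sketch, not a verbatim call site; error handling elided):
 *
 *	arc_flags_t aflags = ARC_FLAG_WAIT;
 *	arc_buf_t *abuf = NULL;
 *	int err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (err == 0) {
 *		// ... use abuf->b_data ...
 *		arc_buf_destroy(abuf, &abuf);
 *	}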
*/ int arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_read_done_func_t *done, void *private, zio_priority_t priority, int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb) { arc_buf_hdr_t *hdr = NULL; kmutex_t *hash_lock = NULL; zio_t *rzio; uint64_t guid = spa_load_guid(spa); boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0; boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) && (zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0; boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) && (zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0; boolean_t embedded_bp = !!BP_IS_EMBEDDED(bp); boolean_t no_buf = *arc_flags & ARC_FLAG_NO_BUF; int rc = 0; ASSERT(!embedded_bp || BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); ASSERT(!BP_IS_HOLE(bp)); ASSERT(!BP_IS_REDACTED(bp)); /* * Normally SPL_FSTRANS will already be set since kernel threads which * expect to call the DMU interfaces will set it when created. System * calls are similarly handled by setting/cleaning the bit in the * registered callback (module/os/.../zfs/zpl_*). * * External consumers such as Lustre which call the exported DMU * interfaces may not have set SPL_FSTRANS. To avoid a deadlock * on the hash_lock always set and clear the bit. */ fstrans_cookie_t cookie = spl_fstrans_mark(); top: /* * Verify the block pointer contents are reasonable. This should * always be the case since the blkptr is protected by a checksum. * However, if there is damage it's desirable to detect this early * and treat it as a checksum error. This allows an alternate blkptr * to be tried when one is available (e.g. ditto blocks). */ if (!zfs_blkptr_verify(spa, bp, zio_flags & ZIO_FLAG_CONFIG_WRITER, BLK_VERIFY_LOG)) { rc = SET_ERROR(ECKSUM); goto out; } if (!embedded_bp) { /* * Embedded BP's have no DVA and require no I/O to "read". * Create an anonymous arc buf to back it. */ hdr = buf_hash_find(guid, bp, &hash_lock); } /* * Determine if we have an L1 cache hit or a cache miss. For simplicity * we maintain encrypted data separately from compressed / uncompressed * data. If the user is requesting raw encrypted data and we don't have * that in the header we will read from disk to guarantee that we can * get it even if the encryption keys aren't loaded. */ if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) || (hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) { arc_buf_t *buf = NULL; *arc_flags |= ARC_FLAG_CACHED; if (HDR_IO_IN_PROGRESS(hdr)) { zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head; if (*arc_flags & ARC_FLAG_CACHED_ONLY) { mutex_exit(hash_lock); ARCSTAT_BUMP(arcstat_cached_only_in_progress); rc = SET_ERROR(ENOENT); goto out; } ASSERT3P(head_zio, !=, NULL); if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) && priority == ZIO_PRIORITY_SYNC_READ) { /* * This is a sync read that needs to wait for * an in-flight async read. Request that the * zio have its priority upgraded. 
*/ zio_change_priority(head_zio, priority); DTRACE_PROBE1(arc__async__upgrade__sync, arc_buf_hdr_t *, hdr); ARCSTAT_BUMP(arcstat_async_upgrade_sync); } if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { arc_hdr_clear_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH); } if (*arc_flags & ARC_FLAG_WAIT) { cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); mutex_exit(hash_lock); goto top; } ASSERT(*arc_flags & ARC_FLAG_NOWAIT); if (done) { arc_callback_t *acb = NULL; acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); acb->acb_done = done; acb->acb_private = private; acb->acb_compressed = compressed_read; acb->acb_encrypted = encrypted_read; acb->acb_noauth = noauth_read; acb->acb_nobuf = no_buf; acb->acb_zb = *zb; if (pio != NULL) acb->acb_zio_dummy = zio_null(pio, spa, NULL, NULL, NULL, zio_flags); ASSERT3P(acb->acb_done, !=, NULL); acb->acb_zio_head = head_zio; acb->acb_next = hdr->b_l1hdr.b_acb; hdr->b_l1hdr.b_acb = acb; } mutex_exit(hash_lock); goto out; } ASSERT(hdr->b_l1hdr.b_state == arc_mru || hdr->b_l1hdr.b_state == arc_mfu); if (done && !no_buf) { if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { /* * This is a demand read which does not have to * wait for i/o because we did a predictive * prefetch i/o for it, which has completed. */ DTRACE_PROBE1( arc__demand__hit__predictive__prefetch, arc_buf_hdr_t *, hdr); ARCSTAT_BUMP( arcstat_demand_hit_predictive_prefetch); arc_hdr_clear_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH); } if (hdr->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) { ARCSTAT_BUMP( arcstat_demand_hit_prescient_prefetch); arc_hdr_clear_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH); } ASSERT(!embedded_bp || !BP_IS_HOLE(bp)); /* Get a buf with the desired data in it. */ rc = arc_buf_alloc_impl(hdr, spa, zb, private, encrypted_read, compressed_read, noauth_read, B_TRUE, &buf); if (rc == ECKSUM) { /* * Convert authentication and decryption errors * to EIO (and generate an ereport if needed) * before leaving the ARC. */ rc = SET_ERROR(EIO); if ((zio_flags & ZIO_FLAG_SPECULATIVE) == 0) { spa_log_error(spa, zb); (void) zfs_ereport_post( FM_EREPORT_ZFS_AUTHENTICATION, spa, NULL, zb, NULL, 0); } } if (rc != 0) { (void) remove_reference(hdr, hash_lock, private); arc_buf_destroy_impl(buf); buf = NULL; } /* assert any errors weren't due to unloaded keys */ ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) || rc != EACCES); } else if (*arc_flags & ARC_FLAG_PREFETCH && zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) { if (HDR_HAS_L2HDR(hdr)) l2arc_hdr_arcstats_decrement_state(hdr); arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); if (HDR_HAS_L2HDR(hdr)) l2arc_hdr_arcstats_increment_state(hdr); } DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); arc_access(hdr, hash_lock); if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH) arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH); if (*arc_flags & ARC_FLAG_L2CACHE) arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); mutex_exit(hash_lock); ARCSTAT_BUMP(arcstat_hits); ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits); if (done) done(NULL, zb, bp, buf, private); } else { uint64_t lsize = BP_GET_LSIZE(bp); uint64_t psize = BP_GET_PSIZE(bp); arc_callback_t *acb; vdev_t *vd = NULL; uint64_t addr = 0; boolean_t devw = B_FALSE; uint64_t size; abd_t *hdr_abd; int alloc_flags = encrypted_read ? ARC_HDR_ALLOC_RDATA : 0; if (*arc_flags & ARC_FLAG_CACHED_ONLY) { rc = SET_ERROR(ENOENT); if (hash_lock != NULL) mutex_exit(hash_lock); goto out; } if (hdr == NULL) { /* * This block is not in the cache or it has * embedded data. 
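			 * (Embedded bps carry their payload inside the block
			 * pointer itself, so there is never a hash table
			 * entry to find for them; see the embedded_bp
			 * handling elsewhere in this function.)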
*/ arc_buf_hdr_t *exists = NULL; arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), 0, type); if (!embedded_bp) { hdr->b_dva = *BP_IDENTITY(bp); hdr->b_birth = BP_PHYSICAL_BIRTH(bp); exists = buf_hash_insert(hdr, &hash_lock); } if (exists != NULL) { /* somebody beat us to the hash insert */ mutex_exit(hash_lock); buf_discard_identity(hdr); arc_hdr_destroy(hdr); goto top; /* restart the IO request */ } alloc_flags |= ARC_HDR_DO_ADAPT; } else { /* * This block is in the ghost cache or encrypted data * was requested and we didn't have it. If it was * L2-only (and thus didn't have an L1 hdr), * we realloc the header to add an L1 hdr. */ if (!HDR_HAS_L1HDR(hdr)) { hdr = arc_hdr_realloc(hdr, hdr_l2only_cache, hdr_full_cache); } if (GHOST_STATE(hdr->b_l1hdr.b_state)) { ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); ASSERT(!HDR_HAS_RABD(hdr)); ASSERT(!HDR_IO_IN_PROGRESS(hdr)); ASSERT0(zfs_refcount_count( &hdr->b_l1hdr.b_refcnt)); ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); } else if (HDR_IO_IN_PROGRESS(hdr)) { /* * If this header already had an IO in progress * and we are performing another IO to fetch * encrypted data we must wait until the first * IO completes so as not to confuse * arc_read_done(). This should be very rare * and so the performance impact shouldn't * matter. */ cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); mutex_exit(hash_lock); goto top; } /* * This is a delicate dance that we play here. * This hdr might be in the ghost list so we access * it to move it out of the ghost list before we * initiate the read. If it's a prefetch then * it won't have a callback so we'll remove the * reference that arc_buf_alloc_impl() created. We * do this after we've called arc_access() to * avoid hitting an assert in remove_reference(). */ arc_adapt(arc_hdr_size(hdr), hdr->b_l1hdr.b_state); arc_access(hdr, hash_lock); } arc_hdr_alloc_abd(hdr, alloc_flags); if (encrypted_read) { ASSERT(HDR_HAS_RABD(hdr)); size = HDR_GET_PSIZE(hdr); hdr_abd = hdr->b_crypt_hdr.b_rabd; zio_flags |= ZIO_FLAG_RAW; } else { ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); size = arc_hdr_size(hdr); hdr_abd = hdr->b_l1hdr.b_pabd; if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) { zio_flags |= ZIO_FLAG_RAW_COMPRESS; } /* * For authenticated bp's, we do not ask the ZIO layer * to authenticate them since this will cause the entire * IO to fail if the key isn't loaded. Instead, we * defer authentication until arc_buf_fill(), which will * verify the data when the key is available. 
*/ if (BP_IS_AUTHENTICATED(bp)) zio_flags |= ZIO_FLAG_RAW_ENCRYPT; } if (*arc_flags & ARC_FLAG_PREFETCH && zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) { if (HDR_HAS_L2HDR(hdr)) l2arc_hdr_arcstats_decrement_state(hdr); arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); if (HDR_HAS_L2HDR(hdr)) l2arc_hdr_arcstats_increment_state(hdr); } if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH) arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH); if (*arc_flags & ARC_FLAG_L2CACHE) arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); if (BP_IS_AUTHENTICATED(bp)) arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH); if (BP_GET_LEVEL(bp) > 0) arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT); if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH) arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH); ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state)); acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); acb->acb_done = done; acb->acb_private = private; acb->acb_compressed = compressed_read; acb->acb_encrypted = encrypted_read; acb->acb_noauth = noauth_read; acb->acb_zb = *zb; ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); hdr->b_l1hdr.b_acb = acb; arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); if (HDR_HAS_L2HDR(hdr) && (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) { devw = hdr->b_l2hdr.b_dev->l2ad_writing; addr = hdr->b_l2hdr.b_daddr; /* * Lock out L2ARC device removal. */ if (vdev_is_dead(vd) || !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) vd = NULL; } /* * We count both async reads and scrub IOs as asynchronous so * that both can be upgraded in the event of a cache hit while * the read IO is still in-flight. */ if (priority == ZIO_PRIORITY_ASYNC_READ || priority == ZIO_PRIORITY_SCRUB) arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); else arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); /* * At this point, we have a level 1 cache miss or a blkptr * with embedded data. Try again in L2ARC if possible. */ ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize); /* * Skip ARC stat bump for block pointers with embedded * data. The data are read from the blkptr itself via * decode_embedded_bp_compressed(). */ if (!embedded_bp) { DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, uint64_t, lsize, zbookmark_phys_t *, zb); ARCSTAT_BUMP(arcstat_misses); ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, misses); zfs_racct_read(size, 1); } /* Check if the spa even has l2 configured */ const boolean_t spa_has_l2 = l2arc_ndev != 0 && spa->spa_l2cache.sav_count > 0; if (vd != NULL && spa_has_l2 && !(l2arc_norw && devw)) { /* * Read from the L2ARC if the following are true: * 1. The L2ARC vdev was previously cached. * 2. This buffer still has L2ARC metadata. * 3. This buffer isn't currently writing to the L2ARC. * 4. The L2ARC entry wasn't evicted, which may * also have invalidated the vdev. * 5. This isn't prefetch or l2arc_noprefetch is 0. */ if (HDR_HAS_L2HDR(hdr) && !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { l2arc_read_callback_t *cb; abd_t *abd; uint64_t asize; DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); ARCSTAT_BUMP(arcstat_l2_hits); hdr->b_l2hdr.b_hits++; cb = kmem_zalloc(sizeof (l2arc_read_callback_t), KM_SLEEP); cb->l2rcb_hdr = hdr; cb->l2rcb_bp = *bp; cb->l2rcb_zb = *zb; cb->l2rcb_flags = zio_flags; /* * When Compressed ARC is disabled, but the * L2ARC block is compressed, arc_hdr_size() * will have returned LSIZE rather than PSIZE. 
*/ if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr) && HDR_GET_PSIZE(hdr) != 0) { size = HDR_GET_PSIZE(hdr); } asize = vdev_psize_to_asize(vd, size); if (asize != size) { abd = abd_alloc_for_io(asize, HDR_ISTYPE_METADATA(hdr)); cb->l2rcb_abd = abd; } else { abd = hdr_abd; } ASSERT(addr >= VDEV_LABEL_START_SIZE && addr + asize <= vd->vdev_psize - VDEV_LABEL_END_SIZE); /* * l2arc read. The SCL_L2ARC lock will be * released by l2arc_read_done(). * Issue a null zio if the underlying buffer * was squashed to zero size by compression. */ ASSERT3U(arc_hdr_get_compress(hdr), !=, ZIO_COMPRESS_EMPTY); rzio = zio_read_phys(pio, vd, addr, asize, abd, ZIO_CHECKSUM_OFF, l2arc_read_done, cb, priority, zio_flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE); acb->acb_zio_head = rzio; if (hash_lock != NULL) mutex_exit(hash_lock); DTRACE_PROBE2(l2arc__read, vdev_t *, vd, zio_t *, rzio); ARCSTAT_INCR(arcstat_l2_read_bytes, HDR_GET_PSIZE(hdr)); if (*arc_flags & ARC_FLAG_NOWAIT) { zio_nowait(rzio); goto out; } ASSERT(*arc_flags & ARC_FLAG_WAIT); if (zio_wait(rzio) == 0) goto out; /* l2arc read error; goto zio_read() */ if (hash_lock != NULL) mutex_enter(hash_lock); } else { DTRACE_PROBE1(l2arc__miss, arc_buf_hdr_t *, hdr); ARCSTAT_BUMP(arcstat_l2_misses); if (HDR_L2_WRITING(hdr)) ARCSTAT_BUMP(arcstat_l2_rw_clash); spa_config_exit(spa, SCL_L2ARC, vd); } } else { if (vd != NULL) spa_config_exit(spa, SCL_L2ARC, vd); /* * Only a spa with l2 should contribute to l2 * miss stats. (Including the case of having a * faulted cache device - that's also a miss.) */ if (spa_has_l2) { /* * Skip ARC stat bump for block pointers with * embedded data. The data are read from the * blkptr itself via * decode_embedded_bp_compressed(). */ if (!embedded_bp) { DTRACE_PROBE1(l2arc__miss, arc_buf_hdr_t *, hdr); ARCSTAT_BUMP(arcstat_l2_misses); } } } rzio = zio_read(pio, spa, bp, hdr_abd, size, arc_read_done, hdr, priority, zio_flags, zb); acb->acb_zio_head = rzio; if (hash_lock != NULL) mutex_exit(hash_lock); if (*arc_flags & ARC_FLAG_WAIT) { rc = zio_wait(rzio); goto out; } ASSERT(*arc_flags & ARC_FLAG_NOWAIT); zio_nowait(rzio); } out: /* embedded bps don't actually go to disk */ if (!embedded_bp) spa_read_history_add(spa, zb, *arc_flags); spl_fstrans_unmark(cookie); return (rc); } arc_prune_t * arc_add_prune_callback(arc_prune_func_t *func, void *private) { arc_prune_t *p; p = kmem_alloc(sizeof (*p), KM_SLEEP); p->p_pfunc = func; p->p_private = private; list_link_init(&p->p_node); zfs_refcount_create(&p->p_refcnt); mutex_enter(&arc_prune_mtx); zfs_refcount_add(&p->p_refcnt, &arc_prune_list); list_insert_head(&arc_prune_list, p); mutex_exit(&arc_prune_mtx); return (p); } void arc_remove_prune_callback(arc_prune_t *p) { boolean_t wait = B_FALSE; mutex_enter(&arc_prune_mtx); list_remove(&arc_prune_list, p); if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0) wait = B_TRUE; mutex_exit(&arc_prune_mtx); /* wait for arc_prune_task to finish */ if (wait) taskq_wait_outstanding(arc_prune_taskq, 0); ASSERT0(zfs_refcount_count(&p->p_refcnt)); zfs_refcount_destroy(&p->p_refcnt); kmem_free(p, sizeof (*p)); } /* * Notify the arc that a block was freed, and thus will never be used again. 
 */
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	uint64_t guid = spa_load_guid(spa);

	ASSERT(!BP_IS_EMBEDDED(bp));

	hdr = buf_hash_find(guid, bp, &hash_lock);
	if (hdr == NULL)
		return;

	/*
	 * We might be trying to free a block that is still doing I/O
	 * (i.e. prefetch) or has a reference (i.e. a dedup-ed,
	 * dmu_sync-ed block). If this block is being prefetched, then it
	 * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
	 * until the I/O completes. A block may also have a reference if it is
	 * part of a dedup-ed, dmu_synced write. The dmu_sync() function would
	 * have written the new block to its final resting place on disk but
	 * without the dedup flag set. This would have left the hdr in the MRU
	 * state and discoverable. When the txg finally syncs it detects that
	 * the block was overridden in open context and issues an override I/O.
	 * Since this is a dedup block, the override I/O will determine if the
	 * block is already in the DDT. If so, then it will replace the io_bp
	 * with the bp from the DDT and allow the I/O to finish. When the I/O
	 * reaches the done callback, dbuf_write_override_done, it will
	 * check to see if the io_bp and io_bp_override are identical.
	 * If they are not, then it indicates that the bp was replaced with
	 * the bp in the DDT and the override bp is freed. This allows
	 * us to arrive here with a reference on a block that is being
	 * freed. So if we have an I/O in progress, or a reference to
	 * this hdr, then we don't destroy the hdr.
	 */
	if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
	    zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
		arc_change_state(arc_anon, hdr, hash_lock);
		arc_hdr_destroy(hdr);
		mutex_exit(hash_lock);
	} else {
		mutex_exit(hash_lock);
	}
}

/*
 * Release this buffer from the cache, making it an anonymous buffer. This
 * must be done after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	/*
	 * It would be nice to assert that if it's DMU metadata (level >
	 * 0 || it's the dnode file), then it must be syncing context.
	 * But we don't know that information at this level.
	 */

	mutex_enter(&buf->b_evict_lock);

	ASSERT(HDR_HAS_L1HDR(hdr));

	/*
	 * We don't grab the hash lock prior to this check, because if
	 * the buffer's header is in the arc_anon state, it won't be
	 * linked into the hash table.
	 */
	if (hdr->b_l1hdr.b_state == arc_anon) {
		mutex_exit(&buf->b_evict_lock);
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		ASSERT(!HDR_HAS_L2HDR(hdr));

		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
		ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));

		hdr->b_l1hdr.b_arc_access = 0;

		/*
		 * If the buf is being overridden then it may already
		 * have a hdr that is not empty.
		 */
		buf_discard_identity(hdr);
		arc_buf_thaw(buf);

		return;
	}

	kmutex_t *hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	/*
	 * This assignment is only valid as long as the hash_lock is
	 * held, we must be careful not to reference state or the
	 * b_state field after dropping the lock.
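	 * (For example, another thread could move the header to a
	 * different ARC state as soon as the hash lock is released,
	 * leaving a cached "state" pointer stale.)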
*/ arc_state_t *state = hdr->b_l1hdr.b_state; ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); ASSERT3P(state, !=, arc_anon); /* this buffer is not on any list */ ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0); if (HDR_HAS_L2HDR(hdr)) { mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx); /* * We have to recheck this conditional again now that * we're holding the l2ad_mtx to prevent a race with * another thread which might be concurrently calling * l2arc_evict(). In that case, l2arc_evict() might have * destroyed the header's L2 portion as we were waiting * to acquire the l2ad_mtx. */ if (HDR_HAS_L2HDR(hdr)) arc_hdr_l2hdr_destroy(hdr); mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx); } /* * Do we have more than one buf? */ if (hdr->b_l1hdr.b_bufcnt > 1) { arc_buf_hdr_t *nhdr; uint64_t spa = hdr->b_spa; uint64_t psize = HDR_GET_PSIZE(hdr); uint64_t lsize = HDR_GET_LSIZE(hdr); boolean_t protected = HDR_PROTECTED(hdr); enum zio_compress compress = arc_hdr_get_compress(hdr); arc_buf_contents_t type = arc_buf_type(hdr); VERIFY3U(hdr->b_type, ==, type); ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL); (void) remove_reference(hdr, hash_lock, tag); if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) { ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf); ASSERT(ARC_BUF_LAST(buf)); } /* * Pull the data off of this hdr and attach it to * a new anonymous hdr. Also find the last buffer * in the hdr's buffer list. */ arc_buf_t *lastbuf = arc_buf_remove(hdr, buf); ASSERT3P(lastbuf, !=, NULL); /* * If the current arc_buf_t and the hdr are sharing their data * buffer, then we must stop sharing that block. */ if (arc_buf_is_shared(buf)) { ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf); VERIFY(!arc_buf_is_shared(lastbuf)); /* * First, sever the block sharing relationship between * buf and the arc_buf_hdr_t. */ arc_unshare_buf(hdr, buf); /* * Now we need to recreate the hdr's b_pabd. Since we * have lastbuf handy, we try to share with it, but if * we can't then we allocate a new b_pabd and copy the * data from buf into it. */ if (arc_can_share(hdr, lastbuf)) { arc_share_buf(hdr, lastbuf); } else { arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT); abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data, psize); } VERIFY3P(lastbuf->b_data, !=, NULL); } else if (HDR_SHARED_DATA(hdr)) { /* * Uncompressed shared buffers are always at the end * of the list. Compressed buffers don't have the * same requirements. This makes it hard to * simply assert that the lastbuf is shared so * we rely on the hdr's compression flags to determine * if we have a compressed, shared buffer. */ ASSERT(arc_buf_is_shared(lastbuf) || arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF); ASSERT(!ARC_BUF_SHARED(buf)); } ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr)); ASSERT3P(state, !=, arc_l2c_only); (void) zfs_refcount_remove_many(&state->arcs_size, arc_buf_size(buf), buf); if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) { ASSERT3P(state, !=, arc_l2c_only); (void) zfs_refcount_remove_many( &state->arcs_esize[type], arc_buf_size(buf), buf); } hdr->b_l1hdr.b_bufcnt -= 1; if (ARC_BUF_ENCRYPTED(buf)) hdr->b_crypt_hdr.b_ebufcnt -= 1; arc_cksum_verify(buf); arc_buf_unwatch(buf); /* if this is the last uncompressed buf free the checksum */ if (!arc_hdr_has_uncompressed_buf(hdr)) arc_cksum_free(hdr); mutex_exit(hash_lock); /* * Allocate a new hdr. The new hdr will contain a b_pabd * buffer which will be freed in arc_write(). 
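	 * The released buf takes its data with it to the new anonymous
	 * hdr, while any remaining bufs stay attached to the original,
	 * still hashed hdr.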
*/ nhdr = arc_hdr_alloc(spa, psize, lsize, protected, compress, hdr->b_complevel, type); ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL); ASSERT0(nhdr->b_l1hdr.b_bufcnt); ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt)); VERIFY3U(nhdr->b_type, ==, type); ASSERT(!HDR_SHARED_DATA(nhdr)); nhdr->b_l1hdr.b_buf = buf; nhdr->b_l1hdr.b_bufcnt = 1; if (ARC_BUF_ENCRYPTED(buf)) nhdr->b_crypt_hdr.b_ebufcnt = 1; (void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag); buf->b_hdr = nhdr; mutex_exit(&buf->b_evict_lock); (void) zfs_refcount_add_many(&arc_anon->arcs_size, arc_buf_size(buf), buf); } else { mutex_exit(&buf->b_evict_lock); ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1); /* protected by hash lock, or hdr is on arc_anon */ ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); ASSERT(!HDR_IO_IN_PROGRESS(hdr)); hdr->b_l1hdr.b_mru_hits = 0; hdr->b_l1hdr.b_mru_ghost_hits = 0; hdr->b_l1hdr.b_mfu_hits = 0; hdr->b_l1hdr.b_mfu_ghost_hits = 0; arc_change_state(arc_anon, hdr, hash_lock); hdr->b_l1hdr.b_arc_access = 0; mutex_exit(hash_lock); buf_discard_identity(hdr); arc_buf_thaw(buf); } } int arc_released(arc_buf_t *buf) { int released; mutex_enter(&buf->b_evict_lock); released = (buf->b_data != NULL && buf->b_hdr->b_l1hdr.b_state == arc_anon); mutex_exit(&buf->b_evict_lock); return (released); } #ifdef ZFS_DEBUG int arc_referenced(arc_buf_t *buf) { int referenced; mutex_enter(&buf->b_evict_lock); referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt)); mutex_exit(&buf->b_evict_lock); return (referenced); } #endif static void arc_write_ready(zio_t *zio) { arc_write_callback_t *callback = zio->io_private; arc_buf_t *buf = callback->awcb_buf; arc_buf_hdr_t *hdr = buf->b_hdr; blkptr_t *bp = zio->io_bp; uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp); fstrans_cookie_t cookie = spl_fstrans_mark(); ASSERT(HDR_HAS_L1HDR(hdr)); ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt)); ASSERT(hdr->b_l1hdr.b_bufcnt > 0); /* * If we're reexecuting this zio because the pool suspended, then * cleanup any state that was previously set the first time the * callback was invoked. 
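	 * (A pool suspension, e.g. after a device failure, causes the
	 * zio to be retried once the pool resumes; ZIO_FLAG_REEXECUTED
	 * marks that retry.)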
	 */
	if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
		arc_cksum_free(hdr);
		arc_buf_unwatch(buf);
		if (hdr->b_l1hdr.b_pabd != NULL) {
			if (arc_buf_is_shared(buf)) {
				arc_unshare_buf(hdr, buf);
			} else {
				arc_hdr_free_abd(hdr, B_FALSE);
			}
		}

		if (HDR_HAS_RABD(hdr))
			arc_hdr_free_abd(hdr, B_TRUE);
	}
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	ASSERT(!HDR_HAS_RABD(hdr));
	ASSERT(!HDR_SHARED_DATA(hdr));
	ASSERT(!arc_buf_is_shared(buf));

	callback->awcb_ready(zio, buf, callback->awcb_private);

	if (HDR_IO_IN_PROGRESS(hdr))
		ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);

	arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);

	if (BP_IS_PROTECTED(bp) != !!HDR_PROTECTED(hdr))
		hdr = arc_hdr_realloc_crypt(hdr, BP_IS_PROTECTED(bp));

	if (BP_IS_PROTECTED(bp)) {
		/* ZIL blocks are written through zio_rewrite */
		ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
		ASSERT(HDR_PROTECTED(hdr));

		if (BP_SHOULD_BYTESWAP(bp)) {
			if (BP_GET_LEVEL(bp) > 0) {
				hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
			} else {
				hdr->b_l1hdr.b_byteswap =
				    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			}
		} else {
			hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
		}

		hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
		hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
		zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
		    hdr->b_crypt_hdr.b_iv);
		zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
	}

	/*
	 * If this block was written for raw encryption but the zio layer
	 * ended up only authenticating it, adjust the buffer flags now.
	 */
	if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
		arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
		buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
		if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
			buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
	} else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) {
		buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
		buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
	}

	/* this must be done after the buffer flags are adjusted */
	arc_cksum_compute(buf);

	enum zio_compress compress;
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
		compress = ZIO_COMPRESS_OFF;
	} else {
		ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
		compress = BP_GET_COMPRESS(bp);
	}
	HDR_SET_PSIZE(hdr, psize);
	arc_hdr_set_compress(hdr, compress);
	hdr->b_complevel = zio->io_prop.zp_complevel;

	if (zio->io_error != 0 || psize == 0)
		goto out;

	/*
	 * Fill the hdr with data. If the buffer is encrypted we have no choice
	 * but to copy the data into b_rabd. If the hdr is compressed, the data
	 * we want is available from the zio, otherwise we can take it from
	 * the buf.
	 *
	 * We might be able to share the buf's data with the hdr here. However,
	 * doing so would cause the ARC to be full of linear ABDs if we write a
	 * lot of shareable data. As a compromise, we check whether scattered
	 * ABDs are allowed, and assume that if they are then the user wants
	 * the ARC to be primarily filled with them regardless of the data being
	 * written. Therefore, if they're allowed then we allocate one and copy
	 * the data into it; otherwise, we share the data directly if we can.
	 */
	if (ARC_BUF_ENCRYPTED(buf)) {
		ASSERT3U(psize, >, 0);
		ASSERT(ARC_BUF_COMPRESSED(buf));
		arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT | ARC_HDR_ALLOC_RDATA |
		    ARC_HDR_USE_RESERVE);
		abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
	} else if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
		/*
		 * Ideally, we would always copy the io_abd into b_pabd, but the
		 * user may have disabled compressed ARC, thus we must check the
		 * hdr's compression setting rather than the io_bp's.
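		 * With compressed ARC disabled the hdr holds uncompressed
		 * data even when the io_bp claims compression, so the
		 * on-disk bytes in io_abd would be the wrong thing to
		 * copy in that case.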
*/ if (BP_IS_ENCRYPTED(bp)) { ASSERT3U(psize, >, 0); arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT | ARC_HDR_ALLOC_RDATA | ARC_HDR_USE_RESERVE); abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize); } else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF && !ARC_BUF_COMPRESSED(buf)) { ASSERT3U(psize, >, 0); arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT | ARC_HDR_USE_RESERVE); abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize); } else { ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr)); arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT | ARC_HDR_USE_RESERVE); abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data, arc_buf_size(buf)); } } else { ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd)); ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf)); ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); arc_share_buf(hdr, buf); } out: arc_hdr_verify(hdr, bp); spl_fstrans_unmark(cookie); } static void arc_write_children_ready(zio_t *zio) { arc_write_callback_t *callback = zio->io_private; arc_buf_t *buf = callback->awcb_buf; callback->awcb_children_ready(zio, buf, callback->awcb_private); } /* * The SPA calls this callback for each physical write that happens on behalf * of a logical write. See the comment in dbuf_write_physdone() for details. */ static void arc_write_physdone(zio_t *zio) { arc_write_callback_t *cb = zio->io_private; if (cb->awcb_physdone != NULL) cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private); } static void arc_write_done(zio_t *zio) { arc_write_callback_t *callback = zio->io_private; arc_buf_t *buf = callback->awcb_buf; arc_buf_hdr_t *hdr = buf->b_hdr; ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); if (zio->io_error == 0) { arc_hdr_verify(hdr, zio->io_bp); if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { buf_discard_identity(hdr); } else { hdr->b_dva = *BP_IDENTITY(zio->io_bp); hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); } } else { ASSERT(HDR_EMPTY(hdr)); } /* * If the block to be written was all-zero or compressed enough to be * embedded in the BP, no write was performed so there will be no * dva/birth/checksum. The buffer must therefore remain anonymous * (and uncached). */ if (!HDR_EMPTY(hdr)) { arc_buf_hdr_t *exists; kmutex_t *hash_lock; ASSERT3U(zio->io_error, ==, 0); arc_cksum_verify(buf); exists = buf_hash_insert(hdr, &hash_lock); if (exists != NULL) { /* * This can only happen if we overwrite for * sync-to-convergence, because we remove * buffers from the hash table when we arc_free(). 
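		 * Three legitimate collisions are handled below:
		 * sync-to-convergence rewrites (the stale hdr is
		 * destroyed and the insert retried), nopwrites (the bp
		 * must equal the original), and dedup (the anonymous hdr
		 * simply stays out of the hash table).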
*/ if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) panic("bad overwrite, hdr=%p exists=%p", (void *)hdr, (void *)exists); ASSERT(zfs_refcount_is_zero( &exists->b_l1hdr.b_refcnt)); arc_change_state(arc_anon, exists, hash_lock); arc_hdr_destroy(exists); mutex_exit(hash_lock); exists = buf_hash_insert(hdr, &hash_lock); ASSERT3P(exists, ==, NULL); } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { /* nopwrite */ ASSERT(zio->io_prop.zp_nopwrite); if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) panic("bad nopwrite, hdr=%p exists=%p", (void *)hdr, (void *)exists); } else { /* Dedup */ ASSERT(hdr->b_l1hdr.b_bufcnt == 1); ASSERT(hdr->b_l1hdr.b_state == arc_anon); ASSERT(BP_GET_DEDUP(zio->io_bp)); ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); } } arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); /* if it's not anon, we are doing a scrub */ if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon) arc_access(hdr, hash_lock); mutex_exit(hash_lock); } else { arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); } ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); callback->awcb_done(zio, buf, callback->awcb_private); abd_free(zio->io_abd); kmem_free(callback, sizeof (arc_write_callback_t)); } zio_t * arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp, arc_write_done_func_t *ready, arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone, arc_write_done_func_t *done, void *private, zio_priority_t priority, int zio_flags, const zbookmark_phys_t *zb) { arc_buf_hdr_t *hdr = buf->b_hdr; arc_write_callback_t *callback; zio_t *zio; zio_prop_t localprop = *zp; ASSERT3P(ready, !=, NULL); ASSERT3P(done, !=, NULL); ASSERT(!HDR_IO_ERROR(hdr)); ASSERT(!HDR_IO_IN_PROGRESS(hdr)); ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0); if (l2arc) arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); if (ARC_BUF_ENCRYPTED(buf)) { ASSERT(ARC_BUF_COMPRESSED(buf)); localprop.zp_encrypt = B_TRUE; localprop.zp_compress = HDR_GET_COMPRESS(hdr); localprop.zp_complevel = hdr->b_complevel; localprop.zp_byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ? ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER; bcopy(hdr->b_crypt_hdr.b_salt, localprop.zp_salt, ZIO_DATA_SALT_LEN); bcopy(hdr->b_crypt_hdr.b_iv, localprop.zp_iv, ZIO_DATA_IV_LEN); bcopy(hdr->b_crypt_hdr.b_mac, localprop.zp_mac, ZIO_DATA_MAC_LEN); if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) { localprop.zp_nopwrite = B_FALSE; localprop.zp_copies = MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1); } zio_flags |= ZIO_FLAG_RAW; } else if (ARC_BUF_COMPRESSED(buf)) { ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf)); localprop.zp_compress = HDR_GET_COMPRESS(hdr); localprop.zp_complevel = hdr->b_complevel; zio_flags |= ZIO_FLAG_RAW_COMPRESS; } callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); callback->awcb_ready = ready; callback->awcb_children_ready = children_ready; callback->awcb_physdone = physdone; callback->awcb_done = done; callback->awcb_private = private; callback->awcb_buf = buf; /* * The hdr's b_pabd is now stale, free it now. A new data block * will be allocated when the zio pipeline calls arc_write_ready(). */ if (hdr->b_l1hdr.b_pabd != NULL) { /* * If the buf is currently sharing the data block with * the hdr then we need to break that relationship here. * The hdr will remain with a NULL data pointer and the * buf will take sole ownership of the block. 
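	 * (zio_write() below then borrows buf->b_data via
	 * abd_get_from_buf() rather than referencing the hdr's data.)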
		 */
		if (arc_buf_is_shared(buf)) {
			arc_unshare_buf(hdr, buf);
		} else {
			arc_hdr_free_abd(hdr, B_FALSE);
		}
		VERIFY3P(buf->b_data, !=, NULL);
	}

	if (HDR_HAS_RABD(hdr))
		arc_hdr_free_abd(hdr, B_TRUE);

	if (!(zio_flags & ZIO_FLAG_RAW))
		arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);

	ASSERT(!arc_buf_is_shared(buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);

	zio = zio_write(pio, spa, txg, bp,
	    abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
	    HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
	    (children_ready != NULL) ? arc_write_children_ready : NULL,
	    arc_write_physdone, arc_write_done, callback,
	    priority, zio_flags, zb);

	return (zio);
}

void
arc_tempreserve_clear(uint64_t reserve)
{
	atomic_add_64(&arc_tempreserve, -reserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
{
	int error;
	uint64_t anon_size;

	if (!arc_no_grow && reserve > arc_c/4 &&
	    reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
		arc_c = MIN(arc_c_max, reserve * 4);

	/*
	 * Throttle when the calculated memory footprint for the TXG
	 * exceeds the target ARC size.
	 */
	if (reserve > arc_c) {
		DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
		return (SET_ERROR(ERESTART));
	}

	/*
	 * Don't count loaned bufs as in flight dirty data to prevent long
	 * network delays from blocking transactions that are ready to be
	 * assigned to a txg.
	 */

	/* assert that it has not wrapped around */
	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);

	anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
	    arc_loaned_bytes), 0);

	/*
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data. We therefore need to
	 * make sure that there is sufficient available memory for this.
	 */
	error = arc_memory_throttle(spa, reserve, txg);
	if (error != 0)
		return (error);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large. We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 *
	 * In the case of one pool being built on another pool, we want
	 * to make sure we don't end up throttling the lower (backing)
	 * pool when the upper pool is the majority contributor to dirty
	 * data. To ensure we make forward progress during throttling, we
	 * also check the current pool's net dirty data and only throttle
	 * if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty
	 * data in the cache.
	 *
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail. Not a huge deal.
	 */
	uint64_t total_dirty = reserve + arc_tempreserve + anon_size;
	uint64_t spa_dirty_anon = spa_dirty_data(spa);
	uint64_t rarc_c = arc_warm ?
arc_c : arc_c_max; if (total_dirty > rarc_c * zfs_arc_dirty_limit_percent / 100 && anon_size > rarc_c * zfs_arc_anon_limit_percent / 100 && spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) { #ifdef ZFS_DEBUG uint64_t meta_esize = zfs_refcount_count( &arc_anon->arcs_esize[ARC_BUFC_METADATA]); uint64_t data_esize = zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]); dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " "anon_data=%lluK tempreserve=%lluK rarc_c=%lluK\n", (u_longlong_t)arc_tempreserve >> 10, (u_longlong_t)meta_esize >> 10, (u_longlong_t)data_esize >> 10, (u_longlong_t)reserve >> 10, (u_longlong_t)rarc_c >> 10); #endif DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle); return (SET_ERROR(ERESTART)); } atomic_add_64(&arc_tempreserve, reserve); return (0); } static void arc_kstat_update_state(arc_state_t *state, kstat_named_t *size, kstat_named_t *evict_data, kstat_named_t *evict_metadata) { size->value.ui64 = zfs_refcount_count(&state->arcs_size); evict_data->value.ui64 = zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]); evict_metadata->value.ui64 = zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]); } static int arc_kstat_update(kstat_t *ksp, int rw) { arc_stats_t *as = ksp->ks_data; if (rw == KSTAT_WRITE) return (SET_ERROR(EACCES)); as->arcstat_hits.value.ui64 = wmsum_value(&arc_sums.arcstat_hits); as->arcstat_misses.value.ui64 = wmsum_value(&arc_sums.arcstat_misses); as->arcstat_demand_data_hits.value.ui64 = wmsum_value(&arc_sums.arcstat_demand_data_hits); as->arcstat_demand_data_misses.value.ui64 = wmsum_value(&arc_sums.arcstat_demand_data_misses); as->arcstat_demand_metadata_hits.value.ui64 = wmsum_value(&arc_sums.arcstat_demand_metadata_hits); as->arcstat_demand_metadata_misses.value.ui64 = wmsum_value(&arc_sums.arcstat_demand_metadata_misses); as->arcstat_prefetch_data_hits.value.ui64 = wmsum_value(&arc_sums.arcstat_prefetch_data_hits); as->arcstat_prefetch_data_misses.value.ui64 = wmsum_value(&arc_sums.arcstat_prefetch_data_misses); as->arcstat_prefetch_metadata_hits.value.ui64 = wmsum_value(&arc_sums.arcstat_prefetch_metadata_hits); as->arcstat_prefetch_metadata_misses.value.ui64 = wmsum_value(&arc_sums.arcstat_prefetch_metadata_misses); as->arcstat_mru_hits.value.ui64 = wmsum_value(&arc_sums.arcstat_mru_hits); as->arcstat_mru_ghost_hits.value.ui64 = wmsum_value(&arc_sums.arcstat_mru_ghost_hits); as->arcstat_mfu_hits.value.ui64 = wmsum_value(&arc_sums.arcstat_mfu_hits); as->arcstat_mfu_ghost_hits.value.ui64 = wmsum_value(&arc_sums.arcstat_mfu_ghost_hits); as->arcstat_deleted.value.ui64 = wmsum_value(&arc_sums.arcstat_deleted); as->arcstat_mutex_miss.value.ui64 = wmsum_value(&arc_sums.arcstat_mutex_miss); as->arcstat_access_skip.value.ui64 = wmsum_value(&arc_sums.arcstat_access_skip); as->arcstat_evict_skip.value.ui64 = wmsum_value(&arc_sums.arcstat_evict_skip); as->arcstat_evict_not_enough.value.ui64 = wmsum_value(&arc_sums.arcstat_evict_not_enough); as->arcstat_evict_l2_cached.value.ui64 = wmsum_value(&arc_sums.arcstat_evict_l2_cached); as->arcstat_evict_l2_eligible.value.ui64 = wmsum_value(&arc_sums.arcstat_evict_l2_eligible); as->arcstat_evict_l2_eligible_mfu.value.ui64 = wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mfu); as->arcstat_evict_l2_eligible_mru.value.ui64 = wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mru); as->arcstat_evict_l2_ineligible.value.ui64 = wmsum_value(&arc_sums.arcstat_evict_l2_ineligible); as->arcstat_evict_l2_skip.value.ui64 = wmsum_value(&arc_sums.arcstat_evict_l2_skip); as->arcstat_hash_collisions.value.ui64 = 
wmsum_value(&arc_sums.arcstat_hash_collisions); as->arcstat_hash_chains.value.ui64 = wmsum_value(&arc_sums.arcstat_hash_chains); as->arcstat_size.value.ui64 = aggsum_value(&arc_sums.arcstat_size); as->arcstat_compressed_size.value.ui64 = wmsum_value(&arc_sums.arcstat_compressed_size); as->arcstat_uncompressed_size.value.ui64 = wmsum_value(&arc_sums.arcstat_uncompressed_size); as->arcstat_overhead_size.value.ui64 = wmsum_value(&arc_sums.arcstat_overhead_size); as->arcstat_hdr_size.value.ui64 = wmsum_value(&arc_sums.arcstat_hdr_size); as->arcstat_data_size.value.ui64 = wmsum_value(&arc_sums.arcstat_data_size); as->arcstat_metadata_size.value.ui64 = wmsum_value(&arc_sums.arcstat_metadata_size); as->arcstat_dbuf_size.value.ui64 = wmsum_value(&arc_sums.arcstat_dbuf_size); #if defined(COMPAT_FREEBSD11) as->arcstat_other_size.value.ui64 = wmsum_value(&arc_sums.arcstat_bonus_size) + aggsum_value(&arc_sums.arcstat_dnode_size) + wmsum_value(&arc_sums.arcstat_dbuf_size); #endif arc_kstat_update_state(arc_anon, &as->arcstat_anon_size, &as->arcstat_anon_evictable_data, &as->arcstat_anon_evictable_metadata); arc_kstat_update_state(arc_mru, &as->arcstat_mru_size, &as->arcstat_mru_evictable_data, &as->arcstat_mru_evictable_metadata); arc_kstat_update_state(arc_mru_ghost, &as->arcstat_mru_ghost_size, &as->arcstat_mru_ghost_evictable_data, &as->arcstat_mru_ghost_evictable_metadata); arc_kstat_update_state(arc_mfu, &as->arcstat_mfu_size, &as->arcstat_mfu_evictable_data, &as->arcstat_mfu_evictable_metadata); arc_kstat_update_state(arc_mfu_ghost, &as->arcstat_mfu_ghost_size, &as->arcstat_mfu_ghost_evictable_data, &as->arcstat_mfu_ghost_evictable_metadata); as->arcstat_dnode_size.value.ui64 = aggsum_value(&arc_sums.arcstat_dnode_size); as->arcstat_bonus_size.value.ui64 = wmsum_value(&arc_sums.arcstat_bonus_size); as->arcstat_l2_hits.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_hits); as->arcstat_l2_misses.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_misses); as->arcstat_l2_prefetch_asize.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_prefetch_asize); as->arcstat_l2_mru_asize.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_mru_asize); as->arcstat_l2_mfu_asize.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_mfu_asize); as->arcstat_l2_bufc_data_asize.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_bufc_data_asize); as->arcstat_l2_bufc_metadata_asize.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_bufc_metadata_asize); as->arcstat_l2_feeds.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_feeds); as->arcstat_l2_rw_clash.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rw_clash); as->arcstat_l2_read_bytes.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_read_bytes); as->arcstat_l2_write_bytes.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_write_bytes); as->arcstat_l2_writes_sent.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_writes_sent); as->arcstat_l2_writes_done.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_writes_done); as->arcstat_l2_writes_error.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_writes_error); as->arcstat_l2_writes_lock_retry.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_writes_lock_retry); as->arcstat_l2_evict_lock_retry.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_evict_lock_retry); as->arcstat_l2_evict_reading.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_evict_reading); as->arcstat_l2_evict_l1cached.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_evict_l1cached); as->arcstat_l2_free_on_write.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_free_on_write); as->arcstat_l2_abort_lowmem.value.ui64 = 
wmsum_value(&arc_sums.arcstat_l2_abort_lowmem); as->arcstat_l2_cksum_bad.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_cksum_bad); as->arcstat_l2_io_error.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_io_error); as->arcstat_l2_lsize.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_lsize); as->arcstat_l2_psize.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_psize); as->arcstat_l2_hdr_size.value.ui64 = aggsum_value(&arc_sums.arcstat_l2_hdr_size); as->arcstat_l2_log_blk_writes.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_log_blk_writes); as->arcstat_l2_log_blk_asize.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_log_blk_asize); as->arcstat_l2_log_blk_count.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_log_blk_count); as->arcstat_l2_rebuild_success.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_success); as->arcstat_l2_rebuild_abort_unsupported.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_unsupported); as->arcstat_l2_rebuild_abort_io_errors.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_io_errors); as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_dh_errors); as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors); as->arcstat_l2_rebuild_abort_lowmem.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_lowmem); as->arcstat_l2_rebuild_size.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_size); as->arcstat_l2_rebuild_asize.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_asize); as->arcstat_l2_rebuild_bufs.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs); as->arcstat_l2_rebuild_bufs_precached.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs_precached); as->arcstat_l2_rebuild_log_blks.value.ui64 = wmsum_value(&arc_sums.arcstat_l2_rebuild_log_blks); as->arcstat_memory_throttle_count.value.ui64 = wmsum_value(&arc_sums.arcstat_memory_throttle_count); as->arcstat_memory_direct_count.value.ui64 = wmsum_value(&arc_sums.arcstat_memory_direct_count); as->arcstat_memory_indirect_count.value.ui64 = wmsum_value(&arc_sums.arcstat_memory_indirect_count); as->arcstat_memory_all_bytes.value.ui64 = arc_all_memory(); as->arcstat_memory_free_bytes.value.ui64 = arc_free_memory(); as->arcstat_memory_available_bytes.value.i64 = arc_available_memory(); as->arcstat_prune.value.ui64 = wmsum_value(&arc_sums.arcstat_prune); as->arcstat_meta_used.value.ui64 = aggsum_value(&arc_sums.arcstat_meta_used); as->arcstat_async_upgrade_sync.value.ui64 = wmsum_value(&arc_sums.arcstat_async_upgrade_sync); as->arcstat_demand_hit_predictive_prefetch.value.ui64 = wmsum_value(&arc_sums.arcstat_demand_hit_predictive_prefetch); as->arcstat_demand_hit_prescient_prefetch.value.ui64 = wmsum_value(&arc_sums.arcstat_demand_hit_prescient_prefetch); as->arcstat_raw_size.value.ui64 = wmsum_value(&arc_sums.arcstat_raw_size); as->arcstat_cached_only_in_progress.value.ui64 = wmsum_value(&arc_sums.arcstat_cached_only_in_progress); as->arcstat_abd_chunk_waste_size.value.ui64 = wmsum_value(&arc_sums.arcstat_abd_chunk_waste_size); return (0); } /* * This function *must* return indices evenly distributed between all * sublists of the multilist. This is needed due to how the ARC eviction * code is laid out; arc_evict_state() assumes ARC buffers are evenly * distributed between all sublists and uses this assumption when * deciding which sublist to evict from and how much to evict from it. 
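 * As a concrete illustration (hypothetical numbers): with 8 sublists, a
 * header whose buf_hash() value is 29 maps to sublist 29 % 8 = 5, and it
 * maps to the same sublist again on removal because b_spa, b_dva and
 * b_birth never change.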
*/ static unsigned int arc_state_multilist_index_func(multilist_t *ml, void *obj) { arc_buf_hdr_t *hdr = obj; /* * We rely on b_dva to generate evenly distributed index * numbers using buf_hash below. So, as an added precaution, * let's make sure we never add empty buffers to the arc lists. */ ASSERT(!HDR_EMPTY(hdr)); /* * The assumption here, is the hash value for a given * arc_buf_hdr_t will remain constant throughout its lifetime * (i.e. its b_spa, b_dva, and b_birth fields don't change). * Thus, we don't need to store the header's sublist index * on insertion, as this index can be recalculated on removal. * * Also, the low order bits of the hash value are thought to be * distributed evenly. Otherwise, in the case that the multilist * has a power of two number of sublists, each sublists' usage * would not be evenly distributed. In this context full 64bit * division would be a waste of time, so limit it to 32 bits. */ return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) % multilist_get_num_sublists(ml)); } static unsigned int arc_state_l2c_multilist_index_func(multilist_t *ml, void *obj) { panic("Header %p insert into arc_l2c_only %p", obj, ml); } #define WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do { \ if ((do_warn) && (tuning) && ((tuning) != (value))) { \ cmn_err(CE_WARN, \ "ignoring tunable %s (using %llu instead)", \ (#tuning), (value)); \ } \ } while (0) /* * Called during module initialization and periodically thereafter to * apply reasonable changes to the exposed performance tunings. Can also be * called explicitly by param_set_arc_*() functions when ARC tunables are * updated manually. Non-zero zfs_* values which differ from the currently set * values will be applied. */ void arc_tuning_update(boolean_t verbose) { uint64_t allmem = arc_all_memory(); unsigned long limit; /* Valid range: 32M - <arc_c_max> */ if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) && (zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) && (zfs_arc_min <= arc_c_max)) { arc_c_min = zfs_arc_min; arc_c = MAX(arc_c, arc_c_min); } WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, verbose); /* Valid range: 64M - <all physical memory> */ if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) && (zfs_arc_max >= MIN_ARC_MAX) && (zfs_arc_max < allmem) && (zfs_arc_max > arc_c_min)) { arc_c_max = zfs_arc_max; arc_c = MIN(arc_c, arc_c_max); arc_p = (arc_c >> 1); if (arc_meta_limit > arc_c_max) arc_meta_limit = arc_c_max; if (arc_dnode_size_limit > arc_meta_limit) arc_dnode_size_limit = arc_meta_limit; } WARN_IF_TUNING_IGNORED(zfs_arc_max, arc_c_max, verbose); /* Valid range: 16M - <arc_c_max> */ if ((zfs_arc_meta_min) && (zfs_arc_meta_min != arc_meta_min) && (zfs_arc_meta_min >= 1ULL << SPA_MAXBLOCKSHIFT) && (zfs_arc_meta_min <= arc_c_max)) { arc_meta_min = zfs_arc_meta_min; if (arc_meta_limit < arc_meta_min) arc_meta_limit = arc_meta_min; if (arc_dnode_size_limit < arc_meta_min) arc_dnode_size_limit = arc_meta_min; } WARN_IF_TUNING_IGNORED(zfs_arc_meta_min, arc_meta_min, verbose); /* Valid range: <arc_meta_min> - <arc_c_max> */ limit = zfs_arc_meta_limit ? zfs_arc_meta_limit : MIN(zfs_arc_meta_limit_percent, 100) * arc_c_max / 100; if ((limit != arc_meta_limit) && (limit >= arc_meta_min) && (limit <= arc_c_max)) arc_meta_limit = limit; WARN_IF_TUNING_IGNORED(zfs_arc_meta_limit, arc_meta_limit, verbose); /* Valid range: <arc_meta_min> - <arc_meta_limit> */ limit = zfs_arc_dnode_limit ? 
zfs_arc_dnode_limit : MIN(zfs_arc_dnode_limit_percent, 100) * arc_meta_limit / 100; if ((limit != arc_dnode_size_limit) && (limit >= arc_meta_min) && (limit <= arc_meta_limit)) arc_dnode_size_limit = limit; WARN_IF_TUNING_IGNORED(zfs_arc_dnode_limit, arc_dnode_size_limit, verbose); /* Valid range: 1 - N */ if (zfs_arc_grow_retry) arc_grow_retry = zfs_arc_grow_retry; /* Valid range: 1 - N */ if (zfs_arc_shrink_shift) { arc_shrink_shift = zfs_arc_shrink_shift; arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift -1); } /* Valid range: 1 - N */ if (zfs_arc_p_min_shift) arc_p_min_shift = zfs_arc_p_min_shift; /* Valid range: 1 - N ms */ if (zfs_arc_min_prefetch_ms) arc_min_prefetch_ms = zfs_arc_min_prefetch_ms; /* Valid range: 1 - N ms */ if (zfs_arc_min_prescient_prefetch_ms) { arc_min_prescient_prefetch_ms = zfs_arc_min_prescient_prefetch_ms; } /* Valid range: 0 - 100 */ if ((zfs_arc_lotsfree_percent >= 0) && (zfs_arc_lotsfree_percent <= 100)) arc_lotsfree_percent = zfs_arc_lotsfree_percent; WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent, verbose); /* Valid range: 0 - <all physical memory> */ if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free)) arc_sys_free = MIN(MAX(zfs_arc_sys_free, 0), allmem); WARN_IF_TUNING_IGNORED(zfs_arc_sys_free, arc_sys_free, verbose); } static void arc_state_init(void) { multilist_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), arc_state_multilist_index_func); multilist_create(&arc_mru->arcs_list[ARC_BUFC_DATA], sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), arc_state_multilist_index_func); multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), arc_state_multilist_index_func); multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), arc_state_multilist_index_func); multilist_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), arc_state_multilist_index_func); multilist_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), arc_state_multilist_index_func); multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), arc_state_multilist_index_func); multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), arc_state_multilist_index_func); /* * L2 headers should never be on the L2 state list since they don't * have L1 headers allocated. Special index function asserts that. 
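 * (arc_state_l2c_multilist_index_func above simply panics if it is ever
 * invoked; the lists are created below only so that the state machinery
 * has valid multilists to reference.)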
*/ multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), arc_state_l2c_multilist_index_func); multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), arc_state_l2c_multilist_index_func); zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_create(&arc_anon->arcs_size); zfs_refcount_create(&arc_mru->arcs_size); zfs_refcount_create(&arc_mru_ghost->arcs_size); zfs_refcount_create(&arc_mfu->arcs_size); zfs_refcount_create(&arc_mfu_ghost->arcs_size); zfs_refcount_create(&arc_l2c_only->arcs_size); wmsum_init(&arc_sums.arcstat_hits, 0); wmsum_init(&arc_sums.arcstat_misses, 0); wmsum_init(&arc_sums.arcstat_demand_data_hits, 0); wmsum_init(&arc_sums.arcstat_demand_data_misses, 0); wmsum_init(&arc_sums.arcstat_demand_metadata_hits, 0); wmsum_init(&arc_sums.arcstat_demand_metadata_misses, 0); wmsum_init(&arc_sums.arcstat_prefetch_data_hits, 0); wmsum_init(&arc_sums.arcstat_prefetch_data_misses, 0); wmsum_init(&arc_sums.arcstat_prefetch_metadata_hits, 0); wmsum_init(&arc_sums.arcstat_prefetch_metadata_misses, 0); wmsum_init(&arc_sums.arcstat_mru_hits, 0); wmsum_init(&arc_sums.arcstat_mru_ghost_hits, 0); wmsum_init(&arc_sums.arcstat_mfu_hits, 0); wmsum_init(&arc_sums.arcstat_mfu_ghost_hits, 0); wmsum_init(&arc_sums.arcstat_deleted, 0); wmsum_init(&arc_sums.arcstat_mutex_miss, 0); wmsum_init(&arc_sums.arcstat_access_skip, 0); wmsum_init(&arc_sums.arcstat_evict_skip, 0); wmsum_init(&arc_sums.arcstat_evict_not_enough, 0); wmsum_init(&arc_sums.arcstat_evict_l2_cached, 0); wmsum_init(&arc_sums.arcstat_evict_l2_eligible, 0); wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mfu, 0); wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mru, 0); wmsum_init(&arc_sums.arcstat_evict_l2_ineligible, 0); wmsum_init(&arc_sums.arcstat_evict_l2_skip, 0); wmsum_init(&arc_sums.arcstat_hash_collisions, 0); wmsum_init(&arc_sums.arcstat_hash_chains, 0); aggsum_init(&arc_sums.arcstat_size, 0); wmsum_init(&arc_sums.arcstat_compressed_size, 0); wmsum_init(&arc_sums.arcstat_uncompressed_size, 0); wmsum_init(&arc_sums.arcstat_overhead_size, 0); wmsum_init(&arc_sums.arcstat_hdr_size, 0); wmsum_init(&arc_sums.arcstat_data_size, 0); wmsum_init(&arc_sums.arcstat_metadata_size, 0); wmsum_init(&arc_sums.arcstat_dbuf_size, 0); aggsum_init(&arc_sums.arcstat_dnode_size, 0); wmsum_init(&arc_sums.arcstat_bonus_size, 0); wmsum_init(&arc_sums.arcstat_l2_hits, 0); wmsum_init(&arc_sums.arcstat_l2_misses, 0); wmsum_init(&arc_sums.arcstat_l2_prefetch_asize, 0); wmsum_init(&arc_sums.arcstat_l2_mru_asize, 0); wmsum_init(&arc_sums.arcstat_l2_mfu_asize, 0); wmsum_init(&arc_sums.arcstat_l2_bufc_data_asize, 0); wmsum_init(&arc_sums.arcstat_l2_bufc_metadata_asize, 0); wmsum_init(&arc_sums.arcstat_l2_feeds, 0); 
wmsum_init(&arc_sums.arcstat_l2_rw_clash, 0); wmsum_init(&arc_sums.arcstat_l2_read_bytes, 0); wmsum_init(&arc_sums.arcstat_l2_write_bytes, 0); wmsum_init(&arc_sums.arcstat_l2_writes_sent, 0); wmsum_init(&arc_sums.arcstat_l2_writes_done, 0); wmsum_init(&arc_sums.arcstat_l2_writes_error, 0); wmsum_init(&arc_sums.arcstat_l2_writes_lock_retry, 0); wmsum_init(&arc_sums.arcstat_l2_evict_lock_retry, 0); wmsum_init(&arc_sums.arcstat_l2_evict_reading, 0); wmsum_init(&arc_sums.arcstat_l2_evict_l1cached, 0); wmsum_init(&arc_sums.arcstat_l2_free_on_write, 0); wmsum_init(&arc_sums.arcstat_l2_abort_lowmem, 0); wmsum_init(&arc_sums.arcstat_l2_cksum_bad, 0); wmsum_init(&arc_sums.arcstat_l2_io_error, 0); wmsum_init(&arc_sums.arcstat_l2_lsize, 0); wmsum_init(&arc_sums.arcstat_l2_psize, 0); aggsum_init(&arc_sums.arcstat_l2_hdr_size, 0); wmsum_init(&arc_sums.arcstat_l2_log_blk_writes, 0); wmsum_init(&arc_sums.arcstat_l2_log_blk_asize, 0); wmsum_init(&arc_sums.arcstat_l2_log_blk_count, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_success, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_unsupported, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_io_errors, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_dh_errors, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_lowmem, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_size, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_asize, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs_precached, 0); wmsum_init(&arc_sums.arcstat_l2_rebuild_log_blks, 0); wmsum_init(&arc_sums.arcstat_memory_throttle_count, 0); wmsum_init(&arc_sums.arcstat_memory_direct_count, 0); wmsum_init(&arc_sums.arcstat_memory_indirect_count, 0); wmsum_init(&arc_sums.arcstat_prune, 0); aggsum_init(&arc_sums.arcstat_meta_used, 0); wmsum_init(&arc_sums.arcstat_async_upgrade_sync, 0); wmsum_init(&arc_sums.arcstat_demand_hit_predictive_prefetch, 0); wmsum_init(&arc_sums.arcstat_demand_hit_prescient_prefetch, 0); wmsum_init(&arc_sums.arcstat_raw_size, 0); wmsum_init(&arc_sums.arcstat_cached_only_in_progress, 0); wmsum_init(&arc_sums.arcstat_abd_chunk_waste_size, 0); arc_anon->arcs_state = ARC_STATE_ANON; arc_mru->arcs_state = ARC_STATE_MRU; arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST; arc_mfu->arcs_state = ARC_STATE_MFU; arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST; arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY; } static void arc_state_fini(void) { zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); zfs_refcount_destroy(&arc_anon->arcs_size); zfs_refcount_destroy(&arc_mru->arcs_size); zfs_refcount_destroy(&arc_mru_ghost->arcs_size); zfs_refcount_destroy(&arc_mfu->arcs_size); zfs_refcount_destroy(&arc_mfu_ghost->arcs_size); zfs_refcount_destroy(&arc_l2c_only->arcs_size); 
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]); multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]); wmsum_fini(&arc_sums.arcstat_hits); wmsum_fini(&arc_sums.arcstat_misses); wmsum_fini(&arc_sums.arcstat_demand_data_hits); wmsum_fini(&arc_sums.arcstat_demand_data_misses); wmsum_fini(&arc_sums.arcstat_demand_metadata_hits); wmsum_fini(&arc_sums.arcstat_demand_metadata_misses); wmsum_fini(&arc_sums.arcstat_prefetch_data_hits); wmsum_fini(&arc_sums.arcstat_prefetch_data_misses); wmsum_fini(&arc_sums.arcstat_prefetch_metadata_hits); wmsum_fini(&arc_sums.arcstat_prefetch_metadata_misses); wmsum_fini(&arc_sums.arcstat_mru_hits); wmsum_fini(&arc_sums.arcstat_mru_ghost_hits); wmsum_fini(&arc_sums.arcstat_mfu_hits); wmsum_fini(&arc_sums.arcstat_mfu_ghost_hits); wmsum_fini(&arc_sums.arcstat_deleted); wmsum_fini(&arc_sums.arcstat_mutex_miss); wmsum_fini(&arc_sums.arcstat_access_skip); wmsum_fini(&arc_sums.arcstat_evict_skip); wmsum_fini(&arc_sums.arcstat_evict_not_enough); wmsum_fini(&arc_sums.arcstat_evict_l2_cached); wmsum_fini(&arc_sums.arcstat_evict_l2_eligible); wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mfu); wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mru); wmsum_fini(&arc_sums.arcstat_evict_l2_ineligible); wmsum_fini(&arc_sums.arcstat_evict_l2_skip); wmsum_fini(&arc_sums.arcstat_hash_collisions); wmsum_fini(&arc_sums.arcstat_hash_chains); aggsum_fini(&arc_sums.arcstat_size); wmsum_fini(&arc_sums.arcstat_compressed_size); wmsum_fini(&arc_sums.arcstat_uncompressed_size); wmsum_fini(&arc_sums.arcstat_overhead_size); wmsum_fini(&arc_sums.arcstat_hdr_size); wmsum_fini(&arc_sums.arcstat_data_size); wmsum_fini(&arc_sums.arcstat_metadata_size); wmsum_fini(&arc_sums.arcstat_dbuf_size); aggsum_fini(&arc_sums.arcstat_dnode_size); wmsum_fini(&arc_sums.arcstat_bonus_size); wmsum_fini(&arc_sums.arcstat_l2_hits); wmsum_fini(&arc_sums.arcstat_l2_misses); wmsum_fini(&arc_sums.arcstat_l2_prefetch_asize); wmsum_fini(&arc_sums.arcstat_l2_mru_asize); wmsum_fini(&arc_sums.arcstat_l2_mfu_asize); wmsum_fini(&arc_sums.arcstat_l2_bufc_data_asize); wmsum_fini(&arc_sums.arcstat_l2_bufc_metadata_asize); wmsum_fini(&arc_sums.arcstat_l2_feeds); wmsum_fini(&arc_sums.arcstat_l2_rw_clash); wmsum_fini(&arc_sums.arcstat_l2_read_bytes); wmsum_fini(&arc_sums.arcstat_l2_write_bytes); wmsum_fini(&arc_sums.arcstat_l2_writes_sent); wmsum_fini(&arc_sums.arcstat_l2_writes_done); wmsum_fini(&arc_sums.arcstat_l2_writes_error); wmsum_fini(&arc_sums.arcstat_l2_writes_lock_retry); wmsum_fini(&arc_sums.arcstat_l2_evict_lock_retry); wmsum_fini(&arc_sums.arcstat_l2_evict_reading); wmsum_fini(&arc_sums.arcstat_l2_evict_l1cached); wmsum_fini(&arc_sums.arcstat_l2_free_on_write); wmsum_fini(&arc_sums.arcstat_l2_abort_lowmem); wmsum_fini(&arc_sums.arcstat_l2_cksum_bad); wmsum_fini(&arc_sums.arcstat_l2_io_error); wmsum_fini(&arc_sums.arcstat_l2_lsize); wmsum_fini(&arc_sums.arcstat_l2_psize); aggsum_fini(&arc_sums.arcstat_l2_hdr_size); wmsum_fini(&arc_sums.arcstat_l2_log_blk_writes); wmsum_fini(&arc_sums.arcstat_l2_log_blk_asize); 
wmsum_fini(&arc_sums.arcstat_l2_log_blk_count); wmsum_fini(&arc_sums.arcstat_l2_rebuild_success); wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_unsupported); wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_io_errors); wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_dh_errors); wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors); wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_lowmem); wmsum_fini(&arc_sums.arcstat_l2_rebuild_size); wmsum_fini(&arc_sums.arcstat_l2_rebuild_asize); wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs); wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs_precached); wmsum_fini(&arc_sums.arcstat_l2_rebuild_log_blks); wmsum_fini(&arc_sums.arcstat_memory_throttle_count); wmsum_fini(&arc_sums.arcstat_memory_direct_count); wmsum_fini(&arc_sums.arcstat_memory_indirect_count); wmsum_fini(&arc_sums.arcstat_prune); aggsum_fini(&arc_sums.arcstat_meta_used); wmsum_fini(&arc_sums.arcstat_async_upgrade_sync); wmsum_fini(&arc_sums.arcstat_demand_hit_predictive_prefetch); wmsum_fini(&arc_sums.arcstat_demand_hit_prescient_prefetch); wmsum_fini(&arc_sums.arcstat_raw_size); wmsum_fini(&arc_sums.arcstat_cached_only_in_progress); wmsum_fini(&arc_sums.arcstat_abd_chunk_waste_size); } uint64_t arc_target_bytes(void) { return (arc_c); } void arc_set_limits(uint64_t allmem) { /* Set min cache to 1/32 of all memory, or 32MB, whichever is more. */ arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT); /* How to set default max varies by platform. */ arc_c_max = arc_default_max(arc_c_min, allmem); } void arc_init(void) { uint64_t percent, allmem = arc_all_memory(); mutex_init(&arc_evict_lock, NULL, MUTEX_DEFAULT, NULL); list_create(&arc_evict_waiters, sizeof (arc_evict_waiter_t), offsetof(arc_evict_waiter_t, aew_node)); arc_min_prefetch_ms = 1000; arc_min_prescient_prefetch_ms = 6000; #if defined(_KERNEL) arc_lowmem_init(); #endif arc_set_limits(allmem); #ifdef _KERNEL /* * If zfs_arc_max is non-zero at init, meaning it was set in the kernel * environment before the module was loaded, don't block setting the * maximum because it is less than arc_c_min, instead, reset arc_c_min * to a lower value. * zfs_arc_min will be handled by arc_tuning_update(). */ if (zfs_arc_max != 0 && zfs_arc_max >= MIN_ARC_MAX && zfs_arc_max < allmem) { arc_c_max = zfs_arc_max; if (arc_c_min >= arc_c_max) { arc_c_min = MAX(zfs_arc_max / 2, 2ULL << SPA_MAXBLOCKSHIFT); } } #else /* * In userland, there's only the memory pressure that we artificially * create (see arc_available_memory()). Don't let arc_c get too * small, because it can cause transactions to be larger than * arc_c, causing arc_tempreserve_space() to fail. */ arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT); #endif arc_c = arc_c_min; arc_p = (arc_c >> 1); /* Set min to 1/2 of arc_c_min */ arc_meta_min = 1ULL << SPA_MAXBLOCKSHIFT; /* * Set arc_meta_limit to a percent of arc_c_max with a floor of * arc_meta_min, and a ceiling of arc_c_max. 
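	 * For example (hypothetical numbers): with arc_c_max = 16 GiB and
	 * zfs_arc_meta_limit_percent = 75, arc_meta_limit comes out to
	 * roughly 12 GiB, floored at arc_meta_min.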
	 */
	percent = MIN(zfs_arc_meta_limit_percent, 100);
	arc_meta_limit = MAX(arc_meta_min, (percent * arc_c_max) / 100);
	percent = MIN(zfs_arc_dnode_limit_percent, 100);
	arc_dnode_size_limit = (percent * arc_meta_limit) / 100;

	/* Apply user specified tunings */
	arc_tuning_update(B_TRUE);

	/* if kmem_flags are set, let's try to use less memory */
	if (kmem_debugging())
		arc_c = arc_c / 2;
	if (arc_c < arc_c_min)
		arc_c = arc_c_min;

	arc_register_hotplug();

	arc_state_init();

	buf_init();

	list_create(&arc_prune_list, sizeof (arc_prune_t),
	    offsetof(arc_prune_t, p_node));
	mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);

	arc_prune_taskq = taskq_create("arc_prune", 100, defclsyspri,
	    boot_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC |
	    TASKQ_THREADS_CPU_PCT);

	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

	if (arc_ksp != NULL) {
		arc_ksp->ks_data = &arc_stats;
		arc_ksp->ks_update = arc_kstat_update;
		kstat_install(arc_ksp);
	}

	arc_evict_zthr = zthr_create("arc_evict",
	    arc_evict_cb_check, arc_evict_cb, NULL, defclsyspri);
	arc_reap_zthr = zthr_create_timer("arc_reap",
	    arc_reap_cb_check, arc_reap_cb, NULL, SEC2NSEC(1), minclsyspri);

	arc_warm = B_FALSE;

	/*
	 * Calculate maximum amount of dirty data per pool.
	 *
	 * If it has been set by a module parameter, take that.
	 * Otherwise, use a percentage of physical memory defined by
	 * zfs_dirty_data_max_percent (default 10%) with a cap at
	 * zfs_dirty_data_max_max (default 4G or 25% of physical memory).
	 */
#ifdef __LP64__
	if (zfs_dirty_data_max_max == 0)
		zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
		    allmem * zfs_dirty_data_max_max_percent / 100);
#else
	if (zfs_dirty_data_max_max == 0)
		zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
		    allmem * zfs_dirty_data_max_max_percent / 100);
#endif

	if (zfs_dirty_data_max == 0) {
		zfs_dirty_data_max = allmem *
		    zfs_dirty_data_max_percent / 100;
		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
		    zfs_dirty_data_max_max);
	}
}

void
arc_fini(void)
{
	arc_prune_t *p;

#ifdef _KERNEL
	arc_lowmem_fini();
#endif /* _KERNEL */

	/* Use B_TRUE to ensure *all* buffers are evicted */
	arc_flush(NULL, B_TRUE);

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	taskq_wait(arc_prune_taskq);
	taskq_destroy(arc_prune_taskq);

	mutex_enter(&arc_prune_mtx);
	while ((p = list_head(&arc_prune_list)) != NULL) {
		list_remove(&arc_prune_list, p);
		zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
		zfs_refcount_destroy(&p->p_refcnt);
		kmem_free(p, sizeof (*p));
	}
	mutex_exit(&arc_prune_mtx);

	list_destroy(&arc_prune_list);
	mutex_destroy(&arc_prune_mtx);

	(void) zthr_cancel(arc_evict_zthr);
	(void) zthr_cancel(arc_reap_zthr);

	mutex_destroy(&arc_evict_lock);
	list_destroy(&arc_evict_waiters);

	/*
	 * Free any buffers that were tagged for destruction. This needs
	 * to occur before arc_state_fini() runs and destroys the aggsum
	 * values which are updated when freeing scatter ABDs.
	 */
	l2arc_do_free_on_write();

	/*
	 * buf_fini() must precede arc_state_fini() because buf_fini() may
	 * trigger the release of kmem magazines, which can callback to
	 * arc_space_return() which accesses aggsums freed in arc_state_fini().
	 */
	buf_fini();
	arc_state_fini();
	arc_unregister_hotplug();

	/*
	 * We destroy the zthrs after all the ARC state has been
	 * torn down to avoid the case of them receiving any
	 * wakeup() signals after they are destroyed.
*/ zthr_destroy(arc_evict_zthr); zthr_destroy(arc_reap_zthr); ASSERT0(arc_loaned_bytes); } /* * Level 2 ARC * * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. * It uses dedicated storage devices to hold cached data, which are populated * using large infrequent writes. The main role of this cache is to boost * the performance of random read workloads. The intended L2ARC devices * include short-stroked disks, solid state disks, and other media with * substantially faster read latency than disk. * * +-----------------------+ * | ARC | * +-----------------------+ * | ^ ^ * | | | * l2arc_feed_thread() arc_read() * | | | * | l2arc read | * V | | * +---------------+ | * | L2ARC | | * +---------------+ | * | ^ | * l2arc_write() | | * | | | * V | | * +-------+ +-------+ * | vdev | | vdev | * | cache | | cache | * +-------+ +-------+ * +=========+ .-----. * : L2ARC : |-_____-| * : devices : | Disks | * +=========+ `-_____-' * * Read requests are satisfied from the following sources, in order: * * 1) ARC * 2) vdev cache of L2ARC devices * 3) L2ARC devices * 4) vdev cache of disks * 5) disks * * Some L2ARC device types exhibit extremely slow write performance. * To accommodate for this there are some significant differences between * the L2ARC and traditional cache design: * * 1. There is no eviction path from the ARC to the L2ARC. Evictions from * the ARC behave as usual, freeing buffers and placing headers on ghost * lists. The ARC does not send buffers to the L2ARC during eviction as * this would add inflated write latencies for all ARC memory pressure. * * 2. The L2ARC attempts to cache data from the ARC before it is evicted. * It does this by periodically scanning buffers from the eviction-end of * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are * not already there. It scans until a headroom of buffers is satisfied, * which itself is a buffer for ARC eviction. If a compressible buffer is * found during scanning and selected for writing to an L2ARC device, we * temporarily boost scanning headroom during the next scan cycle to make * sure we adapt to compression effects (which might significantly reduce * the data volume we write to L2ARC). The thread that does this is * l2arc_feed_thread(), illustrated below; example sizes are included to * provide a better sense of ratio than this diagram: * * head --> tail * +---------------------+----------+ * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC * +---------------------+----------+ | o L2ARC eligible * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer * +---------------------+----------+ | * 15.9 Gbytes ^ 32 Mbytes | * headroom | * l2arc_feed_thread() * | * l2arc write hand <--[oooo]--' * | 8 Mbyte * | write max * V * +==============================+ * L2ARC dev |####|#|###|###| |####| ... | * +==============================+ * 32 Gbytes * * 3. If an ARC buffer is copied to the L2ARC but then hit instead of * evicted, then the L2ARC has cached a buffer much sooner than it probably * needed to, potentially wasting L2ARC device bandwidth and storage. It is * safe to say that this is an uncommon case, since buffers at the end of * the ARC lists have moved there due to inactivity. * * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, * then the L2ARC simply misses copying some buffers. This serves as a * pressure valve to prevent heavy read workloads from both stalling the ARC * with waits and clogging the L2ARC with writes. 
This also helps prevent * the potential for the L2ARC to churn if it attempts to cache content too * quickly, such as during backups of the entire pool. * * 5. After system boot and before the ARC has filled main memory, there are * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru * lists can remain mostly static. Instead of searching from tail of these * lists as pictured, the l2arc_feed_thread() will search from the list heads * for eligible buffers, greatly increasing its chance of finding them. * * The L2ARC device write speed is also boosted during this time so that * the L2ARC warms up faster. Since there have been no ARC evictions yet, * there are no L2ARC reads, and no fear of degrading read performance * through increased writes. * * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that * the vdev queue can aggregate them into larger and fewer writes. Each * device is written to in a rotor fashion, sweeping writes through * available space then repeating. * * 7. The L2ARC does not store dirty content. It never needs to flush * write buffers back to disk based storage. * * 8. If an ARC buffer is written (and dirtied) which also exists in the * L2ARC, the now stale L2ARC buffer is immediately dropped. * * The performance of the L2ARC can be tweaked by a number of tunables, which * may be necessary for different workloads: * * l2arc_write_max max write bytes per interval * l2arc_write_boost extra write bytes during device warmup * l2arc_noprefetch skip caching prefetched buffers * l2arc_headroom number of max device writes to precache * l2arc_headroom_boost when we find compressed buffers during ARC * scanning, we multiply headroom by this * percentage factor for the next scan cycle, * since more compressed buffers are likely to * be present * l2arc_feed_secs seconds between L2ARC writing * * Tunables may be removed or added as future performance improvements are * integrated, and also may become zpool properties. * * There are three key functions that control how the L2ARC warms up: * * l2arc_write_eligible() check if a buffer is eligible to cache * l2arc_write_size() calculate how much to write * l2arc_write_interval() calculate sleep delay between writes * * These three functions determine what to write, how much, and how quickly * to send writes. * * L2ARC persistence: * * When writing buffers to L2ARC, we periodically add some metadata to * make sure we can pick them up after reboot, thus dramatically reducing * the impact that any downtime has on the performance of storage systems * with large caches. * * The implementation works fairly simply by integrating the following two * modifications: * * *) When writing to the L2ARC, we occasionally write a "l2arc log block", * which is an additional piece of metadata which describes what's been * written. This allows us to rebuild the arc_buf_hdr_t structures of the * main ARC buffers. There are 2 linked-lists of log blocks headed by * dh_start_lbps[2]. We alternate which chain we append to, so they are * time-wise and offset-wise interleaved, but that is an optimization rather * than for correctness. The log block also includes a pointer to the * previous block in its chain. * * *) We reserve SPA_MINBLOCKSIZE of space at the start of each L2ARC device * for our header bookkeeping purposes. This contains a device header, * which contains our top-level reference structures. We update it each * time we write a new log block, so that we're able to locate it in the * L2ARC device. 
If this write results in an inconsistent device header * (e.g. due to power failure), we detect this by verifying the header's * checksum and simply fail to reconstruct the L2ARC after reboot. * * Implementation diagram: * * +=== L2ARC device (not to scale) ======================================+ * | ___two newest log block pointers__.__________ | * | / \dh_start_lbps[1] | * | / \ \dh_start_lbps[0]| * |.___/__. V V | * ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---| * || hdr| ^ /^ /^ / / | * |+------+ ...--\-------/ \-----/--\------/ / | * | \--------------/ \--------------/ | * +======================================================================+ * * As can be seen on the diagram, rather than using a simple linked list, * we use a pair of linked lists with alternating elements. This is a * performance enhancement due to the fact that we only find out the * address of the next log block access once the current block has been * completely read in. Obviously, this hurts performance, because we'd be * keeping the device's I/O queue at only a 1 operation deep, thus * incurring a large amount of I/O round-trip latency. Having two lists * allows us to fetch two log blocks ahead of where we are currently * rebuilding L2ARC buffers. * * On-device data structures: * * L2ARC device header: l2arc_dev_hdr_phys_t * L2ARC log block: l2arc_log_blk_phys_t * * L2ARC reconstruction: * * When writing data, we simply write in the standard rotary fashion, * evicting buffers as we go and simply writing new data over them (writing * a new log block every now and then). This obviously means that once we * loop around the end of the device, we will start cutting into an already * committed log block (and its referenced data buffers), like so: * * current write head__ __old tail * \ / * V V * <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |--> * ^ ^^^^^^^^^___________________________________ * | \ * <<nextwrite>> may overwrite this blk and/or its bufs --' * * When importing the pool, we detect this situation and use it to stop * our scanning process (see l2arc_rebuild). * * There is one significant caveat to consider when rebuilding ARC contents * from an L2ARC device: what about invalidated buffers? Given the above * construction, we cannot update blocks which we've already written to amend * them to remove buffers which were invalidated. Thus, during reconstruction, * we might be populating the cache with buffers for data that's not on the * main pool anymore, or may have been overwritten! * * As it turns out, this isn't a problem. Every arc_read request includes * both the DVA and, crucially, the birth TXG of the BP the caller is * looking for. So even if the cache were populated by completely rotten * blocks for data that had been long deleted and/or overwritten, we'll * never actually return bad data from the cache, since the DVA with the * birth TXG uniquely identify a block in space and time - once created, * a block is immutable on disk. The worst thing we have done is wasted * some time and memory at l2arc rebuild to reconstruct outdated ARC * entries that will get dropped from the l2arc as it is being updated * with new blocks. * * L2ARC buffers that have been evicted by l2arc_evict() ahead of the write * hand are not restored. This is done by saving the offset (in bytes) * l2arc_evict() has evicted to in the L2ARC device header and taking it * into account when restoring buffers. 
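 *
 * As a worked example of the feed cadence (a sketch, assuming the default
 * tunables l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200): a feed cycle
 * that managed to write more than half of its target size re-arms after
 * roughly 200 ms, while a cycle that found little to write waits a full
 * second (see l2arc_write_interval() below).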
*/ static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr) { /* * A buffer is *not* eligible for the L2ARC if it: * 1. belongs to a different spa. * 2. is already cached on the L2ARC. * 3. has an I/O in progress (it may be an incomplete read). * 4. is flagged not eligible (zfs property). */ if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) || HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr)) return (B_FALSE); return (B_TRUE); } static uint64_t l2arc_write_size(l2arc_dev_t *dev) { uint64_t size, dev_size, tsize; /* * Make sure our globals have meaningful values in case the user * altered them. */ size = l2arc_write_max; if (size == 0) { cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must " "be greater than zero, resetting it to the default (%d)", L2ARC_WRITE_SIZE); size = l2arc_write_max = L2ARC_WRITE_SIZE; } if (arc_warm == B_FALSE) size += l2arc_write_boost; /* * Make sure the write size does not exceed the size of the cache * device. This is important in l2arc_evict(), otherwise infinite * iteration can occur. */ dev_size = dev->l2ad_end - dev->l2ad_start; tsize = size + l2arc_log_blk_overhead(size, dev); if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0) tsize += MAX(64 * 1024 * 1024, (tsize * l2arc_trim_ahead) / 100); if (tsize >= dev_size) { cmn_err(CE_NOTE, "l2arc_write_max or l2arc_write_boost " "plus the overhead of log blocks (persistent L2ARC, " "%llu bytes) exceeds the size of the cache device " "(guid %llu), resetting them to the default (%d)", l2arc_log_blk_overhead(size, dev), dev->l2ad_vdev->vdev_guid, L2ARC_WRITE_SIZE); size = l2arc_write_max = l2arc_write_boost = L2ARC_WRITE_SIZE; if (arc_warm == B_FALSE) size += l2arc_write_boost; } return (size); } static clock_t l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) { clock_t interval, next, now; /* * If the ARC lists are busy, increase our write rate; if the * lists are stale, idle back. This is achieved by checking * how much we previously wrote - if it was more than half of * what we wanted, schedule the next write much sooner. */ if (l2arc_feed_again && wrote > (wanted / 2)) interval = (hz * l2arc_feed_min_ms) / 1000; else interval = hz * l2arc_feed_secs; now = ddi_get_lbolt(); next = MAX(now, MIN(now + interval, began + interval)); return (next); } /* * Cycle through L2ARC devices. This is how L2ARC load balances. * If a device is returned, this also returns holding the spa config lock. */ static l2arc_dev_t * l2arc_dev_get_next(void) { l2arc_dev_t *first, *next = NULL; /* * Lock out the removal of spas (spa_namespace_lock), then removal * of cache devices (l2arc_dev_mtx). Once a device has been selected, * both locks will be dropped and a spa config lock held instead. 
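	 *
	 * For example (illustrative only): with three cache devices A, B
	 * and C, successive calls hand out A, B, C, A, ... in rotor
	 * fashion, skipping any device that is dead, still rebuilding, or
	 * being trimmed.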
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			break;

	} while (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
	    next->l2ad_trim_all);

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
	    next->l2ad_trim_all)
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
	mutex_exit(&spa_namespace_lock);

	return (next);
}

/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write(void)
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT3P(df->l2df_abd, !=, NULL);
		abd_free(df->l2df_abd);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}

/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_lb_abd_buf_t *abd_buf;
	l2arc_lb_ptr_buf_t *lb_ptr_buf;
	l2arc_dev_t *dev;
	l2arc_dev_hdr_phys_t *l2dhdr;
	list_t *buflist;
	arc_buf_hdr_t *head, *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	int64_t bytes_dropped = 0;

	cb = zio->io_private;
	ASSERT3P(cb, !=, NULL);
	dev = cb->l2wcb_dev;
	l2dhdr = dev->l2ad_dev_hdr;
	ASSERT3P(dev, !=, NULL);
	head = cb->l2wcb_head;
	ASSERT3P(head, !=, NULL);
	buflist = &dev->l2ad_buflist;
	ASSERT3P(buflist, !=, NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	/*
	 * All writes completed, or an error was hit.
	 */
top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock. We must retry so we
			 * don't leave the ARC_FLAG_L2_WRITING bit set.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);

			/*
			 * We don't want to rescan the headers we've
			 * already marked as having been written out, so
			 * we reinsert the head node so we can pick up
			 * where we left off.
			 */
			list_remove(buflist, head);
			list_insert_after(buflist, hdr, head);

			mutex_exit(&dev->l2ad_mtx);

			/*
			 * We wait for the hash lock to become available
			 * to try and prevent busy waiting, and increase
			 * the chance we'll be able to acquire the lock
			 * the next time around.
			 */
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		/*
		 * We could not have been moved into the arc_l2c_only
		 * state while in-flight due to our ARC_FLAG_L2_WRITING
		 * bit being set. Let's just ensure that's being enforced.
		 */
		ASSERT(HDR_HAS_L1HDR(hdr));

		/*
		 * Skipped - drop L2ARC entry and mark the header as no
		 * longer L2 eligible.
		 */
		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, hdr);
			arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);

			uint64_t psize = HDR_GET_PSIZE(hdr);
			l2arc_hdr_arcstats_decrement(hdr);

			bytes_dropped +=
			    vdev_psize_to_asize(dev->l2ad_vdev, psize);
			(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
			    arc_hdr_size(hdr), hdr);
		}

		/*
		 * Allow ARC to begin reads and ghost list evictions to
		 * this L2ARC entry.
		 */
		arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);

		mutex_exit(hash_lock);
	}

	/*
	 * Free the allocated abd buffers for writing the log blocks.
	 * If the zio failed reclaim the allocated space and remove the
	 * pointers to these log blocks from the log block pointer list
	 * of the L2ARC device.
	 */
	while ((abd_buf = list_remove_tail(&cb->l2wcb_abd_list)) != NULL) {
		abd_free(abd_buf->abd);
		zio_buf_free(abd_buf, sizeof (*abd_buf));
		if (zio->io_error != 0) {
			lb_ptr_buf = list_remove_head(&dev->l2ad_lbptr_list);
			/*
			 * L2BLK_GET_PSIZE returns aligned size for log
			 * blocks.
			 */
			uint64_t asize =
			    L2BLK_GET_PSIZE((lb_ptr_buf->lb_ptr)->lbp_prop);
			bytes_dropped += asize;
			ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
			ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
			zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
			    lb_ptr_buf);
			zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
			kmem_free(lb_ptr_buf->lb_ptr,
			    sizeof (l2arc_log_blkptr_t));
			kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
		}
	}
	list_destroy(&cb->l2wcb_abd_list);

	if (zio->io_error != 0) {
		ARCSTAT_BUMP(arcstat_l2_writes_error);

		/*
		 * Restore the lbps array in the header to its previous state.
		 * If the list of log block pointers is empty, zero out the
		 * log block pointers in the device header.
		 */
		lb_ptr_buf = list_head(&dev->l2ad_lbptr_list);
		for (int i = 0; i < 2; i++) {
			if (lb_ptr_buf == NULL) {
				/*
				 * If the list is empty zero out the device
				 * header. Otherwise zero out the second log
				 * block pointer in the header.
				 */
				if (i == 0) {
					bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
				} else {
					bzero(&l2dhdr->dh_start_lbps[i],
					    sizeof (l2arc_log_blkptr_t));
				}
				break;
			}
			bcopy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[i],
			    sizeof (l2arc_log_blkptr_t));
			lb_ptr_buf = list_next(&dev->l2ad_lbptr_list,
			    lb_ptr_buf);
		}
	}

	ARCSTAT_BUMP(arcstat_l2_writes_done);
	list_remove(buflist, head);
	ASSERT(!HDR_HAS_L1HDR(head));
	kmem_cache_free(hdr_l2only_cache, head);
	mutex_exit(&dev->l2ad_mtx);

	ASSERT(dev->l2ad_vdev != NULL);
	vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}

static int
l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
{
	int ret;
	spa_t *spa = zio->io_spa;
	arc_buf_hdr_t *hdr = cb->l2rcb_hdr;
	blkptr_t *bp = zio->io_bp;
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	/*
	 * ZIL data is never written to the L2ARC, so we don't need
	 * special handling for its unique MAC storage.
	 */
	ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);

	/*
	 * If the data was encrypted, decrypt it now. Note that
	 * we must check the bp here and not the hdr, since the
	 * hdr does not have its encryption parameters updated
	 * until arc_read_done().
*/ if (BP_IS_ENCRYPTED(bp)) { abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, ARC_HDR_DO_ADAPT | ARC_HDR_USE_RESERVE); zio_crypt_decode_params_bp(bp, salt, iv); zio_crypt_decode_mac_bp(bp, mac); ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb, BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, HDR_GET_PSIZE(hdr), eabd, hdr->b_l1hdr.b_pabd, &no_crypt); if (ret != 0) { arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr); goto error; } /* * If we actually performed decryption, replace b_pabd * with the decrypted data. Otherwise we can just throw * our decryption buffer away. */ if (!no_crypt) { arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, arc_hdr_size(hdr), hdr); hdr->b_l1hdr.b_pabd = eabd; zio->io_abd = eabd; } else { arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr); } } /* * If the L2ARC block was compressed, but ARC compression * is disabled we decompress the data into a new buffer and * replace the existing data. */ if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) { abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, ARC_HDR_DO_ADAPT | ARC_HDR_USE_RESERVE); void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr)); ret = zio_decompress_data(HDR_GET_COMPRESS(hdr), hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr), &hdr->b_complevel); if (ret != 0) { abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr)); arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr); goto error; } abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr)); arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, arc_hdr_size(hdr), hdr); hdr->b_l1hdr.b_pabd = cabd; zio->io_abd = cabd; zio->io_size = HDR_GET_LSIZE(hdr); } return (0); error: return (ret); } /* * A read to a cache device completed. Validate buffer contents before * handing over to the regular ARC routines. */ static void l2arc_read_done(zio_t *zio) { int tfm_error = 0; l2arc_read_callback_t *cb = zio->io_private; arc_buf_hdr_t *hdr; kmutex_t *hash_lock; boolean_t valid_cksum; boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) && (cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT)); ASSERT3P(zio->io_vd, !=, NULL); ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); ASSERT3P(cb, !=, NULL); hdr = cb->l2rcb_hdr; ASSERT3P(hdr, !=, NULL); hash_lock = HDR_LOCK(hdr); mutex_enter(hash_lock); ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); /* * If the data was read into a temporary buffer, * move it and free the buffer. */ if (cb->l2rcb_abd != NULL) { ASSERT3U(arc_hdr_size(hdr), <, zio->io_size); if (zio->io_error == 0) { if (using_rdata) { abd_copy(hdr->b_crypt_hdr.b_rabd, cb->l2rcb_abd, arc_hdr_size(hdr)); } else { abd_copy(hdr->b_l1hdr.b_pabd, cb->l2rcb_abd, arc_hdr_size(hdr)); } } /* * The following must be done regardless of whether * there was an error: * - free the temporary buffer * - point zio to the real ARC buffer * - set zio size accordingly * These are required because zio is either re-used for * an I/O of the block in the case of the error * or the zio is passed to arc_read_done() and it * needs real data. */ abd_free(cb->l2rcb_abd); zio->io_size = zio->io_orig_size = arc_hdr_size(hdr); if (using_rdata) { ASSERT(HDR_HAS_RABD(hdr)); zio->io_abd = zio->io_orig_abd = hdr->b_crypt_hdr.b_rabd; } else { ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd; } } ASSERT3P(zio->io_abd, !=, NULL); /* * Check this survived the L2ARC journey. 
*/ ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd || (HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd)); zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ zio->io_prop.zp_complevel = hdr->b_complevel; valid_cksum = arc_cksum_is_equal(hdr, zio); /* * b_rabd will always match the data as it exists on disk if it is * being used. Therefore if we are reading into b_rabd we do not * attempt to untransform the data. */ if (valid_cksum && !using_rdata) tfm_error = l2arc_untransform(zio, cb); if (valid_cksum && tfm_error == 0 && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { mutex_exit(hash_lock); zio->io_private = hdr; arc_read_done(zio); } else { /* * Buffer didn't survive caching. Increment stats and * reissue to the original storage device. */ if (zio->io_error != 0) { ARCSTAT_BUMP(arcstat_l2_io_error); } else { zio->io_error = SET_ERROR(EIO); } if (!valid_cksum || tfm_error != 0) ARCSTAT_BUMP(arcstat_l2_cksum_bad); /* * If there's no waiter, issue an async i/o to the primary * storage now. If there *is* a waiter, the caller must * issue the i/o in a context where it's OK to block. */ if (zio->io_waiter == NULL) { zio_t *pio = zio_unique_parent(zio); void *abd = (using_rdata) ? hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd; ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); zio = zio_read(pio, zio->io_spa, zio->io_bp, abd, zio->io_size, arc_read_done, hdr, zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb); /* * Original ZIO will be freed, so we need to update * ARC header with the new ZIO pointer to be used * by zio_change_priority() in arc_read(). */ for (struct arc_callback *acb = hdr->b_l1hdr.b_acb; acb != NULL; acb = acb->acb_next) acb->acb_zio_head = zio; mutex_exit(hash_lock); zio_nowait(zio); } else { mutex_exit(hash_lock); } } kmem_free(cb, sizeof (l2arc_read_callback_t)); } /* * This is the list priority from which the L2ARC will search for pages to * cache. This is used within loops (0..3) to cycle through lists in the * desired order. This order can have a significant effect on cache * performance. * * Currently the metadata lists are hit first, MFU then MRU, followed by * the data lists. This function returns a locked list, and also returns * the lock pointer. */ static multilist_sublist_t * l2arc_sublist_lock(int list_num) { multilist_t *ml = NULL; unsigned int idx; ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES); switch (list_num) { case 0: ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; break; case 1: ml = &arc_mru->arcs_list[ARC_BUFC_METADATA]; break; case 2: ml = &arc_mfu->arcs_list[ARC_BUFC_DATA]; break; case 3: ml = &arc_mru->arcs_list[ARC_BUFC_DATA]; break; default: return (NULL); } /* * Return a randomly-selected sublist. This is acceptable * because the caller feeds only a little bit of data for each * call (8MB). Subsequent calls will result in different * sublists being selected. */ idx = multilist_get_random_index(ml); return (multilist_sublist_lock(ml, idx)); } /* * Calculates the maximum overhead of L2ARC metadata log blocks for a given * L2ARC write size. l2arc_evict and l2arc_write_size need to include this * overhead in processing to make sure there is enough headroom available * when writing buffers. 
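 *
 * As a rough worked example (a sketch, assuming a device using the
 * maximum of 1022 log entries per block): an 8 MiB write covers
 * 8 MiB >> SPA_MINBLOCKSHIFT = 16384 potential log entries, i.e.
 * ceil(16384 / 1022) = 17 log blocks, each charged at the on-disk asize
 * of one l2arc_log_blk_phys_t.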
 */
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev)
{
	if (dev->l2ad_log_entries == 0) {
		return (0);
	} else {
		uint64_t log_entries = write_sz >> SPA_MINBLOCKSHIFT;

		uint64_t log_blocks = (log_entries +
		    dev->l2ad_log_entries - 1) /
		    dev->l2ad_log_entries;

		return (vdev_psize_to_asize(dev->l2ad_vdev,
		    sizeof (l2arc_log_blk_phys_t)) * log_blocks);
	}
}

/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes. This distance may span populated buffers, it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	arc_buf_hdr_t *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;
	l2arc_lb_ptr_buf_t *lb_ptr_buf, *lb_ptr_buf_prev;
	vdev_t *vd = dev->l2ad_vdev;
	boolean_t rerun;

	buflist = &dev->l2ad_buflist;

	/*
	 * We need to add in the worst case scenario of log block overhead.
	 */
	distance += l2arc_log_blk_overhead(distance, dev);
	if (vd->vdev_has_trim && l2arc_trim_ahead > 0) {
		/*
		 * Trim ahead of the write size 64MB or (l2arc_trim_ahead/100)
		 * times the write size, whichever is greater.
		 */
		distance += MAX(64 * 1024 * 1024,
		    (distance * l2arc_trim_ahead) / 100);
	}

top:
	rerun = B_FALSE;
	if (dev->l2ad_hand >= (dev->l2ad_end - distance)) {
		/*
		 * When there is no space to accommodate upcoming writes,
		 * evict to the end. Then bump the write and evict hands
		 * to the start and iterate. This iteration does not
		 * happen indefinitely as we make sure in
		 * l2arc_write_size() that when the write hand is reset,
		 * the write size does not exceed the end of the device.
		 */
		rerun = B_TRUE;
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}

	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

	if (!all) {
		/*
		 * This check has to be placed after deciding whether to
		 * iterate (rerun).
		 */
		if (dev->l2ad_first) {
			/*
			 * This is the first sweep through the device. There is
			 * nothing to evict. We have already trimmed the
			 * whole device.
			 */
			goto out;
		} else {
			/*
			 * Trim the space to be evicted.
			 */
			if (vd->vdev_has_trim && dev->l2ad_evict < taddr &&
			    l2arc_trim_ahead > 0) {
				/*
				 * We have to drop the spa_config lock because
				 * vdev_trim_range() will acquire it.
				 * l2ad_evict already accounts for the label
				 * size. To prevent vdev_trim_ranges() from
				 * adding it again, we subtract it from
				 * l2ad_evict.
				 */
				spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
				vdev_trim_simple(vd,
				    dev->l2ad_evict - VDEV_LABEL_START_SIZE,
				    taddr - dev->l2ad_evict);
				spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev,
				    RW_READER);
			}

			/*
			 * When rebuilding L2ARC we retrieve the evict hand
			 * from the header of the device. Of note, l2arc_evict()
			 * does not actually delete buffers from the cache
			 * device, but trimming may do so depending on the
			 * hardware implementation. Thus keeping track of the
			 * evict hand is useful.
			 */
			dev->l2ad_evict = MAX(dev->l2ad_evict, taddr);
		}
	}

retry:
	mutex_enter(&dev->l2ad_mtx);
	/*
	 * We have to account for evicted log blocks. Run vdev_space_update()
	 * on log blocks whose offset (in bytes) is before the evicted offset
	 * (in bytes) by searching in the list of pointers to log blocks
	 * present in the L2ARC device.
	 */
	for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf;
	    lb_ptr_buf = lb_ptr_buf_prev) {
		lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf);

		/* L2BLK_GET_PSIZE returns aligned size for log blocks */
		uint64_t asize = L2BLK_GET_PSIZE(
		    (lb_ptr_buf->lb_ptr)->lbp_prop);

		/*
		 * We don't worry about log blocks left behind (i.e.
		 * lbp_payload_start < l2ad_hand) because l2arc_write_buffers()
		 * will never write more than l2arc_evict() evicts.
		 */
		if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) {
			break;
		} else {
			vdev_space_update(vd, -asize, 0, 0);
			ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
			ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
			zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
			    lb_ptr_buf);
			zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
			list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf);
			kmem_free(lb_ptr_buf->lb_ptr,
			    sizeof (l2arc_log_blkptr_t));
			kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
		}
	}

	for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		ASSERT(!HDR_EMPTY(hdr));
		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock. Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&dev->l2ad_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto retry;
		}

		/*
		 * A header can't be on this list if it doesn't have an
		 * L2 header.
		 */
		ASSERT(HDR_HAS_L2HDR(hdr));

		/* Ensure this header has finished being written. */
		ASSERT(!HDR_L2_WRITING(hdr));
		ASSERT(!HDR_L2_WRITE_HEAD(hdr));

		if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict ||
		    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (!HDR_HAS_L1HDR(hdr)) {
			ASSERT(!HDR_L2_READING(hdr));
			/*
			 * This doesn't exist in the ARC. Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_lsize.
			 */
			arc_change_state(arc_anon, hdr, hash_lock);
			arc_hdr_destroy(hdr);
		} else {
			ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
			ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(hdr)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
			}

			arc_hdr_l2hdr_destroy(hdr);
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&dev->l2ad_mtx);

out:
	/*
	 * We need to check if we evicted all buffers; otherwise we may
	 * iterate unnecessarily.
	 */
	if (!all && rerun) {
		/*
		 * Bump device hand to the device start if it is approaching the
		 * end. l2arc_evict() has already evicted ahead for this case.
		 */
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_evict = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
		goto top;
	}

	if (!all) {
		/*
		 * In case of cache device removal (all) the following
		 * assertions may be violated without functional consequences
		 * as the device is about to be removed.
		 */
		ASSERT3U(dev->l2ad_hand + distance, <, dev->l2ad_end);
		if (!dev->l2ad_first)
			ASSERT3U(dev->l2ad_hand, <, dev->l2ad_evict);
	}
}

/*
 * Handle any abd transforms that might be required for writing to the L2ARC.
 * If successful, this function will always return an abd with the data
 * transformed as it is on disk in a new abd of asize bytes.
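 *
 * (Illustrative note: this path is only reached when the caller could not
 * use b_rabd or b_pabd directly, i.e. when compression, encryption, shared
 * buffers, or a psize/asize mismatch require a private, possibly padded,
 * copy of the data.)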
*/ static int l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize, abd_t **abd_out) { int ret; void *tmp = NULL; abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd; enum zio_compress compress = HDR_GET_COMPRESS(hdr); uint64_t psize = HDR_GET_PSIZE(hdr); uint64_t size = arc_hdr_size(hdr); boolean_t ismd = HDR_ISTYPE_METADATA(hdr); boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); dsl_crypto_key_t *dck = NULL; uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 }; boolean_t no_crypt = B_FALSE; ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) || HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize); ASSERT3U(psize, <=, asize); /* * If this data simply needs its own buffer, we simply allocate it * and copy the data. This may be done to eliminate a dependency on a * shared buffer or to reallocate the buffer to match asize. */ if (HDR_HAS_RABD(hdr) && asize != psize) { ASSERT3U(asize, >=, psize); to_write = abd_alloc_for_io(asize, ismd); abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize); if (psize != asize) abd_zero_off(to_write, psize, asize - psize); goto out; } if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) && !HDR_ENCRYPTED(hdr)) { ASSERT3U(size, ==, psize); to_write = abd_alloc_for_io(asize, ismd); abd_copy(to_write, hdr->b_l1hdr.b_pabd, size); if (size != asize) abd_zero_off(to_write, size, asize - size); goto out; } if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) { cabd = abd_alloc_for_io(asize, ismd); tmp = abd_borrow_buf(cabd, asize); psize = zio_compress_data(compress, to_write, tmp, size, hdr->b_complevel); if (psize >= size) { abd_return_buf(cabd, tmp, asize); HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF); to_write = cabd; abd_copy(to_write, hdr->b_l1hdr.b_pabd, size); if (size != asize) abd_zero_off(to_write, size, asize - size); goto encrypt; } ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr)); if (psize < asize) bzero((char *)tmp + psize, asize - psize); psize = HDR_GET_PSIZE(hdr); abd_return_buf_copy(cabd, tmp, asize); to_write = cabd; } encrypt: if (HDR_ENCRYPTED(hdr)) { eabd = abd_alloc_for_io(asize, ismd); /* * If the dataset was disowned before the buffer * made it to this point, the key to re-encrypt * it won't be available. In this case we simply * won't write the buffer to the L2ARC. */ ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj, FTAG, &dck); if (ret != 0) goto error; ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key, hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd, &no_crypt); if (ret != 0) goto error; if (no_crypt) abd_copy(eabd, to_write, psize); if (psize != asize) abd_zero_off(eabd, psize, asize - psize); /* assert that the MAC we got here matches the one we saved */ ASSERT0(bcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN)); spa_keystore_dsl_key_rele(spa, dck, FTAG); if (to_write == cabd) abd_free(cabd); to_write = eabd; } out: ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd); *abd_out = to_write; return (0); error: if (dck != NULL) spa_keystore_dsl_key_rele(spa, dck, FTAG); if (cabd != NULL) abd_free(cabd); if (eabd != NULL) abd_free(eabd); *abd_out = NULL; return (ret); } static void l2arc_blk_fetch_done(zio_t *zio) { l2arc_read_callback_t *cb; cb = zio->io_private; if (cb->l2rcb_abd != NULL) abd_free(cb->l2rcb_abd); kmem_free(cb, sizeof (l2arc_read_callback_t)); } /* * Find and write ARC buffers to the L2ARC device. 
* * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid * for reading until they have completed writing. * The headroom_boost is an in-out parameter used to maintain headroom boost * state between calls to this function. * * Returns the number of bytes actually written (which may be smaller than * the delta by which the device hand has changed due to alignment and the * writing of log blocks). */ static uint64_t l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) { arc_buf_hdr_t *hdr, *hdr_prev, *head; uint64_t write_asize, write_psize, write_lsize, headroom; boolean_t full; l2arc_write_callback_t *cb = NULL; zio_t *pio, *wzio; uint64_t guid = spa_load_guid(spa); l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; ASSERT3P(dev->l2ad_vdev, !=, NULL); pio = NULL; write_lsize = write_asize = write_psize = 0; full = B_FALSE; head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE); arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR); /* * Copy buffers for L2ARC writing. */ for (int pass = 0; pass < L2ARC_FEED_TYPES; pass++) { /* * If pass == 1 or 3, we cache MRU metadata and data * respectively. */ if (l2arc_mfuonly) { if (pass == 1 || pass == 3) continue; } multilist_sublist_t *mls = l2arc_sublist_lock(pass); uint64_t passed_sz = 0; VERIFY3P(mls, !=, NULL); /* * L2ARC fast warmup. * * Until the ARC is warm and starts to evict, read from the * head of the ARC lists rather than the tail. */ if (arc_warm == B_FALSE) hdr = multilist_sublist_head(mls); else hdr = multilist_sublist_tail(mls); headroom = target_sz * l2arc_headroom; if (zfs_compressed_arc_enabled) headroom = (headroom * l2arc_headroom_boost) / 100; for (; hdr; hdr = hdr_prev) { kmutex_t *hash_lock; abd_t *to_write = NULL; if (arc_warm == B_FALSE) hdr_prev = multilist_sublist_next(mls, hdr); else hdr_prev = multilist_sublist_prev(mls, hdr); hash_lock = HDR_LOCK(hdr); if (!mutex_tryenter(hash_lock)) { /* * Skip this buffer rather than waiting. */ continue; } passed_sz += HDR_GET_LSIZE(hdr); if (l2arc_headroom != 0 && passed_sz > headroom) { /* * Searched too far. */ mutex_exit(hash_lock); break; } if (!l2arc_write_eligible(guid, hdr)) { mutex_exit(hash_lock); continue; } /* * We rely on the L1 portion of the header below, so * it's invalid for this header to have been evicted out * of the ghost cache, prior to being written out. The * ARC_FLAG_L2_WRITING bit ensures this won't happen. */ ASSERT(HDR_HAS_L1HDR(hdr)); ASSERT3U(HDR_GET_PSIZE(hdr), >, 0); ASSERT3U(arc_hdr_size(hdr), >, 0); ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr)); uint64_t psize = HDR_GET_PSIZE(hdr); uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize); if ((write_asize + asize) > target_sz) { full = B_TRUE; mutex_exit(hash_lock); break; } /* * We rely on the L1 portion of the header below, so * it's invalid for this header to have been evicted out * of the ghost cache, prior to being written out. The * ARC_FLAG_L2_WRITING bit ensures this won't happen. */ arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING); ASSERT(HDR_HAS_L1HDR(hdr)); ASSERT3U(HDR_GET_PSIZE(hdr), >, 0); ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr)); ASSERT3U(arc_hdr_size(hdr), >, 0); /* * If this header has b_rabd, we can use this since it * must always match the data exactly as it exists on * disk. Otherwise, the L2ARC can normally use the * hdr's data, but if we're sharing data between the * hdr and one of its bufs, L2ARC needs its own copy of * the data so that the ZIO below can't race with the * buf consumer. 
To ensure that this copy will be * available for the lifetime of the ZIO and be cleaned * up afterwards, we add it to the l2arc_free_on_write * queue. If we need to apply any transforms to the * data (compression, encryption) we will also need the * extra buffer. */ if (HDR_HAS_RABD(hdr) && psize == asize) { to_write = hdr->b_crypt_hdr.b_rabd; } else if ((HDR_COMPRESSION_ENABLED(hdr) || HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) && !HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) && psize == asize) { to_write = hdr->b_l1hdr.b_pabd; } else { int ret; arc_buf_contents_t type = arc_buf_type(hdr); ret = l2arc_apply_transforms(spa, hdr, asize, &to_write); if (ret != 0) { arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING); mutex_exit(hash_lock); continue; } l2arc_free_abd_on_write(to_write, asize, type); } if (pio == NULL) { /* * Insert a dummy header on the buflist so * l2arc_write_done() can find where the * write buffers begin without searching. */ mutex_enter(&dev->l2ad_mtx); list_insert_head(&dev->l2ad_buflist, head); mutex_exit(&dev->l2ad_mtx); cb = kmem_alloc( sizeof (l2arc_write_callback_t), KM_SLEEP); cb->l2wcb_dev = dev; cb->l2wcb_head = head; /* * Create a list to save allocated abd buffers * for l2arc_log_blk_commit(). */ list_create(&cb->l2wcb_abd_list, sizeof (l2arc_lb_abd_buf_t), offsetof(l2arc_lb_abd_buf_t, node)); pio = zio_root(spa, l2arc_write_done, cb, ZIO_FLAG_CANFAIL); } hdr->b_l2hdr.b_dev = dev; hdr->b_l2hdr.b_hits = 0; hdr->b_l2hdr.b_daddr = dev->l2ad_hand; hdr->b_l2hdr.b_arcs_state = hdr->b_l1hdr.b_state->arcs_state; arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR); mutex_enter(&dev->l2ad_mtx); list_insert_head(&dev->l2ad_buflist, hdr); mutex_exit(&dev->l2ad_mtx); (void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr); wzio = zio_write_phys(pio, dev->l2ad_vdev, hdr->b_l2hdr.b_daddr, asize, to_write, ZIO_CHECKSUM_OFF, NULL, hdr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE); write_lsize += HDR_GET_LSIZE(hdr); DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio); write_psize += psize; write_asize += asize; dev->l2ad_hand += asize; l2arc_hdr_arcstats_increment(hdr); vdev_space_update(dev->l2ad_vdev, asize, 0, 0); mutex_exit(hash_lock); /* * Append buf info to current log and commit if full. * arcstat_l2_{size,asize} kstats are updated * internally. */ if (l2arc_log_blk_insert(dev, hdr)) l2arc_log_blk_commit(dev, pio, cb); zio_nowait(wzio); } multilist_sublist_unlock(mls); if (full == B_TRUE) break; } /* No buffers selected for writing? */ if (pio == NULL) { ASSERT0(write_lsize); ASSERT(!HDR_HAS_L1HDR(head)); kmem_cache_free(hdr_l2only_cache, head); /* * Although we did not write any buffers l2ad_evict may * have advanced. */ if (dev->l2ad_evict != l2dhdr->dh_evict) l2arc_dev_hdr_update(dev); return (0); } if (!dev->l2ad_first) ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict); ASSERT3U(write_asize, <=, target_sz); ARCSTAT_BUMP(arcstat_l2_writes_sent); ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize); dev->l2ad_writing = B_TRUE; (void) zio_wait(pio); dev->l2ad_writing = B_FALSE; /* * Update the device header after the zio completes as * l2arc_write_done() may have updated the memory holding the log block * pointers in the device header. */ l2arc_dev_hdr_update(dev); return (write_asize); } static boolean_t l2arc_hdr_limit_reached(void) { int64_t s = aggsum_upper_bound(&arc_sums.arcstat_l2_hdr_size); return (arc_reclaim_needed() || (s > arc_meta_limit * 3 / 4) || (s > (arc_warm ? 
arc_c : arc_c_max) * l2arc_meta_percent / 100)); } /* * This thread feeds the L2ARC at regular intervals. This is the beating * heart of the L2ARC. */ /* ARGSUSED */ static void l2arc_feed_thread(void *unused) { callb_cpr_t cpr; l2arc_dev_t *dev; spa_t *spa; uint64_t size, wrote; clock_t begin, next = ddi_get_lbolt(); fstrans_cookie_t cookie; CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); mutex_enter(&l2arc_feed_thr_lock); cookie = spl_fstrans_mark(); while (l2arc_thread_exit == 0) { CALLB_CPR_SAFE_BEGIN(&cpr); (void) cv_timedwait_idle(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, next); CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); next = ddi_get_lbolt() + hz; /* * Quick check for L2ARC devices. */ mutex_enter(&l2arc_dev_mtx); if (l2arc_ndev == 0) { mutex_exit(&l2arc_dev_mtx); continue; } mutex_exit(&l2arc_dev_mtx); begin = ddi_get_lbolt(); /* * This selects the next l2arc device to write to, and in * doing so the next spa to feed from: dev->l2ad_spa. This * will return NULL if there are now no l2arc devices or if * they are all faulted. * * If a device is returned, its spa's config lock is also * held to prevent device removal. l2arc_dev_get_next() * will grab and release l2arc_dev_mtx. */ if ((dev = l2arc_dev_get_next()) == NULL) continue; spa = dev->l2ad_spa; ASSERT3P(spa, !=, NULL); /* * If the pool is read-only then force the feed thread to * sleep a little longer. */ if (!spa_writeable(spa)) { next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; spa_config_exit(spa, SCL_L2ARC, dev); continue; } /* * Avoid contributing to memory pressure. */ if (l2arc_hdr_limit_reached()) { ARCSTAT_BUMP(arcstat_l2_abort_lowmem); spa_config_exit(spa, SCL_L2ARC, dev); continue; } ARCSTAT_BUMP(arcstat_l2_feeds); size = l2arc_write_size(dev); /* * Evict L2ARC buffers that will be overwritten. */ l2arc_evict(dev, size, B_FALSE); /* * Write ARC buffers. */ wrote = l2arc_write_buffers(spa, dev, size); /* * Calculate interval between writes. */ next = l2arc_write_interval(begin, size, wrote); spa_config_exit(spa, SCL_L2ARC, dev); } spl_fstrans_unmark(cookie); l2arc_thread_exit = 0; cv_broadcast(&l2arc_feed_thr_cv); CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ thread_exit(); } boolean_t l2arc_vdev_present(vdev_t *vd) { return (l2arc_vdev_get(vd) != NULL); } /* * Returns the l2arc_dev_t associated with a particular vdev_t or NULL if * the vdev_t isn't an L2ARC device. */ l2arc_dev_t * l2arc_vdev_get(vdev_t *vd) { l2arc_dev_t *dev; mutex_enter(&l2arc_dev_mtx); for (dev = list_head(l2arc_dev_list); dev != NULL; dev = list_next(l2arc_dev_list, dev)) { if (dev->l2ad_vdev == vd) break; } mutex_exit(&l2arc_dev_mtx); return (dev); } static void l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen) { l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize; spa_t *spa = dev->l2ad_spa; /* * The L2ARC has to hold at least the payload of one log block for * them to be restored (persistent L2ARC). The payload of a log block * depends on the amount of its log entries. We always write log blocks * with 1022 entries. How many of them are committed or restored depends * on the size of the L2ARC device. Thus the maximum payload of * one log block is 1022 * SPA_MAXBLOCKSIZE = 16GB. If the L2ARC device * is less than that, we reduce the amount of committed and restored * log entries per block so as to enable persistence. 
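	 *
	 * For example (numbers illustrative): a 4 GiB device gets
	 * 4 GiB >> SPA_MAXBLOCKSHIFT = 256 log entries per block, while
	 * devices of roughly 16 GiB and larger use the full 1022.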
*/ if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) { dev->l2ad_log_entries = 0; } else { dev->l2ad_log_entries = MIN((dev->l2ad_end - dev->l2ad_start) >> SPA_MAXBLOCKSHIFT, L2ARC_LOG_BLK_MAX_ENTRIES); } /* * Read the device header, if an error is returned do not rebuild L2ARC. */ if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) { /* * If we are onlining a cache device (vdev_reopen) that was * still present (l2arc_vdev_present()) and rebuild is enabled, * we should evict all ARC buffers and pointers to log blocks * and reclaim their space before restoring its contents to * L2ARC. */ if (reopen) { if (!l2arc_rebuild_enabled) { return; } else { l2arc_evict(dev, 0, B_TRUE); /* start a new log block */ dev->l2ad_log_ent_idx = 0; dev->l2ad_log_blk_payload_asize = 0; dev->l2ad_log_blk_payload_start = 0; } } /* * Just mark the device as pending for a rebuild. We won't * be starting a rebuild in line here as it would block pool * import. Instead spa_load_impl will hand that off to an * async task which will call l2arc_spa_rebuild_start. */ dev->l2ad_rebuild = B_TRUE; } else if (spa_writeable(spa)) { /* * In this case TRIM the whole device if l2arc_trim_ahead > 0, * otherwise create a new header. We zero out the memory holding * the header to reset dh_start_lbps. If we TRIM the whole * device the new header will be written by * vdev_trim_l2arc_thread() at the end of the TRIM to update the * trim_state in the header too. When reading the header, if * trim_state is not VDEV_TRIM_COMPLETE and l2arc_trim_ahead > 0 * we opt to TRIM the whole device again. */ if (l2arc_trim_ahead > 0) { dev->l2ad_trim_all = B_TRUE; } else { bzero(l2dhdr, l2dhdr_asize); l2arc_dev_hdr_update(dev); } } } /* * Add a vdev for use by the L2ARC. By this point the spa has already * validated the vdev and opened it. */ void l2arc_add_vdev(spa_t *spa, vdev_t *vd) { l2arc_dev_t *adddev; uint64_t l2dhdr_asize; ASSERT(!l2arc_vdev_present(vd)); /* * Create a new l2arc device entry. */ adddev = vmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); adddev->l2ad_spa = spa; adddev->l2ad_vdev = vd; /* leave extra size for an l2arc device header */ l2dhdr_asize = adddev->l2ad_dev_hdr_asize = MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift); adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize; adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end); adddev->l2ad_hand = adddev->l2ad_start; adddev->l2ad_evict = adddev->l2ad_start; adddev->l2ad_first = B_TRUE; adddev->l2ad_writing = B_FALSE; adddev->l2ad_trim_all = B_FALSE; list_link_init(&adddev->l2ad_node); adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP); mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL); /* * This is a list of all ARC buffers that are still valid on the * device. */ list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node)); /* * This is a list of pointers to log blocks that are still present * on the device. */ list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t), offsetof(l2arc_lb_ptr_buf_t, node)); vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); zfs_refcount_create(&adddev->l2ad_alloc); zfs_refcount_create(&adddev->l2ad_lb_asize); zfs_refcount_create(&adddev->l2ad_lb_count); /* * Decide if dev is eligible for L2ARC rebuild or whole device * trimming. This has to happen before the device is added in the * cache device list and l2arc_dev_mtx is released. 
Otherwise
	 * l2arc_feed_thread() might already start writing on the
	 * device.
	 */
	l2arc_rebuild_dev(adddev, B_FALSE);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}

/*
 * Decide if a vdev is eligible for L2ARC rebuild, called from vdev_reopen()
 * in case of onlining a cache device.
 */
void
l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen)
{
	l2arc_dev_t *dev = NULL;

	dev = l2arc_vdev_get(vd);
	ASSERT3P(dev, !=, NULL);

	/*
	 * In contrast to l2arc_add_vdev() we do not have to worry about
	 * l2arc_feed_thread() invalidating previous content when onlining a
	 * cache device. The device parameters (l2ad*) are not cleared when
	 * offlining the device and writing new buffers will not invalidate
	 * all previous content. In the worst case only buffers that have not
	 * had their log block written to the device will be lost.
	 * When onlining the cache device (i.e. offline->online without
	 * exporting the pool in between) this happens:
	 * vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev()
	 *			|			|
	 *		vdev_is_dead() = B_FALSE	l2ad_rebuild = B_TRUE
	 * During the time where vdev_is_dead = B_FALSE and until l2ad_rebuild
	 * is set to B_TRUE we might write additional buffers to the device.
	 */
	l2arc_rebuild_dev(dev, reopen);
}

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	remdev = l2arc_vdev_get(vd);
	ASSERT3P(remdev, !=, NULL);

	/*
	 * Cancel any ongoing or scheduled rebuild.
	 */
	mutex_enter(&l2arc_rebuild_thr_lock);
	if (remdev->l2ad_rebuild_began == B_TRUE) {
		remdev->l2ad_rebuild_cancel = B_TRUE;
		while (remdev->l2ad_rebuild == B_TRUE)
			cv_wait(&l2arc_rebuild_thr_cv,
			    &l2arc_rebuild_thr_lock);
	}
	mutex_exit(&l2arc_rebuild_thr_lock);

	/*
	 * Remove device from global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;	/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
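	 * (l2arc_evict() with all == B_TRUE walks the entire buflist and
	 * log block pointer list regardless of the write hand, so nothing
	 * should survive past this point.)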
*/ l2arc_evict(remdev, 0, B_TRUE); list_destroy(&remdev->l2ad_buflist); ASSERT(list_is_empty(&remdev->l2ad_lbptr_list)); list_destroy(&remdev->l2ad_lbptr_list); mutex_destroy(&remdev->l2ad_mtx); zfs_refcount_destroy(&remdev->l2ad_alloc); zfs_refcount_destroy(&remdev->l2ad_lb_asize); zfs_refcount_destroy(&remdev->l2ad_lb_count); kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize); vmem_free(remdev, sizeof (l2arc_dev_t)); } void l2arc_init(void) { l2arc_thread_exit = 0; l2arc_ndev = 0; mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); mutex_init(&l2arc_rebuild_thr_lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&l2arc_rebuild_thr_cv, NULL, CV_DEFAULT, NULL); mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); l2arc_dev_list = &L2ARC_dev_list; l2arc_free_on_write = &L2ARC_free_on_write; list_create(l2arc_dev_list, sizeof (l2arc_dev_t), offsetof(l2arc_dev_t, l2ad_node)); list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), offsetof(l2arc_data_free_t, l2df_list_node)); } void l2arc_fini(void) { mutex_destroy(&l2arc_feed_thr_lock); cv_destroy(&l2arc_feed_thr_cv); mutex_destroy(&l2arc_rebuild_thr_lock); cv_destroy(&l2arc_rebuild_thr_cv); mutex_destroy(&l2arc_dev_mtx); mutex_destroy(&l2arc_free_on_write_mtx); list_destroy(l2arc_dev_list); list_destroy(l2arc_free_on_write); } void l2arc_start(void) { if (!(spa_mode_global & SPA_MODE_WRITE)) return; (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, TS_RUN, defclsyspri); } void l2arc_stop(void) { if (!(spa_mode_global & SPA_MODE_WRITE)) return; mutex_enter(&l2arc_feed_thr_lock); cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ l2arc_thread_exit = 1; while (l2arc_thread_exit != 0) cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); mutex_exit(&l2arc_feed_thr_lock); } /* * Punches out rebuild threads for the L2ARC devices in a spa. This should * be called after pool import from the spa async thread, since starting * these threads directly from spa_import() will make them part of the * "zpool import" context and delay process exit (and thus pool import). */ void l2arc_spa_rebuild_start(spa_t *spa) { ASSERT(MUTEX_HELD(&spa_namespace_lock)); /* * Locate the spa's l2arc devices and kick off rebuild threads. */ for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { l2arc_dev_t *dev = l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]); if (dev == NULL) { /* Don't attempt a rebuild if the vdev is UNAVAIL */ continue; } mutex_enter(&l2arc_rebuild_thr_lock); if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) { dev->l2ad_rebuild_began = B_TRUE; (void) thread_create(NULL, 0, l2arc_dev_rebuild_thread, dev, 0, &p0, TS_RUN, minclsyspri); } mutex_exit(&l2arc_rebuild_thr_lock); } } /* * Main entry point for L2ARC rebuilding. */ static void l2arc_dev_rebuild_thread(void *arg) { l2arc_dev_t *dev = arg; VERIFY(!dev->l2ad_rebuild_cancel); VERIFY(dev->l2ad_rebuild); (void) l2arc_rebuild(dev); mutex_enter(&l2arc_rebuild_thr_lock); dev->l2ad_rebuild_began = B_FALSE; dev->l2ad_rebuild = B_FALSE; mutex_exit(&l2arc_rebuild_thr_lock); thread_exit(); } /* * This function implements the actual L2ARC metadata rebuild. It: * starts reading the log block chain and restores each block's contents * to memory (reconstructing arc_buf_hdr_t's). * * Operation stops under any of the following conditions: * * 1) We reach the end of the log block chain. 
* 2) We encounter *any* error condition (cksum errors, io errors) */ static int l2arc_rebuild(l2arc_dev_t *dev) { vdev_t *vd = dev->l2ad_vdev; spa_t *spa = vd->vdev_spa; int err = 0; l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; l2arc_log_blk_phys_t *this_lb, *next_lb; zio_t *this_io = NULL, *next_io = NULL; l2arc_log_blkptr_t lbps[2]; l2arc_lb_ptr_buf_t *lb_ptr_buf; boolean_t lock_held; this_lb = vmem_zalloc(sizeof (*this_lb), KM_SLEEP); next_lb = vmem_zalloc(sizeof (*next_lb), KM_SLEEP); /* * We prevent device removal while issuing reads to the device, * then during the rebuilding phases we drop this lock again so * that a spa_unload or device remove can be initiated - this is * safe, because the spa will signal us to stop before removing * our device and wait for us to stop. */ spa_config_enter(spa, SCL_L2ARC, vd, RW_READER); lock_held = B_TRUE; /* * Retrieve the persistent L2ARC device state. * L2BLK_GET_PSIZE returns aligned size for log blocks. */ dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start); dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr + L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop), dev->l2ad_start); dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST); vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time; vd->vdev_trim_state = l2dhdr->dh_trim_state; /* * In case the zfs module parameter l2arc_rebuild_enabled is false * we do not start the rebuild process. */ if (!l2arc_rebuild_enabled) goto out; /* Prepare the rebuild process */ bcopy(l2dhdr->dh_start_lbps, lbps, sizeof (lbps)); /* Start the rebuild process */ for (;;) { if (!l2arc_log_blkptr_valid(dev, &lbps[0])) break; if ((err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1], this_lb, next_lb, this_io, &next_io)) != 0) goto out; /* * Our memory pressure valve. If the system is running low * on memory, rather than swamping memory with new ARC buf * hdrs, we opt not to rebuild the L2ARC. At this point, * however, we have already set up our L2ARC dev to chain in * new metadata log blocks, so the user may choose to offline/ * online the L2ARC dev at a later time (or re-import the pool) * to reconstruct it (when there's less memory pressure). */ if (l2arc_hdr_limit_reached()) { ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem); cmn_err(CE_NOTE, "System running low on memory, " "aborting L2ARC rebuild."); err = SET_ERROR(ENOMEM); goto out; } spa_config_exit(spa, SCL_L2ARC, vd); lock_held = B_FALSE; /* * Now that we know that the next_lb checks out alright, we * can start reconstruction from this log block. * L2BLK_GET_PSIZE returns aligned size for log blocks. */ uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop); l2arc_log_blk_restore(dev, this_lb, asize); /* * log block restored, include its pointer in the list of * pointers to log blocks present in the L2ARC device. 
*/ lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP); lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP); bcopy(&lbps[0], lb_ptr_buf->lb_ptr, sizeof (l2arc_log_blkptr_t)); mutex_enter(&dev->l2ad_mtx); list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf); ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize); ARCSTAT_BUMP(arcstat_l2_log_blk_count); zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf); zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf); mutex_exit(&dev->l2ad_mtx); vdev_space_update(vd, asize, 0, 0); /* * Protection against loops of log blocks: * * l2ad_hand l2ad_evict * V V * l2ad_start |=======================================| l2ad_end * -----|||----|||---|||----||| * (3) (2) (1) (0) * ---|||---|||----|||---||| * (7) (6) (5) (4) * * In this situation the pointer of log block (4) passes * l2arc_log_blkptr_valid() but the log block should not be * restored as it is overwritten by the payload of log block * (0). Only log blocks (0)-(3) should be restored. We check * whether l2ad_evict lies in between the payload starting * offset of the next log block (lbps[1].lbp_payload_start) * and the payload starting offset of the present log block * (lbps[0].lbp_payload_start). If true and this isn't the * first pass, we are looping from the beginning and we should * stop. */ if (l2arc_range_check_overlap(lbps[1].lbp_payload_start, lbps[0].lbp_payload_start, dev->l2ad_evict) && !dev->l2ad_first) goto out; cond_resched(); for (;;) { mutex_enter(&l2arc_rebuild_thr_lock); if (dev->l2ad_rebuild_cancel) { dev->l2ad_rebuild = B_FALSE; cv_signal(&l2arc_rebuild_thr_cv); mutex_exit(&l2arc_rebuild_thr_lock); err = SET_ERROR(ECANCELED); goto out; } mutex_exit(&l2arc_rebuild_thr_lock); if (spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) { lock_held = B_TRUE; break; } /* * L2ARC config lock held by somebody in writer, * possibly due to them trying to remove us. They'll * likely to want us to shut down, so after a little * delay, we check l2ad_rebuild_cancel and retry * the lock again. */ delay(1); } /* * Continue with the next log block. */ lbps[0] = lbps[1]; lbps[1] = this_lb->lb_prev_lbp; PTR_SWAP(this_lb, next_lb); this_io = next_io; next_io = NULL; } if (this_io != NULL) l2arc_log_blk_fetch_abort(this_io); out: if (next_io != NULL) l2arc_log_blk_fetch_abort(next_io); vmem_free(this_lb, sizeof (*this_lb)); vmem_free(next_lb, sizeof (*next_lb)); if (!l2arc_rebuild_enabled) { spa_history_log_internal(spa, "L2ARC rebuild", NULL, "disabled"); } else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) { ARCSTAT_BUMP(arcstat_l2_rebuild_success); spa_history_log_internal(spa, "L2ARC rebuild", NULL, "successful, restored %llu blocks", (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count)); } else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) { /* * No error but also nothing restored, meaning the lbps array * in the device header points to invalid/non-present log * blocks. Reset the header. */ spa_history_log_internal(spa, "L2ARC rebuild", NULL, "no valid log blocks"); bzero(l2dhdr, dev->l2ad_dev_hdr_asize); l2arc_dev_hdr_update(dev); } else if (err == ECANCELED) { /* * In case the rebuild was canceled do not log to spa history * log as the pool may be in the process of being removed. 
*/ zfs_dbgmsg("L2ARC rebuild aborted, restored %llu blocks", (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count)); } else if (err != 0) { spa_history_log_internal(spa, "L2ARC rebuild", NULL, "aborted, restored %llu blocks", (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count)); } if (lock_held) spa_config_exit(spa, SCL_L2ARC, vd); return (err); } /* * Attempts to read the device header on the provided L2ARC device and writes * it to `hdr'. On success, this function returns 0, otherwise the appropriate * error code is returned. */ static int l2arc_dev_hdr_read(l2arc_dev_t *dev) { int err; uint64_t guid; l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize; abd_t *abd; guid = spa_guid(dev->l2ad_vdev->vdev_spa); abd = abd_get_from_buf(l2dhdr, l2dhdr_asize); err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev, VDEV_LABEL_START_SIZE, l2dhdr_asize, abd, ZIO_CHECKSUM_LABEL, NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_SPECULATIVE, B_FALSE)); abd_free(abd); if (err != 0) { ARCSTAT_BUMP(arcstat_l2_rebuild_abort_dh_errors); zfs_dbgmsg("L2ARC IO error (%d) while reading device header, " "vdev guid: %llu", err, (u_longlong_t)dev->l2ad_vdev->vdev_guid); return (err); } if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC)) byteswap_uint64_array(l2dhdr, sizeof (*l2dhdr)); if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC || l2dhdr->dh_spa_guid != guid || l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid || l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION || l2dhdr->dh_log_entries != dev->l2ad_log_entries || l2dhdr->dh_end != dev->l2ad_end || !l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end, l2dhdr->dh_evict) || (l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE && l2arc_trim_ahead > 0)) { /* * Attempt to rebuild a device containing no actual dev hdr * or containing a header from some other pool or from another * version of persistent L2ARC. */ ARCSTAT_BUMP(arcstat_l2_rebuild_abort_unsupported); return (SET_ERROR(ENOTSUP)); } return (0); } /* * Reads L2ARC log blocks from storage and validates their contents. * * This function implements a simple fetcher to make sure that while * we're processing one buffer the L2ARC is already fetching the next * one in the chain. * * The arguments this_lp and next_lp point to the current and next log block * address in the block chain. Similarly, this_lb and next_lb hold the * l2arc_log_blk_phys_t's of the current and next L2ARC blk. * * The `this_io' and `next_io' arguments are used for block fetching. * When issuing the first blk IO during rebuild, you should pass NULL for * `this_io'. This function will then issue a sync IO to read the block and * also issue an async IO to fetch the next block in the block chain. The * fetched IO is returned in `next_io'. On subsequent calls to this * function, pass the value returned in `next_io' from the previous call * as `this_io' and a fresh `next_io' pointer to hold the next fetch IO. * Prior to the call, you should initialize your `next_io' pointer to be * NULL. If no fetch IO was issued, the pointer is left set at NULL. * * On success, this function returns 0, otherwise it returns an appropriate * error code. On error the fetching IO is aborted and cleared before * returning from this function. Therefore, if we return `success', the * caller can assume that we have taken care of cleanup of fetch IOs. 
*/ static int l2arc_log_blk_read(l2arc_dev_t *dev, const l2arc_log_blkptr_t *this_lbp, const l2arc_log_blkptr_t *next_lbp, l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb, zio_t *this_io, zio_t **next_io) { int err = 0; zio_cksum_t cksum; abd_t *abd = NULL; uint64_t asize; ASSERT(this_lbp != NULL && next_lbp != NULL); ASSERT(this_lb != NULL && next_lb != NULL); ASSERT(next_io != NULL && *next_io == NULL); ASSERT(l2arc_log_blkptr_valid(dev, this_lbp)); /* * Check to see if we have issued the IO for this log block in a * previous run. If not, this is the first call, so issue it now. */ if (this_io == NULL) { this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp, this_lb); } /* * Peek to see if we can start issuing the next IO immediately. */ if (l2arc_log_blkptr_valid(dev, next_lbp)) { /* * Start issuing IO for the next log block early - this * should help keep the L2ARC device busy while we * decompress and restore this log block. */ *next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp, next_lb); } /* Wait for the IO to read this log block to complete */ if ((err = zio_wait(this_io)) != 0) { ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors); zfs_dbgmsg("L2ARC IO error (%d) while reading log block, " "offset: %llu, vdev guid: %llu", err, (u_longlong_t)this_lbp->lbp_daddr, (u_longlong_t)dev->l2ad_vdev->vdev_guid); goto cleanup; } /* * Make sure the buffer checks out. * L2BLK_GET_PSIZE returns aligned size for log blocks. */ asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop); fletcher_4_native(this_lb, asize, NULL, &cksum); if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) { ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_lb_errors); zfs_dbgmsg("L2ARC log block cksum failed, offset: %llu, " "vdev guid: %llu, l2ad_hand: %llu, l2ad_evict: %llu", (u_longlong_t)this_lbp->lbp_daddr, (u_longlong_t)dev->l2ad_vdev->vdev_guid, (u_longlong_t)dev->l2ad_hand, (u_longlong_t)dev->l2ad_evict); err = SET_ERROR(ECKSUM); goto cleanup; } /* Now we can take our time decoding this buffer */ switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) { case ZIO_COMPRESS_OFF: break; case ZIO_COMPRESS_LZ4: abd = abd_alloc_for_io(asize, B_TRUE); abd_copy_from_buf_off(abd, this_lb, 0, asize); if ((err = zio_decompress_data( L2BLK_GET_COMPRESS((this_lbp)->lbp_prop), abd, this_lb, asize, sizeof (*this_lb), NULL)) != 0) { err = SET_ERROR(EINVAL); goto cleanup; } break; default: err = SET_ERROR(EINVAL); goto cleanup; } if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC)) byteswap_uint64_array(this_lb, sizeof (*this_lb)); if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) { err = SET_ERROR(EINVAL); goto cleanup; } cleanup: /* Abort an in-flight fetch I/O in case of error */ if (err != 0 && *next_io != NULL) { l2arc_log_blk_fetch_abort(*next_io); *next_io = NULL; } if (abd != NULL) abd_free(abd); return (err); } /* * Restores the payload of a log block to ARC. This creates empty ARC hdr * entries which only contain an l2arc hdr, essentially restoring the * buffers to their L2ARC evicted state. This function also updates space * usage on the L2ARC vdev to make sure it tracks restored buffers. */ static void l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb, uint64_t lb_asize) { uint64_t size = 0, asize = 0; uint64_t log_entries = dev->l2ad_log_entries; /* * Usually arc_adapt() is called only for data, not headers, but * since we may allocate significant amount of memory here, let ARC * grow its arc_c. 
*/ arc_adapt(log_entries * HDR_L2ONLY_SIZE, arc_l2c_only); for (int i = log_entries - 1; i >= 0; i--) { /* * Restore goes in the reverse temporal direction to preserve * correct temporal ordering of buffers in the l2ad_buflist. * l2arc_hdr_restore also does a list_insert_tail instead of * list_insert_head on the l2ad_buflist: * * LIST l2ad_buflist LIST * HEAD <------ (time) ------ TAIL * direction +-----+-----+-----+-----+-----+ direction * of l2arc <== | buf | buf | buf | buf | buf | ===> of rebuild * fill +-----+-----+-----+-----+-----+ * ^ ^ * | | * | | * l2arc_feed_thread l2arc_rebuild * will place new bufs here restores bufs here * * During l2arc_rebuild() the device is not used by * l2arc_feed_thread() as dev->l2ad_rebuild is set to true. */ size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop); asize += vdev_psize_to_asize(dev->l2ad_vdev, L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop)); l2arc_hdr_restore(&lb->lb_entries[i], dev); } /* * Record rebuild stats: * size Logical size of restored buffers in the L2ARC * asize Aligned size of restored buffers in the L2ARC */ ARCSTAT_INCR(arcstat_l2_rebuild_size, size); ARCSTAT_INCR(arcstat_l2_rebuild_asize, asize); ARCSTAT_INCR(arcstat_l2_rebuild_bufs, log_entries); ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, lb_asize); ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, asize / lb_asize); ARCSTAT_BUMP(arcstat_l2_rebuild_log_blks); } /* * Restores a single ARC buf hdr from a log entry. The ARC buffer is put * into a state indicating that it has been evicted to L2ARC. */ static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev) { arc_buf_hdr_t *hdr, *exists; kmutex_t *hash_lock; arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop); uint64_t asize; /* * Do all the allocation before grabbing any locks; this lets us * sleep if memory is full and we don't have to deal with failed * allocations. */ hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type, dev, le->le_dva, le->le_daddr, L2BLK_GET_PSIZE((le)->le_prop), le->le_birth, L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel, L2BLK_GET_PROTECTED((le)->le_prop), L2BLK_GET_PREFETCH((le)->le_prop), L2BLK_GET_STATE((le)->le_prop)); asize = vdev_psize_to_asize(dev->l2ad_vdev, L2BLK_GET_PSIZE((le)->le_prop)); /* * vdev_space_update() has to be called before arc_hdr_destroy() to * avoid underflow since the latter also calls vdev_space_update(). */ l2arc_hdr_arcstats_increment(hdr); vdev_space_update(dev->l2ad_vdev, asize, 0, 0); mutex_enter(&dev->l2ad_mtx); list_insert_tail(&dev->l2ad_buflist, hdr); (void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr); mutex_exit(&dev->l2ad_mtx); exists = buf_hash_insert(hdr, &hash_lock); if (exists) { /* Buffer was already cached, no need to restore it. */ arc_hdr_destroy(hdr); /* * If the buffer is already cached, check whether it has * L2ARC metadata. If not, enter them and update the flag. * This is important in case of onlining a cache device, since * we previously evicted all L2ARC metadata from ARC.
*/ if (!HDR_HAS_L2HDR(exists)) { arc_hdr_set_flags(exists, ARC_FLAG_HAS_L2HDR); exists->b_l2hdr.b_dev = dev; exists->b_l2hdr.b_daddr = le->le_daddr; exists->b_l2hdr.b_arcs_state = L2BLK_GET_STATE((le)->le_prop); mutex_enter(&dev->l2ad_mtx); list_insert_tail(&dev->l2ad_buflist, exists); (void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(exists), exists); mutex_exit(&dev->l2ad_mtx); l2arc_hdr_arcstats_increment(exists); vdev_space_update(dev->l2ad_vdev, asize, 0, 0); } ARCSTAT_BUMP(arcstat_l2_rebuild_bufs_precached); } mutex_exit(hash_lock); } /* * Starts an asynchronous read IO to read a log block. This is used in log * block reconstruction to start reading the next block before we are done * decoding and reconstructing the current block, to keep the l2arc device * nice and hot with read IO to process. * The returned zio will contain newly allocated memory buffers for the IO * data which should then be freed by the caller once the zio is no longer * needed (i.e. due to it having completed). If you wish to abort this * zio, you should do so using l2arc_log_blk_fetch_abort, which takes * care of disposing of the allocated buffers correctly. */ static zio_t * l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp, l2arc_log_blk_phys_t *lb) { uint32_t asize; zio_t *pio; l2arc_read_callback_t *cb; /* L2BLK_GET_PSIZE returns aligned size for log blocks */ asize = L2BLK_GET_PSIZE((lbp)->lbp_prop); ASSERT(asize <= sizeof (l2arc_log_blk_phys_t)); cb = kmem_zalloc(sizeof (l2arc_read_callback_t), KM_SLEEP); cb->l2rcb_abd = abd_get_from_buf(lb, asize); pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb, ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY); (void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize, cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL, ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE)); return (pio); } /* * Aborts a zio returned from l2arc_log_blk_fetch and frees the data * buffers allocated for it. */ static void l2arc_log_blk_fetch_abort(zio_t *zio) { (void) zio_wait(zio); } /* * Creates a zio to update the device header on an l2arc device.
*/ void l2arc_dev_hdr_update(l2arc_dev_t *dev) { l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize; abd_t *abd; int err; VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER)); l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC; l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION; l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa); l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid; l2dhdr->dh_log_entries = dev->l2ad_log_entries; l2dhdr->dh_evict = dev->l2ad_evict; l2dhdr->dh_start = dev->l2ad_start; l2dhdr->dh_end = dev->l2ad_end; l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize); l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count); l2dhdr->dh_flags = 0; l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time; l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state; if (dev->l2ad_first) l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST; abd = abd_get_from_buf(l2dhdr, l2dhdr_asize); err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev, VDEV_LABEL_START_SIZE, l2dhdr_asize, abd, ZIO_CHECKSUM_LABEL, NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE)); abd_free(abd); if (err != 0) { zfs_dbgmsg("L2ARC IO error (%d) while writing device header, " "vdev guid: %llu", err, (u_longlong_t)dev->l2ad_vdev->vdev_guid); } } /* * Commits a log block to the L2ARC device. This routine is invoked from * l2arc_write_buffers when the log block fills up. * This function allocates some memory to temporarily hold the serialized * buffer to be written. This is then released in l2arc_write_done. */ static void l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb) { l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk; l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; uint64_t psize, asize; zio_t *wzio; l2arc_lb_abd_buf_t *abd_buf; uint8_t *tmpbuf; l2arc_lb_ptr_buf_t *lb_ptr_buf; VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries); tmpbuf = zio_buf_alloc(sizeof (*lb)); abd_buf = zio_buf_alloc(sizeof (*abd_buf)); abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb)); lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP); lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP); /* link the buffer into the block chain */ lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1]; lb->lb_magic = L2ARC_LOG_BLK_MAGIC; /* * l2arc_log_blk_commit() may be called multiple times during a single * l2arc_write_buffers() call. Save the allocated abd buffers in a list * so we can free them in l2arc_write_done() later on. */ list_insert_tail(&cb->l2wcb_abd_list, abd_buf); /* try to compress the buffer */ psize = zio_compress_data(ZIO_COMPRESS_LZ4, abd_buf->abd, tmpbuf, sizeof (*lb), 0); /* a log block is never entirely zero */ ASSERT(psize != 0); asize = vdev_psize_to_asize(dev->l2ad_vdev, psize); ASSERT(asize <= sizeof (*lb)); /* * Update the start log block pointer in the device header to point * to the log block we're about to write. 
*/ l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0]; l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand; l2dhdr->dh_start_lbps[0].lbp_payload_asize = dev->l2ad_log_blk_payload_asize; l2dhdr->dh_start_lbps[0].lbp_payload_start = dev->l2ad_log_blk_payload_start; _NOTE(CONSTCOND) L2BLK_SET_LSIZE( (&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb)); L2BLK_SET_PSIZE( (&l2dhdr->dh_start_lbps[0])->lbp_prop, asize); L2BLK_SET_CHECKSUM( (&l2dhdr->dh_start_lbps[0])->lbp_prop, ZIO_CHECKSUM_FLETCHER_4); if (asize < sizeof (*lb)) { /* compression succeeded */ bzero(tmpbuf + psize, asize - psize); L2BLK_SET_COMPRESS( (&l2dhdr->dh_start_lbps[0])->lbp_prop, ZIO_COMPRESS_LZ4); } else { /* compression failed */ bcopy(lb, tmpbuf, sizeof (*lb)); L2BLK_SET_COMPRESS( (&l2dhdr->dh_start_lbps[0])->lbp_prop, ZIO_COMPRESS_OFF); } /* checksum what we're about to write */ fletcher_4_native(tmpbuf, asize, NULL, &l2dhdr->dh_start_lbps[0].lbp_cksum); abd_free(abd_buf->abd); /* perform the write itself */ abd_buf->abd = abd_get_from_buf(tmpbuf, sizeof (*lb)); abd_take_ownership_of_buf(abd_buf->abd, B_TRUE); wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand, asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE); DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio); (void) zio_nowait(wzio); dev->l2ad_hand += asize; /* * Include the committed log block's pointer in the list of pointers * to log blocks present in the L2ARC device. */ bcopy(&l2dhdr->dh_start_lbps[0], lb_ptr_buf->lb_ptr, sizeof (l2arc_log_blkptr_t)); mutex_enter(&dev->l2ad_mtx); list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf); ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize); ARCSTAT_BUMP(arcstat_l2_log_blk_count); zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf); zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf); mutex_exit(&dev->l2ad_mtx); vdev_space_update(dev->l2ad_vdev, asize, 0, 0); /* bump the kstats */ ARCSTAT_INCR(arcstat_l2_write_bytes, asize); ARCSTAT_BUMP(arcstat_l2_log_blk_writes); ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, asize); ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, dev->l2ad_log_blk_payload_asize / asize); /* start a new log block */ dev->l2ad_log_ent_idx = 0; dev->l2ad_log_blk_payload_asize = 0; dev->l2ad_log_blk_payload_start = 0; } /* * Validates an L2ARC log block address to make sure that it can be read * from the provided L2ARC device. 
*/ boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp) { /* L2BLK_GET_PSIZE returns aligned size for log blocks */ uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop); uint64_t end = lbp->lbp_daddr + asize - 1; uint64_t start = lbp->lbp_payload_start; boolean_t evicted = B_FALSE; /* * A log block is valid if all of the following conditions are true: * - it fits entirely (including its payload) between l2ad_start and * l2ad_end * - it has a valid size * - neither the log block itself nor part of its payload was evicted * by l2arc_evict(): * * l2ad_hand l2ad_evict * | | lbp_daddr * | start | | end * | | | | | * V V V V V * l2ad_start ============================================ l2ad_end * --------------------------|||| * ^ ^ * | log block * payload */ evicted = l2arc_range_check_overlap(start, end, dev->l2ad_hand) || l2arc_range_check_overlap(start, end, dev->l2ad_evict) || l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) || l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end); return (start >= dev->l2ad_start && end <= dev->l2ad_end && asize > 0 && asize <= sizeof (l2arc_log_blk_phys_t) && (!evicted || dev->l2ad_first)); } /* * Inserts ARC buffer header `hdr' into the current L2ARC log block on * the device. The buffer being inserted must be present in L2ARC. * Returns B_TRUE if the L2ARC log block is full and needs to be committed * to L2ARC, or B_FALSE if it still has room for more ARC buffers. */ static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr) { l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk; l2arc_log_ent_phys_t *le; if (dev->l2ad_log_entries == 0) return (B_FALSE); int index = dev->l2ad_log_ent_idx++; ASSERT3S(index, <, dev->l2ad_log_entries); ASSERT(HDR_HAS_L2HDR(hdr)); le = &lb->lb_entries[index]; bzero(le, sizeof (*le)); le->le_dva = hdr->b_dva; le->le_birth = hdr->b_birth; le->le_daddr = hdr->b_l2hdr.b_daddr; if (index == 0) dev->l2ad_log_blk_payload_start = le->le_daddr; L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr)); L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr)); L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr)); le->le_complevel = hdr->b_complevel; L2BLK_SET_TYPE((le)->le_prop, hdr->b_type); L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr))); L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr))); L2BLK_SET_STATE((le)->le_prop, hdr->b_l1hdr.b_state->arcs_state); dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev, HDR_GET_PSIZE(hdr)); return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries); } /* * Checks whether a given L2ARC device address sits in a time-sequential * range. The trick here is that the L2ARC is a rotary buffer, so we can't * just do a range comparison, we need to handle the situation in which the * range wraps around the end of the L2ARC device. Arguments: * bottom -- Lower end of the range to check (written to earlier). * top -- Upper end of the range to check (written to later). * check -- The address for which we want to determine if it sits in * between the top and bottom. * * The 3-way conditional below represents the following cases: * * bottom < top : Sequentially ordered case: * <check>--------+-------------------+ * | (overlap here?) | * L2ARC dev V V * |---------------<bottom>============<top>--------------| * * bottom > top: Looped-around case: * <check>--------+------------------+ * | (overlap here?) | * L2ARC dev V V * |===============<top>---------------<bottom>===========| * ^ ^ * | (or here?) 
| * +---------------+---------<check> * * top == bottom : Just a single address comparison. */ boolean_t l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check) { if (bottom < top) return (bottom <= check && check <= top); else if (bottom > top) return (check <= top || bottom <= check); else return (check == top); } EXPORT_SYMBOL(arc_buf_size); EXPORT_SYMBOL(arc_write); EXPORT_SYMBOL(arc_read); EXPORT_SYMBOL(arc_buf_info); EXPORT_SYMBOL(arc_getbuf_func); EXPORT_SYMBOL(arc_add_prune_callback); EXPORT_SYMBOL(arc_remove_prune_callback); /* BEGIN CSTYLED */ ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min, param_get_long, ZMOD_RW, "Min arc size"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max, param_get_long, ZMOD_RW, "Max arc size"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit, param_set_arc_long, param_get_long, ZMOD_RW, "Metadata limit for arc size"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit_percent, param_set_arc_long, param_get_long, ZMOD_RW, "Percent of arc size for arc meta limit"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_min, param_set_arc_long, param_get_long, ZMOD_RW, "Min arc metadata"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_prune, INT, ZMOD_RW, "Meta objects to scan for prune"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_adjust_restarts, INT, ZMOD_RW, "Limit number of restarts in arc_evict_meta"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_strategy, INT, ZMOD_RW, "Meta reclaim strategy"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int, param_get_int, ZMOD_RW, "Seconds before growing arc size"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, p_dampener_disable, INT, ZMOD_RW, "Disable arc_p adapt dampener"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int, param_get_int, ZMOD_RW, "log2(fraction of arc to reclaim)"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW, "Percent of pagecache to reclaim arc to"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, p_min_shift, param_set_arc_int, param_get_int, ZMOD_RW, "arc_c shift to calc min/max arc_p"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, INT, ZMOD_RD, "Target average block size"); ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW, "Disable compressed arc buffers"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int, param_get_int, ZMOD_RW, "Min life of prefetch block in ms"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms, param_set_arc_int, param_get_int, ZMOD_RW, "Min life of prescient prefetched block in ms"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, ULONG, ZMOD_RW, "Max write bytes per interval"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_boost, ULONG, ZMOD_RW, "Extra write bytes during device warmup"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom, ULONG, ZMOD_RW, "Number of max device writes to precache"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom_boost, ULONG, ZMOD_RW, "Compressed l2arc_headroom multiplier"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, trim_ahead, ULONG, ZMOD_RW, "TRIM ahead L2ARC write size multiplier"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_secs, ULONG, ZMOD_RW, "Seconds between L2ARC writing"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_min_ms, ULONG, ZMOD_RW, "Min feed interval in milliseconds"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, noprefetch, INT, ZMOD_RW, "Skip caching prefetched buffers"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW, "Turbo L2ARC warmup"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, 
INT, ZMOD_RW, "No reads during writes"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, INT, ZMOD_RW, "Percent of ARC size allowed for L2ARC-only headers"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW, "Rebuild the L2ARC when importing a pool"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_blocks_min_l2size, ULONG, ZMOD_RW, "Min size in bytes to write rebuild log blocks in L2ARC"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, mfuonly, INT, ZMOD_RW, "Cache only MFU data from ARC into L2ARC"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int, param_get_int, ZMOD_RW, "System free memory I/O throttle in bytes"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_long, param_get_long, ZMOD_RW, "System free memory target size in bytes"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_long, param_get_long, ZMOD_RW, "Minimum bytes of dnodes in arc"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent, param_set_arc_long, param_get_long, ZMOD_RW, "Percent of ARC meta buffers for dnodes"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, ULONG, ZMOD_RW, "Percentage of excess dnodes to try to unpin"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, INT, ZMOD_RW, "When full, ARC allocation waits for eviction of this % of alloc size"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, INT, ZMOD_RW, "The number of headers to evict per sublist before moving to the next"); /* END CSTYLED */
utf-8
1
CDDL-1.0
2000, 2002, 2004-2007, 2009, 2010, Oracle and/or its affiliates. 2013-2014, Lawrence Livermore National Security, LLC
lua-cqueues-20200726/src/cqueues.h
/* ========================================================================== * cqueues.h - Lua Continuation Queues * -------------------------------------------------------------------------- * Copyright (c) 2012, 2014, 2015 William Ahern * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * ========================================================================== */ #ifndef CQUEUES_H #define CQUEUES_H #include <signal.h> /* sigset_t */ #include <errno.h> /* EOVERFLOW */ #include <assert.h> /* static_assert */ #include <sys/param.h> /* __NetBSD_Version__ OpenBSD __FreeBSD__version */ #include <sys/types.h> #include <sys/socket.h> /* socketpair(2) */ #include <unistd.h> /* close(2) pipe(2) */ #include <fcntl.h> /* F_GETFL F_SETFD F_SETFL FD_CLOEXEC O_NONBLOCK O_CLOEXEC fcntl(2) */ #include <lua.h> #include <lualib.h> #include <lauxlib.h> #include "../vendor/compat53/c-api/compat-5.3.h" /* * F E A T U R E / E N V I R O N M E N T M A C R O S * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #ifndef __has_feature #define __has_feature(...) 0 #endif #ifndef __has_extension #define __has_extension(...) 
0 #endif #ifndef __NetBSD_Prereq__ #define __NetBSD_Prereq__(M, m, p) 0 #endif #define GNUC_PREREQ(M, m) (defined __GNUC__ && ((__GNUC__ > M) || (__GNUC__ == M && __GNUC_MINOR__ >= m))) #define NETBSD_PREREQ(M, m) __NetBSD_Prereq__(M, m, 0) #define FREEBSD_PREREQ(M, m) (defined __FreeBSD_version && __FreeBSD_version >= ((M) * 100000) + ((m) * 1000)) #if defined __GLIBC_PREREQ #define GLIBC_PREREQ(M, m) (defined __GLIBC__ && __GLIBC_PREREQ(M, m) && !__UCLIBC__) #else #define GLIBC_PREREQ(M, m) 0 #endif #define UCLIBC_PREREQ(M, m, p) (defined __UCLIBC__ && (__UCLIBC_MAJOR__ > M || (__UCLIBC_MAJOR__ == M && __UCLIBC_MINOR__ > m) || (__UCLIBC_MAJOR__ == M && __UCLIBC_MINOR__ == m && __UCLIBC_SUBLEVEL__ >= p))) #ifndef ENABLE_EPOLL #define ENABLE_EPOLL HAVE_EPOLL_CREATE #endif #ifndef ENABLE_PORTS #define ENABLE_PORTS HAVE_PORT_CREATE #endif #ifndef ENABLE_KQUEUE #define ENABLE_KQUEUE HAVE_KQUEUE #endif #if __GNUC__ #define NOTUSED __attribute__((unused)) #define EXTENSION __extension__ #else #define NOTUSED #define EXTENSION #endif #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) || __GNUC__ > 4 || __clang__ #define NOTREACHED __builtin_unreachable() #else #define NOTREACHED (void)0 #endif /* * C L A S S I N T E R F A C E S / R O U T I N E S * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define cqs_index_t int /* for documentation purposes */ #define cqs_nargs_t int /* "" */ #define cqs_error_t int /* "" */ #define cqs_status_t int /* "" */ #define CQS_CQUEUE "Continuation Queue" #define CQS_SOCKET "CQS Socket" #define CQS_SIGNAL "CQS Signal" #define CQS_THREAD "CQS Thread" #define CQS_NOTIFY "CQS Notify" #define CQS_CONDITION "CQS Condition" #ifndef CQS_USE_47BIT_LIGHTUSERDATA_HACK /* LuaJIT only supports pointers with the low 47 bits set */ #if defined(LUA_JITLIBNAME) && (defined(_LP64) || defined(_LLP64) || defined(__arch64__) || defined (__arm64__) || defined (__aarch64__) || defined(_WIN64)) #define CQS_USE_47BIT_LIGHTUSERDATA_HACK 1 #else #define CQS_USE_47BIT_LIGHTUSERDATA_HACK 0 #endif #endif #if CQS_USE_47BIT_LIGHTUSERDATA_HACK #define CQS_UNIQUE_LIGHTUSERDATA_MASK(p) ((void *)((intptr_t)(p) & ((1UL<<47)-1))) #else #define CQS_UNIQUE_LIGHTUSERDATA_MASK(p) ((void *)(p)) #endif #define CQUEUE__POLL CQS_UNIQUE_LIGHTUSERDATA_MASK(&cqueue__poll) extern const char *cqueue__poll; // signals multilevel yield cqs_nargs_t luaopen__cqueues(lua_State *); cqs_nargs_t luaopen__cqueues_errno(lua_State *); cqs_nargs_t luaopen__cqueues_socket(lua_State *); cqs_nargs_t luaopen__cqueues_signal(lua_State *); cqs_nargs_t luaopen__cqueues_thread(lua_State *); cqs_nargs_t luaopen__cqueues_notify(lua_State *); cqs_nargs_t luaopen__cqueues_condition(lua_State *); cqs_nargs_t luaopen__cqueues_dns_record(lua_State *); cqs_nargs_t luaopen__cqueues_dns_packet(lua_State *); cqs_nargs_t luaopen__cqueues_dns_config(lua_State *); cqs_nargs_t luaopen__cqueues_dns_hosts(lua_State *); cqs_nargs_t luaopen__cqueues_dns_hints(lua_State *); cqs_nargs_t luaopen__cqueues_dns_resolver(lua_State *); cqs_nargs_t luaopen__cqueues_dns(lua_State *); void cqs_cancelfd(lua_State *, int); struct so_options; cqs_error_t cqs_socket_fdopen(lua_State *, int, const struct so_options *); int cqs_socket_pollfd(lua_State *, int); int cqs_socket_events(lua_State *, int); double cqs_socket_timeout(lua_State *, int); static void cqs_requiref(lua_State *L, const char *modname, lua_CFunction openf, int glb) { luaL_getsubtable(L, LUA_REGISTRYINDEX, "_LOADED"); lua_getfield(L, -1, modname); lua_remove(L, -2); if (lua_isnil(L, 
-1)) { lua_pop(L, 1); luaL_requiref(L, modname, openf, glb); } } /* cqs_requiref() */ static void cqs_openlibs(lua_State *L) { int top = lua_gettop(L); cqs_requiref(L, "_cqueues", &luaopen__cqueues, 0); cqs_requiref(L, "_cqueues.errno", &luaopen__cqueues_errno, 0); cqs_requiref(L, "_cqueues.socket", &luaopen__cqueues_socket, 0); cqs_requiref(L, "_cqueues.signal", &luaopen__cqueues_signal, 0); cqs_requiref(L, "_cqueues.thread", &luaopen__cqueues_thread, 0); cqs_requiref(L, "_cqueues.notify", &luaopen__cqueues_notify, 0); #if 0 /* Make optional? */ cqs_requiref(L, "_cqueues.condition", &luaopen__cqueues_condition, 0); cqs_requiref(L, "_cqueues.dns.record", &luaopen__cqueues_dns_record, 0); cqs_requiref(L, "_cqueues.dns.packet", &luaopen__cqueues_dns_packet, 0); cqs_requiref(L, "_cqueues.dns.config", &luaopen__cqueues_dns_config, 0); cqs_requiref(L, "_cqueues.dns.hosts", &luaopen__cqueues_dns_hosts, 0); cqs_requiref(L, "_cqueues.dns.hints", &luaopen__cqueues_dns_hints, 0); cqs_requiref(L, "_cqueues.dns.resolver", &luaopen__cqueues_dns_resolver, 0); cqs_requiref(L, "_cqueues.dns", &luaopen__cqueues_dns, 0); #endif lua_settop(L, top); } /* cqs_openlibs() */ static inline int cqs_interpose(lua_State *L, const char *mt) { lua_settop(L, 2); luaL_getmetatable(L, mt); lua_getfield(L, -1, "__index"); lua_pushvalue(L, 1); /* push method name */ lua_gettable(L, -2); /* push old method */ lua_pushvalue(L, 1); /* push method name */ lua_pushvalue(L, 2); /* push new method */ lua_settable(L, -4); /* replace old method */ return 1; /* return old method */ } /* cqs_interpose() */ static inline void cqs_pushnils(lua_State *L, int n) { int i; luaL_checkstack(L, n, "too many arguments"); for (i = 0; i < n; i++) lua_pushnil(L); } /* cqs_pushnils() */ static inline int cqs_regcount(const luaL_Reg *l) { int i; for (i = 0; l[i].func; i++) ;; return i; } /* cqs_regcount() */ /* create new metatable, capturing upvalues for use by methods and metamethods */ static inline void cqs_newmetatable(lua_State *L, const char *name, const luaL_Reg *methods, const luaL_Reg *metamethods, int nup) { int i; luaL_newmetatable(L, name); for (i = 0; i < nup; i++) /* copy upvalues */ lua_pushvalue(L, -nup - 1); luaL_setfuncs(L, metamethods, nup); lua_createtable(L, 0, cqs_regcount(methods)); for (i = 0; i < nup; i++) /* copy upvalues */ lua_pushvalue(L, -nup - 2); luaL_setfuncs(L, methods, nup); lua_setfield(L, -2, "__index"); for (i = 0; i < nup; i++) /* remove the upvalues */ lua_remove(L, -2); } /* cqs_newmetatable() */ /* * set the n-th upvalue of every lua_CFunction in the table at tindex to the * value at the top of the stack */ static inline void cqs_setfuncsupvalue(lua_State *L, int tindex, int n) { tindex = lua_absindex(L, tindex); lua_pushnil(L); while (lua_next(L, tindex)) { if (lua_iscfunction(L, -1)) { lua_pushvalue(L, -3); lua_setupvalue(L, -2, n); } lua_pop(L, 1); /* pop field value (leaving key) */ } lua_pop(L, 1); /* pop upvalue */ } /* cqs_setfuncsupvalue() */ static inline void cqs_setmetaupvalue(lua_State *L, int tindex, int n) { tindex = lua_absindex(L, tindex); lua_pushvalue(L, -1); cqs_setfuncsupvalue(L, tindex, n); lua_getfield(L, tindex, "__index"); lua_pushvalue(L, -2); cqs_setfuncsupvalue(L, -2, n); lua_pop(L, 1); /* pop __index */ lua_pop(L, 1); /* pop upvalue */ } /* cqs_setmetaupvalue() */ /* test metatable against copy at upvalue */ static inline void *cqs_testudata(lua_State *L, int index, int upvalue) { void *ud = lua_touserdata(L, index); int eq; if (!ud || !lua_getmetatable(L, index)) return NULL; 
eq = lua_rawequal(L, -1, lua_upvalueindex(upvalue)); lua_pop(L, 1); return (eq)? ud : NULL; } /* cqs_testudata() */ static inline void *cqs_checkudata(lua_State *L, int index, int upvalue, const char *tname) { void *ud; if (!(ud = cqs_testudata(L, index, upvalue))) { index = lua_absindex(L, index); luaL_argerror(L, index, lua_pushfstring(L, "%s expected, got %s", tname, luaL_typename(L, index))); NOTREACHED; } return ud; } /* cqs_checkudata() */ struct cqs_macro { const char *name; int value; }; static inline void cqs_setmacros(lua_State *L, int index, const struct cqs_macro *macro, size_t count, _Bool swap) { index = lua_absindex(L, index); for (unsigned i = 0; i < count; i++) { lua_pushstring(L, macro[i].name); lua_pushinteger(L, macro[i].value); lua_rawset(L, index); } if (!swap) return; for (unsigned i = 0; i < count; i++) { lua_pushinteger(L, macro[i].value); lua_pushstring(L, macro[i].name); lua_rawset(L, index); } } /* cqs_setmacros() */ #if LUA_VERSION_NUM < 503 /* convert value at index to proxytable with value at t[2] */ static inline void cqs__toproxytable(lua_State *L, int index) { index = lua_absindex(L, index); lua_createtable(L, 2, 0); lua_pushlightuserdata(L, (void *)lua_topointer(L, -1)); lua_rawseti(L, -2, 1); /* set t[1] == pointer-to-t */ lua_pushvalue(L, index); lua_rawseti(L, -2, 2); /* set t[2] == value */ lua_replace(L, index); } /* cqs__toproxytable() */ /* check whether value at index is a proxytable */ static inline _Bool cqs__isproxytable(lua_State *L, int index) { const void *tp, *t1p; if (!lua_istable(L, index)) return 0; tp = lua_topointer(L, index); lua_rawgeti(L, index, 1); t1p = lua_topointer(L, -1); lua_pop(L, 1); return tp && tp == t1p; } /* cqs__isproxytable() */ #endif static inline void cqs_setuservalue(lua_State *L, int index) { #if LUA_VERSION_NUM >= 503 lua_setuservalue(L, index); #elif LUA_VERSION_NUM == 502 if (!lua_istable(L, -1) && !lua_isnil(L, -1)) cqs__toproxytable(L, -1); lua_setuservalue(L, index); #else if (!lua_istable(L, -1)) cqs__toproxytable(L, -1); lua_setfenv(L, index); #endif } /* cqs_setuservalue() */ static inline int cqs_getuservalue(lua_State *L, int index) { #if LUA_VERSION_NUM >= 503 return lua_getuservalue(L, index); #else lua_getuservalue(L, index); if (cqs__isproxytable(L, -1)) { lua_rawgeti(L, -1, 2); lua_replace(L, -2); } return lua_type(L, -1); #endif } /* cqs_getuservalue() */ static inline void cqs_closefd(int *fd) { if (*fd != -1) { #if __APPLE__ /* Do we need to bother with close$NOCANCEL$UNIX2003?
*/ extern int close$NOCANCEL(int); close$NOCANCEL(*fd); #else close(*fd); #endif *fd = -1; } } /* cqs_closefd() */ #if !defined O_CLOEXEC #if __NetBSD__ /* bad hack for NetBSD < 6.0 until we refactor flags code */ #define O_CLOEXEC 0x00400000 #endif #endif static inline int cqs_setfd(int fd, int flags) { if (flags & O_NONBLOCK) { int oflags = fcntl(fd, F_GETFL); if (-1 == oflags || -1 == fcntl(fd, F_SETFL, oflags|O_NONBLOCK)) return errno; } if (flags & O_CLOEXEC) { if (-1 == fcntl(fd, F_SETFD, FD_CLOEXEC)) return errno; } return 0; } /* cqs_setfd() */ static inline int cqs_pipe(int fd[2], int flags) { #if HAVE_PIPE2 if (0 != pipe2(fd, flags)) return errno; return 0; #else int error; if (0 != pipe(fd)) return errno; if ((error = cqs_setfd(fd[0], flags)) || (error = cqs_setfd(fd[1], flags))) return error; return 0; #endif } /* cqs_pipe() */ static inline int cqs_socketpair(int family, int type, int proto, int fd[2], int flags) { #if defined SOCK_NONBLOCK && defined SOCK_CLOEXEC if (flags & O_NONBLOCK) type |= SOCK_NONBLOCK; if (flags & O_CLOEXEC) type |= SOCK_CLOEXEC; if (0 != socketpair(family, type, proto, fd)) return errno; return 0; #else int error; if (0 != socketpair(family, type, proto, fd)) return errno; if ((error = cqs_setfd(fd[0], flags)) || (error = cqs_setfd(fd[1], flags))) return error; return 0; #endif } /* cqs_socketpair() */ #if HAVE_STATIC_ASSERT #define cqs_static_assert(cond, msg) static_assert(cond, msg) #elif HAVE__STATIC_ASSERT #define cqs_static_assert(cond, msg) EXTENSION _Static_assert(cond, msg) #else #define cqs_inline_assert(cond) (sizeof (int[1 - 2*!(cond)])) #define cqs_static_assert(cond, msg) extern char CQS_XPASTE(assert_, __LINE__)[cqs_inline_assert(cond)] #endif cqs_error_t cqs_strerror_r(cqs_error_t, char *, size_t); /* * NB: Compound literals have block scope in C. But g++ creates * list-initialized temporaries, which only have expression scope. */ #if !__cplusplus #define cqs_strerror(...) cqs_strerror_(__VA_ARGS__, (char [128]){ 0 }, 128, 0) #define cqs_strerror_(error, dst, lim, ...) (cqs_strerror)((error), (dst), (lim)) #endif const char *(cqs_strerror)(cqs_error_t, void *, size_t); cqs_error_t cqs_sigmask(int, const sigset_t *, sigset_t *); /* * A U X I L L A R Y R O U T I N E S * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #ifndef MIN #define MIN(a, b) (((a) < (b))? (a) : (b)) #endif #ifndef MAX #define MAX(a, b) (((a) > (b))? (a) : (b)) #endif #ifndef countof #define countof(a) (sizeof (a) / sizeof *(a)) #endif #ifndef endof #define endof(a) (&(a)[countof(a)]) #endif #define cqs_ispowerof2(x) (((x) != 0) && (0 == (((x) - 1) & (x)))) #define CQS_PASTE(x, y) x ## y #define CQS_XPASTE(x, y) CQS_PASTE(x, y) typedef int cqs_ref_t; static inline void cqs_unref(lua_State *L, cqs_ref_t *ref) { if (*ref != LUA_NOREF) { luaL_unref(L, LUA_REGISTRYINDEX, *ref); *ref = LUA_NOREF; } } /* cqs_unref() */ static inline void cqs_ref(lua_State *L, cqs_ref_t *ref) { cqs_unref(L, ref); *ref = luaL_ref(L, LUA_REGISTRYINDEX); } /* cqs_ref() */ static inline void cqs_getref(lua_State *L, cqs_ref_t ref) { if (ref != LUA_NOREF) lua_rawgeti(L, LUA_REGISTRYINDEX, ref); else lua_pushnil(L); } /* cqs_getref() */ static inline cqs_error_t cqs_addzu(size_t *r, size_t a, size_t b) { if (~a < b) return EOVERFLOW; *r = a + b; return 0; } /* cqs_addzu() */ /* * D E B U G M A C R O S * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #if !defined SAY #define SAY_(file, func, line, fmt, ...) 
\ fprintf(stderr, "%s:%d: " fmt "%s", __func__, __LINE__, __VA_ARGS__) #define SAY(...) SAY_(__FILE__, __func__, __LINE__, __VA_ARGS__, "\n") #define HAI SAY("hai") #endif #include <string.h> #include <sys/stat.h> #include <sys/ioctl.h> #if __sun #include <sys/filio.h> #include <stropts.h> #endif NOTUSED static void cqs_debugfd(int fd) { struct stat st; char descr[64] = ""; int pending = -1; if (0 != fstat(fd, &st)) goto syerr; if (S_ISSOCK(st.st_mode)) { int type; if (0 != getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &(socklen_t){ sizeof type })) goto syerr; if (type == SOCK_STREAM) strncat(descr, "stream socket", sizeof descr - 1); else if (type == SOCK_DGRAM) strncat(descr, "dgram socket", sizeof descr - 1); else strncat(descr, "other socket", sizeof descr - 1); } else { if (S_ISFIFO(st.st_mode)) strncat(descr, "fifo file", sizeof descr - 1); else if (S_ISREG(st.st_mode)) strncat(descr, "regular file", sizeof descr - 1); else strncat(descr, "other file", sizeof descr - 1); } ioctl(fd, FIONREAD, &pending); SAY("%d: %s (pending:%d)", fd, descr, pending); return; syerr: SAY("%d: %s", fd, strerror(errno)); } /* cqs_debugfd() */ #endif /* CQUEUES_H */
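/*
 * Illustrative sketch, not part of the original header: cqs_addzu() above
 * rejects size_t overflow with the test (~a < b). For unsigned a we have
 * ~a == SIZE_MAX - a, so ~a < b holds exactly when a + b would exceed
 * SIZE_MAX. A minimal self-test, guarded by #if 0 so it stays inert:
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void) {
	size_t r;
	assert(0 == cqs_addzu(&r, 1, 2) && r == 3);
	assert(EOVERFLOW == cqs_addzu(&r, SIZE_MAX, 1)); /* would wrap */
	assert(0 == cqs_addzu(&r, SIZE_MAX - 1, 1) && r == SIZE_MAX);
	return 0;
}
#endif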
utf-8
1
Expat
2012-2015 William Ahern
uhexen2-1.5.9+dfsg/engine/hexenworld/shared/net_chan.c
/* * net_chan.c -- net channel * $Id: net_chan.c 5404 2015-04-01 09:47:28Z sezero $ * * Copyright (C) 1996-1997 Id Software, Inc. * Copyright (C) 1997-1998 Raven Software Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "quakedef.h" #define PACKET_HEADER 8 /* packet header ------------- 31 sequence 1 does this message contain a reliable payload 31 acknowledge sequence 1 acknowledge receipt of even/odd message The remote connection never knows if it missed a reliable message, the local side detects that it has been dropped by seeing a sequence acknowledge higher than the last reliable sequence, but without the correct even/odd bit for the reliable set. If the sender notices that a reliable message has been dropped, it will be retransmitted. It will not be retransmitted again until a message after the retransmit has been acknowledged and the reliable still failed to get there. If the sequence number is -1, the packet should be handled without a netcon. The reliable message can be added to at any time by doing MSG_Write* (&netchan->message, <data>). If the message buffer is overflowed, either by a single message, or by multiple frames worth piling up while the last reliable transmit goes unacknowledged, the netchan signals a fatal error. Reliable messages are always placed first in a packet, then the unreliable message is included if there is sufficient room. To the receiver, there is no distinction between the reliable and unreliable parts of the message; they are just processed out as a single larger message. Illogical packet sequence numbers cause the packet to be dropped, but do not kill the connection. This, combined with the tight window of valid reliable acknowledgement numbers, provides protection against malicious address spoofing.
*/ int net_drop; static cvar_t showpackets = {"showpackets", "0", CVAR_NONE}; static cvar_t showdrop = {"showdrop", "0", CVAR_NONE}; #ifdef SERVERONLY #define NOT_DEMOPLAYBACK true #else #define NOT_DEMOPLAYBACK (!cls.demoplayback) #endif /* =============== Netchan_Init =============== */ void Netchan_Init (void) { Cvar_RegisterVariable (&showpackets); Cvar_RegisterVariable (&showdrop); } /* =============== Netchan_OutOfBand Sends an out-of-band datagram ================ */ void Netchan_OutOfBand (const netadr_t *adr, int length, byte *data) { sizebuf_t senddata; byte send_buf[MAX_MSGLEN + PACKET_HEADER]; // write the packet header SZ_Init (&senddata, send_buf, sizeof(send_buf)); MSG_WriteLong (&senddata, -1); // -1 sequence means out of band SZ_Write (&senddata, data, length); // send the datagram if (NOT_DEMOPLAYBACK) // zoid, no input in demo playback mode NET_SendPacket (senddata.cursize, senddata.data, adr); } /* =============== Netchan_OutOfBandPrint Sends a text message in an out-of-band datagram ================ */ void Netchan_OutOfBandPrint (const netadr_t *adr, const char *format, ...) { va_list argptr; static char string[8192]; va_start (argptr, format); q_vsnprintf (string, sizeof (string), format, argptr); va_end (argptr); Netchan_OutOfBand (adr, strlen(string), (byte *)string); } /* ============== Netchan_Setup called to open a channel to a remote system ============== */ void Netchan_Setup (netchan_t *chan, const netadr_t *adr) { memset (chan, 0, sizeof(*chan)); chan->remote_address = *adr; chan->last_received = realtime; SZ_Init (&chan->message, chan->message_buf, sizeof(chan->message_buf)); chan->message.allowoverflow = true; chan->rate = 1.0/2500; } /* =============== Netchan_CanPacket Returns true if the bandwidth choke isn't active ================ */ #define MAX_BACKUP 200 qboolean Netchan_CanPacket (const netchan_t *chan) { if (chan->cleartime < realtime + MAX_BACKUP*chan->rate) return true; return false; } /* =============== Netchan_CanReliable Returns true if the bandwidth choke isn't active ================ */ qboolean Netchan_CanReliable (const netchan_t *chan) { if (chan->reliable_length) return false; // waiting for ack return Netchan_CanPacket (chan); } /* =============== Netchan_Transmit tries to send an unreliable message to a connection, and handles the transmission / retransmission of the reliable messages. A 0 length will still generate a packet and deal with the reliable messages.
================ */ void Netchan_Transmit (netchan_t *chan, int length, byte *data) { sizebuf_t senddata; byte send_buf[MAX_MSGLEN + PACKET_HEADER]; qboolean send_reliable; unsigned int w1, w2; int i; // check for message overflow if (chan->message.overflowed) { chan->fatal_error = true; Con_Printf ("%s:Outgoing message overflow\n", NET_AdrToString (&chan->remote_address)); return; } // if the remote side dropped the last reliable message, resend it send_reliable = false; if (chan->incoming_acknowledged > chan->last_reliable_sequence && chan->incoming_reliable_acknowledged != chan->reliable_sequence) send_reliable = true; // if the reliable transmit buffer is empty, copy the current message out if (!chan->reliable_length && chan->message.cursize) { memcpy (chan->reliable_buf, chan->message_buf, chan->message.cursize); chan->reliable_length = chan->message.cursize; chan->message.cursize = 0; chan->reliable_sequence ^= 1; send_reliable = true; } // write the packet header SZ_Init (&senddata, send_buf, sizeof(send_buf)); w1 = chan->outgoing_sequence | (send_reliable<<31); w2 = chan->incoming_sequence | (chan->incoming_reliable_sequence<<31); chan->outgoing_sequence++; MSG_WriteLong (&senddata, w1); MSG_WriteLong (&senddata, w2); // copy the reliable message to the packet first if (send_reliable) { SZ_Write (&senddata, chan->reliable_buf, chan->reliable_length); chan->last_reliable_sequence = chan->outgoing_sequence; } // add the unreliable part if space is available if (senddata.maxsize - senddata.cursize >= length) SZ_Write (&senddata, data, length); // send the datagram i = chan->outgoing_sequence & (MAX_LATENT-1); chan->outgoing_size[i] = senddata.cursize; chan->outgoing_time[i] = realtime; if (NOT_DEMOPLAYBACK) // zoid, no input in demo playback mode NET_SendPacket (senddata.cursize, senddata.data, &chan->remote_address); if (chan->cleartime < realtime) chan->cleartime = realtime + senddata.cursize*chan->rate; else chan->cleartime += senddata.cursize*chan->rate; if (showpackets.integer) { Con_Printf ("--> s=%i(%i) a=%i(%i) %i\n", chan->outgoing_sequence, send_reliable, chan->incoming_sequence, chan->incoming_reliable_sequence, senddata.cursize); } } /* ================= Netchan_Process called when the current net_message is from remote_address modifies net_message so that it points to the packet payload ================= */ qboolean Netchan_Process (netchan_t *chan) { unsigned int sequence, sequence_ack; int reliable_ack, reliable_message; if (NOT_DEMOPLAYBACK && !NET_CompareAdr (&net_from, &chan->remote_address)) { return false; } // get sequence numbers MSG_BeginReading (); sequence = MSG_ReadLong (); sequence_ack = MSG_ReadLong (); reliable_message = (int)(sequence >> 31); reliable_ack = (int)(sequence_ack >> 31); sequence &= ~(1<<31); sequence_ack &= ~(1<<31); if (showpackets.integer) { Con_Printf ("<-- s=%u(%i) a=%u(%i) %i\n", sequence, reliable_message, sequence_ack, reliable_ack, net_message.cursize); } // get a rate estimation #if 0 if (chan->outgoing_sequence - sequence_ack < MAX_LATENT) { int i; double time, rate; i = sequence_ack & (MAX_LATENT - 1); time = realtime - chan->outgoing_time[i]; time -= 0.1; // subtract 100 ms if (time <= 0) { // gotta be a digital link for <100 ms ping if (chan->rate > 1.0/5000) chan->rate = 1.0/5000; } else { if (chan->outgoing_size[i] < 512) { // only deal with small messages rate = chan->outgoing_size[i]/time; if (rate > 5000) rate = 5000; rate = 1.0/rate; if (chan->rate > rate) chan->rate = rate; } } } #endif // // discard stale or duplicated 
packets // if (sequence <= (unsigned int) chan->incoming_sequence) { if (showdrop.integer) { Con_Printf ("%s:Out of order packet %u at %i\n", NET_AdrToString (&chan->remote_address), sequence, chan->incoming_sequence); } return false; } // // dropped packets don't keep the message from being used // net_drop = sequence - (chan->incoming_sequence+1); if (net_drop > 0) { chan->drop_count += 1; if (showdrop.integer) { Con_Printf ("%s:Dropped %u packets at %u\n", NET_AdrToString (&chan->remote_address), sequence - (unsigned int)(chan->incoming_sequence + 1), sequence); } } // // if the current outgoing reliable message has been acknowledged // clear the buffer to make way for the next // if (reliable_ack == chan->reliable_sequence) chan->reliable_length = 0; // it has been received // // if this message contains a reliable message, bump incoming_reliable_sequence // chan->incoming_sequence = sequence; chan->incoming_acknowledged = sequence_ack; chan->incoming_reliable_acknowledged = reliable_ack; if (reliable_message) chan->incoming_reliable_sequence ^= 1; // // the message can now be read from the current message pointer // update statistics counters // chan->frame_latency = chan->frame_latency*OLD_AVG + (chan->outgoing_sequence-sequence_ack)*(1.0-OLD_AVG); chan->frame_rate = chan->frame_rate*OLD_AVG + (realtime-chan->last_received)*(1.0-OLD_AVG); chan->good_count += 1; chan->last_received = realtime; return true; }
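/*
 * Illustrative sketch, not from the original source: Netchan_Transmit and
 * Netchan_Process above pack a 31-bit sequence number and the 1-bit
 * reliable flag into one 32-bit header word, exactly as the comment at the
 * top of this file describes. A standalone pack/unpack pair (the helper
 * names are invented for this demo), guarded by #if 0:
 */
#if 0
static unsigned int Netchan_PackSeq (unsigned int sequence, int reliable)
{
	return (sequence & ~(1u << 31)) | ((unsigned int)(reliable != 0) << 31);
}

static void Netchan_UnpackSeq (unsigned int w, unsigned int *sequence, int *reliable)
{
	*reliable = (int)(w >> 31);	/* top bit: reliable payload flag */
	*sequence = w & ~(1u << 31);	/* low 31 bits: sequence number */
}
#endif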
utf-8
1
GPL-2.0+
<2000-2018> Ozkan Sezer <sezeroz@gmail.com> <1997-1998> Raven Software Corp. <1996-1997> Id Software, Inc.
ycmd-0+20201028+git1d415c5+ds/cpp/ycm/ClangCompleter/CompilationDatabase.h
// Copyright (C) 2011-2018 ycmd contributors // // This file is part of ycmd. // // ycmd is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // ycmd is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with ycmd. If not, see <http://www.gnu.org/licenses/>. #ifndef COMPILATIONDATABASE_H_ZT7MQXPG #define COMPILATIONDATABASE_H_ZT7MQXPG #include <clang-c/CXCompilationDatabase.h> #include <mutex> #include <pybind11/pybind11.h> #include <string> #include <vector> namespace YouCompleteMe { struct CompilationInfoForFile { std::vector< std::string > compiler_flags_; std::string compiler_working_dir_; }; // Access to Clang's internal CompilationDatabase. This class is thread-safe. class CompilationDatabase { public: // |path_to_directory| should be a string-like object. explicit CompilationDatabase( pybind11::object path_to_directory ); CompilationDatabase( const CompilationDatabase& ) = delete; CompilationDatabase& operator=( const CompilationDatabase& ) = delete; ~CompilationDatabase(); bool DatabaseSuccessfullyLoaded(); // Returns true when a separate thread is already getting flags; this is // useful so that the caller doesn't need to block. bool AlreadyGettingFlags(); // NOTE: Multiple calls to this function from separate threads will be // serialized since Clang internals are not thread-safe. // |path_to_file| should be a string-like object. CompilationInfoForFile GetCompilationInfoForFile( pybind11::object path_to_file ); std::string GetDatabaseDirectory() { return path_to_directory_; } private: bool is_loaded_; std::string path_to_directory_; CXCompilationDatabase compilation_database_; std::mutex compilation_database_mutex_; }; } // namespace YouCompleteMe #endif /* end of include guard: COMPILATIONDATABASE_H_ZT7MQXPG */
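// Illustrative sketch, not part of ycmd: the raw libclang C API that this
// class wraps, shown end to end. The build directory and file paths are
// hypothetical; error handling is minimal. Guarded by #if 0 so the header
// itself is unaffected.
#if 0
#include <clang-c/CXCompilationDatabase.h>
#include <stdio.h>

int main( void ) {
  CXCompilationDatabase_Error status;
  CXCompilationDatabase db =
    clang_CompilationDatabase_fromDirectory( "/path/to/build", &status );
  if ( status != CXCompilationDatabase_NoError )
    return 1;
  CXCompileCommands commands =
    clang_CompilationDatabase_getCompileCommands( db, "/path/to/foo.cc" );
  for ( unsigned i = 0; i < clang_CompileCommands_getSize( commands ); ++i ) {
    CXCompileCommand command = clang_CompileCommands_getCommand( commands, i );
    for ( unsigned j = 0; j < clang_CompileCommand_getNumArgs( command ); ++j ) {
      CXString arg = clang_CompileCommand_getArg( command, j );
      printf( "%s ", clang_getCString( arg ) );
      clang_disposeString( arg );
    }
    printf( "\n" );
  }
  clang_CompileCommands_dispose( commands );
  clang_CompilationDatabase_dispose( db );
  return 0;
}
#endif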
utf-8
1
GPL-3+
2011-2015 Google Inc., 2011-2020 ycmd contributors
rcpp-1.0.8/inst/examples/SugarPerformance/Timer.h
// -*- mode: C++; c-indent-level: 4; c-basic-offset: 4; tab-width: 8 -*- // // Timer.h: Rcpp R/C++ interface class library -- simple timer class // // Copyright (C) 2010 Dirk Eddelbuettel and Romain Francois // // This file is part of Rcpp. // // Rcpp is free software: you can redistribute it and/or modify it // under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // Rcpp is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Rcpp. If not, see <http://www.gnu.org/licenses/>. // Simple timer class based on ideas in atimer.h / atimer.cxx found a few years ago at // http://www.cs.uiowa.edu/~sriram/30/fall03/ // and attributed to Amir Elaguizy while under GPL // but converted to using gettimeofday/GetSystemTime instead #ifndef TIMER_H #define TIMER_H class Timer { public: Timer() : sys_time("Sys.time") { Reset(); } void Start() { start_t = getFractionalSeconds() ; } void Stop() { end_t = getFractionalSeconds(); elapsed = end_t - start_t; // Calculate elapsed time in seconds cumul += elapsed; } void Reset() { end_t = start_t = elapsed = cumul = 0.0; } double ElapsedTime() { return elapsed; } double CumulativeTime() { return cumul; } private: Function sys_time ; double start_t, end_t, elapsed, cumul; double getFractionalSeconds(void) { return as<double>( sys_time() ) ; } }; #endif
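A usage sketch (hypothetical, not part of the package; assumes Timer.h and Rcpp are on the include path, so Function and as<double>() resolve) showing how the class times an Rcpp sugar expression:

#include <Rcpp.h>
using namespace Rcpp;
#include "Timer.h"   // the class above

// [[Rcpp::export]]
double time_sum(NumericVector x, int reps) {
    Timer timer;
    timer.Start();
    double checksum = 0.0;
    for (int r = 0; r < reps; ++r) {
        double part = sum(x);    // the sugar expression being timed
        checksum += part;
    }
    timer.Stop();
    Rprintf("elapsed: %f sec (checksum %f)\n", timer.ElapsedTime(), checksum);
    return timer.CumulativeTime();
}

Note that the clock is R's own Sys.time(), so each Start()/Stop() pair includes the overhead of one R call.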
utf-8
1
unknown
unknown
libzypp-17.25.7/zypp/Bit.h
/*---------------------------------------------------------------------\ | ____ _ __ __ ___ | | |__ / \ / / . \ . \ | | / / \ V /| _/ _/ | | / /__ | | | | | | | | /_____||_| |_| |_| | | | \---------------------------------------------------------------------*/ /** \file zypp/Bit.h * */ #ifndef ZYPP_BIT_H #define ZYPP_BIT_H #include <iosfwd> #include <string> /////////////////////////////////////////////////////////////////// namespace zypp { ///////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////// /** * \todo Use boost::mpl library to assert constraints * at compiletime! There are various constraints, like (TInt is an integral type), * (begin+size < maxbits) or ( field dependent * constants must be within the range defined by size ). */ namespace bit { ///////////////////////////////////////////////////////////////// namespace bit_detail { /** Generate constants with \a _size trailing '1'-bits */ template<class TInt, unsigned _size> struct Gen1Bits { static const TInt value = (Gen1Bits<TInt,_size-1>::value << 1)+1; }; /** Specialization for \a _size 0 */ template<class TInt> struct Gen1Bits<TInt, 0> { static const TInt value = 0; }; } /** Number of bits available in \a TInt. */ template<class TInt> struct MaxBits { typedef TInt IntT; static const unsigned value = (sizeof(IntT)*8); }; /** For printing bits. */ template<class TInt> inline std::string asString( TInt val, char zero = '0', char one = '1' ) { std::string s( MaxBits<TInt>::value, zero ); for( unsigned i = MaxBits<TInt>::value; i; ) { --i; if ( val & (TInt)1 ) s[i] = one; val = val >> 1; }; return s; } /** A bitmask of \a _size 1-bits starting at bit \a _begin. */ template<class TInt, unsigned _begin, unsigned _size> struct Mask { typedef TInt IntT; static const IntT value = bit_detail::Gen1Bits<IntT,_size>::value << _begin; static const IntT inverted = ~value; }; /** Range of bits starting at bit \a _begin with length \a _size. */ template<class TInt, unsigned _begin, unsigned _size> struct Range { typedef TInt IntT; typedef zypp::bit::MaxBits<IntT> MaxBits; typedef zypp::bit::Mask<IntT,_begin,_size> Mask; static const unsigned begin = _begin; static const unsigned size = _size; static const unsigned end = _begin + _size; }; /** Range specialisation for (illegal) zero \a _size. * Forces an error at compiletime, currently because types * and values are undefined */ template<class TInt, unsigned _begin> struct Range<TInt, _begin, 0> {}; /** A value within a Range. * \code * typedef Range<char,2,3> SubField; // bits 2,3,4 in a char field * SubField::Mask::value; // 00011100 * RangeValue<SubField,0>::value; // 00000000 * RangeValue<SubField,1>::value; // 00000100 * RangeValue<SubField,2>::value; // 00001000 * RangeValue<SubField,3>::value; // 00001100 * \endcode */ template<class TRange, typename TRange::IntT _value> struct RangeValue { typedef TRange RangeT; typedef typename TRange::IntT IntT; static const IntT value = _value << RangeT::begin; }; /** A single 1-bit within a Range. 
* \code * typedef Range<char,2,3> SubField; // bits 2,3,4 in a char field * SubField::Mask::value; // 00011100 * RangeBit<SubField,0>::value; // 00000100 * RangeBit<SubField,1>::value; // 00001000 * RangeBit<SubField,2>::value; // 00010000 * \endcode */ template<class TRange, unsigned _pos> struct RangeBit { typedef TRange RangeT; typedef typename TRange::IntT IntT; static const IntT value = IntT(1) << (RangeT::begin + _pos); }; /////////////////////////////////////////////////////////////////// // // CLASS NAME : BitField // /** An integral type used as BitField. * * Most methods exist as templated and nontemplated * versions. The nontemplated ones operate on the complete * BitField, while the templated ones are restricted * to the given Range. * \code * BitField<char> bf; // 00000000 * typedef Range<char,2,3> SubField; // bits 2,3,4 in a char field * * bf.assign<SubField>( -1 ); // assign the SubField bits of -1 * // to SubField in bf. * // 00011100 * bf.assign( -1 ); // assign -1 to bf * // 11111111 * bf.assign<SubField>( 0 ); // 11100011 * \endcode */ template<class TInt> class BitField : public Range<TInt, 0, MaxBits<TInt>::value> { public: /** Default ctor: zero. */ BitField() : _value( (TInt)0 ) {} /** Ctor taking an \a TInt. */ BitField( const TInt & value_r ) : _value( value_r ) {} public: /** Validate in a boolean context. */ explicit operator bool() const { return _value != (TInt)0; } public: /** Return the value. */ template<class TRange> TInt value() const { return _value & TRange::Mask::value; } TInt value() const { return _value; } /** Value as bit string. */ template<class TRange> std::string asString() const { return bit::asString( _value & TRange::Mask::value, '_' ); } std::string asString() const { return bit::asString( _value, '_' ); } /** Assign Range in \a rhs to \c this. */ template<class TRange> BitField & assign( TInt rhs ) { _value = (_value & TRange::Mask::inverted) | (rhs & TRange::Mask::value); return *this; } BitField & assign( TInt rhs ) { _value = rhs; return *this; } /** Test for equal value within a Range. */ template<class TRange> bool isEqual( TInt rhs ) const { return (_value & TRange::Mask::value) == (rhs & TRange::Mask::value); } bool isEqual( TInt rhs ) const { return _value == rhs; } public: /** Set or unset bits of \a rhs. */ template<class TRange> BitField & set( TInt rhs, bool doset_r ) { return set( (rhs & TRange::Mask::value), doset_r ); } BitField & set( TInt rhs, bool doset_r ) { return doset_r ? set( rhs ) : unset( rhs ); } /** Set bits of \a rhs. */ template<class TRange> BitField & set( TInt rhs ) { return set( rhs & TRange::Mask::value ); } BitField & set( TInt rhs ) { _value |= rhs; return *this; } /** Unset bits of \a rhs. */ template<class TRange> BitField & unset( TInt rhs ) { return unset( rhs & TRange::Mask::value ); } BitField & unset( TInt rhs ) { _value &= ~rhs; return *this; } /** Test whether \b all bits of \a rhs are set. */ template<class TRange> bool test( TInt rhs ) { return test( rhs & TRange::Mask::value ); } bool test( TInt rhs ) const { return (_value & rhs) == rhs; } /** Test whether \b at \b least \b one bit of \a rhs is set. 
*/ template<class TRange> bool testAnyOf( TInt rhs ) { return testAnyOf( rhs & TRange::Mask::value ); } bool testAnyOf( TInt rhs ) const { return (_value & rhs); } public: BitField & operator=( const BitField & rhs ) { _value = rhs._value; return *this; } BitField & operator&=( const BitField & rhs ) { _value &= rhs._value; return *this; } BitField & operator|=( const BitField & rhs ) { _value |= rhs._value; return *this; } BitField & operator^=( const BitField & rhs ) { _value ^= rhs._value; return *this; } BitField & operator<<=( unsigned num ) { _value <<= num; return *this; } BitField & operator>>=( unsigned num ) { _value >>= num; return *this; } BitField operator~() const { return ~_value; } private: TInt _value; }; /////////////////////////////////////////////////////////////////// /** \relates BitField Stream output */ template<class TInt> std::ostream & operator<<( std::ostream & str, const BitField<TInt> & obj ) { return str << obj.asString(); } /** \relates BitField */ template<class TInt> inline bool operator==( const BitField<TInt> & lhs, const BitField<TInt> & rhs ) { return lhs.value() == rhs.value(); } /** \relates BitField */ template<class TInt> inline bool operator!=( const BitField<TInt> & lhs, const BitField<TInt> & rhs ) { return ! (lhs == rhs); } /** \relates BitField */ template<class TInt> inline BitField<TInt> operator&( const BitField<TInt> & lhs, const BitField<TInt> & rhs ) { return BitField<TInt>(lhs) &= rhs; } /** \relates BitField */ template<class TInt> inline BitField<TInt> operator|( const BitField<TInt> & lhs, const BitField<TInt> & rhs ) { return BitField<TInt>(lhs) |= rhs; } /** \relates BitField */ template<class TInt> inline BitField<TInt> operator^( const BitField<TInt> & lhs, const BitField<TInt> & rhs ) { return BitField<TInt>(lhs) ^= rhs; } /** \relates BitField */ template<class TInt> inline BitField<TInt> operator<<( const BitField<TInt> & lhs, unsigned num ) { return BitField<TInt>(lhs) <<= num; } /** \relates BitField */ template<class TInt> inline BitField<TInt> operator>>( const BitField<TInt> & lhs, unsigned num ) { return BitField<TInt>(lhs) >>= num; } ///////////////////////////////////////////////////////////////// } // namespace bit /////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// } // namespace zypp /////////////////////////////////////////////////////////////////// #endif // ZYPP_BIT_H
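A small self-contained demonstration (hypothetical; assumes the header is installed as zypp/Bit.h and C++11 static_assert) that exercises Mask, Range and the templated BitField accessors exactly as the \code examples above describe:

#include <zypp/Bit.h>
#include <iostream>

int main() {
    using namespace zypp::bit;
    typedef Range<unsigned char, 2, 3> SubField;   // bits 2,3,4

    static_assert(Mask<unsigned char, 2, 3>::value == 0x1C,
                  "SubField mask is 00011100");

    BitField<unsigned char> bf;           // ________
    bf.assign<SubField>(0xFF);            // only the SubField bits are copied
    std::cout << bf.asString() << '\n';   // ___111__
    bf.set(0x01);                         // bit 0 of the whole field
    std::cout << bf.asString() << '\n';   // ___111_1
    std::cout << bf.test<SubField>(SubField::Mask::value) << '\n'; // 1
}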
utf-8
1
unknown
unknown
0ad-0.0.25b/build/premake/premake5/contrib/curl/lib/non-ascii.c
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include "curl_setup.h" #ifdef CURL_DOES_CONVERSIONS #include <curl/curl.h> #include "non-ascii.h" #include "formdata.h" #include "sendf.h" #include "urldata.h" #include "curl_memory.h" /* The last #include file should be: */ #include "memdebug.h" #ifdef HAVE_ICONV #include <iconv.h> /* set default codesets for iconv */ #ifndef CURL_ICONV_CODESET_OF_NETWORK #define CURL_ICONV_CODESET_OF_NETWORK "ISO8859-1" #endif #ifndef CURL_ICONV_CODESET_FOR_UTF8 #define CURL_ICONV_CODESET_FOR_UTF8 "UTF-8" #endif #define ICONV_ERROR (size_t)-1 #endif /* HAVE_ICONV */ /* * Curl_convert_clone() returns a malloced copy of the source string (if * returning CURLE_OK), with the data converted to network format. */ CURLcode Curl_convert_clone(struct Curl_easy *data, const char *indata, size_t insize, char **outbuf) { char *convbuf; CURLcode result; convbuf = malloc(insize); if(!convbuf) return CURLE_OUT_OF_MEMORY; memcpy(convbuf, indata, insize); result = Curl_convert_to_network(data, convbuf, insize); if(result) { free(convbuf); return result; } *outbuf = convbuf; /* return the converted buffer */ return CURLE_OK; } /* * Curl_convert_to_network() is an internal function for performing ASCII * conversions on non-ASCII platforms. It converts the buffer _in place_. 
*/ CURLcode Curl_convert_to_network(struct Curl_easy *data, char *buffer, size_t length) { if(data->set.convtonetwork) { /* use translation callback */ CURLcode result = data->set.convtonetwork(buffer, length); if(result) { failf(data, "CURLOPT_CONV_TO_NETWORK_FUNCTION callback returned %d: %s", (int)result, curl_easy_strerror(result)); } return result; } else { #ifdef HAVE_ICONV /* do the translation ourselves */ char *input_ptr, *output_ptr; size_t in_bytes, out_bytes, rc; int error; /* open an iconv conversion descriptor if necessary */ if(data->outbound_cd == (iconv_t)-1) { data->outbound_cd = iconv_open(CURL_ICONV_CODESET_OF_NETWORK, CURL_ICONV_CODESET_OF_HOST); if(data->outbound_cd == (iconv_t)-1) { error = ERRNO; failf(data, "The iconv_open(\"%s\", \"%s\") call failed with errno %i: %s", CURL_ICONV_CODESET_OF_NETWORK, CURL_ICONV_CODESET_OF_HOST, error, strerror(error)); return CURLE_CONV_FAILED; } } /* call iconv */ input_ptr = output_ptr = buffer; in_bytes = out_bytes = length; rc = iconv(data->outbound_cd, (const char **)&input_ptr, &in_bytes, &output_ptr, &out_bytes); if((rc == ICONV_ERROR) || (in_bytes != 0)) { error = ERRNO; failf(data, "The Curl_convert_to_network iconv call failed with errno %i: %s", error, strerror(error)); return CURLE_CONV_FAILED; } #else failf(data, "CURLOPT_CONV_TO_NETWORK_FUNCTION callback required"); return CURLE_CONV_REQD; #endif /* HAVE_ICONV */ } return CURLE_OK; } /* * Curl_convert_from_network() is an internal function for performing ASCII * conversions on non-ASCII platforms. It converts the buffer _in place_. */ CURLcode Curl_convert_from_network(struct Curl_easy *data, char *buffer, size_t length) { if(data->set.convfromnetwork) { /* use translation callback */ CURLcode result = data->set.convfromnetwork(buffer, length); if(result) { failf(data, "CURLOPT_CONV_FROM_NETWORK_FUNCTION callback returned %d: %s", (int)result, curl_easy_strerror(result)); } return result; } else { #ifdef HAVE_ICONV /* do the translation ourselves */ char *input_ptr, *output_ptr; size_t in_bytes, out_bytes, rc; int error; /* open an iconv conversion descriptor if necessary */ if(data->inbound_cd == (iconv_t)-1) { data->inbound_cd = iconv_open(CURL_ICONV_CODESET_OF_HOST, CURL_ICONV_CODESET_OF_NETWORK); if(data->inbound_cd == (iconv_t)-1) { error = ERRNO; failf(data, "The iconv_open(\"%s\", \"%s\") call failed with errno %i: %s", CURL_ICONV_CODESET_OF_HOST, CURL_ICONV_CODESET_OF_NETWORK, error, strerror(error)); return CURLE_CONV_FAILED; } } /* call iconv */ input_ptr = output_ptr = buffer; in_bytes = out_bytes = length; rc = iconv(data->inbound_cd, (const char **)&input_ptr, &in_bytes, &output_ptr, &out_bytes); if((rc == ICONV_ERROR) || (in_bytes != 0)) { error = ERRNO; failf(data, "Curl_convert_from_network iconv call failed with errno %i: %s", error, strerror(error)); return CURLE_CONV_FAILED; } #else failf(data, "CURLOPT_CONV_FROM_NETWORK_FUNCTION callback required"); return CURLE_CONV_REQD; #endif /* HAVE_ICONV */ } return CURLE_OK; } /* * Curl_convert_from_utf8() is an internal function for performing UTF-8 * conversions on non-ASCII platforms. 
*/ CURLcode Curl_convert_from_utf8(struct Curl_easy *data, char *buffer, size_t length) { if(data->set.convfromutf8) { /* use translation callback */ CURLcode result = data->set.convfromutf8(buffer, length); if(result) { failf(data, "CURLOPT_CONV_FROM_UTF8_FUNCTION callback returned %d: %s", (int)result, curl_easy_strerror(result)); } return result; } else { #ifdef HAVE_ICONV /* do the translation ourselves */ const char *input_ptr; char *output_ptr; size_t in_bytes, out_bytes, rc; int error; /* open an iconv conversion descriptor if necessary */ if(data->utf8_cd == (iconv_t)-1) { data->utf8_cd = iconv_open(CURL_ICONV_CODESET_OF_HOST, CURL_ICONV_CODESET_FOR_UTF8); if(data->utf8_cd == (iconv_t)-1) { error = ERRNO; failf(data, "The iconv_open(\"%s\", \"%s\") call failed with errno %i: %s", CURL_ICONV_CODESET_OF_HOST, CURL_ICONV_CODESET_FOR_UTF8, error, strerror(error)); return CURLE_CONV_FAILED; } } /* call iconv */ input_ptr = output_ptr = buffer; in_bytes = out_bytes = length; rc = iconv(data->utf8_cd, &input_ptr, &in_bytes, &output_ptr, &out_bytes); if((rc == ICONV_ERROR) || (in_bytes != 0)) { error = ERRNO; failf(data, "The Curl_convert_from_utf8 iconv call failed with errno %i: %s", error, strerror(error)); return CURLE_CONV_FAILED; } if(output_ptr < input_ptr) { /* null terminate the now shorter output string */ *output_ptr = 0x00; } #else failf(data, "CURLOPT_CONV_FROM_UTF8_FUNCTION callback required"); return CURLE_CONV_REQD; #endif /* HAVE_ICONV */ } return CURLE_OK; } /* * Init conversion stuff for a Curl_easy */ void Curl_convert_init(struct Curl_easy *data) { #if defined(CURL_DOES_CONVERSIONS) && defined(HAVE_ICONV) /* conversion descriptors for iconv calls */ data->outbound_cd = (iconv_t)-1; data->inbound_cd = (iconv_t)-1; data->utf8_cd = (iconv_t)-1; #else (void)data; #endif /* CURL_DOES_CONVERSIONS && HAVE_ICONV */ } /* * Setup conversion stuff for a Curl_easy */ void Curl_convert_setup(struct Curl_easy *data) { data->inbound_cd = iconv_open(CURL_ICONV_CODESET_OF_HOST, CURL_ICONV_CODESET_OF_NETWORK); data->outbound_cd = iconv_open(CURL_ICONV_CODESET_OF_NETWORK, CURL_ICONV_CODESET_OF_HOST); data->utf8_cd = iconv_open(CURL_ICONV_CODESET_OF_HOST, CURL_ICONV_CODESET_FOR_UTF8); } /* * Close conversion stuff for a Curl_easy */ void Curl_convert_close(struct Curl_easy *data) { #ifdef HAVE_ICONV /* close iconv conversion descriptors */ if(data->inbound_cd != (iconv_t)-1) { iconv_close(data->inbound_cd); } if(data->outbound_cd != (iconv_t)-1) { iconv_close(data->outbound_cd); } if(data->utf8_cd != (iconv_t)-1) { iconv_close(data->utf8_cd); } #else (void)data; #endif /* HAVE_ICONV */ } /* * Curl_convert_form() is used from http.c; it converts any form items that need to be sent in the network encoding. Returns CURLE_OK on success. */ CURLcode Curl_convert_form(struct Curl_easy *data, struct FormData *form) { CURLcode result; if(!data) return CURLE_BAD_FUNCTION_ARGUMENT; while(form) { if(form->type == FORM_DATA) { result = Curl_convert_to_network(data, form->line, form->length); /* Curl_convert_to_network calls failf if unsuccessful */ if(result) return result; } form = form->next; } return CURLE_OK; } #endif /* CURL_DOES_CONVERSIONS */
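All three converters above share one iconv pattern: lazily open a conversion descriptor, then convert the buffer in place, which is safe for the ASCII/EBCDIC cases here because input and output encodings have the same width. A reduced standalone sketch of that pattern (codeset names are placeholders; uses the POSIX/glibc char** iconv signature, whereas some platforms need the const cast seen above):

#include <iconv.h>
#include <cstring>
#include <cstdio>

// Convert `buffer` of `length` bytes in place, as the curl helpers do.
// Only valid when source and destination encodings are equally wide.
bool convert_in_place(iconv_t cd, char *buffer, size_t length) {
    char *in = buffer, *out = buffer;
    size_t in_left = length, out_left = length;
    size_t rc = iconv(cd, &in, &in_left, &out, &out_left);
    return rc != (size_t)-1 && in_left == 0;
}

int main() {
    iconv_t cd = iconv_open("ISO8859-1", "UTF-8");  // placeholder codesets
    if (cd == (iconv_t)-1)
        return 1;
    char buf[] = "plain ascii survives unchanged";
    bool ok = convert_in_place(cd, buf, std::strlen(buf));
    std::printf("%s (%s)\n", buf, ok ? "ok" : "failed");
    iconv_close(cd);
}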
utf-8
1
GPL-2.0+
2000-2021 Wildfire Games
osmo-iuh-0.6.1+dfsg1/src/tests/hnb-test-ranap.c
#include <osmocom/core/msgb.h> #include <osmocom/ranap/ranap_ies_defs.h> #include "hnb-test-layers.h" static const char *printstr(OCTET_STRING_t *s) { return osmo_hexdump((char*)s->buf, s->size); } #define PP(octet_string_t) \ printf(#octet_string_t " = %s\n",\ printstr(&octet_string_t)) void hnb_test_rua_dt_handle_ranap(struct hnb_test *hnb, struct ranap_message_s *ranap_msg) { int len; char *data; RANAP_PermittedIntegrityProtectionAlgorithms_t *algs; RANAP_IntegrityProtectionAlgorithm_t *first_alg; printf("rx ranap_msg->procedureCode %d\n", ranap_msg->procedureCode); switch (ranap_msg->procedureCode) { case RANAP_ProcedureCode_id_DirectTransfer: printf("rx DirectTransfer: presence = %hx\n", ranap_msg->msg.directTransferIEs.presenceMask); PP(ranap_msg->msg.directTransferIEs.nas_pdu); len = ranap_msg->msg.directTransferIEs.nas_pdu.size; data = ranap_msg->msg.directTransferIEs.nas_pdu.buf; hnb_test_nas_rx_dtap(hnb, data, len); return; case RANAP_ProcedureCode_id_SecurityModeControl: printf("rx SecurityModeControl: presence = %hx\n", ranap_msg->msg.securityModeCommandIEs.presenceMask); /* Just pick the first available IP alg, don't care about * encryption (yet?) */ algs = &ranap_msg->msg.securityModeCommandIEs.integrityProtectionInformation.permittedAlgorithms; if (algs->list.count < 1) { printf("Security Mode Command: No permitted algorithms.\n"); return; } first_alg = *algs->list.array; hnb_test_rx_secmode_cmd(hnb, *first_alg); return; case RANAP_ProcedureCode_id_Iu_Release: hnb_test_rx_iu_release(hnb); return; } } void hnb_test_rua_cl_handle_ranap(struct hnb_test *hnb, struct ranap_message_s *ranap_msg) { char imsi[16]; printf("rx ranap_msg->procedureCode %d\n", ranap_msg->procedureCode); switch (ranap_msg->procedureCode) { case RANAP_ProcedureCode_id_Paging: if (ranap_msg->msg.pagingIEs.permanentNAS_UE_ID.present == RANAP_PermanentNAS_UE_ID_PR_iMSI) { ranap_bcd_decode(imsi, sizeof(imsi), ranap_msg->msg.pagingIEs.permanentNAS_UE_ID.choice.iMSI.buf, ranap_msg->msg.pagingIEs.permanentNAS_UE_ID.choice.iMSI.size); } else imsi[0] = '\0'; printf("rx Paging: presence=%hx domain=%d IMSI=%s\n", ranap_msg->msg.pagingIEs.presenceMask, ranap_msg->msg.pagingIEs.cN_DomainIndicator, imsi ); hnb_test_rx_paging(hnb, imsi); return; } }
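ranap_bcd_decode() above unpacks the nibble-packed BCD digits that carry the IMSI. A generic sketch of that unpacking convention (hypothetical helper, not the libosmo implementation): two digits per byte, low nibble first, 0xF as filler.

#include <cstdint>
#include <cstdio>
#include <string>

// Decode telephony BCD: low nibble first, 0xF terminates/pads.
std::string bcd_decode(const uint8_t *buf, size_t len) {
    std::string out;
    for (size_t i = 0; i < len; ++i) {
        uint8_t lo = buf[i] & 0x0F, hi = buf[i] >> 4;
        if (lo == 0x0F) break;
        out.push_back('0' + lo);
        if (hi == 0x0F) break;
        out.push_back('0' + hi);
    }
    return out;
}

int main() {
    const uint8_t imsi[] = {0x21, 0x43, 0x65, 0x87, 0xF9};
    std::printf("IMSI=%s\n", bcd_decode(imsi, sizeof imsi).c_str()); // 123456789
}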
utf-8
1
AGPL-3.0+
2015 Daniel Willmann <dwillmann@sysmocom.de> 2015 Harald Welte <laforge@gnumonks.org> 2015-2017 sysmocom s.f.m.c. GmbH <info@sysmocom.de>
unixodbc-2.3.9/odbcinst/SQLGetConfigMode.c
/************************************************** * SQLGetConfigMode * ************************************************** * This code was created by Peter Harvey @ CodeByDesign. * Released under LGPL 28.JAN.99 * * Contributions from... * ----------------------------------------------- * Peter Harvey - pharvey@codebydesign.com * Nick Gorham - nick@lurcher.org **************************************************/ #include <config.h> #include <stdlib.h> #include <odbcinstext.h> BOOL SQLGetConfigMode( UWORD *pnConfigMode ) { inst_logClear(); *pnConfigMode = __get_config_mode(); return TRUE; }
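A usage sketch for the get/set pair (assumes the standard odbcinst.h config-mode constants ODBC_USER_DSN, ODBC_SYSTEM_DSN and ODBC_BOTH_DSN): read the current mode, switch it for some DSN work, then restore it.

#include <odbcinst.h>
#include <cstdio>

int main() {
    UWORD mode = 0;
    if (!SQLGetConfigMode(&mode))
        return 1;
    std::printf("current config mode: %u\n", (unsigned)mode);

    // Restrict subsequent DSN operations to the per-user ini files,
    // then restore whatever mode was active before.
    SQLSetConfigMode(ODBC_USER_DSN);
    /* ... DSN read/write calls would go here ... */
    SQLSetConfigMode(mode);
}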
utf-8
1
LGPL-2.1+
1999-2020 Nick Gorham <nick@lurcher.org> 1999-2007 Peter Harvey <pharvey@peterharvey.org>, et al.
ceph-16.2.7+ds/src/mds/Beacon.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2012 Red Hat * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "common/dout.h" #include "common/HeartbeatMap.h" #include "include/stringify.h" #include "include/util.h" #include "mon/MonClient.h" #include "mds/MDLog.h" #include "mds/MDSRank.h" #include "mds/MDSMap.h" #include "mds/Locker.h" #include "Beacon.h" #include <chrono> #define dout_context g_ceph_context #define dout_subsys ceph_subsys_mds #undef dout_prefix #define dout_prefix *_dout << "mds.beacon." << name << ' ' using namespace std::chrono_literals; Beacon::Beacon(CephContext *cct, MonClient *monc, std::string_view name) : Dispatcher(cct), beacon_interval(g_conf()->mds_beacon_interval), monc(monc), name(name), compat(MDSMap::get_compat_set_all()) { } Beacon::~Beacon() { shutdown(); } void Beacon::shutdown() { std::unique_lock<std::mutex> lock(mutex); if (!finished) { finished = true; lock.unlock(); if (sender.joinable()) sender.join(); } } void Beacon::init(const MDSMap &mdsmap) { std::unique_lock lock(mutex); _notify_mdsmap(mdsmap); sender = std::thread([this]() { std::unique_lock<std::mutex> lock(mutex); std::condition_variable c; // no one wakes us while (!finished) { auto now = clock::now(); auto since = std::chrono::duration<double>(now-last_send).count(); auto interval = beacon_interval; if (since >= interval*.90) { if (!_send()) { interval = 0.5; /* 500ms */ } } else { interval -= since; } dout(20) << "sender thread waiting interval " << interval << "s" << dendl; c.wait_for(lock, interval*1s); } }); } bool Beacon::ms_can_fast_dispatch2(const cref_t<Message>& m) const { return m->get_type() == MSG_MDS_BEACON; } void Beacon::ms_fast_dispatch2(const ref_t<Message>& m) { bool handled = ms_dispatch2(m); ceph_assert(handled); } bool Beacon::ms_dispatch2(const ref_t<Message>& m) { if (m->get_type() == MSG_MDS_BEACON) { if (m->get_connection()->get_peer_type() == CEPH_ENTITY_TYPE_MON) { handle_mds_beacon(ref_cast<MMDSBeacon>(m)); } return true; } return false; } /** * Update lagginess state based on response from remote MDSMonitor * * This function puts the passed message before returning */ void Beacon::handle_mds_beacon(const cref_t<MMDSBeacon> &m) { std::unique_lock lock(mutex); version_t seq = m->get_seq(); // update lab auto it = seq_stamp.find(seq); if (it != seq_stamp.end()) { auto now = clock::now(); last_acked_stamp = it->second; auto rtt = std::chrono::duration<double>(now - last_acked_stamp).count(); dout(5) << "received beacon reply " << ceph_mds_state_name(m->get_state()) << " seq " << m->get_seq() << " rtt " << rtt << dendl; if (laggy && rtt < g_conf()->mds_beacon_grace) { dout(0) << " MDS is no longer laggy" << dendl; laggy = false; last_laggy = now; } // clean up seq_stamp map seq_stamp.erase(seq_stamp.begin(), ++it); // Wake a waiter up if present cvar.notify_all(); } else { dout(1) << "discarding unexpected beacon reply " << ceph_mds_state_name(m->get_state()) << " seq " << m->get_seq() << " dne" << dendl; } } void Beacon::send() { std::unique_lock lock(mutex); _send(); } void Beacon::send_and_wait(const double duration) { std::unique_lock lock(mutex); _send(); auto awaiting_seq = last_seq; dout(20) << __func__ << ": awaiting " << awaiting_seq << " for up to " << duration << "s" << 
dendl; auto start = clock::now(); while (!seq_stamp.empty() && seq_stamp.begin()->first <= awaiting_seq) { auto now = clock::now(); auto s = duration*.95-std::chrono::duration<double>(now-start).count(); if (s < 0) break; cvar.wait_for(lock, s*1s); } } /** * Call periodically, or when you have updated the desired state */ bool Beacon::_send() { auto now = clock::now(); auto since = std::chrono::duration<double>(now-last_acked_stamp).count(); if (!cct->get_heartbeat_map()->is_healthy()) { /* If anything isn't progressing, let's avoid sending a beacon so that * the monitors will consider us laggy */ dout(0) << "Skipping beacon heartbeat to monitors (last acked " << since << "s ago); MDS internal heartbeat is not healthy!" << dendl; return false; } ++last_seq; dout(5) << "Sending beacon " << ceph_mds_state_name(want_state) << " seq " << last_seq << dendl; seq_stamp[last_seq] = now; ceph_assert(want_state != MDSMap::STATE_NULL); auto beacon = make_message<MMDSBeacon>( monc->get_fsid(), mds_gid_t(monc->get_global_id()), name, epoch, want_state, last_seq, CEPH_FEATURES_SUPPORTED_DEFAULT); beacon->set_health(health); beacon->set_compat(compat); beacon->set_fs(g_conf().get_val<std::string>("mds_join_fs")); // piggyback the sys info on beacon msg if (want_state == MDSMap::STATE_BOOT) { map<string, string> sys_info; collect_sys_info(&sys_info, cct); sys_info["addr"] = stringify(monc->get_myaddrs()); beacon->set_sys_info(sys_info); } monc->send_mon_message(beacon.detach()); last_send = now; return true; } /** * Call this when there is a new MDSMap available */ void Beacon::notify_mdsmap(const MDSMap &mdsmap) { std::unique_lock lock(mutex); _notify_mdsmap(mdsmap); } void Beacon::_notify_mdsmap(const MDSMap &mdsmap) { ceph_assert(mdsmap.get_epoch() >= epoch); if (mdsmap.get_epoch() >= epoch) { epoch = mdsmap.get_epoch(); } } bool Beacon::is_laggy() { std::unique_lock lock(mutex); auto now = clock::now(); auto since = std::chrono::duration<double>(now-last_acked_stamp).count(); if (since > g_conf()->mds_beacon_grace) { if (!laggy) { dout(1) << "MDS connection to Monitors appears to be laggy; " << since << "s since last acked beacon" << dendl; } laggy = true; return true; } return false; } void Beacon::set_want_state(const MDSMap &mdsmap, MDSMap::DaemonState newstate) { std::unique_lock lock(mutex); // Update mdsmap epoch atomically with updating want_state, so that when // we send a beacon with the new want state it has the latest epoch, and // once we have updated to the latest epoch, we are not sending out // a stale want_state (i.e. one from before making it through MDSMap // handling) _notify_mdsmap(mdsmap); if (want_state != newstate) { dout(5) << __func__ << ": " << ceph_mds_state_name(want_state) << " -> " << ceph_mds_state_name(newstate) << dendl; want_state = newstate; } } /** * We are 'shown' an MDS briefly in order to update * some health metrics that we will send in the next * beacon. 
*/ void Beacon::notify_health(MDSRank const *mds) { std::unique_lock lock(mutex); if (!mds) { // No MDS rank held return; } // I'm going to touch this MDS, so it must be locked ceph_assert(ceph_mutex_is_locked_by_me(mds->mds_lock)); health.metrics.clear(); // Detect presence of entries in DamageTable if (!mds->damage_table.empty()) { MDSHealthMetric m(MDS_HEALTH_DAMAGE, HEALTH_ERR, std::string( "Metadata damage detected")); health.metrics.push_back(m); } // Detect MDS_HEALTH_TRIM condition // Indicates MDS is not trimming promptly { if (mds->mdlog->get_num_segments() > (size_t)(g_conf()->mds_log_max_segments * g_conf().get_val<double>("mds_log_warn_factor"))) { CachedStackStringStream css; *css << "Behind on trimming (" << mds->mdlog->get_num_segments() << "/" << g_conf()->mds_log_max_segments << ")"; MDSHealthMetric m(MDS_HEALTH_TRIM, HEALTH_WARN, css->strv()); m.metadata["num_segments"] = stringify(mds->mdlog->get_num_segments()); m.metadata["max_segments"] = stringify(g_conf()->mds_log_max_segments); health.metrics.push_back(m); } } // Detect clients failing to respond to modifications to capabilities in // CLIENT_CAPS messages. { auto&& late_clients = mds->locker->get_late_revoking_clients(mds->mdsmap->get_session_timeout()); std::vector<MDSHealthMetric> late_cap_metrics; for (const auto& client : late_clients) { // client_t is equivalent to session.info.inst.name.num // Construct an entity_name_t to lookup into SessionMap entity_name_t ename(CEPH_ENTITY_TYPE_CLIENT, client.v); Session const *s = mds->sessionmap.get_session(ename); if (s == NULL) { // Shouldn't happen, but not worth crashing if it does as this is // just health-reporting code. derr << "Client ID without session: " << client.v << dendl; continue; } CachedStackStringStream css; *css << "Client " << s->get_human_name() << " failing to respond to capability release"; MDSHealthMetric m(MDS_HEALTH_CLIENT_LATE_RELEASE, HEALTH_WARN, css->strv()); m.metadata["client_id"] = stringify(client.v); late_cap_metrics.emplace_back(std::move(m)); } if (late_cap_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) { auto&& m = late_cap_metrics; health.metrics.insert(std::end(health.metrics), std::cbegin(m), std::cend(m)); } else { CachedStackStringStream css; *css << "Many clients (" << late_cap_metrics.size() << ") failing to respond to capability release"; MDSHealthMetric m(MDS_HEALTH_CLIENT_LATE_RELEASE_MANY, HEALTH_WARN, css->strv()); m.metadata["client_count"] = stringify(late_cap_metrics.size()); health.metrics.push_back(std::move(m)); } } // Detect clients failing to generate cap releases from CEPH_SESSION_RECALL_STATE // messages. May be due to buggy client or resource-hogging application. 
// // Detect clients failing to advance their old_client_tid { set<Session*> sessions; mds->sessionmap.get_client_session_set(sessions); const auto min_caps_working_set = g_conf().get_val<uint64_t>("mds_min_caps_working_set"); const auto recall_warning_threshold = g_conf().get_val<Option::size_t>("mds_recall_warning_threshold"); const auto max_completed_requests = g_conf()->mds_max_completed_requests; const auto max_completed_flushes = g_conf()->mds_max_completed_flushes; std::vector<MDSHealthMetric> late_recall_metrics; std::vector<MDSHealthMetric> large_completed_requests_metrics; for (auto& session : sessions) { const uint64_t num_caps = session->get_num_caps(); const uint64_t recall_caps = session->get_recall_caps(); if (recall_caps > recall_warning_threshold && num_caps > min_caps_working_set) { dout(2) << "Session " << *session << " is not releasing caps fast enough. Recalled caps at " << recall_caps << " > " << recall_warning_threshold << " (mds_recall_warning_threshold)." << dendl; CachedStackStringStream css; *css << "Client " << session->get_human_name() << " failing to respond to cache pressure"; MDSHealthMetric m(MDS_HEALTH_CLIENT_RECALL, HEALTH_WARN, css->strv()); m.metadata["client_id"] = stringify(session->get_client()); late_recall_metrics.emplace_back(std::move(m)); } if ((session->get_num_trim_requests_warnings() > 0 && session->get_num_completed_requests() >= max_completed_requests) || (session->get_num_trim_flushes_warnings() > 0 && session->get_num_completed_flushes() >= max_completed_flushes)) { CachedStackStringStream css; *css << "Client " << session->get_human_name() << " failing to advance its oldest client/flush tid. "; MDSHealthMetric m(MDS_HEALTH_CLIENT_OLDEST_TID, HEALTH_WARN, css->strv()); m.metadata["client_id"] = stringify(session->get_client()); large_completed_requests_metrics.emplace_back(std::move(m)); } } if (late_recall_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) { auto&& m = late_recall_metrics; health.metrics.insert(std::end(health.metrics), std::cbegin(m), std::cend(m)); } else { CachedStackStringStream css; *css << "Many clients (" << late_recall_metrics.size() << ") failing to respond to cache pressure"; MDSHealthMetric m(MDS_HEALTH_CLIENT_RECALL_MANY, HEALTH_WARN, css->strv()); m.metadata["client_count"] = stringify(late_recall_metrics.size()); health.metrics.push_back(m); late_recall_metrics.clear(); } if (large_completed_requests_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) { auto&& m = large_completed_requests_metrics; health.metrics.insert(std::end(health.metrics), std::cbegin(m), std::cend(m)); } else { CachedStackStringStream css; *css << "Many clients (" << large_completed_requests_metrics.size() << ") failing to advance their oldest client/flush tid"; MDSHealthMetric m(MDS_HEALTH_CLIENT_OLDEST_TID_MANY, HEALTH_WARN, css->strv()); m.metadata["client_count"] = stringify(large_completed_requests_metrics.size()); health.metrics.push_back(m); large_completed_requests_metrics.clear(); } } // Detect MDS_HEALTH_SLOW_REQUEST condition { int slow = mds->get_mds_slow_req_count(); if (slow) { dout(20) << slow << " slow request found" << dendl; CachedStackStringStream css; *css << slow << " slow requests are blocked > " << g_conf()->mds_op_complaint_time << " secs"; MDSHealthMetric m(MDS_HEALTH_SLOW_REQUEST, HEALTH_WARN, css->strv()); health.metrics.push_back(m); } } { auto complaint_time = g_conf()->osd_op_complaint_time; auto now = clock::now(); auto cutoff = now - ceph::make_timespan(complaint_time); 
std::string count; ceph::coarse_mono_time oldest; if (MDSIOContextBase::check_ios_in_flight(cutoff, count, oldest)) { dout(20) << count << " slow metadata IOs found" << dendl; auto oldest_secs = std::chrono::duration<double>(now - oldest).count(); CachedStackStringStream css; *css << count << " slow metadata IOs are blocked > " << complaint_time << " secs, oldest blocked for " << (int64_t)oldest_secs << " secs"; MDSHealthMetric m(MDS_HEALTH_SLOW_METADATA_IO, HEALTH_WARN, css->strv()); health.metrics.push_back(m); } } // Report a health warning if we are readonly if (mds->mdcache->is_readonly()) { MDSHealthMetric m(MDS_HEALTH_READ_ONLY, HEALTH_WARN, "MDS in read-only mode"); health.metrics.push_back(m); } // Report if we have significantly exceeded our cache size limit if (mds->mdcache->cache_overfull()) { CachedStackStringStream css; *css << "MDS cache is too large (" << bytes2str(mds->mdcache->cache_size()) << "/" << bytes2str(mds->mdcache->cache_limit_memory()) << "); " << mds->mdcache->num_inodes_with_caps << " inodes in use by clients, " << mds->mdcache->get_num_strays() << " stray files"; MDSHealthMetric m(MDS_HEALTH_CACHE_OVERSIZED, HEALTH_WARN, css->strv()); health.metrics.push_back(m); } } MDSMap::DaemonState Beacon::get_want_state() const { std::unique_lock lock(mutex); return want_state; }
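send_and_wait() above is a bounded condition-variable wait keyed on acked sequence numbers; a stripped-down sketch of that pattern (hypothetical names, outside any Ceph types):

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <map>
#include <mutex>

// Sketch of the Beacon::send_and_wait()/handle_mds_beacon() pairing:
// block until every pending seq <= awaiting_seq is acked, or time out.
struct AckWaiter {
    std::mutex mutex;
    std::condition_variable cvar;
    std::map<uint64_t, std::chrono::steady_clock::time_point> pending;

    void wait_acked(uint64_t awaiting_seq, double duration_sec) {
        std::unique_lock<std::mutex> lock(mutex);
        auto start = std::chrono::steady_clock::now();
        while (!pending.empty() && pending.begin()->first <= awaiting_seq) {
            double elapsed = std::chrono::duration<double>(
                std::chrono::steady_clock::now() - start).count();
            double remaining = duration_sec - elapsed;
            if (remaining <= 0)
                break;  // timed out: return rather than wait forever
            cvar.wait_for(lock, std::chrono::duration<double>(remaining));
        }
    }

    void ack_up_to(uint64_t seq) {  // the receive path, like handle_mds_beacon
        std::lock_guard<std::mutex> lock(mutex);
        pending.erase(pending.begin(), pending.upper_bound(seq));
        cvar.notify_all();
    }
};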
utf-8
1
unknown
unknown
cantor-21.08.3/src/backends/maxima/maximahighlighter.cpp
/* SPDX-License-Identifier: GPL-2.0-or-later SPDX-FileCopyrightText: 2009-2012 Alexander Rieder <alexanderrieder@gmail.com> */ #include "maximahighlighter.h" #include "maximakeywords.h" #include "maximasession.h" #include "maximavariablemodel.h" MaximaHighlighter::MaximaHighlighter(QObject* parent, MaximaSession* session) : Cantor::DefaultHighlighter(parent, session) { //addRule(QRegExp("\\b[A-Za-z0-9_]+(?=\\()"), functionFormat()); //Code highlighting the different keywords addKeywords(MaximaKeywords::instance()->keywords()); addRule(QLatin1String("FIXME"), commentFormat()); addRule(QLatin1String("TODO"), commentFormat()); addFunctions(MaximaKeywords::instance()->functions()); addVariables(MaximaKeywords::instance()->variables()); //addRule(QRegExp("\".*\""), stringFormat()); //addRule(QRegExp("'.*'"), stringFormat()); commentStartExpression = QRegularExpression(QStringLiteral("/\\*")); commentEndExpression = QRegularExpression(QStringLiteral("\\*/")); } void MaximaHighlighter::highlightBlock(const QString& text) { if (skipHighlighting(text)) return; //Do some backend independent highlighting (brackets etc.) DefaultHighlighter::highlightBlock(text); setCurrentBlockState(-1); int commentLevel = 0; bool inString = false; int startIndex = -1; if (previousBlockState() > 0) { commentLevel = previousBlockState(); startIndex = 0; } else if (previousBlockState() < -1) { inString = true; startIndex = 0; } for (int i = 0; i < text.size(); ++i) { if (text[i] == QLatin1Char('\\')) { ++i; // skip the next character } else if (text[i] == QLatin1Char('"') && commentLevel == 0) { if (!inString) startIndex = i; else setFormat(startIndex, i - startIndex + 1, stringFormat()); inString = !inString; } else if (text.mid(i,2) == QLatin1String("/*") && !inString) { if (commentLevel == 0) startIndex = i; ++commentLevel; ++i; } else if (text.mid(i,2) == QLatin1String("*/") && !inString) { if (commentLevel == 0) { setFormat(i, 2, errorFormat()); // undo the --commentLevel below, so we stay at 0 ++commentLevel; } else if (commentLevel == 1) { setFormat(startIndex, i - startIndex + 2, commentFormat()); } ++i; --commentLevel; } } if (inString) { setCurrentBlockState(-2); setFormat(startIndex, text.size() - startIndex, stringFormat()); } else if (commentLevel > 0) { setCurrentBlockState(commentLevel); setFormat(startIndex, text.size() - startIndex, commentFormat()); } } QString MaximaHighlighter::nonSeparatingCharacters() const { return QLatin1String("%"); }
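The highlighter above packs all of its multi-line state into QSyntaxHighlighter's single per-block int: -1 for nothing open, -2 for an open string, positive values for comment nesting depth. A reduced sketch of just the nesting-depth half of that encoding (hypothetical class, format setup omitted):

#include <QSyntaxHighlighter>
#include <QTextCharFormat>

// Sketch: carry nested /* */ depth across lines via the block state.
class NestedCommentHighlighter : public QSyntaxHighlighter {
public:
    using QSyntaxHighlighter::QSyntaxHighlighter;

protected:
    void highlightBlock(const QString &text) override {
        int depth = qMax(previousBlockState(), 0); // depth left open above
        int start = depth > 0 ? 0 : -1;
        for (int i = 0; i + 1 < text.size(); ++i) {
            if (text.mid(i, 2) == QLatin1String("/*")) {
                if (depth++ == 0) start = i;
                ++i;
            } else if (text.mid(i, 2) == QLatin1String("*/") && depth > 0) {
                if (--depth == 0)
                    setFormat(start, i - start + 2, m_commentFormat);
                ++i;
            }
        }
        if (depth > 0)  // the comment spills onto the next block
            setFormat(start, text.size() - start, m_commentFormat);
        setCurrentBlockState(depth > 0 ? depth : -1);
    }

private:
    QTextCharFormat m_commentFormat; // left default for brevity
};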
utf-8
1
GPL-2+
2009-2010 Alexander Rieder <alexanderrieder@gmail.com> 2010 Miha Čančula <miha.cancula@gmail.com> 2006 David Saxton <david@bluehaze.org> 2010 Raffaele De Feo <alberthilbert@gmail.com> 2009-2010 Oleksiy Protas <elfy.ua@gmail.com> 2009 Aleix Pol <aleixpol@kde.org> 2011 Filipe Saraiva <filipe@kde.org> 2009 Milian Wolff <mail@milianw.de> 2011 Martin Kuettler <martinkuettler@gmail.com> 2011 Matteo Agostinelli <agostinelli@gmail.com>
mediastreamer2-4.4.21/src/voip/qosanalyzer.c
/* * Copyright (c) 2010-2019 Belledonne Communications SARL. * * This file is part of mediastreamer2. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "mediastreamer2/bitratecontrol.h" #include "qosanalyzer.h" #include <bctoolbox/defs.h> #include <math.h> #define LOSS_RATE_MIN_INTERVAL 60 #define LOSS_RATE_MIN_TIME 3000 /** * Analyses a received RTCP packet. * Returns TRUE if relevant information has been found in the rtcp message, FALSE otherwise. **/ bool_t ms_qos_analyzer_process_rtcp(MSQosAnalyzer *obj,mblk_t *msg){ if (obj->desc->process_rtcp){ return obj->desc->process_rtcp(obj,msg); } ms_error("MSQosAnalyzer: Unimplemented process_rtcp() call."); return FALSE; } void ms_qos_analyzer_suggest_action(MSQosAnalyzer *obj, MSRateControlAction *action){ if (obj->desc->suggest_action){ obj->desc->suggest_action(obj,action); } } void ms_qos_analyzer_update(MSQosAnalyzer *obj){ if (obj->desc->update){ obj->desc->update(obj); } } bool_t ms_qos_analyzer_has_improved(MSQosAnalyzer *obj){ if (obj->desc->has_improved){ return obj->desc->has_improved(obj); } ms_error("MSQosAnalyzer: Unimplemented has_improved() call."); return TRUE; } void ms_qos_analyzer_set_on_action_suggested(MSQosAnalyzer *obj, void (*on_action_suggested)(void*, int, const char**), void* u){ obj->on_action_suggested=on_action_suggested; obj->on_action_suggested_user_pointer=u; } void ms_qos_analyser_set_label(MSQosAnalyzer *obj, const char *label){ if (obj->label){ ms_free(obj->label); obj->label=NULL; } if (label) obj->label=ms_strdup(label); } const char* ms_qos_analyzer_algorithm_to_string(MSQosAnalyzerAlgorithm alg) { switch (alg){ case MSQosAnalyzerAlgorithmSimple: return "Simple"; case MSQosAnalyzerAlgorithmStateful: return "Stateful"; default: return NULL; } } MSQosAnalyzerAlgorithm ms_qos_analyzer_algorithm_from_string(const char* alg) { if (alg == NULL || strcasecmp(alg, "Simple")==0) return MSQosAnalyzerAlgorithmSimple; else if (strcasecmp(alg, "Stateful")==0) return MSQosAnalyzerAlgorithmStateful; ms_error("MSQosAnalyzer: Invalid QoS analyzer: %s", alg); return MSQosAnalyzerAlgorithmSimple; } const char* ms_qos_analyzer_get_name(MSQosAnalyzer *obj){ return ms_qos_analyzer_algorithm_to_string(obj->type); } MSQosAnalyzer *ms_qos_analyzer_ref(MSQosAnalyzer *obj){ obj->refcnt++; return obj; } void ms_qos_analyzer_unref(MSQosAnalyzer *obj){ obj->refcnt--; if (obj->refcnt<=0){ if (obj->desc->uninit) obj->desc->uninit(obj); if (obj->label) ms_free(obj->label); if (obj->lre) ortp_loss_rate_estimator_destroy(obj->lre); ms_free(obj); } } const char *ms_rate_control_action_type_name(MSRateControlActionType t){ switch(t){ case MSRateControlActionDoNothing: return "DoNothing"; case MSRateControlActionIncreaseQuality: return "IncreaseQuality"; case MSRateControlActionDecreaseBitrate: return "DecreaseBitrate"; case MSRateControlActionDecreasePacketRate: return "DecreasePacketRate"; } return "bad action type"; } 
/******************************************************************************/ /***************************** Simple QoS analyzer ****************************/ /******************************************************************************/ static bool_t rt_prop_doubled(rtpstats_t *cur,rtpstats_t *prev){ //ms_message("AudioBitrateController: cur=%f, prev=%f",cur->rt_prop,prev->rt_prop); if (cur->rt_prop>=significant_delay && prev->rt_prop>0){ if (cur->rt_prop>=(prev->rt_prop*2.0)){ /*propagation doubled since last report */ return TRUE; } } return FALSE; } static bool_t simple_rt_prop_increased(MSSimpleQosAnalyzer *obj){ rtpstats_t *cur=&obj->stats[obj->curindex % STATS_HISTORY]; rtpstats_t *prev=&obj->stats[(STATS_HISTORY+obj->curindex-1) % STATS_HISTORY]; if (rt_prop_doubled(cur,prev)){ obj->rt_prop_doubled=TRUE; return TRUE; } return FALSE; } static bool_t simple_analyzer_process_rtcp(MSQosAnalyzer *objbase, mblk_t *rtcp){ MSSimpleQosAnalyzer *obj=(MSSimpleQosAnalyzer*)objbase; rtpstats_t *cur; const report_block_t *rb=NULL; bool_t got_stats=FALSE; if (rtcp_is_SR(rtcp)){ rb=rtcp_SR_get_report_block(rtcp,0); }else if (rtcp_is_RR(rtcp)){ rb=rtcp_RR_get_report_block(rtcp,0); } if (rb && report_block_get_ssrc(rb)==rtp_session_get_send_ssrc(obj->session)){ obj->curindex++; cur=&obj->stats[obj->curindex % STATS_HISTORY]; if (obj->clockrate==0){ PayloadType *pt=rtp_profile_get_payload(rtp_session_get_send_profile(obj->session),rtp_session_get_send_payload_type(obj->session)); if (pt!=NULL) obj->clockrate=pt->clock_rate; else return FALSE; } if (ortp_loss_rate_estimator_process_report_block(objbase->lre,obj->session,rb)){ cur->lost_percentage=ortp_loss_rate_estimator_get_value(objbase->lre); cur->int_jitter=1000.0f*(float)report_block_get_interarrival_jitter(rb)/(float)obj->clockrate; cur->rt_prop=rtp_session_get_round_trip_propagation(obj->session); ms_message("MSSimpleQosAnalyzer: lost_percentage=%f, int_jitter=%f ms, rt_prop=%f sec", cur->lost_percentage,cur->int_jitter,cur->rt_prop); got_stats=TRUE; } } return got_stats; } static void simple_analyzer_suggest_action(MSQosAnalyzer *objbase, MSRateControlAction *action){ MSSimpleQosAnalyzer *obj=(MSSimpleQosAnalyzer*)objbase; rtpstats_t *cur=&obj->stats[obj->curindex % STATS_HISTORY]; /*big losses and big jitter */ if (cur->lost_percentage>=unacceptable_loss_rate && cur->int_jitter>=big_jitter){ action->type=MSRateControlActionDecreaseBitrate; action->value=(int)MIN(cur->lost_percentage,50); ms_message("MSSimpleQosAnalyzer: loss rate unacceptable and big jitter"); }else if (simple_rt_prop_increased(obj)){ action->type=MSRateControlActionDecreaseBitrate; action->value=20; ms_message("MSSimpleQosAnalyzer: rt_prop doubled."); }else if (cur->lost_percentage>=unacceptable_loss_rate){ /*big loss rate but no jitter, and no big rtp_prop: pure lossy network*/ action->type=MSRateControlActionDecreaseBitrate; action->value=(int)MIN(cur->lost_percentage,50); ms_message("MSSimpleQosAnalyzer: loss rate unacceptable."); }else{ action->type=MSRateControlActionDoNothing; ms_message("MSSimpleQosAnalyzer: everything is fine."); } if (objbase->on_action_suggested!=NULL){ int i; char *data[4]; int datac = sizeof(data) / sizeof(data[0]); data[0]=ms_strdup("%loss rt_prop_increased int_jitter_ms rt_prop_ms"); data[1]=ms_strdup_printf("%d %d %d %d" , (int)cur->lost_percentage , (simple_rt_prop_increased(obj)==TRUE) , (int)cur->int_jitter , (int)(1000*cur->rt_prop)); data[2]=ms_strdup("action_type action_value"); data[3]=ms_strdup_printf("%s %d" , 
ms_rate_control_action_type_name(action->type) , action->value); objbase->on_action_suggested(objbase->on_action_suggested_user_pointer, datac, (const char**)data); for (i=0;i<datac;++i){ ms_free(data[i]); } } } static bool_t simple_analyzer_has_improved(MSQosAnalyzer *objbase){ MSSimpleQosAnalyzer *obj=(MSSimpleQosAnalyzer*)objbase; rtpstats_t *cur=&obj->stats[obj->curindex % STATS_HISTORY]; rtpstats_t *prev=&obj->stats[(STATS_HISTORY+obj->curindex-1) % STATS_HISTORY]; if (prev->lost_percentage>=unacceptable_loss_rate){ if (cur->lost_percentage<prev->lost_percentage){ ms_message("MSSimpleQosAnalyzer: lost percentage has improved"); return TRUE; }else goto end; } if (obj->rt_prop_doubled && cur->rt_prop<prev->rt_prop){ ms_message("MSSimpleQosAnalyzer: rt prop decreased"); obj->rt_prop_doubled=FALSE; return TRUE; } end: ms_message("MSSimpleQosAnalyzer: no improvements."); return FALSE; } static MSQosAnalyzerDesc simple_analyzer_desc={ simple_analyzer_process_rtcp, simple_analyzer_suggest_action, simple_analyzer_has_improved, NULL, NULL }; MSQosAnalyzer * ms_simple_qos_analyzer_new(RtpSession *session){ MSSimpleQosAnalyzer *obj=ms_new0(MSSimpleQosAnalyzer,1); obj->session=session; obj->parent.desc=&simple_analyzer_desc; obj->parent.type=MSQosAnalyzerAlgorithmSimple; obj->parent.lre=ortp_loss_rate_estimator_new(LOSS_RATE_MIN_INTERVAL, LOSS_RATE_MIN_TIME, session); return (MSQosAnalyzer*)obj; } /******************************************************************************/ /***************************** Stateful QoS analyzer **************************/ /******************************************************************************/ static int earlier_than(rtcpstatspoint_t *p, const time_t * now){ if (p->timestamp < *now){ ms_free(p); return FALSE; } return TRUE; } static int sort_by_bandwidth(const rtcpstatspoint_t *p1, const rtcpstatspoint_t *p2){ return p1->bandwidth > p2->bandwidth; } static float stateful_qos_analyzer_upload_bandwidth(MSStatefulQosAnalyzer *obj, uint32_t seq_num){ int latest_bw; float bw_per_seqnum=0.f; float bw_per_avg=0.f; /*First method to compute bandwidth*/ if (obj->upload_bandwidth_count){ bw_per_avg=(float)(obj->upload_bandwidth_sum/obj->upload_bandwidth_count); } obj->upload_bandwidth_count=0; obj->upload_bandwidth_sum=0; for (latest_bw=0;latest_bw<BW_HISTORY;++latest_bw){ ms_debug("MSStatefulQosAnalyzer[%p]:\t%u\t-->\t%f", obj, obj->upload_bandwidth[latest_bw].seq_number, obj->upload_bandwidth[latest_bw].up_bandwidth); } if (obj->upload_bandwidth[(obj->upload_bandwidth_cur+1)%BW_HISTORY].seq_number>seq_num){ ms_warning("MSStatefulQosAnalyzer[%p]: saved too many points - seq_number lower " "than oldest measure! 
Increase BW_HISTORY or reduce ptime!", obj); }else{ int count = 0; latest_bw=obj->upload_bandwidth_cur; /*Get the average of all measures with seq number lower than the one from the report*/ for (latest_bw=0; latest_bw<BW_HISTORY; ++latest_bw){ if (obj->upload_bandwidth[latest_bw].seq_number>0 && obj->upload_bandwidth[latest_bw].seq_number<seq_num){ count++; bw_per_seqnum+=obj->upload_bandwidth[latest_bw].up_bandwidth; } } // invalid, no measures available if (count==0){ ms_error("MSStatefulQosAnalyzer[%p]: no measures available to compute bandwidth for ext_seq=%u", obj, seq_num); bw_per_seqnum = rtp_session_get_send_bandwidth(obj->session)/1000.0f; }else{ bw_per_seqnum /= count;//((BW_HISTORY + obj->upload_bandwidth_cur - latest_bw) % BW_HISTORY); ms_debug("MSStatefulQosAnalyzer[%p]: found average bandwidth for seq_num=%u", obj, seq_num); } } ms_message("MSStatefulQosAnalyzer[%p]: bw_current=%f vs bw_per_avg=%f vs bw_per_seqnum=%f" , obj , rtp_session_get_send_bandwidth(obj->session)/1000.0 , bw_per_avg , bw_per_seqnum); obj->upload_bandwidth_latest = bw_per_seqnum; return (float)obj->upload_bandwidth_latest; } static bool_t stateful_analyzer_process_rtcp(MSQosAnalyzer *objbase, mblk_t *rtcp){ MSStatefulQosAnalyzer *obj=(MSStatefulQosAnalyzer*)objbase; const report_block_t *rb=NULL; if (rtcp_is_SR(rtcp)){ rb=rtcp_SR_get_report_block(rtcp,0); }else if (rtcp_is_RR(rtcp)){ rb=rtcp_RR_get_report_block(rtcp,0); } if (rb && report_block_get_ssrc(rb)==rtp_session_get_send_ssrc(obj->session)){ if (ortp_loss_rate_estimator_process_report_block(objbase->lre,obj->session,rb)){ int i; float loss_rate = ortp_loss_rate_estimator_get_value(objbase->lre); float up_bw = stateful_qos_analyzer_upload_bandwidth(obj,report_block_get_high_ext_seq(rb)); obj->curindex++; /*flush bandwidth estimation measures for seq number lower than remote report block received*/ for (i=0;i<BW_HISTORY;i++){ if (obj->upload_bandwidth[i].seq_number<report_block_get_high_ext_seq(rb)){ obj->upload_bandwidth[i].seq_number=0; obj->upload_bandwidth[i].up_bandwidth=0.f; } } /* Always skip the first report, since values might be erroneous due to initialization of multiple objects (encoder/decoder/stats computing..) 
Instead assume loss rate is a good estimation of network capacity */ if (obj->curindex==1) { obj->network_loss_rate=loss_rate; return TRUE; } obj->latest=ms_new0(rtcpstatspoint_t, 1); obj->latest->timestamp=ms_time(0); obj->latest->bandwidth=up_bw; obj->latest->loss_percent=loss_rate; obj->latest->rtt=rtp_session_get_round_trip_propagation(obj->session); obj->rtcpstatspoint=bctbx_list_insert_sorted(obj->rtcpstatspoint, obj->latest, (bctbx_compare_func)sort_by_bandwidth); /*if the measure was 0% loss, reset every measure below it to 0%*/ if (obj->latest->loss_percent < 1e-5){ bctbx_list_t *it=obj->rtcpstatspoint; bctbx_list_t *latest_pos=bctbx_list_find(obj->rtcpstatspoint,obj->latest); while (it!=latest_pos->next){ ((rtcpstatspoint_t *)it->data)->loss_percent=0.f; it = it->next; } } ms_message("MSStatefulQosAnalyzer[%p]: one more %d: %f %f", obj, obj->curindex-1, obj->latest->bandwidth, obj->latest->loss_percent); if (bctbx_list_size(obj->rtcpstatspoint) > ESTIM_HISTORY){ size_t prev_size = bctbx_list_size(obj->rtcpstatspoint); /*clean everything which occurred 60 sec or more ago*/ time_t clear_time = ms_time(0) - 60; obj->rtcpstatspoint = bctbx_list_remove_custom(obj->rtcpstatspoint, (bctbx_compare_func)earlier_than, &clear_time); ms_message("MSStatefulQosAnalyzer[%p]: reached list maximum capacity " "(count=%u) --> Cleaned list (count=%u)", obj, (unsigned int)prev_size, (unsigned int)bctbx_list_size(obj->rtcpstatspoint)); } return TRUE; } } return FALSE; } static double lerp(double inf, double sup, double v){ return inf + (sup - inf) * v; } static bctbx_list_t *find_first_with_loss(bctbx_list_t *list){ for(;list!=NULL;list=list->next){ if (((rtcpstatspoint_t *)list->data)->loss_percent > 1e-5){ return list; } } return NULL; } static void smooth_values(MSStatefulQosAnalyzer *obj){ bctbx_list_t *first_loss = find_first_with_loss(obj->rtcpstatspoint); bctbx_list_t *it = obj->rtcpstatspoint; rtcpstatspoint_t *curr = (rtcpstatspoint_t *)it->data; double prev_loss = 0.; if (first_loss == obj->rtcpstatspoint){ prev_loss = curr->loss_percent; curr->loss_percent = lerp(curr->loss_percent, ((rtcpstatspoint_t *)it->next->data)->loss_percent, .25); it = it->next; }else{ it = first_loss; } /*nothing to smooth*/ if (it == NULL){ return; } curr = (rtcpstatspoint_t *)it->data; while (it->next != NULL){ rtcpstatspoint_t *prev = ((rtcpstatspoint_t *)it->prev->data); rtcpstatspoint_t *next = ((rtcpstatspoint_t *)it->next->data); double v = ((curr->bandwidth - prev->bandwidth) / (next->bandwidth - prev->bandwidth)); double new_loss = lerp(prev_loss, next->loss_percent, v); prev_loss = curr->loss_percent; curr->loss_percent = (curr->loss_percent + new_loss) / 2.; it = it->next; curr = (rtcpstatspoint_t *)it->data; } curr->loss_percent = lerp(prev_loss, curr->loss_percent, .75); } static double compute_available_bw(MSStatefulQosAnalyzer *obj){ bctbx_list_t *it; double constant_network_loss = 0.; double mean_bw = 0.; bctbx_list_t *current = obj->rtcpstatspoint; bctbx_list_t *last = current; size_t size = bctbx_list_size(obj->rtcpstatspoint); if (current == NULL){ ms_message("MSStatefulQosAnalyzer[%p]: no points available for estimation", obj); return -1; } while (last->next){ last = last->next; } if (size > 3){ smooth_values(obj); } /*suppose that first point is a reliable estimation of the constant network loss rate*/ constant_network_loss = ((rtcpstatspoint_t *)obj->rtcpstatspoint->data)->loss_percent; ms_message("MSStatefulQosAnalyzer[%p]:\tconstant_network_loss=%f", obj, constant_network_loss); #ifdef 
DEBUG for (it = obj->rtcpstatspoint; it != NULL; it=it->next){ rtcpstatspoint_t * point = (rtcpstatspoint_t *)it->data; (void)point; ms_message("MSStatefulQosAnalyzer[%p]:\t\tsorted values %d: %f %f", obj, bctbx_list_position(obj->rtcpstatspoint, it), point->bandwidth, point->loss_percent); } #endif if (size == 1){ rtcpstatspoint_t *p = (rtcpstatspoint_t *)current->data; ms_message("MSStatefulQosAnalyzer[%p]: one single point", obj); mean_bw = p->bandwidth * ((p->loss_percent>1e-5) ? (100-p->loss_percent)/100.f:2); }else{ while (current!=NULL && ((rtcpstatspoint_t*)current->data)->loss_percent<3+constant_network_loss){ ms_message("MSStatefulQosAnalyzer[%p]:\t%d is stable", obj, bctbx_list_position(obj->rtcpstatspoint, current)); /*find the last stable measure point, starting from highest bandwidth*/ for (it=last;it!=current;it=it->prev){ if (((rtcpstatspoint_t *)it->data)->loss_percent <= 3 + ((rtcpstatspoint_t*)current->data)->loss_percent){ ms_message("MSStatefulQosAnalyzer[%p]:\t%d is less than %d", obj, bctbx_list_position(obj->rtcpstatspoint, it), bctbx_list_position(obj->rtcpstatspoint, current)); current = it; break; } } /*current is the first unstable point, so taking the next one*/ current = current->next; } /*all points are below the constant loss rate threshold: there might be bad network conditions but no congestion*/ if (current == NULL){ mean_bw = 2 * ((rtcpstatspoint_t*)last->data)->bandwidth; /*only first packet is stable*/ }else if (current->prev == obj->rtcpstatspoint){ rtcpstatspoint_t *p = (rtcpstatspoint_t *)current->prev->data; mean_bw = p->bandwidth * (100 - p->loss_percent) / 100.; /*otherwise, there is a congestion detected starting at "current"*/ }else{ rtcpstatspoint_t *laststable = (rtcpstatspoint_t*)current->prev->data; rtcpstatspoint_t *firstunstable = (rtcpstatspoint_t*)current->data; mean_bw = .5*(laststable->bandwidth+firstunstable->bandwidth); } ms_message("MSStatefulQosAnalyzer[%p]: [0->%d] last stable is %d(%f;%f)" , obj , bctbx_list_position(obj->rtcpstatspoint, last) , bctbx_list_position(obj->rtcpstatspoint, (current ? current->prev : last)) , ((rtcpstatspoint_t*) (current ? current->prev->data : last->data))->bandwidth , ((rtcpstatspoint_t*) (current ? current->prev->data : last->data))->loss_percent); if (current!=NULL){ ms_message("MSStatefulQosAnalyzer[%p]: , first unstable is %d(%f;%f)" , obj , bctbx_list_position(obj->rtcpstatspoint, current) , ((rtcpstatspoint_t*) current->data)->bandwidth , ((rtcpstatspoint_t*) current->data)->loss_percent); } } ms_message("MSStatefulQosAnalyzer[%p]: --> estimated_available_bw=%f", obj, mean_bw); obj->network_loss_rate = constant_network_loss; obj->congestion_bandwidth = mean_bw; return mean_bw; } static void stateful_analyzer_suggest_action(MSQosAnalyzer *objbase, MSRateControlAction *action){ MSStatefulQosAnalyzer *obj=(MSStatefulQosAnalyzer*)objbase; double curbw = 0; double bw = 0; rtcpstatspoint_t* greatest_pt = NULL; /*if this is the first measure, there is not enough reliable data to use; we assume loss rate is due to non congestionned network. This is mainly useful in the case loss rate is high (>30%), to reduce quality even before the second RTCP report which can be really used. */ if (obj->curindex==1){ if (obj->network_loss_rate!=0.f){ action->type=MSRateControlActionDecreaseBitrate; action->value=(int)obj->network_loss_rate; } }else { curbw = obj->latest ? obj->latest->bandwidth : 0.; bw = compute_available_bw(obj); greatest_pt = bctbx_list_size(obj->rtcpstatspoint) ? 
(rtcpstatspoint_t*)bctbx_list_nth_data(obj->rtcpstatspoint, (int)bctbx_list_size(obj->rtcpstatspoint)-1) : NULL; /*try a burst every 50 seconds (10 RTCP packets)*/ if (obj->curindex % 10 == 6){ ms_message("MSStatefulQosAnalyzer[%p]: try burst!", obj); obj->burst_state = MSStatefulQosAnalyzerBurstEnable; } /*test a min burst to avoid overestimation of available bandwidth but only if there is some loss*/ else if (greatest_pt!=NULL && greatest_pt->loss_percent>1 && (obj->curindex % 10 == 2 || obj->curindex % 10 == 3)){ ms_message("MSStatefulQosAnalyzer[%p]: try minimal burst!", obj); bw *= .33; } /*no bandwidth estimation computed*/ if (bw <= 0 || curbw <= 0){ action->type=MSRateControlActionDoNothing; action->value=0; }else if (bw > curbw){ action->type=MSRateControlActionIncreaseQuality; action->value=MAX(0, (int)(100 * (bw / curbw - 1))); }else{ action->type=MSRateControlActionDecreaseBitrate; action->value=MAX(10, (int)(-100 * (bw / curbw - 1))); } } ms_message("MSStatefulQosAnalyzer[%p]: %s of value %d", obj, ms_rate_control_action_type_name(action->type), action->value); if (objbase->on_action_suggested!=NULL){ int i; char *data[4]; int datac = sizeof(data) / sizeof(data[0]); data[0]=ms_strdup("%loss rtt_ms cur_bw"); data[1]=ms_strdup_printf("%d %d %d" , obj->latest?(int)obj->latest->loss_percent:0 , obj->latest?(int)obj->latest->rtt:0 , obj->latest?(int)obj->latest->bandwidth:0 ); data[2]=ms_strdup("action_type action_value est_bw"); data[3]=ms_strdup_printf("%s %d %d" , ms_rate_control_action_type_name(action->type) , action->value , (int)bw ); objbase->on_action_suggested(objbase->on_action_suggested_user_pointer, datac, (const char**)data); for (i=0;i<datac;++i){ ms_free(data[i]); } } } static bool_t stateful_analyzer_has_improved(MSQosAnalyzer *objbase){ /*never tell the controller that situation has improved to avoid 'Stable' state which is not necessary for this analyzer*/ return FALSE; } static void stateful_analyzer_update(MSQosAnalyzer *objbase){ MSStatefulQosAnalyzer *obj=(MSStatefulQosAnalyzer*)objbase; static time_t last_measure; /* Every seconds, save the bandwidth used. This is needed to know how much bandwidth was used when receiving a receiver report. 
Since the report contains the "last sequence number", it allows us to precisely know which interval to consider */ if (last_measure != ms_time(0)){ obj->upload_bandwidth_count++; obj->upload_bandwidth_sum+=rtp_session_get_send_bandwidth(obj->session)/1000.0; /* Save bandwidth used at this time */ obj->upload_bandwidth[obj->upload_bandwidth_cur].seq_number = rtp_session_get_seq_number(obj->session); obj->upload_bandwidth[obj->upload_bandwidth_cur].up_bandwidth = rtp_session_get_send_bandwidth(obj->session)/1000.0f; obj->upload_bandwidth_cur = (obj->upload_bandwidth_cur+1)%BW_HISTORY; } last_measure = ms_time(0); if (obj->burst_duration_ms>0){ switch (obj->burst_state){ case MSStatefulQosAnalyzerBurstEnable:{ obj->burst_state=MSStatefulQosAnalyzerBurstInProgress; ortp_gettimeofday(&obj->start_time, NULL); rtp_session_set_duplication_ratio(obj->session, (float)obj->burst_ratio); BCTBX_NO_BREAK; /*intentionally no break*/ } case MSStatefulQosAnalyzerBurstInProgress: { struct timeval now; float elapsed; ortp_gettimeofday(&now,NULL); elapsed=((now.tv_sec-obj->start_time.tv_sec)*1000.0f) + ((now.tv_usec-obj->start_time.tv_usec)/1000.0f); if (elapsed > obj->burst_duration_ms){ obj->burst_state=MSStatefulQosAnalyzerBurstDisable; rtp_session_set_duplication_ratio(obj->session, 0); } BCTBX_NO_BREAK; /*intentionally no break*/ } case MSStatefulQosAnalyzerBurstDisable: { } } } } static void stateful_analyzer_uninit(MSQosAnalyzer *objbase){ MSStatefulQosAnalyzer *obj=(MSStatefulQosAnalyzer*)objbase; bctbx_list_for_each(obj->rtcpstatspoint, ms_free); bctbx_list_free(obj->rtcpstatspoint); } static MSQosAnalyzerDesc stateful_analyzer_desc={ stateful_analyzer_process_rtcp, stateful_analyzer_suggest_action, stateful_analyzer_has_improved, stateful_analyzer_update, stateful_analyzer_uninit, }; MSQosAnalyzer * ms_stateful_qos_analyzer_new(RtpSession *session){ MSStatefulQosAnalyzer *obj=ms_new0(MSStatefulQosAnalyzer,1); obj->session=session; obj->parent.desc=&stateful_analyzer_desc; obj->parent.type=MSQosAnalyzerAlgorithmStateful; obj->parent.lre=ortp_loss_rate_estimator_new(LOSS_RATE_MIN_INTERVAL, LOSS_RATE_MIN_TIME, session); /*burst period will float the upload bandwidth assuming 5 sec RTCP reports interval*/ obj->burst_duration_ms=1000; obj->burst_ratio=9; return (MSQosAnalyzer*)obj; }
utf-8
1
GPL-2+
2006-2010 Belledonne Communications, Grenoble, France
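The loss-smoothing pass in smooth_values() above is easier to follow in isolation. The sketch below is a minimal, self-contained C++ reconstruction of its inner loop, not the library code: a plain std::vector of a hypothetical Point struct stands in for the bctbx_list_t of rtcpstatspoint_t, and the special-casing of the first lossy point and of the list tail is omitted. Each interior measurement's loss percentage is averaged with a value interpolated between its neighbours, positioned by bandwidth.

#include <cstdio>
#include <vector>

struct Point { double bandwidth; double loss_percent; };  // hypothetical stand-in

static double lerp(double inf, double sup, double v) {
    return inf + (sup - inf) * v;
}

/* Smooth loss values of bandwidth-sorted points, mirroring the averaging
 * done by smooth_values() above (simplified: no first-loss search, no tail
 * handling). prev_loss always holds the *pre-smoothing* loss of the previous
 * point, exactly as in the original. */
static void smooth(std::vector<Point>& pts) {
    if (pts.size() < 3) return;
    double prev_loss = pts[0].loss_percent;
    for (size_t i = 1; i + 1 < pts.size(); ++i) {
        const Point& prev = pts[i - 1];
        const Point& next = pts[i + 1];
        /* position of pts[i] between its neighbours, by bandwidth */
        double v = (pts[i].bandwidth - prev.bandwidth)
                 / (next.bandwidth - prev.bandwidth);
        double new_loss = lerp(prev_loss, next.loss_percent, v);
        prev_loss = pts[i].loss_percent;
        pts[i].loss_percent = (pts[i].loss_percent + new_loss) / 2.0;
    }
}

int main() {
    std::vector<Point> pts = {
        {100, 0.0}, {200, 8.0}, {300, 2.0}, {400, 12.0}
    };
    smooth(pts);
    for (const Point& p : pts)
        std::printf("%.0f kbit/s -> %.2f%% loss\n", p.bandwidth, p.loss_percent);
}

Running it shows how a single noisy loss spike (the 8% reading at 200 kbit/s) is pulled toward its neighbours, which is what makes the later "first unstable point" search in compute_available_bw() less sensitive to one-off RTCP outliers.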
openms-2.6.0+cleaned1/src/openms/source/TRANSFORMATIONS/FEATUREFINDER/FeatureFinderAlgorithmPickedHelperStructs.cpp
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2020. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg$ // $Authors: Stephan Aiche $ // -------------------------------------------------------------------------- #include <OpenMS/KERNEL/Peak1D.h> #include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/FeatureFinderAlgorithmPickedHelperStructs.h> namespace OpenMS { FeatureFinderAlgorithmPickedHelperStructs::IsotopePattern::IsotopePattern(Size size) : peak(size, -1), spectrum(size), intensity(size), mz_score(size), theoretical_mz(size) { } Size FeatureFinderAlgorithmPickedHelperStructs::TheoreticalIsotopePattern::size() const { return intensity.size(); } bool FeatureFinderAlgorithmPickedHelperStructs::Seed::operator<(const Seed& rhs) const { return intensity < rhs.intensity; } ConvexHull2D FeatureFinderAlgorithmPickedHelperStructs::MassTrace::getConvexhull() const { ConvexHull2D::PointArrayType hull_points(peaks.size()); for (Size i = 0; i < peaks.size(); ++i) { hull_points[i][0] = peaks[i].first; hull_points[i][1] = peaks[i].second->getMZ(); } ConvexHull2D hull; hull.addPoints(hull_points); return hull; } void FeatureFinderAlgorithmPickedHelperStructs::MassTrace::updateMaximum() { if (peaks.empty()) return; max_rt = peaks.begin()->first; max_peak = peaks.begin()->second; for (Size i = 1; i < peaks.size(); ++i) { if (peaks[i].second->getIntensity() > max_peak->getIntensity()) { max_rt = peaks[i].first; max_peak = peaks[i].second; } } } double FeatureFinderAlgorithmPickedHelperStructs::MassTrace::getAvgMZ() const { double sum = 0.0; double intensities = 0.0; for (Size i = 0; i < peaks.size(); ++i) { sum += peaks[i].second->getMZ() * peaks[i].second->getIntensity(); intensities += peaks[i].second->getIntensity(); } return sum / intensities; } bool FeatureFinderAlgorithmPickedHelperStructs::MassTrace::isValid() const { return peaks.size() >= 3; } FeatureFinderAlgorithmPickedHelperStructs::MassTraces::MassTraces() : max_trace(0) { } Size FeatureFinderAlgorithmPickedHelperStructs::MassTraces::getPeakCount() const { Size sum = 0; for (Size i = 0; i < this->size(); ++i) { sum += this->at(i).peaks.size(); } return sum; } bool FeatureFinderAlgorithmPickedHelperStructs::MassTraces::isValid(double seed_mz, double trace_tolerance) { //Abort if too few traces were found if (this->size() < 2) return false; //Abort if the seed was removed for (Size j = 0; j < this->size(); ++j) { if (std::fabs(seed_mz - this->at(j).getAvgMZ()) <= trace_tolerance) { return true; } } return false; } Size FeatureFinderAlgorithmPickedHelperStructs::MassTraces::getTheoreticalmaxPosition() const { if (!this->size()) { throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "There must be at least one trace to determine the theoretical maximum trace!"); } Size max = 0; double max_int = this->at(0).theoretical_int; for (Size i = 1; i < this->size(); ++i) { if (this->at(i).theoretical_int > max_int) { max_int = this->at(i).theoretical_int; max = i; } } return max; } void FeatureFinderAlgorithmPickedHelperStructs::MassTraces::updateBaseline() { if (this->size() == 0) { baseline = 0.0; return; } bool first = true; for (Size i = 0; i < this->size(); ++i) { for (Size j = 0; j < this->at(i).peaks.size(); ++j) { if (first) { baseline = this->at(i).peaks[j].second->getIntensity(); first = false; } if (this->at(i).peaks[j].second->getIntensity() < baseline) { baseline = this->at(i).peaks[j].second->getIntensity(); } } } } std::pair<double, double> FeatureFinderAlgorithmPickedHelperStructs::MassTraces::getRTBounds() const 
{ if (!this->size()) { throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "There must be at least one trace to determine the RT boundaries!"); } double min = std::numeric_limits<double>::max(); double max = -std::numeric_limits<double>::max(); //Abort if the seed was removed for (Size i = 0; i < this->size(); ++i) { for (Size j = 0; j < this->at(i).peaks.size(); ++j) { double rt = this->at(i).peaks[j].first; if (rt > max) max = rt; if (rt < min) min = rt; } } return std::make_pair(min, max); } void FeatureFinderAlgorithmPickedHelperStructs::MassTraces::computeIntensityProfile(std::list<std::pair<double, double> >& intensity_profile) const { // typedefs for better readability typedef MassTraces::const_iterator TTraceIterator; typedef std::list<std::pair<double, double> >::iterator TProfileIterator; typedef std::vector<std::pair<double, const Peak1D*> > TMassTracePeakList; typedef TMassTracePeakList::const_iterator TTracePeakIterator; TTraceIterator trace_it = this->begin(); // we add the first trace without check, as the profile is currently empty for (TTracePeakIterator trace_peak_it = trace_it->peaks.begin(); trace_peak_it != trace_it->peaks.end(); ++trace_peak_it) { intensity_profile.push_back(std::make_pair(trace_peak_it->first, trace_peak_it->second->getIntensity())); } ++trace_it; // accumulate intensities over all the remaining mass traces for (; trace_it != this->end(); ++trace_it) { TProfileIterator profile_it = intensity_profile.begin(); TTracePeakIterator trace_peak_it = trace_it->peaks.begin(); while (trace_peak_it != trace_it->peaks.end()) { // append .. if profile has already ended if (profile_it == intensity_profile.end()) { intensity_profile.push_back(std::make_pair(trace_peak_it->first, trace_peak_it->second->getIntensity())); ++trace_peak_it; } // prepend else if (profile_it->first > trace_peak_it->first) { intensity_profile.insert(profile_it, std::make_pair(trace_peak_it->first, trace_peak_it->second->getIntensity())); ++trace_peak_it; } // proceed else if (profile_it->first < trace_peak_it->first) { ++profile_it; } // merge else if (profile_it->first == trace_peak_it->first) { profile_it->second += trace_peak_it->second->getIntensity(); ++trace_peak_it; ++profile_it; } } } } }
utf-8
1
BSD-3-Clause
2002-2020 The OpenMS Team -- Eberhard Karls University Tuebingen 2002-2020 The OpenMS Team -- ETH Zurich 2002-2020 The OpenMS Team -- Freie Universitaet Berlin
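The intensity-profile accumulation in computeIntensityProfile() above is a sorted-list merge: each mass trace's (RT, intensity) pairs are folded into a running profile by prepending, appending, or summing on equal RT. A minimal sketch of that merge on standard containers (hypothetical names, not the OpenMS types):

#include <cstdio>
#include <list>
#include <utility>
#include <vector>

using Profile = std::list<std::pair<double, double>>;  // (RT, intensity), RT-sorted

// Accumulate one RT-sorted trace into an RT-sorted profile, mirroring the
// four cases of computeIntensityProfile() above: append past the end,
// insert before a later RT, advance past an earlier RT, or merge on equal RT.
static void accumulate(Profile& profile,
                       const std::vector<std::pair<double, double>>& trace) {
    auto p = profile.begin();
    auto t = trace.begin();
    while (t != trace.end()) {
        if (p == profile.end()) {            // profile has ended: append
            profile.push_back(*t); ++t;
        } else if (p->first > t->first) {    // trace point comes first: insert
            profile.insert(p, *t); ++t;
        } else if (p->first < t->first) {    // profile point comes first: proceed
            ++p;
        } else {                             // equal RT: sum intensities
            p->second += t->second; ++t; ++p;
        }
    }
}

int main() {
    Profile profile = {{1.0, 10.0}, {2.0, 20.0}};
    accumulate(profile, {{0.5, 5.0}, {2.0, 7.0}, {3.0, 1.0}});
    for (const auto& pt : profile)
        std::printf("RT=%.1f intensity=%.1f\n", pt.first, pt.second);
}

Note that, like the original, this relies on both inputs already being sorted by RT; it never re-sorts.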
qtwebengine-opensource-src-5.15.8+dfsg/src/3rdparty/chromium/third_party/ots/src/gasp.cc
// Copyright (c) 2009-2017 The OTS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "gasp.h" // gasp - Grid-fitting And Scan-conversion Procedure // http://www.microsoft.com/typography/otspec/gasp.htm namespace ots { bool OpenTypeGASP::Parse(const uint8_t *data, size_t length) { Buffer table(data, length); uint16_t num_ranges = 0; if (!table.ReadU16(&this->version) || !table.ReadU16(&num_ranges)) { return Error("Failed to read table header"); } if (this->version > 1) { // Lots of Linux fonts have bad version numbers... return Drop("Unsupported version: %u", this->version); } if (num_ranges == 0) { return Drop("numRanges is zero"); } this->gasp_ranges.reserve(num_ranges); for (unsigned i = 0; i < num_ranges; ++i) { uint16_t max_ppem = 0; uint16_t behavior = 0; if (!table.ReadU16(&max_ppem) || !table.ReadU16(&behavior)) { return Error("Failed to read GASPRANGE %d", i); } if ((i > 0) && (this->gasp_ranges[i - 1].first >= max_ppem)) { // The records in the gaspRange[] array must be sorted in order of // increasing rangeMaxPPEM value. return Drop("Ranges are not sorted"); } if ((i == num_ranges - 1u) && // never underflow. (max_ppem != 0xffffu)) { return Drop("The last record should be 0xFFFF as a sentinel value " "for rangeMaxPPEM"); } if (behavior >> 8) { Warning("Undefined bits are used: %x", behavior); // mask undefined bits. behavior &= 0x000fu; } if (this->version == 0 && (behavior >> 2) != 0) { Warning("Changed the version number to 1"); this->version = 1; } this->gasp_ranges.push_back(std::make_pair(max_ppem, behavior)); } return true; } bool OpenTypeGASP::Serialize(OTSStream *out) { const uint16_t num_ranges = static_cast<uint16_t>(this->gasp_ranges.size()); if (num_ranges != this->gasp_ranges.size() || !out->WriteU16(this->version) || !out->WriteU16(num_ranges)) { return Error("Failed to write table header"); } for (uint16_t i = 0; i < num_ranges; ++i) { if (!out->WriteU16(this->gasp_ranges[i].first) || !out->WriteU16(this->gasp_ranges[i].second)) { return Error("Failed to write GASPRANGE %d", i); } } return true; } } // namespace ots
utf-8
1
LGPL-3 or GPL-2
2006-2021 The Chromium Authors 2016-2021 The Qt Company Ltd.
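For reference, a well-formed version-1 'gasp' table is just big-endian uint16 fields: version, numRanges, then (rangeMaxPPEM, rangeGaspBehavior) pairs sorted by increasing rangeMaxPPEM and terminated by a 0xFFFF sentinel record. The sketch below builds such a table by hand and re-applies the ordering and sentinel checks that Parse() enforces. It is independent of the OTS Buffer/OTSStream classes, and the behaviour flag values used (0x0001 gridfit, 0x0002 grayscale, 0x0004 symmetric gridfit, 0x0008 symmetric smoothing) are taken from the OpenType specification, not from this source file.

#include <cstdint>
#include <cstdio>
#include <vector>

static void put_u16(std::vector<uint8_t>& v, uint16_t x) {
    v.push_back(static_cast<uint8_t>(x >> 8));    // big-endian, as in OpenType
    v.push_back(static_cast<uint8_t>(x & 0xff));
}

static uint16_t get_u16(const uint8_t* p) {
    return static_cast<uint16_t>((p[0] << 8) | p[1]);
}

int main() {
    // version 1, two ranges: grayscale + symmetric smoothing up to 8 ppem,
    // everything enabled for all larger sizes (the 0xFFFF sentinel record).
    std::vector<uint8_t> gasp;
    put_u16(gasp, 1);                              // version
    put_u16(gasp, 2);                              // numRanges
    put_u16(gasp, 8);      put_u16(gasp, 0x000A);  // <=8 ppem: 0x0002|0x0008
    put_u16(gasp, 0xFFFF); put_u16(gasp, 0x000F);  // sentinel: all behaviours

    // The same structural checks Parse() above applies:
    // strictly increasing rangeMaxPPEM, last record must be 0xFFFF.
    uint16_t num = get_u16(&gasp[2]);
    uint16_t prev = 0;
    bool ok = true;
    for (uint16_t i = 0; i < num; ++i) {
        uint16_t max_ppem = get_u16(&gasp[4 + 4 * i]);
        if (i > 0 && prev >= max_ppem) ok = false;          // not sorted
        if (i == num - 1 && max_ppem != 0xFFFF) ok = false; // missing sentinel
        prev = max_ppem;
    }
    std::printf("table %s\n", ok ? "valid" : "invalid");
}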
cegui-mk2-0.8.7/samples/InventoryDemo/InventoryItem.cpp
/*********************************************************************** created: Fri Apr 22 2011 author: Paul D Turner <paul@cegui.org.uk> *************************************************************************/ /*************************************************************************** * Copyright (C) 2004 - 2011 Paul D Turner & The CEGUI Development Team * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "InventoryItem.h" #include "InventoryReceiver.h" #include <CEGUI/PropertyHelper.h> #include <CEGUI/Image.h> // Start of CEGUI namespace section namespace CEGUI { //------------------------------------------------------------------------------// const String InventoryItem::WidgetTypeName("InventoryItem"); const String InventoryItem::EventNamespace("InventoryItem"); //------------------------------------------------------------------------------// InventoryItem::InventoryItem(const String& type, const String& name) : DragContainer(type, name), d_validDropTarget(false), d_receiverLocationX(-1), d_receiverLocationY(-1) { } //------------------------------------------------------------------------------// void InventoryItem::setContentSize(int width, int height) { InventoryBase::setContentSize(width, height); d_content.clear(true); } //------------------------------------------------------------------------------// bool InventoryItem::isSolidAtLocation(int x, int y) const { return d_content.elementAtLocation(x, y); } //------------------------------------------------------------------------------// void InventoryItem::setItemLayout(const bool* layout) { for (int y = 0; y < d_content.height(); ++y) for (int x = 0; x < d_content.width(); ++x) d_content.setElementAtLocation(x, y, *layout++); } //------------------------------------------------------------------------------// int InventoryItem::locationOnReceiverX() const { return d_receiverLocationX; } //------------------------------------------------------------------------------// int InventoryItem::locationOnReceiverY() const { return d_receiverLocationY; } //------------------------------------------------------------------------------// void InventoryItem::setLocationOnReceiver(int x, int y) { d_receiverLocationX = x; d_receiverLocationY = y; } //------------------------------------------------------------------------------// bool InventoryItem::isHit(const Vector2f& position, const bool allow_disabled) const { if (!DragContainer::isHit(position, allow_disabled)) return false; int gx 
= gridXLocationFromPixelPosition(position.d_x); int gy = gridYLocationFromPixelPosition(position.d_y); if (gx < 0 || gx >= d_content.width() || gy < 0 || gy >= d_content.height()) return false; return d_content.elementAtLocation(gx, gy); } //------------------------------------------------------------------------------// bool InventoryItem::currentDropTargetIsValid() const { return d_validDropTarget; } //------------------------------------------------------------------------------// void InventoryItem::populateGeometryBuffer() { if (!isUserStringDefined("BlockImage")) return; const Image* img = PropertyHelper<Image*>::fromString(getUserString("BlockImage")); if (!img) return; const Sizef square_size(squarePixelSize()); argb_t colour = 0xFF00FF00; if (d_dragging && !currentDropTargetIsValid()) colour = 0xFFFF0000; for (int y = 0; y < d_content.height(); ++y) { for (int x = 0; x < d_content.width(); ++x) { if (d_content.elementAtLocation(x, y)) img->render(*d_geometry, Vector2f(x * square_size.d_width + 1, y * square_size.d_height + 1), Sizef(square_size.d_width - 2, square_size.d_height - 2), 0, ColourRect(colour)); } } } //------------------------------------------------------------------------------// Rectf InventoryItem::gridBasePixelRect() const { return getUnclippedOuterRect().get(); } //------------------------------------------------------------------------------// void InventoryItem::onMoved(ElementEventArgs& e) { invalidate(); DragContainer::onMoved(e); InventoryReceiver* receiver = dynamic_cast<InventoryReceiver*>(d_dropTarget); if (receiver) { const Sizef square_size(receiver->squarePixelSize()); Rectf area(getUnclippedOuterRect().get()); area.offset(Vector2f(square_size.d_width / 2, square_size.d_height / 2)); const int x = receiver->gridXLocationFromPixelPosition(area.left()); const int y = receiver->gridYLocationFromPixelPosition(area.top()); d_validDropTarget = receiver->itemWillFitAtLocation(*this, x, y); return; } d_validDropTarget = false; } //------------------------------------------------------------------------------// void InventoryItem::onDragDropTargetChanged(DragDropEventArgs& e) { DragContainer::onDragDropTargetChanged(e); d_validDropTarget = (dynamic_cast<InventoryReceiver*>(d_dropTarget) != 0); invalidate(); } //------------------------------------------------------------------------------// } // End of CEGUI namespace section
utf-8
1
MIT
2004 - 2016 Paul D Turner & The CEGUI Development Team
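InventoryItem::isHit() above boils down to converting a pixel position into a grid cell and looking that cell up in the item's solidity mask, so that clicks on hollow squares of an L-shaped or T-shaped item fall through. A stand-alone sketch of that conversion (hypothetical Grid type and helper, not the CEGUI API; CEGUI's gridXLocationFromPixelPosition/gridYLocationFromPixelPosition perform the equivalent division internally):

#include <cstdio>
#include <vector>

// Simplified 2D solidity grid, standing in for the item's d_content array.
struct Grid {
    int w, h;
    std::vector<bool> solid;                      // row-major, true = solid
    bool at(int x, int y) const { return solid[y * w + x]; }
};

// Map a pixel position inside the item's area to a grid cell and test it,
// mirroring InventoryItem::isHit() above: out-of-range cells and hollow
// cells are both misses.
static bool hitSolidCell(const Grid& g, float px, float py,
                         float areaLeft, float areaTop, float squareSize) {
    int gx = static_cast<int>((px - areaLeft) / squareSize);
    int gy = static_cast<int>((py - areaTop) / squareSize);
    if (gx < 0 || gx >= g.w || gy < 0 || gy >= g.h)
        return false;
    return g.at(gx, gy);
}

int main() {
    // 2x2 item shaped like an L: top-left and bottom row solid.
    Grid g{2, 2, {true, false, true, true}};
    std::printf("%d\n", hitSolidCell(g, 45.0f, 15.0f, 0.0f, 0.0f, 32.0f)); // (1,0) hollow -> 0
    std::printf("%d\n", hitSolidCell(g, 45.0f, 40.0f, 0.0f, 0.0f, 32.0f)); // (1,1) solid  -> 1
}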
opencv-4.5.4+dfsg/contrib/modules/ximgproc/samples/slic.cpp
#include <opencv2/imgproc.hpp> #include <opencv2/highgui.hpp> #include <opencv2/imgcodecs.hpp> #include <opencv2/core/utility.hpp> #include <opencv2/ximgproc.hpp> #include <ctype.h> #include <stdio.h> #include <iostream> using namespace cv; using namespace cv::ximgproc; using namespace std; static const char* window_name = "SLIC Superpixels"; static const char* keys = "{h help | | help menu}" "{c camera |0| camera id}" "{i image | | image file}" "{a algorithm |1| SLIC(0),SLICO(1),MSLIC(2)}" ; int main(int argc, char** argv) { CommandLineParser cmd(argc,argv,keys); if (cmd.has("help")) { cmd.about("This program demonstrates SLIC superpixels using OpenCV class SuperpixelSLIC.\n" "If no image file is supplied, try to open a webcam.\n" "Use [space] to toggle output mode, ['q' or 'Q' or 'esc'] to exit.\n"); cmd.printMessage(); return 0; } int capture = cmd.get<int>("camera"); String img_file = cmd.get<String>("image"); int algorithm = cmd.get<int>("algorithm"); int region_size = 50; int ruler = 30; int min_element_size = 50; int num_iterations = 3; bool use_video_capture = img_file.empty(); VideoCapture cap; Mat input_image; if( use_video_capture ) { if( !cap.open(capture) ) { cout << "Could not initialize capturing..."<<capture<<"\n"; return -1; } } else { input_image = imread(img_file); if( input_image.empty() ) { cout << "Could not open image..."<<img_file<<"\n"; return -1; } } namedWindow(window_name, 0); createTrackbar("Algorithm", window_name, &algorithm, 2, 0); createTrackbar("Region size", window_name, &region_size, 200, 0); createTrackbar("Ruler", window_name, &ruler, 100, 0); createTrackbar("Connectivity", window_name, &min_element_size, 100, 0); createTrackbar("Iterations", window_name, &num_iterations, 12, 0); Mat result, mask; int display_mode = 0; for (;;) { Mat frame; if( use_video_capture ) cap >> frame; else input_image.copyTo(frame); if( frame.empty() ) break; result = frame; Mat converted; cvtColor(frame, converted, COLOR_BGR2HSV); double t = (double) getTickCount(); Ptr<SuperpixelSLIC> slic = createSuperpixelSLIC(converted,algorithm+SLIC,region_size,float(ruler)); slic->iterate(num_iterations); if (min_element_size>0) slic->enforceLabelConnectivity(min_element_size); t = ((double) getTickCount() - t) / getTickFrequency(); cout << "SLIC" << (algorithm?'O':' ') << " segmentation took " << (int) (t * 1000) << " ms with " << slic->getNumberOfSuperpixels() << " superpixels" << endl; // get the contours for displaying slic->getLabelContourMask(mask, true); result.setTo(Scalar(0, 0, 255), mask); // display output switch (display_mode) { case 0: //superpixel contours imshow(window_name, result); break; case 1: //mask imshow(window_name, mask); break; case 2: //labels array { // use the last x bit to determine the color. Note that this does not // guarantee that 2 neighboring superpixels have different colors. // retrieve the segmentation result Mat labels; slic->getLabels(labels); const int num_label_bits = 2; labels &= (1 << num_label_bits) - 1; labels *= 1 << (16 - num_label_bits); imshow(window_name, labels); break; } } int c = waitKey(1) & 0xff; if( c == 'q' || c == 'Q' || c == 27 ) break; else if( c == ' ' ) display_mode = (display_mode + 1) % 3; } return 0; }
utf-8
1
Apache-2.0 AND BSD-3-Clause
2000-2020, Intel Corporation, all rights reserved. 2009-2011, Willow Garage Inc., all rights reserved. 2009-2016, NVIDIA Corporation, all rights reserved. 2010-2013, Advanced Micro Devices, Inc., all rights reserved. 2015-2020, OpenCV Foundation, all rights reserved. 2008-2016, Itseez Inc., all rights reserved. 2019-2020, Xperience AI, all rights reserved. 2019-2020, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. Respective opencv contributors.
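Stripped of the camera loop, trackbars, and display modes, the core SLIC call sequence in the sample above is only a handful of lines. A minimal batch variant, using the same parameter values as the sample's defaults (the output filename is illustrative):

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/ximgproc.hpp>
#include <iostream>

int main(int argc, char** argv) {
    if (argc < 2) { std::cout << "usage: slic_min <image>\n"; return 1; }
    cv::Mat bgr = cv::imread(argv[1]);
    if (bgr.empty()) { std::cout << "could not open " << argv[1] << "\n"; return 1; }

    cv::Mat hsv;
    cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);   // segmentation runs on HSV, as above

    auto slic = cv::ximgproc::createSuperpixelSLIC(
        hsv, cv::ximgproc::SLICO, /*region_size=*/50, /*ruler=*/30.0f);
    slic->iterate(3);                            // 3 iterations, as in the sample
    slic->enforceLabelConnectivity(50);          // merge fragments below min size

    cv::Mat mask;
    slic->getLabelContourMask(mask, true);       // thick superpixel boundaries
    bgr.setTo(cv::Scalar(0, 0, 255), mask);      // paint boundaries red

    std::cout << slic->getNumberOfSuperpixels() << " superpixels\n";
    cv::imwrite("slic_out.png", bgr);            // illustrative output name
    return 0;
}

Swapping cv::ximgproc::SLICO for SLIC or MSLIC reproduces the sample's algorithm trackbar; SLICO adapts the compactness per superpixel, so the ruler value matters less there.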
qgis-3.16.16+dfsg/src/core/layertree/qgslayertreemodel.cpp
/*************************************************************************** qgslayertreemodel.cpp -------------------------------------- Date : May 2014 Copyright : (C) 2014 by Martin Dobias Email : wonder dot sk at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include <QMimeData> #include <QTextStream> #include "qgslayertreemodel.h" #include "qgsapplication.h" #include "qgslayertree.h" #include "qgslayertreeutils.h" #include "qgslayertreemodellegendnode.h" #include "qgsproject.h" #include "qgsdataitem.h" #include "qgsmaphittest.h" #include "qgsmaplayer.h" #include "qgsmaplayerlegend.h" #include "qgsmaplayerstylemanager.h" #include "qgspluginlayer.h" #include "qgsrasterlayer.h" #include "qgsrenderer.h" #include "qgssymbollayerutils.h" #include "qgsvectorlayer.h" #include "qgslayerdefinition.h" QgsLayerTreeModel::QgsLayerTreeModel( QgsLayerTree *rootNode, QObject *parent ) : QAbstractItemModel( parent ) , mRootNode( rootNode ) , mFlags( ShowLegend | AllowLegendChangeState | DeferredLegendInvalidation ) , mAutoCollapseLegendNodesCount( -1 ) , mLegendFilterByScale( 0 ) , mLegendFilterUsesExtent( false ) , mLegendMapViewMupp( 0 ) , mLegendMapViewDpi( 0 ) , mLegendMapViewScale( 0 ) { connectToRootNode(); mFontLayer.setBold( true ); connect( &mDeferLegendInvalidationTimer, &QTimer::timeout, this, &QgsLayerTreeModel::invalidateLegendMapBasedData ); mDeferLegendInvalidationTimer.setSingleShot( true ); } QgsLayerTreeModel::~QgsLayerTreeModel() { legendCleanup(); } QgsLayerTreeNode *QgsLayerTreeModel::index2node( const QModelIndex &index ) const { if ( !index.isValid() ) return mRootNode; QObject *obj = reinterpret_cast<QObject *>( index.internalPointer() ); return qobject_cast<QgsLayerTreeNode *>( obj ); } int QgsLayerTreeModel::rowCount( const QModelIndex &parent ) const { if ( QgsLayerTreeModelLegendNode *nodeLegend = index2legendNode( parent ) ) return legendNodeRowCount( nodeLegend ); QgsLayerTreeNode *n = index2node( parent ); if ( !n ) return 0; if ( QgsLayerTree::isLayer( n ) ) { if ( !testFlag( ShowLegend ) ) return 0; return legendRootRowCount( QgsLayerTree::toLayer( n ) ); } return n->children().count(); } int QgsLayerTreeModel::columnCount( const QModelIndex &parent ) const { Q_UNUSED( parent ) return 1; } QModelIndex QgsLayerTreeModel::index( int row, int column, const QModelIndex &parent ) const { if ( column < 0 || column >= columnCount( parent ) || row < 0 || row >= rowCount( parent ) ) return QModelIndex(); if ( QgsLayerTreeModelLegendNode *nodeLegend = index2legendNode( parent ) ) return legendNodeIndex( row, column, nodeLegend ); QgsLayerTreeNode *n = index2node( parent ); if ( !n ) return QModelIndex(); // have no children if ( testFlag( ShowLegend ) && QgsLayerTree::isLayer( n ) ) { return legendRootIndex( row, column, QgsLayerTree::toLayer( n ) ); } return createIndex( row, column, static_cast<QObject *>( n->children().at( row ) ) ); } QModelIndex QgsLayerTreeModel::parent( const QModelIndex &child ) const { if ( !child.isValid() ) return QModelIndex(); if ( QgsLayerTreeNode *n = index2node( child ) ) { return indexOfParentLayerTreeNode( n->parent() ); // must not be null } else if ( 
QgsLayerTreeModelLegendNode *legendNode = index2legendNode( child ) ) { return legendParent( legendNode ); } else { Q_ASSERT( false ); // no other node types! return QModelIndex(); } } QModelIndex QgsLayerTreeModel::indexOfParentLayerTreeNode( QgsLayerTreeNode *parentNode ) const { Q_ASSERT( parentNode ); QgsLayerTreeNode *grandParentNode = parentNode->parent(); if ( !grandParentNode ) return QModelIndex(); // root node -> invalid index int row = grandParentNode->children().indexOf( parentNode ); Q_ASSERT( row >= 0 ); return createIndex( row, 0, static_cast<QObject *>( parentNode ) ); } QVariant QgsLayerTreeModel::data( const QModelIndex &index, int role ) const { if ( !index.isValid() || index.column() > 1 ) return QVariant(); if ( QgsLayerTreeModelLegendNode *sym = index2legendNode( index ) ) return legendNodeData( sym, role ); QgsLayerTreeNode *node = index2node( index ); if ( role == Qt::DisplayRole || role == Qt::EditRole ) { if ( QgsLayerTree::isGroup( node ) ) return QgsLayerTree::toGroup( node )->name(); if ( QgsLayerTree::isLayer( node ) ) { QgsLayerTreeLayer *nodeLayer = QgsLayerTree::toLayer( node ); QString name = nodeLayer->name(); if ( nodeLayer->customProperty( QStringLiteral( "showFeatureCount" ), 0 ).toInt() && role == Qt::DisplayRole ) { QgsVectorLayer *vlayer = qobject_cast<QgsVectorLayer *>( nodeLayer->layer() ); if ( vlayer && vlayer->featureCount() >= 0 ) name += QStringLiteral( " [%1]" ).arg( vlayer->featureCount() ); } return name; } } else if ( role == Qt::DecorationRole && index.column() == 0 ) { if ( QgsLayerTree::isGroup( node ) ) return iconGroup(); if ( QgsLayerTree::isLayer( node ) ) { QgsLayerTreeLayer *nodeLayer = QgsLayerTree::toLayer( node ); QgsMapLayer *layer = nodeLayer->layer(); if ( !layer ) return QVariant(); // icons possibly overriding default icon switch ( layer->type() ) { case QgsMapLayerType::RasterLayer: return QgsLayerItem::iconRaster(); case QgsMapLayerType::MeshLayer: return QgsLayerItem::iconMesh(); case QgsMapLayerType::VectorTileLayer: return QgsLayerItem::iconVectorTile(); case QgsMapLayerType::VectorLayer: case QgsMapLayerType::PluginLayer: case QgsMapLayerType::AnnotationLayer: break; } QgsVectorLayer *vlayer = qobject_cast<QgsVectorLayer *>( layer ); QIcon icon; // if there's just on legend entry that should be embedded in layer - do that! if ( testFlag( ShowLegend ) && legendEmbeddedInParent( nodeLayer ) ) { icon = legendIconEmbeddedInParent( nodeLayer ); } else if ( vlayer && layer->type() == QgsMapLayerType::VectorLayer ) { if ( vlayer->geometryType() == QgsWkbTypes::PointGeometry ) icon = QgsLayerItem::iconPoint(); else if ( vlayer->geometryType() == QgsWkbTypes::LineGeometry ) icon = QgsLayerItem::iconLine(); else if ( vlayer->geometryType() == QgsWkbTypes::PolygonGeometry ) icon = QgsLayerItem::iconPolygon(); else if ( vlayer->geometryType() == QgsWkbTypes::NullGeometry ) icon = QgsLayerItem::iconTable(); else icon = QgsLayerItem::iconDefault(); } if ( vlayer && vlayer->isEditable() && testFlag( UseTextFormatting ) ) { const int iconSize = scaleIconSize( 16 ); QPixmap pixmap( icon.pixmap( iconSize, iconSize ) ); QPainter painter( &pixmap ); painter.drawPixmap( 0, 0, iconSize, iconSize, QgsApplication::getThemePixmap( vlayer->isModified() ? 
QStringLiteral( "/mIconEditableEdits.svg" ) : QStringLiteral( "/mActionToggleEditing.svg" ) ) ); painter.end(); icon = QIcon( pixmap ); } return icon; } } else if ( role == Qt::CheckStateRole ) { if ( !testFlag( AllowNodeChangeVisibility ) ) return QVariant(); if ( QgsLayerTree::isLayer( node ) ) { QgsLayerTreeLayer *nodeLayer = QgsLayerTree::toLayer( node ); if ( nodeLayer->layer() && !nodeLayer->layer()->isSpatial() ) return QVariant(); // do not show checkbox for non-spatial tables return nodeLayer->itemVisibilityChecked() ? Qt::Checked : Qt::Unchecked; } else if ( QgsLayerTree::isGroup( node ) ) { QgsLayerTreeGroup *nodeGroup = QgsLayerTree::toGroup( node ); return nodeGroup->itemVisibilityChecked() ? Qt::Checked : Qt::Unchecked; } } else if ( role == Qt::FontRole && testFlag( UseTextFormatting ) ) { QFont f( QgsLayerTree::isLayer( node ) ? mFontLayer : ( QgsLayerTree::isGroup( node ) ? mFontGroup : QFont() ) ); if ( index == mCurrentIndex ) f.setUnderline( true ); if ( QgsLayerTree::isLayer( node ) ) { const QgsMapLayer *layer = QgsLayerTree::toLayer( node )->layer(); if ( ( !node->isVisible() && ( !layer || layer->isSpatial() ) ) || ( layer && !layer->isInScaleRange( mLegendMapViewScale ) ) ) { f.setItalic( !f.italic() ); } } return f; } else if ( role == Qt::ForegroundRole && testFlag( UseTextFormatting ) ) { QBrush brush( qApp->palette().color( QPalette::Text ), Qt::SolidPattern ); if ( QgsLayerTree::isLayer( node ) ) { const QgsMapLayer *layer = QgsLayerTree::toLayer( node )->layer(); if ( ( !node->isVisible() && ( !layer || layer->isSpatial() ) ) || ( layer && !layer->isInScaleRange( mLegendMapViewScale ) ) ) { QColor fadedTextColor = brush.color(); fadedTextColor.setAlpha( 128 ); brush.setColor( fadedTextColor ); } } return brush; } else if ( role == Qt::ToolTipRole ) { if ( QgsLayerTree::isLayer( node ) ) { if ( QgsMapLayer *layer = QgsLayerTree::toLayer( node )->layer() ) { QString title = !layer->title().isEmpty() ? layer->title() : !layer->shortName().isEmpty() ? 
layer->shortName() : layer->name(); title = "<b>" + title.toHtmlEscaped() + "</b>"; if ( layer->isSpatial() && layer->crs().isValid() ) { if ( QgsVectorLayer *vl = qobject_cast<QgsVectorLayer *>( layer ) ) title += tr( " (%1 - %2)" ).arg( QgsWkbTypes::displayString( vl->wkbType() ), layer->crs().authid() ).toHtmlEscaped(); else title += tr( " (%1)" ).arg( layer->crs().authid() ).toHtmlEscaped(); } QStringList parts; parts << title; if ( !layer->abstract().isEmpty() ) { parts << QString(); const QStringList abstractLines = layer->abstract().split( '\n' ); for ( const auto &l : abstractLines ) { parts << l.toHtmlEscaped(); } parts << QString(); } QString source( layer->publicSource() ); if ( source.size() > 1024 ) { source = source.left( 1023 ) + QString( QChar( 0x2026 ) ); } parts << "<i>" + source.toHtmlEscaped() + "</i>"; return parts.join( QLatin1String( "<br/>" ) ); } } } return QVariant(); } Qt::ItemFlags QgsLayerTreeModel::flags( const QModelIndex &index ) const { if ( !index.isValid() ) { Qt::ItemFlags rootFlags = Qt::ItemFlags(); if ( testFlag( AllowNodeReorder ) ) rootFlags |= Qt::ItemIsDropEnabled; return rootFlags; } if ( QgsLayerTreeModelLegendNode *symn = index2legendNode( index ) ) return legendNodeFlags( symn ); Qt::ItemFlags f = Qt::ItemIsEnabled | Qt::ItemIsSelectable; if ( testFlag( AllowNodeRename ) ) f |= Qt::ItemIsEditable; QgsLayerTreeNode *node = index2node( index ); bool isEmbedded = node->customProperty( QStringLiteral( "embedded" ) ).toInt(); if ( testFlag( AllowNodeReorder ) ) { // only root embedded nodes can be reordered if ( !isEmbedded || ( isEmbedded && node->parent() && !node->parent()->customProperty( QStringLiteral( "embedded" ) ).toInt() ) ) f |= Qt::ItemIsDragEnabled; } if ( testFlag( AllowNodeChangeVisibility ) && ( QgsLayerTree::isLayer( node ) || QgsLayerTree::isGroup( node ) ) ) f |= Qt::ItemIsUserCheckable; if ( testFlag( AllowNodeReorder ) && QgsLayerTree::isGroup( node ) && !isEmbedded ) f |= Qt::ItemIsDropEnabled; return f; } bool QgsLayerTreeModel::setData( const QModelIndex &index, const QVariant &value, int role ) { QgsLayerTreeModelLegendNode *sym = index2legendNode( index ); if ( sym ) { if ( role == Qt::CheckStateRole && !testFlag( AllowLegendChangeState ) ) return false; bool res = sym->setData( value, role ); if ( res ) emit dataChanged( index, index ); return res; } QgsLayerTreeNode *node = index2node( index ); if ( !node ) return QAbstractItemModel::setData( index, value, role ); if ( role == Qt::CheckStateRole ) { if ( !testFlag( AllowNodeChangeVisibility ) ) return false; bool checked = static_cast< Qt::CheckState >( value.toInt() ) == Qt::Checked; if ( checked && node->children().isEmpty() ) { node->setItemVisibilityCheckedParentRecursive( checked ); } else if ( testFlag( ActionHierarchical ) ) { if ( node->children().isEmpty() ) node->setItemVisibilityCheckedParentRecursive( checked ); else node->setItemVisibilityCheckedRecursive( checked ); } else { node->setItemVisibilityChecked( checked ); } recursivelyEmitDataChanged( index ); return true; } else if ( role == Qt::EditRole ) { if ( !testFlag( AllowNodeRename ) ) return false; if ( QgsLayerTree::isLayer( node ) ) { QgsLayerTreeLayer *layer = QgsLayerTree::toLayer( node ); layer->setName( value.toString() ); emit dataChanged( index, index ); } else if ( QgsLayerTree::isGroup( node ) ) { QgsLayerTree::toGroup( node )->setName( value.toString() ); emit dataChanged( index, index ); } } return QAbstractItemModel::setData( index, value, role ); } QModelIndex 
QgsLayerTreeModel::node2index( QgsLayerTreeNode *node ) const { if ( !node || !node->parent() ) return QModelIndex(); // this is the only root item -> invalid index QModelIndex parentIndex = node2index( node->parent() ); int row = node->parent()->children().indexOf( node ); Q_ASSERT( row >= 0 ); return index( row, 0, parentIndex ); } static bool _isChildOfNode( QgsLayerTreeNode *child, QgsLayerTreeNode *node ) { if ( !child->parent() ) return false; if ( child->parent() == node ) return true; return _isChildOfNode( child->parent(), node ); } static bool _isChildOfNodes( QgsLayerTreeNode *child, const QList<QgsLayerTreeNode *> &nodes ) { for ( QgsLayerTreeNode *n : nodes ) { if ( _isChildOfNode( child, n ) ) return true; } return false; } QList<QgsLayerTreeNode *> QgsLayerTreeModel::indexes2nodes( const QModelIndexList &list, bool skipInternal ) const { QList<QgsLayerTreeNode *> nodes; const auto constList = list; for ( const QModelIndex &index : constList ) { QgsLayerTreeNode *node = index2node( index ); if ( !node ) continue; nodes << node; } if ( !skipInternal ) return nodes; // remove any children of nodes if both parent node and children are selected QList<QgsLayerTreeNode *> nodesFinal; for ( QgsLayerTreeNode *node : qgis::as_const( nodes ) ) { if ( !_isChildOfNodes( node, nodes ) ) nodesFinal << node; } return nodesFinal; } QgsLayerTree *QgsLayerTreeModel::rootGroup() const { return mRootNode; } void QgsLayerTreeModel::setRootGroup( QgsLayerTree *newRootGroup ) { beginResetModel(); disconnectFromRootNode(); Q_ASSERT( mLegend.isEmpty() ); mRootNode = newRootGroup; endResetModel(); connectToRootNode(); } void QgsLayerTreeModel::refreshLayerLegend( QgsLayerTreeLayer *nodeLayer ) { // update title QModelIndex idx = node2index( nodeLayer ); emit dataChanged( idx, idx ); // update children int oldNodeCount = rowCount( idx ); if ( oldNodeCount > 0 ) { beginRemoveRows( idx, 0, oldNodeCount - 1 ); removeLegendFromLayer( nodeLayer ); endRemoveRows(); } addLegendToLayer( nodeLayer ); int newNodeCount = rowCount( idx ); // automatic collapse of legend nodes - useful if a layer has many legend nodes if ( mAutoCollapseLegendNodesCount != -1 && oldNodeCount != newNodeCount && newNodeCount >= mAutoCollapseLegendNodesCount ) nodeLayer->setExpanded( false ); } QModelIndex QgsLayerTreeModel::currentIndex() const { return mCurrentIndex; } void QgsLayerTreeModel::setCurrentIndex( const QModelIndex &currentIndex ) { QModelIndex oldIndex = mCurrentIndex; mCurrentIndex = currentIndex; if ( oldIndex.isValid() ) emit dataChanged( oldIndex, oldIndex ); if ( currentIndex.isValid() ) emit dataChanged( currentIndex, currentIndex ); } void QgsLayerTreeModel::setLayerTreeNodeFont( int nodeType, const QFont &font ) { if ( nodeType == QgsLayerTreeNode::NodeGroup ) { if ( mFontGroup != font ) { mFontGroup = font; recursivelyEmitDataChanged(); } } else if ( nodeType == QgsLayerTreeNode::NodeLayer ) { if ( mFontLayer != font ) { mFontLayer = font; recursivelyEmitDataChanged(); } } else { QgsDebugMsgLevel( QStringLiteral( "invalid node type" ), 4 ); } } QFont QgsLayerTreeModel::layerTreeNodeFont( int nodeType ) const { if ( nodeType == QgsLayerTreeNode::NodeGroup ) return mFontGroup; else if ( nodeType == QgsLayerTreeNode::NodeLayer ) return mFontLayer; else { QgsDebugMsgLevel( QStringLiteral( "invalid node type" ), 4 ); return QFont(); } } void QgsLayerTreeModel::setLegendFilterByScale( double scale ) { mLegendFilterByScale = scale; // this could be later done in more efficient way // by just updating active legend 
nodes, without refreshing original legend nodes const auto layers = mRootNode->findLayers(); for ( QgsLayerTreeLayer *nodeLayer : layers ) refreshLayerLegend( nodeLayer ); } void QgsLayerTreeModel::setLegendFilterByMap( const QgsMapSettings *settings ) { setLegendFilter( settings, /* useExtent = */ true ); } void QgsLayerTreeModel::setLegendFilter( const QgsMapSettings *settings, bool useExtent, const QgsGeometry &polygon, bool useExpressions ) { if ( settings && settings->hasValidSettings() ) { mLegendFilterMapSettings.reset( new QgsMapSettings( *settings ) ); mLegendFilterMapSettings->setLayerStyleOverrides( mLayerStyleOverrides ); QgsMapHitTest::LayerFilterExpression exprs; mLegendFilterUsesExtent = useExtent; // collect expression filters if ( useExpressions ) { const auto layers = mRootNode->findLayers(); for ( QgsLayerTreeLayer *nodeLayer : layers ) { bool enabled; QString expr = QgsLayerTreeUtils::legendFilterByExpression( *nodeLayer, &enabled ); if ( enabled && !expr.isEmpty() ) { exprs[ nodeLayer->layerId()] = expr; } } } bool polygonValid = !polygon.isNull() && polygon.type() == QgsWkbTypes::PolygonGeometry; if ( useExpressions && !useExtent && !polygonValid ) // only expressions { mLegendFilterHitTest.reset( new QgsMapHitTest( *mLegendFilterMapSettings, exprs ) ); } else { mLegendFilterHitTest.reset( new QgsMapHitTest( *mLegendFilterMapSettings, polygon, exprs ) ); } mLegendFilterHitTest->run(); } else { if ( !mLegendFilterMapSettings ) return; // no change mLegendFilterMapSettings.reset(); mLegendFilterHitTest.reset(); } // temporarily disable autocollapse so that legend nodes stay visible int bkAutoCollapse = autoCollapseLegendNodes(); setAutoCollapseLegendNodes( -1 ); // this could be later done in more efficient way // by just updating active legend nodes, without refreshing original legend nodes const auto layers = mRootNode->findLayers(); for ( QgsLayerTreeLayer *nodeLayer : layers ) refreshLayerLegend( nodeLayer ); setAutoCollapseLegendNodes( bkAutoCollapse ); } void QgsLayerTreeModel::setLegendMapViewData( double mapUnitsPerPixel, int dpi, double scale ) { if ( mLegendMapViewDpi == dpi && qgsDoubleNear( mLegendMapViewMupp, mapUnitsPerPixel ) && qgsDoubleNear( mLegendMapViewScale, scale ) ) return; double previousScale = mLegendMapViewScale; mLegendMapViewScale = scale; mLegendMapViewMupp = mapUnitsPerPixel; mLegendMapViewDpi = dpi; // now invalidate legend nodes! 
legendInvalidateMapBasedData(); if ( scale != previousScale ) refreshScaleBasedLayers( QModelIndex(), previousScale ); } void QgsLayerTreeModel::legendMapViewData( double *mapUnitsPerPixel, int *dpi, double *scale ) const { if ( mapUnitsPerPixel ) *mapUnitsPerPixel = mLegendMapViewMupp; if ( dpi ) *dpi = mLegendMapViewDpi; if ( scale ) *scale = mLegendMapViewScale; } QMap<QString, QString> QgsLayerTreeModel::layerStyleOverrides() const { return mLayerStyleOverrides; } void QgsLayerTreeModel::setLayerStyleOverrides( const QMap<QString, QString> &overrides ) { mLayerStyleOverrides = overrides; } int QgsLayerTreeModel::scaleIconSize( int standardSize ) { return QgsApplication::scaleIconSize( standardSize, true ); } void QgsLayerTreeModel::nodeWillAddChildren( QgsLayerTreeNode *node, int indexFrom, int indexTo ) { Q_ASSERT( node ); beginInsertRows( node2index( node ), indexFrom, indexTo ); } static QList<QgsLayerTreeLayer *> _layerNodesInSubtree( QgsLayerTreeNode *node, int indexFrom, int indexTo ) { QList<QgsLayerTreeNode *> children = node->children(); QList<QgsLayerTreeLayer *> newLayerNodes; for ( int i = indexFrom; i <= indexTo; ++i ) { QgsLayerTreeNode *child = children.at( i ); if ( QgsLayerTree::isLayer( child ) ) newLayerNodes << QgsLayerTree::toLayer( child ); else if ( QgsLayerTree::isGroup( child ) ) newLayerNodes << QgsLayerTree::toGroup( child )->findLayers(); } return newLayerNodes; } void QgsLayerTreeModel::nodeAddedChildren( QgsLayerTreeNode *node, int indexFrom, int indexTo ) { Q_ASSERT( node ); endInsertRows(); const auto subNodes = _layerNodesInSubtree( node, indexFrom, indexTo ); for ( QgsLayerTreeLayer *newLayerNode : subNodes ) connectToLayer( newLayerNode ); } void QgsLayerTreeModel::nodeWillRemoveChildren( QgsLayerTreeNode *node, int indexFrom, int indexTo ) { Q_ASSERT( node ); beginRemoveRows( node2index( node ), indexFrom, indexTo ); // disconnect from layers and remove their legend const auto subNodes = _layerNodesInSubtree( node, indexFrom, indexTo ); for ( QgsLayerTreeLayer *nodeLayer : subNodes ) disconnectFromLayer( nodeLayer ); } void QgsLayerTreeModel::nodeRemovedChildren() { endRemoveRows(); } void QgsLayerTreeModel::nodeVisibilityChanged( QgsLayerTreeNode *node ) { Q_ASSERT( node ); QModelIndex index = node2index( node ); emit dataChanged( index, index ); } void QgsLayerTreeModel::nodeNameChanged( QgsLayerTreeNode *node, const QString &name ) { Q_UNUSED( name ) Q_ASSERT( node ); QModelIndex index = node2index( node ); emit dataChanged( index, index ); } void QgsLayerTreeModel::nodeCustomPropertyChanged( QgsLayerTreeNode *node, const QString &key ) { if ( QgsLayerTree::isLayer( node ) && key == QLatin1String( "showFeatureCount" ) ) refreshLayerLegend( QgsLayerTree::toLayer( node ) ); } void QgsLayerTreeModel::nodeLayerLoaded() { QgsLayerTreeLayer *nodeLayer = qobject_cast<QgsLayerTreeLayer *>( sender() ); if ( !nodeLayer ) return; // deferred connection to the layer connectToLayer( nodeLayer ); } void QgsLayerTreeModel::nodeLayerWillBeUnloaded() { QgsLayerTreeLayer *nodeLayer = qobject_cast<QgsLayerTreeLayer *>( sender() ); if ( !nodeLayer ) return; disconnectFromLayer( nodeLayer ); // wait for the layer to appear again connect( nodeLayer, &QgsLayerTreeLayer::layerLoaded, this, &QgsLayerTreeModel::nodeLayerLoaded ); } void QgsLayerTreeModel::layerLegendChanged() { if ( !mRootNode ) return; if ( !testFlag( ShowLegend ) ) return; QgsMapLayer *layer = qobject_cast<QgsMapLayer *>( sender() ); if ( !layer ) return; QgsLayerTreeLayer *nodeLayer = 
mRootNode->findLayer( layer->id() ); if ( !nodeLayer ) return; refreshLayerLegend( nodeLayer ); } void QgsLayerTreeModel::layerNeedsUpdate() { QgsMapLayer *layer = qobject_cast<QgsMapLayer *>( sender() ); if ( !layer ) return; QgsLayerTreeLayer *nodeLayer = mRootNode->findLayer( layer->id() ); if ( !nodeLayer ) return; QModelIndex index = node2index( nodeLayer ); emit dataChanged( index, index ); if ( nodeLayer->customProperty( QStringLiteral( "showFeatureCount" ) ).toInt() ) refreshLayerLegend( nodeLayer ); } void QgsLayerTreeModel::legendNodeDataChanged() { QgsLayerTreeModelLegendNode *legendNode = qobject_cast<QgsLayerTreeModelLegendNode *>( sender() ); if ( !legendNode ) return; QModelIndex index = legendNode2index( legendNode ); if ( index.isValid() ) emit dataChanged( index, index ); } void QgsLayerTreeModel::legendNodeSizeChanged() { QgsLayerTreeModelLegendNode *legendNode = qobject_cast<QgsLayerTreeModelLegendNode *>( sender() ); if ( !legendNode ) return; QModelIndex index = legendNode2index( legendNode ); if ( index.isValid() ) emit dataChanged( index, index, QVector<int> { Qt::SizeHintRole } ); } void QgsLayerTreeModel::connectToLayer( QgsLayerTreeLayer *nodeLayer ) { if ( !nodeLayer->layer() ) { // in order to connect to layer, we need to have it loaded. // keep an eye on the layer ID: once loaded, we will use it connect( nodeLayer, &QgsLayerTreeLayer::layerLoaded, this, &QgsLayerTreeModel::nodeLayerLoaded ); return; } // watch if the layer is getting removed connect( nodeLayer, &QgsLayerTreeLayer::layerWillBeUnloaded, this, &QgsLayerTreeModel::nodeLayerWillBeUnloaded ); if ( testFlag( ShowLegend ) ) { addLegendToLayer( nodeLayer ); // automatic collapse of legend nodes - useful if a layer has many legend nodes if ( !mRootNode->customProperty( QStringLiteral( "loading" ) ).toBool() ) { if ( mAutoCollapseLegendNodesCount != -1 && rowCount( node2index( nodeLayer ) ) >= mAutoCollapseLegendNodesCount ) nodeLayer->setExpanded( false ); } } QgsMapLayer *layer = nodeLayer->layer(); connect( layer, &QgsMapLayer::legendChanged, this, &QgsLayerTreeModel::layerLegendChanged, Qt::UniqueConnection ); if ( layer->type() == QgsMapLayerType::VectorLayer ) { // using unique connection because there may be temporarily more nodes for a layer than just one // which would create multiple connections, however disconnect() would disconnect all multiple connections // even if we wanted to disconnect just one connection in each call. 
QgsVectorLayer *vl = qobject_cast< QgsVectorLayer * >( layer ); connect( vl, &QgsVectorLayer::editingStarted, this, &QgsLayerTreeModel::layerNeedsUpdate, Qt::UniqueConnection ); connect( vl, &QgsVectorLayer::editingStopped, this, &QgsLayerTreeModel::layerNeedsUpdate, Qt::UniqueConnection ); connect( vl, &QgsVectorLayer::layerModified, this, &QgsLayerTreeModel::layerNeedsUpdate, Qt::UniqueConnection ); } } // try to find out if the layer ID is present in the tree multiple times static int _numLayerCount( QgsLayerTreeGroup *group, const QString &layerId ) { int count = 0; const auto constChildren = group->children(); for ( QgsLayerTreeNode *child : constChildren ) { if ( QgsLayerTree::isLayer( child ) ) { if ( QgsLayerTree::toLayer( child )->layerId() == layerId ) count++; } else if ( QgsLayerTree::isGroup( child ) ) { count += _numLayerCount( QgsLayerTree::toGroup( child ), layerId ); } } return count; } void QgsLayerTreeModel::disconnectFromLayer( QgsLayerTreeLayer *nodeLayer ) { disconnect( nodeLayer, nullptr, this, nullptr ); // disconnect from delayed load of layer if ( !nodeLayer->layer() ) return; // we were never connected if ( testFlag( ShowLegend ) ) { removeLegendFromLayer( nodeLayer ); } if ( _numLayerCount( mRootNode, nodeLayer->layerId() ) == 1 ) { // last instance of the layer in the tree: disconnect from all signals from layer! disconnect( nodeLayer->layer(), nullptr, this, nullptr ); } } void QgsLayerTreeModel::connectToLayers( QgsLayerTreeGroup *parentGroup ) { const auto constChildren = parentGroup->children(); for ( QgsLayerTreeNode *node : constChildren ) { if ( QgsLayerTree::isGroup( node ) ) connectToLayers( QgsLayerTree::toGroup( node ) ); else if ( QgsLayerTree::isLayer( node ) ) connectToLayer( QgsLayerTree::toLayer( node ) ); } } void QgsLayerTreeModel::disconnectFromLayers( QgsLayerTreeGroup *parentGroup ) { const auto constChildren = parentGroup->children(); for ( QgsLayerTreeNode *node : constChildren ) { if ( QgsLayerTree::isGroup( node ) ) disconnectFromLayers( QgsLayerTree::toGroup( node ) ); else if ( QgsLayerTree::isLayer( node ) ) disconnectFromLayer( QgsLayerTree::toLayer( node ) ); } } void QgsLayerTreeModel::connectToRootNode() { Q_ASSERT( mRootNode ); connect( mRootNode, &QgsLayerTreeNode::willAddChildren, this, &QgsLayerTreeModel::nodeWillAddChildren ); connect( mRootNode, &QgsLayerTreeNode::addedChildren, this, &QgsLayerTreeModel::nodeAddedChildren ); connect( mRootNode, &QgsLayerTreeNode::willRemoveChildren, this, &QgsLayerTreeModel::nodeWillRemoveChildren ); connect( mRootNode, &QgsLayerTreeNode::removedChildren, this, &QgsLayerTreeModel::nodeRemovedChildren ); connect( mRootNode, &QgsLayerTreeNode::visibilityChanged, this, &QgsLayerTreeModel::nodeVisibilityChanged ); connect( mRootNode, &QgsLayerTreeNode::nameChanged, this, &QgsLayerTreeModel::nodeNameChanged ); connect( mRootNode, &QgsLayerTreeNode::customPropertyChanged, this, &QgsLayerTreeModel::nodeCustomPropertyChanged ); connectToLayers( mRootNode ); } void QgsLayerTreeModel::disconnectFromRootNode() { disconnect( mRootNode, nullptr, this, nullptr ); disconnectFromLayers( mRootNode ); } void QgsLayerTreeModel::recursivelyEmitDataChanged( const QModelIndex &idx ) { QgsLayerTreeNode *node = index2node( idx ); if ( !node ) return; int count = node->children().count(); if ( count == 0 ) return; emit dataChanged( index( 0, 0, idx ), index( count - 1, 0, idx ) ); for ( int i = 0; i < count; ++i ) recursivelyEmitDataChanged( index( i, 0, idx ) ); } void QgsLayerTreeModel::refreshScaleBasedLayers( 
const QModelIndex &idx, double previousScale ) { QgsLayerTreeNode *node = index2node( idx ); if ( !node ) return; if ( node->nodeType() == QgsLayerTreeNode::NodeLayer ) { const QgsMapLayer *layer = QgsLayerTree::toLayer( node )->layer(); if ( layer && layer->hasScaleBasedVisibility() ) { if ( layer->isInScaleRange( mLegendMapViewScale ) != layer->isInScaleRange( previousScale ) ) emit dataChanged( idx, idx, QVector<int>() << Qt::FontRole << Qt::ForegroundRole ); } } int count = node->children().count(); for ( int i = 0; i < count; ++i ) refreshScaleBasedLayers( index( i, 0, idx ), previousScale ); } Qt::DropActions QgsLayerTreeModel::supportedDropActions() const { return Qt::CopyAction | Qt::MoveAction; } QStringList QgsLayerTreeModel::mimeTypes() const { QStringList types; types << QStringLiteral( "application/qgis.layertreemodeldata" ); return types; } QMimeData *QgsLayerTreeModel::mimeData( const QModelIndexList &indexes ) const { // Sort the indexes. Depending on how the user selected the items, the indexes may be unsorted. QModelIndexList sortedIndexes = indexes; std::sort( sortedIndexes.begin(), sortedIndexes.end(), std::less<QModelIndex>() ); QList<QgsLayerTreeNode *> nodesFinal = indexes2nodes( sortedIndexes, true ); if ( nodesFinal.isEmpty() ) return nullptr; QMimeData *mimeData = new QMimeData(); QDomDocument layerTreeDoc; QDomElement rootLayerTreeElem = layerTreeDoc.createElement( QStringLiteral( "layer_tree_model_data" ) ); for ( QgsLayerTreeNode *node : qgis::as_const( nodesFinal ) ) { node->writeXml( rootLayerTreeElem, QgsReadWriteContext() ); } layerTreeDoc.appendChild( rootLayerTreeElem ); QString errorMessage; QgsReadWriteContext readWriteContext; QDomDocument layerDefinitionsDoc( QStringLiteral( "qgis-layer-definition" ) ); QgsLayerDefinition::exportLayerDefinition( layerDefinitionsDoc, nodesFinal, errorMessage, QgsReadWriteContext() ); QString txt = layerDefinitionsDoc.toString(); mimeData->setData( QStringLiteral( "application/qgis.layertreemodeldata" ), layerTreeDoc.toString().toUtf8() ); mimeData->setData( QStringLiteral( "application/qgis.application.pid" ), QString::number( QCoreApplication::applicationPid() ).toUtf8() ); mimeData->setData( QStringLiteral( "application/qgis.layertree.layerdefinitions" ), txt.toUtf8() ); mimeData->setData( QStringLiteral( "application/x-vnd.qgis.qgis.uri" ), QgsMimeDataUtils::layerTreeNodesToUriList( nodesFinal ) ); return mimeData; } bool QgsLayerTreeModel::dropMimeData( const QMimeData *data, Qt::DropAction action, int row, int column, const QModelIndex &parent ) { if ( action == Qt::IgnoreAction ) return true; if ( !data->hasFormat( QStringLiteral( "application/qgis.layertreemodeldata" ) ) ) return false; if ( column >= columnCount( parent ) ) return false; QgsLayerTreeNode *nodeParent = index2node( parent ); if ( !QgsLayerTree::isGroup( nodeParent ) ) return false; if ( parent.isValid() && row == -1 ) row = 0; // if dropped directly onto group item, insert at first position // if we are coming from another QGIS instance, we need to add the layers too bool ok = false; // the application pid is only provided from QGIS 3.14, so do not check to OK before defaulting to moving in the legend qint64 qgisPid = data->data( QStringLiteral( "application/qgis.application.pid" ) ).toInt( &ok ); if ( ok && qgisPid != QCoreApplication::applicationPid() ) { QByteArray encodedLayerDefinitionData = data->data( QStringLiteral( "application/qgis.layertree.layerdefinitions" ) ); QDomDocument layerDefinitionDoc; if ( !layerDefinitionDoc.setContent( 
QString::fromUtf8( encodedLayerDefinitionData ) ) ) return false; QgsReadWriteContext context; QString errorMessage; QgsLayerDefinition::loadLayerDefinition( layerDefinitionDoc, QgsProject::instance(), QgsLayerTree::toGroup( nodeParent ), errorMessage, context ); emit messageEmitted( tr( "New layers added from another QGIS instance" ) ); } else { QByteArray encodedLayerTreeData = data->data( QStringLiteral( "application/qgis.layertreemodeldata" ) ); QDomDocument layerTreeDoc; if ( !layerTreeDoc.setContent( QString::fromUtf8( encodedLayerTreeData ) ) ) return false; QDomElement rootLayerTreeElem = layerTreeDoc.documentElement(); if ( rootLayerTreeElem.tagName() != QLatin1String( "layer_tree_model_data" ) ) return false; QList<QgsLayerTreeNode *> nodes; QDomElement elem = rootLayerTreeElem.firstChildElement(); while ( !elem.isNull() ) { QgsLayerTreeNode *node = QgsLayerTreeNode::readXml( elem, QgsProject::instance() ); if ( node ) nodes << node; elem = elem.nextSiblingElement(); } if ( nodes.isEmpty() ) return false; QgsLayerTree::toGroup( nodeParent )->insertChildNodes( row, nodes ); } return true; } bool QgsLayerTreeModel::removeRows( int row, int count, const QModelIndex &parent ) { QgsLayerTreeNode *parentNode = index2node( parent ); if ( QgsLayerTree::isGroup( parentNode ) ) { QgsLayerTree::toGroup( parentNode )->removeChildren( row, count ); return true; } return false; } void QgsLayerTreeModel::setFlags( QgsLayerTreeModel::Flags f ) { mFlags = f; } void QgsLayerTreeModel::setFlag( QgsLayerTreeModel::Flag f, bool on ) { if ( on ) mFlags |= f; else mFlags &= ~f; } QgsLayerTreeModel::Flags QgsLayerTreeModel::flags() const { return mFlags; } bool QgsLayerTreeModel::testFlag( QgsLayerTreeModel::Flag f ) const { return mFlags.testFlag( f ); } QIcon QgsLayerTreeModel::iconGroup() { return QgsApplication::getThemeIcon( QStringLiteral( "/mActionFolder.svg" ) ); } QList<QgsLayerTreeModelLegendNode *> QgsLayerTreeModel::filterLegendNodes( const QList<QgsLayerTreeModelLegendNode *> &nodes ) { QList<QgsLayerTreeModelLegendNode *> filtered; if ( mLegendFilterByScale > 0 ) { for ( QgsLayerTreeModelLegendNode *node : qgis::as_const( nodes ) ) { if ( node->isScaleOK( mLegendFilterByScale ) ) filtered << node; } } else if ( mLegendFilterMapSettings ) { if ( !nodes.isEmpty() && mLegendFilterMapSettings->layers().contains( nodes.at( 0 )->layerNode()->layer() ) ) { for ( QgsLayerTreeModelLegendNode *node : qgis::as_const( nodes ) ) { switch ( node->data( QgsSymbolLegendNode::NodeTypeRole ).value<QgsLayerTreeModelLegendNode::NodeTypes>() ) { case QgsLayerTreeModelLegendNode::EmbeddedWidget: filtered << node; break; case QgsLayerTreeModelLegendNode::SimpleLegend: case QgsLayerTreeModelLegendNode::SymbolLegend: case QgsLayerTreeModelLegendNode::RasterSymbolLegend: case QgsLayerTreeModelLegendNode::ImageLegend: case QgsLayerTreeModelLegendNode::WmsLegend: case QgsLayerTreeModelLegendNode::DataDefinedSizeLegend: { const QString ruleKey = node->data( QgsSymbolLegendNode::RuleKeyRole ).toString(); bool checked = mLegendFilterUsesExtent || node->data( Qt::CheckStateRole ).toInt() == Qt::Checked; if ( checked ) { if ( QgsVectorLayer *vl = qobject_cast<QgsVectorLayer *>( node->layerNode()->layer() ) ) { if ( mLegendFilterHitTest->legendKeyVisible( ruleKey, vl ) ) filtered << node; } else { filtered << node; } } else // unknown node type or unchecked filtered << node; break; } } } } } else { return nodes; } return filtered; } /////////////////////////////////////////////////////////////////////////////// // Legend 
nodes routines - start void QgsLayerTreeModel::legendCleanup() { const auto constMLegend = mLegend; for ( const LayerLegendData &data : constMLegend ) { qDeleteAll( data.originalNodes ); delete data.tree; } mLegend.clear(); } void QgsLayerTreeModel::removeLegendFromLayer( QgsLayerTreeLayer *nodeLayer ) { if ( mLegend.contains( nodeLayer ) ) { qDeleteAll( mLegend[nodeLayer].originalNodes ); delete mLegend[nodeLayer].tree; mLegend.remove( nodeLayer ); } } void QgsLayerTreeModel::addLegendToLayer( QgsLayerTreeLayer *nodeL ) { if ( !nodeL || !nodeL->layer() ) return; QgsMapLayer *ml = nodeL->layer(); QgsMapLayerStyleOverride styleOverride( ml ); if ( mLayerStyleOverrides.contains( ml->id() ) ) styleOverride.setOverrideStyle( mLayerStyleOverrides.value( ml->id() ) ); QgsMapLayerLegend *layerLegend = ml->legend(); if ( !layerLegend ) return; QList<QgsLayerTreeModelLegendNode *> lstNew = layerLegend->createLayerTreeModelLegendNodes( nodeL ); // apply filtering defined in layer node's custom properties (reordering, filtering, custom labels) QgsMapLayerLegendUtils::applyLayerNodeProperties( nodeL, lstNew ); if ( testFlag( UseEmbeddedWidgets ) ) { // generate placeholder legend nodes that will be replaced by widgets in QgsLayerTreeView int widgetsCount = ml->customProperty( QStringLiteral( "embeddedWidgets/count" ), 0 ).toInt(); while ( widgetsCount > 0 ) { lstNew.insert( 0, new EmbeddedWidgetLegendNode( nodeL ) ); --widgetsCount; } } QList<QgsLayerTreeModelLegendNode *> filteredLstNew = filterLegendNodes( lstNew ); const auto constLstNew = lstNew; for ( QgsLayerTreeModelLegendNode *n : constLstNew ) { n->setParent( this ); connect( n, &QgsLayerTreeModelLegendNode::dataChanged, this, &QgsLayerTreeModel::legendNodeDataChanged ); connect( n, &QgsLayerTreeModelLegendNode::sizeChanged, this, &QgsLayerTreeModel::legendNodeSizeChanged ); } // See if we have an embedded node - if we do, we will not use it among active nodes. // Legend node embedded in parent does not have to be the first one, // there can be also nodes generated for embedded widgets QgsLayerTreeModelLegendNode *embeddedNode = nullptr; const auto constFilteredLstNew = filteredLstNew; for ( QgsLayerTreeModelLegendNode *legendNode : constFilteredLstNew ) { if ( legendNode->isEmbeddedInParent() ) { embeddedNode = legendNode; filteredLstNew.removeOne( legendNode ); break; } } LayerLegendTree *legendTree = nullptr; // maybe the legend nodes form a tree - try to create a tree structure from the list if ( testFlag( ShowLegendAsTree ) ) legendTree = tryBuildLegendTree( filteredLstNew ); int count = legendTree ? 
legendTree->children[nullptr].count() : filteredLstNew.count(); if ( !filteredLstNew.isEmpty() ) beginInsertRows( node2index( nodeL ), 0, count - 1 ); LayerLegendData data; data.originalNodes = lstNew; data.activeNodes = filteredLstNew; data.embeddedNodeInParent = embeddedNode; data.tree = legendTree; mLegend[nodeL] = data; if ( !filteredLstNew.isEmpty() ) endInsertRows(); // invalidate map based data even if the data is not map-based to make sure // the symbol sizes are computed at least once mInvalidatedNodes.insert( nodeL ); legendInvalidateMapBasedData(); } QgsLayerTreeModel::LayerLegendTree *QgsLayerTreeModel::tryBuildLegendTree( const QList<QgsLayerTreeModelLegendNode *> &nodes ) { // first check whether there are any legend nodes that are not top-level bool hasParentKeys = false; for ( QgsLayerTreeModelLegendNode *n : nodes ) { if ( !n->data( QgsLayerTreeModelLegendNode::ParentRuleKeyRole ).toString().isEmpty() ) { hasParentKeys = true; break; } } if ( !hasParentKeys ) return nullptr; // all legend nodes are top-level => stick with list representation // make mapping from rules to nodes and do some sanity checks QHash<QString, QgsLayerTreeModelLegendNode *> rule2node; rule2node[QString()] = nullptr; for ( QgsLayerTreeModelLegendNode *n : nodes ) { QString ruleKey = n->data( QgsLayerTreeModelLegendNode::RuleKeyRole ).toString(); if ( ruleKey.isEmpty() ) // in tree all nodes must have key return nullptr; if ( rule2node.contains( ruleKey ) ) // and they must be unique return nullptr; rule2node[ruleKey] = n; } // create the tree structure LayerLegendTree *tree = new LayerLegendTree; for ( QgsLayerTreeModelLegendNode *n : nodes ) { QString parentRuleKey = n->data( QgsLayerTreeModelLegendNode::ParentRuleKeyRole ).toString(); QgsLayerTreeModelLegendNode *parent = rule2node.value( parentRuleKey, nullptr ); tree->parents[n] = parent; tree->children[parent] << n; } return tree; } QgsRenderContext *QgsLayerTreeModel::createTemporaryRenderContext() const { double scale = 0.0; double mupp = 0.0; int dpi = 0; legendMapViewData( &mupp, &dpi, &scale ); bool validData = !qgsDoubleNear( mupp, 0.0 ) && dpi != 0 && !qgsDoubleNear( scale, 0.0 ); // setup temporary render context std::unique_ptr<QgsRenderContext> context( new QgsRenderContext ); context->setScaleFactor( dpi / 25.4 ); context->setRendererScale( scale ); context->setMapToPixel( QgsMapToPixel( mupp ) ); context->setFlag( QgsRenderContext::RenderSymbolPreview ); return validData ? 
context.release() : nullptr; } QgsLayerTreeModelLegendNode *QgsLayerTreeModel::index2legendNode( const QModelIndex &index ) { return qobject_cast<QgsLayerTreeModelLegendNode *>( reinterpret_cast<QObject *>( index.internalPointer() ) ); } QModelIndex QgsLayerTreeModel::legendNode2index( QgsLayerTreeModelLegendNode *legendNode ) { const LayerLegendData &data = mLegend[legendNode->layerNode()]; if ( data.tree ) { if ( QgsLayerTreeModelLegendNode *parentLegendNode = data.tree->parents[legendNode] ) { QModelIndex parentIndex = legendNode2index( parentLegendNode ); int row = data.tree->children[parentLegendNode].indexOf( legendNode ); return index( row, 0, parentIndex ); } else { QModelIndex parentIndex = node2index( legendNode->layerNode() ); int row = data.tree->children[nullptr].indexOf( legendNode ); return index( row, 0, parentIndex ); } } QModelIndex parentIndex = node2index( legendNode->layerNode() ); Q_ASSERT( parentIndex.isValid() ); int row = data.activeNodes.indexOf( legendNode ); if ( row < 0 ) // legend node may be filtered (exists within the list of original nodes, but not in active nodes) return QModelIndex(); return index( row, 0, parentIndex ); } int QgsLayerTreeModel::legendNodeRowCount( QgsLayerTreeModelLegendNode *node ) const { const LayerLegendData &data = mLegend[node->layerNode()]; if ( data.tree ) return data.tree->children[node].count(); return 0; // they are leaves } int QgsLayerTreeModel::legendRootRowCount( QgsLayerTreeLayer *nL ) const { if ( !mLegend.contains( nL ) ) return 0; const LayerLegendData &data = mLegend[nL]; if ( data.tree ) return data.tree->children[nullptr].count(); int count = data.activeNodes.count(); return count; } QModelIndex QgsLayerTreeModel::legendRootIndex( int row, int column, QgsLayerTreeLayer *nL ) const { Q_ASSERT( mLegend.contains( nL ) ); const LayerLegendData &data = mLegend[nL]; if ( data.tree ) return createIndex( row, column, static_cast<QObject *>( data.tree->children[nullptr].at( row ) ) ); return createIndex( row, column, static_cast<QObject *>( data.activeNodes.at( row ) ) ); } QModelIndex QgsLayerTreeModel::legendNodeIndex( int row, int column, QgsLayerTreeModelLegendNode *node ) const { const LayerLegendData &data = mLegend[node->layerNode()]; if ( data.tree ) return createIndex( row, column, static_cast<QObject *>( data.tree->children[node].at( row ) ) ); return QModelIndex(); // have no children } QModelIndex QgsLayerTreeModel::legendParent( QgsLayerTreeModelLegendNode *legendNode ) const { QgsLayerTreeLayer *layerNode = legendNode->layerNode(); const LayerLegendData &data = mLegend[layerNode]; if ( data.tree ) { if ( QgsLayerTreeModelLegendNode *parentNode = data.tree->parents[legendNode] ) { QgsLayerTreeModelLegendNode *grandParentNode = data.tree->parents[parentNode]; // may be null (not a problem) int row = data.tree->children[grandParentNode].indexOf( parentNode ); return createIndex( row, 0, static_cast<QObject *>( parentNode ) ); } else return indexOfParentLayerTreeNode( layerNode ); } return indexOfParentLayerTreeNode( layerNode ); } QVariant QgsLayerTreeModel::legendNodeData( QgsLayerTreeModelLegendNode *node, int role ) const { if ( role == Qt::CheckStateRole && !testFlag( AllowLegendChangeState ) ) return QVariant(); return node->data( role ); } Qt::ItemFlags QgsLayerTreeModel::legendNodeFlags( QgsLayerTreeModelLegendNode *node ) const { Qt::ItemFlags f = node->flags(); if ( !testFlag( AllowLegendChangeState ) ) f &= ~Qt::ItemIsUserCheckable; return f; } bool QgsLayerTreeModel::legendEmbeddedInParent( 
QgsLayerTreeLayer *nodeLayer ) const { return static_cast< bool >( mLegend[nodeLayer].embeddedNodeInParent ); } QgsLayerTreeModelLegendNode *QgsLayerTreeModel::legendNodeEmbeddedInParent( QgsLayerTreeLayer *nodeLayer ) const { return mLegend[nodeLayer].embeddedNodeInParent; } QIcon QgsLayerTreeModel::legendIconEmbeddedInParent( QgsLayerTreeLayer *nodeLayer ) const { QgsLayerTreeModelLegendNode *legendNode = mLegend[nodeLayer].embeddedNodeInParent; if ( !legendNode ) return QIcon(); return QIcon( qvariant_cast<QPixmap>( legendNode->data( Qt::DecorationRole ) ) ); } QList<QgsLayerTreeModelLegendNode *> QgsLayerTreeModel::layerLegendNodes( QgsLayerTreeLayer *nodeLayer, bool skipNodeEmbeddedInParent ) { if ( !mLegend.contains( nodeLayer ) ) return QList<QgsLayerTreeModelLegendNode *>(); const LayerLegendData &data = mLegend[nodeLayer]; QList<QgsLayerTreeModelLegendNode *> lst( data.activeNodes ); if ( !skipNodeEmbeddedInParent && data.embeddedNodeInParent ) lst.prepend( data.embeddedNodeInParent ); return lst; } QList<QgsLayerTreeModelLegendNode *> QgsLayerTreeModel::layerOriginalLegendNodes( QgsLayerTreeLayer *nodeLayer ) { return mLegend.value( nodeLayer ).originalNodes; } QgsLayerTreeModelLegendNode *QgsLayerTreeModel::findLegendNode( const QString &layerId, const QString &ruleKey ) const { for ( auto it = mLegend.constBegin(); it != mLegend.constEnd(); ++it ) { QgsLayerTreeLayer *layer = it.key(); if ( layer->layerId() == layerId ) { const auto activeNodes = mLegend.value( layer ).activeNodes; for ( QgsLayerTreeModelLegendNode *legendNode : activeNodes ) { if ( legendNode->data( QgsLayerTreeModelLegendNode::RuleKeyRole ).toString() == ruleKey ) { //found it! return legendNode; } } } } return nullptr; } void QgsLayerTreeModel::legendInvalidateMapBasedData() { if ( !testFlag( DeferredLegendInvalidation ) ) invalidateLegendMapBasedData(); else mDeferLegendInvalidationTimer.start( 10 ); } void QgsLayerTreeModel::invalidateLegendMapBasedData() { // we have varying icon sizes, and we want icon to be centered and // text to be left aligned, so we have to compute the max width of icons // // we do that for nodes which share a common parent // // we do that here because for symbols with size defined in map units // the symbol sizes changes depends on the zoom level std::unique_ptr<QgsRenderContext> context( createTemporaryRenderContext() ); for ( QgsLayerTreeLayer *layerNode : qgis::as_const( mInvalidatedNodes ) ) { const LayerLegendData &data = mLegend.value( layerNode ); QList<QgsSymbolLegendNode *> symbolNodes; QMap<QString, int> widthMax; for ( QgsLayerTreeModelLegendNode *legendNode : qgis::as_const( data.originalNodes ) ) { QgsSymbolLegendNode *n = qobject_cast<QgsSymbolLegendNode *>( legendNode ); if ( n ) { const QSize sz( n->minimumIconSize( context.get() ) ); const QString parentKey( n->data( QgsLayerTreeModelLegendNode::ParentRuleKeyRole ).toString() ); widthMax[parentKey] = std::max( sz.width(), widthMax.contains( parentKey ) ? 
widthMax[parentKey] : 0 );
        n->setIconSize( sz );
        symbolNodes.append( n );
      }
    }

    for ( QgsSymbolLegendNode *n : qgis::as_const( symbolNodes ) )
    {
      const QString parentKey( n->data( QgsLayerTreeModelLegendNode::ParentRuleKeyRole ).toString() );
      Q_ASSERT( widthMax[parentKey] > 0 );
      const int twiceMarginWidth = 2; // a one pixel margin avoids ugly rendering of the icon
      n->setIconSize( QSize( widthMax[parentKey] + twiceMarginWidth, n->iconSize().rheight() + twiceMarginWidth ) );
    }

    for ( QgsLayerTreeModelLegendNode *legendNode : qgis::as_const( data.originalNodes ) )
      legendNode->invalidateMapBasedData();
  }

  mInvalidatedNodes.clear();
}

// Legend nodes routines - end
///////////////////////////////////////////////////////////////////////////////
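// --- Editor's illustrative sketch, not part of the original file ---
// mimeData()/dropMimeData() above round-trip layer tree nodes through the
// custom "application/qgis.layertreemodeldata" MIME type. A minimal driver
// written against the public QAbstractItemModel interface only; it assumes a
// running QgsApplication and a model with at least one top-level node.
#include "qgslayertreemodel.h"
#include <QMimeData>

static bool duplicateFirstNode( QgsLayerTreeModel &model )
{
  // Serialize the first top-level node into the model's custom MIME payload.
  QMimeData *data = model.mimeData( QModelIndexList() << model.index( 0, 0 ) );
  if ( !data )
    return false; // nothing serializable at that index

  // An invalid parent index targets the root group; row 0 inserts first.
  // Because the payload carries this process's own application pid,
  // dropMimeData() takes the in-process branch and deserializes the XML.
  const bool ok = model.dropMimeData( data, Qt::CopyAction, 0, 0, QModelIndex() );
  delete data;
  return ok;
}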
utf-8
1
GPL-2+
1996, Matthias Ettrich <ettrich@kde.org> 1997-1998, Lars Doelle <lars.doelle@on-line.de> 2000, Stephan Kulow <coolo@kde.org> 2004, Peter Brewer 2004, Steve Halasz 2004-2005, Gavin Macaulay 2004-2005, Lars Luthman 2004-2005, Mark Coletti 2005, Brendan Morley 2006, Ionut Iosifescu Enescu 2006, Tom Elwertowski 2007, Peter J. Ersts 2006-2008, Robert Knight <robertknight@gmail.com> 2007-2008, Matthew Perry 2008, e_k <e_k at users.sourceforge.net> 2008, Sandro Furieri 2000, 2009, Richard Kostecky 2009, Florian El Ahdab 2009, Godofredo Contreras Nava 2009, Lorenzo "Il Rugginoso" Masini 2009, Mathias Walker <mwa at sourcepole.ch> 2009, Vita Cizek 2002-2005, 2007, 2009-2010, Gary E. Sherman 2009-2010, Manuel Massing 2010, Ivan Mincik <ivan.mincik@gista.sk> 2010, Jack R, Maxim Dubinin (GIS-Lab) 2010, Jeremy Palmer 2010, Michael Minn 2010, NextGIS (http:nextgis.org) 2010, Pirmin Kalberer 2010, Sourcepole 2010, Yakushev Sergey 2011, Luiz Motta 2011, SunilRajKiran-kCube 2007, 2009, 2012, Magnus Homann 2011-2012, Giuseppe Sucameli 2012, Arunmozhi 2012, Carterix Geomatics 2012, Etienne Tourigny 2012, Dr. Horst Düster 2012, Vaclav Petras 2002-2005, 2007-2013, Tim Sutton 2008-2013, Borys Jurgiel 2012-2013, Massimo Endrighi 2012-2013, Piotr Pociask 2012-2013, Salvatore Larosa 2012-2013, Vinayan Parameswaran 2013, Alvaro Huarte 2013, Chris Crook 2013, Christian Surlykke 2013, Daniel Vaz 2013, Emilio Loi 2013, Joshua Arnott 2011, 2014, Tamas Szekeres 2012-2014, Larry Shaffer 2014, Angelos Tzotsos <tzotsos@gmail.com> 2014, Detlev Neumann 2014, Michael Douchin 2014, Nicklas Avén 2014, Radoslaw Guzinski 2004-2006, 2009-2013, 2015, Radim Blazek 2015, Giovanni Manghi 2015, Karolina Alexiou 2015, Michael Kirk 2015, Pedro Venancio 2012, 2015, the GRASS Development Team 2008, 2015-2016, Stéphane Brunner 2010-2012, 2015-2016, Marco Bernasocchi 2012-2016, Victor Olaya 2016, Andrea Aime 2016, David Adler 2016, Médéric Ribreux 2016, Monsanto Company, USA 2013, 2017, CS Systemes d'information (CS SI) 2014, 2017, Tom Kralidis <tomkralidis@gmail.com> 2014-2015, 2017, Boundless Spatial, Inc. USA 2014-2017, Arnaud Morvan 2014-2017, Sandro Santilli 2014-2017, Médéric Ribreux 2016-2017, OPENGIS.ch 2016-2017, Patrick Valsecchi 2017, Alexander Lisovenko 2017, Bernhard Ströbl 2017, Jorge Gustavo Rocha 2017, Martí Angelats i Ribera 2017, Médéric Ribreux 2017, Muhammad Yarjuna Rohmat 2017, Vincent Mora 2008-2018, Jürgen E. Fischer 2011-2012, 2014-2018, Nathan Woodrow 2016, 2018, Even Rouault 2016-2018, Paul Blottiere 2017-2018, Matteo Ghetta 2017-2018, Viktor Sklencar 2018, Anita Graser 2018, Centre National d'Etudes et spatiales (CNES) 2018, elpaso 2018, GISCE-TI S.L. 
2004-2017, 2019, Marco Hugentobler 2012-2019, Hugo Mercier 2014-2019, Sandro Mani 2015, 2018-2019, Luigi Pirelli 2016-2019, David Marteau 2017, 2019, Ismail Sunni 2017-2019, Daniele Viganò 2017-2019, Peter Petrik 2019, Lutra Consulting Limited 2019, Olivier Dalang 2005-2020, Martin Dobias 2009, 2011-2014, 2016-2020, Alexander Bruy 2011, 2020, German Carrillo <geotux_tuxman@linuxmail.org> 2012-2020, Denis Rouzaud 2012-2020, Matthias Kuhn 2015-2020, Mathieu Pellerin 2016, 2018, 2020, Even Rouault 2017-2020, David Signer 2017-2020, Etienne Trimaille 2017-2020, Loïc Bartoletti 2019-2020, Clemens Raffler 2019-2020, Håvard Tveite 2020, Alexis Roy-Lizotte 2020, Belgacem Nedjima 2020, Ivan Ivanov 2020, Samweli Mwakisambwe 2020, Stefanos Natsis 2020, Stephen Knox 2020, Tomas Mizera 2020, Vincent Cloarec 2012-2013, 2005, 2017-2021, René-Luc D'Hont 2012-2021, The QGIS Project 2013-2021, Nyall Dawson 2014-2021, Alessandro Pasotti 2019-2021, Julien Cabieces
openmw-0.47.0/apps/openmw/mwsound/watersoundupdater.cpp
#include "watersoundupdater.hpp"

#include "../mwbase/world.hpp"
#include "../mwworld/cellstore.hpp"
#include "../mwworld/ptr.hpp"

#include <components/esm/loadcell.hpp>

#include <osg/Vec3f>

namespace MWSound
{
    WaterSoundUpdater::WaterSoundUpdater(const WaterSoundUpdaterSettings& settings)
        : mSettings(settings)
    {
    }

    WaterSoundUpdate WaterSoundUpdater::update(const MWWorld::ConstPtr& player, const MWBase::World& world) const
    {
        WaterSoundUpdate result;

        // Pick the indoor or outdoor near-water sound and clamp the volume to [0, 1].
        result.mId = player.getCell()->isExterior() ? mSettings.mNearWaterOutdoorID : mSettings.mNearWaterIndoorID;
        result.mVolume = std::min(1.0f, getVolume(player, world));

        return result;
    }

    float WaterSoundUpdater::getVolume(const MWWorld::ConstPtr& player, const MWBase::World& world) const
    {
        if (mListenerUnderwater)
            return 1.0f;

        const MWWorld::CellStore& cell = *player.getCell();

        if (!cell.getCell()->hasWater())
            return 0.0f;

        const osg::Vec3f pos = player.getRefData().getPosition().asVec3();
        const float dist = std::abs(cell.getWaterLevel() - pos.z());

        if (cell.isExterior() && dist < mSettings.mNearWaterOutdoorTolerance)
        {
            if (mSettings.mNearWaterPoints <= 1)
                return (mSettings.mNearWaterOutdoorTolerance - dist) / mSettings.mNearWaterOutdoorTolerance;

            // Sample terrain height on an N x N grid around the player and count
            // the samples that lie below water level (terrain height < 0).
            const float step = mSettings.mNearWaterRadius * 2.0f / (mSettings.mNearWaterPoints - 1);

            int underwaterPoints = 0;

            for (int x = 0; x < mSettings.mNearWaterPoints; x++)
            {
                for (int y = 0; y < mSettings.mNearWaterPoints; y++)
                {
                    const float terrainX = pos.x() - mSettings.mNearWaterRadius + x * step;
                    const float terrainY = pos.y() - mSettings.mNearWaterRadius + y * step;
                    const float height = world.getTerrainHeightAt(osg::Vec3f(terrainX, terrainY, 0.0f));

                    if (height < 0)
                        underwaterPoints++;
                }
            }

            // Doubled so that a half-submerged neighbourhood already yields full
            // volume; update() clamps the result to 1.
            return underwaterPoints * 2.0f / (mSettings.mNearWaterPoints * mSettings.mNearWaterPoints);
        }

        if (!cell.isExterior() && dist < mSettings.mNearWaterIndoorTolerance)
            return (mSettings.mNearWaterIndoorTolerance - dist) / mSettings.mNearWaterIndoorTolerance;

        return 0.0f;
    }
}
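// --- Editor's illustrative sketch, not part of the original file ---
// The exterior branch of getVolume() samples an N x N grid of terrain heights
// in a square of half-width nearWaterRadius around the player and treats a
// height below 0 as "under water". A self-contained model of that math, with
// a hypothetical heightAt() standing in for World::getTerrainHeightAt():
#include <algorithm>

float nearWaterFraction(float playerX, float playerY, float radius, int points,
                        float (*heightAt)(float, float))
{
    if (points <= 1)
        return 1.0f; // degenerate grid; the real code falls back to a distance-based value

    const float step = radius * 2.0f / (points - 1);
    int underwaterPoints = 0;

    for (int x = 0; x < points; ++x)
        for (int y = 0; y < points; ++y)
            if (heightAt(playerX - radius + x * step, playerY - radius + y * step) < 0)
                ++underwaterPoints;

    // Doubled so a half-submerged grid already yields full volume, then clamped.
    return std::min(1.0f, underwaterPoints * 2.0f / (points * points));
}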
utf-8
1
GPL-3+
2007-2021, OpenMW Development Team
qt6-declarative-6.2.2+dfsg/tests/manual/quickcontrols2/gifs/tst_gifs.cpp
/**************************************************************************** ** ** Copyright (C) 2017 The Qt Company Ltd. ** Contact: http://www.qt.io/licensing/ ** ** This file is part of the test suite of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL3$ ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and The Qt Company. For licensing terms ** and conditions see http://www.qt.io/terms-conditions. For further ** information use the contact form at http://www.qt.io/contact-us. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 3 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPLv3 included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 3 requirements ** will be met: https://www.gnu.org/licenses/lgpl.html. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 2.0 or later as published by the Free ** Software Foundation and appearing in the file LICENSE.GPL included in ** the packaging of this file. Please review the following information to ** ensure the GNU General Public License version 2.0 requirements will be ** met: http://www.gnu.org/licenses/gpl-2.0.html. ** ** $QT_END_LICENSE$ ** ****************************************************************************/ #include <QtTest> #include <QtQuick> #include "gifrecorder.h" #include "eventcapturer.h" //#define GENERATE_EVENT_CODE class tst_Gifs : public QObject { Q_OBJECT private slots: void initTestCase(); void tumblerWrap(); void slider(); void sliderSnap_data(); void sliderSnap(); void rangeSlider(); void busyIndicator(); void switchGif(); void button_data(); void button(); void tabBar(); void menu(); void swipeView(); void swipeDelegate_data(); void swipeDelegate(); void swipeDelegateBehind(); void delegates_data(); void delegates(); void dial_data(); void dial(); void scrollBar(); void scrollBarSnap_data(); void scrollBarSnap(); void scrollIndicator(); void progressBar_data(); void progressBar(); void triState_data(); void triState(); void checkables_data(); void checkables(); void comboBox(); void stackView_data(); void stackView(); void drawer(); void delayButton(); private: void moveSmoothly(QQuickWindow *window, const QPoint &from, const QPoint &to, int movements, QEasingCurve::Type easingCurveType = QEasingCurve::OutQuint, int movementDelay = 15); void moveSmoothlyAlongArc(QQuickWindow *window, QPoint arcCenter, qreal distanceFromCenter, qreal startAngleRadians, qreal endAngleRadians, QEasingCurve::Type easingCurveType = QEasingCurve::OutQuint); QString dataDirPath; QDir outputDir; }; void tst_Gifs::initTestCase() { dataDirPath = QFINDTESTDATA("data"); QVERIFY(!dataDirPath.isEmpty()); qInfo() << "data directory:" << dataDirPath; outputDir = QDir(QDir::current().filePath("gifs")); QVERIFY(outputDir.exists() || QDir::current().mkpath("gifs")); qInfo() << "output directory:" << outputDir.absolutePath(); } void tst_Gifs::moveSmoothly(QQuickWindow *window, const QPoint &from, const QPoint &to, int movements, QEasingCurve::Type easingCurveType, int movementDelay) { QEasingCurve curve(easingCurveType); int 
xDifference = to.x() - from.x();
    int yDifference = to.y() - from.y();
    for (int movement = 0; movement < movements; ++movement) {
        // Normalize progress by the total movement count; normalizing by the
        // per-axis pixel distance would divide by zero for purely horizontal
        // or purely vertical drags.
        const qreal progress = movement / qreal(movements);
        QPoint pos = QPoint(
            from.x() + qRound(curve.valueForProgress(progress) * xDifference),
            from.y() + qRound(curve.valueForProgress(progress) * yDifference));
        QTest::mouseMove(window, pos, movementDelay);
    }
}

QPoint posAlongArc(QPoint arcCenter, qreal startAngleRadians, qreal endAngleRadians,
    qreal distanceFromCenter, qreal progress, QEasingCurve::Type easingCurveType)
{
    QEasingCurve curve(easingCurveType);
    const qreal angle = startAngleRadians + curve.valueForProgress(progress) * (endAngleRadians - startAngleRadians);
    return (arcCenter - QTransform().rotateRadians(angle).map(QPointF(0, distanceFromCenter))).toPoint();
}

void tst_Gifs::moveSmoothlyAlongArc(QQuickWindow *window, QPoint arcCenter, qreal distanceFromCenter,
    qreal startAngleRadians, qreal endAngleRadians, QEasingCurve::Type easingCurveType)
{
    QEasingCurve curve(easingCurveType);
    const qreal angleSpan = endAngleRadians - startAngleRadians;
    const int movements = qAbs(angleSpan) * 20 + 20;

    for (int movement = 0; movement < movements; ++movement) {
        const qreal progress = movement / qreal(movements);
        const QPoint pos = posAlongArc(arcCenter, startAngleRadians, endAngleRadians,
            distanceFromCenter, progress, easingCurveType);
        QTest::mouseMove(window, pos, 15);
    }
}

void tst_Gifs::tumblerWrap()
{
    GifRecorder gifRecorder;
    gifRecorder.setDataDirPath(dataDirPath);
    gifRecorder.setOutputDir(outputDir);
    gifRecorder.setRecordingDuration(5);
    gifRecorder.setQmlFileName("qtquickcontrols2-tumbler-wrap.qml");

    gifRecorder.start();

    // Left as an example. Usually EventCapturer code would be removed after
    // the GIF has been generated.
QQuickWindow *window = gifRecorder.window(); EventCapturer eventCapturer; #ifdef GENERATE_EVENT_CODE eventCapturer.setMoveEventTrimFlags(EventCapturer::TrimAll); eventCapturer.startCapturing(window, 4000); #else QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(89, 75), 326); QTest::mouseMove(window, QPoint(89, 76), 31); QTest::mouseMove(window, QPoint(89, 80), 10); QTest::mouseMove(window, QPoint(93, 93), 10); QTest::mouseMove(window, QPoint(95, 101), 10); QTest::mouseMove(window, QPoint(97, 109), 11); QTest::mouseMove(window, QPoint(101, 125), 10); QTest::mouseMove(window, QPoint(103, 133), 11); QTest::mouseMove(window, QPoint(103, 141), 11); QTest::mouseMove(window, QPoint(105, 158), 10); QTest::mouseMove(window, QPoint(105, 162), 13); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(105, 162), 0); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(154, 100), 1098); QTest::mouseMove(window, QPoint(154, 99), 16); QTest::mouseMove(window, QPoint(153, 98), 16); QTest::mouseMove(window, QPoint(153, 95), 16); QTest::mouseMove(window, QPoint(152, 91), 15); QTest::mouseMove(window, QPoint(152, 87), 14); QTest::mouseMove(window, QPoint(151, 83), 13); QTest::mouseMove(window, QPoint(151, 86), 13); QTest::mouseMove(window, QPoint(150, 79), 12); QTest::mouseMove(window, QPoint(148, 73), 12); QTest::mouseMove(window, QPoint(148, 68), 12); QTest::mouseMove(window, QPoint(148, 60), 10); QTest::mouseMove(window, QPoint(147, 50), 10); QTest::mouseMove(window, QPoint(147, 40), 9); QTest::mouseMove(window, QPoint(147, 30), 8); QTest::mouseMove(window, QPoint(147, 20), 7); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(147, 20), 0); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(154, 100), 1000); QTest::mouseMove(window, QPoint(147, 101), 16); QTest::mouseMove(window, QPoint(147, 102), 16); QTest::mouseMove(window, QPoint(147, 105), 16); QTest::mouseMove(window, QPoint(148, 109), 15); QTest::mouseMove(window, QPoint(148, 115), 14); QTest::mouseMove(window, QPoint(148, 120), 13); QTest::mouseMove(window, QPoint(150, 125), 13); QTest::mouseMove(window, QPoint(151, 130), 12); QTest::mouseMove(window, QPoint(151, 135), 12); QTest::mouseMove(window, QPoint(153, 140), 12); QTest::mouseMove(window, QPoint(153, 150), 10); QTest::mouseMove(window, QPoint(153, 160), 10); QTest::mouseMove(window, QPoint(153, 170), 9); QTest::mouseMove(window, QPoint(155, 180), 8); QTest::mouseMove(window, QPoint(155, 188), 7); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(20, 188), 0); #endif gifRecorder.waitForFinish(); const auto capturedEvents = eventCapturer.capturedEvents(); for (CapturedEvent event : capturedEvents) qDebug().noquote() << event.cppCommand(); } void tst_Gifs::slider() { GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(5); gifRecorder.setHighQuality(true); gifRecorder.setQmlFileName("qtquickcontrols2-slider.qml"); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *slider = window->property("slider").value<QQuickItem*>(); QVERIFY(slider); QQuickItem *handle = slider->property("handle").value<QQuickItem*>(); QVERIFY(handle); const QPoint handleCenter = handle->mapToItem(window->contentItem(), QPoint(handle->width() / 2, handle->height() / 2)).toPoint(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, handleCenter, 100); QPoint pos1 = handleCenter + QPoint(slider->width() * 0.3, 0); 
moveSmoothly(window, handleCenter, pos1, pos1.x() - handleCenter.x(), QEasingCurve::OutQuint, 10); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, pos1, 20); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, handleCenter, 100); const QPoint pos2 = QPoint(slider->width() - handleCenter.x() + slider->property("rightPadding").toInt(), handleCenter.y()); moveSmoothly(window, pos1, pos2, pos2.x() - pos1.x(), QEasingCurve::OutQuint, 10); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, pos2, 20); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, pos2, 100); moveSmoothly(window, pos2, handleCenter, qAbs(handleCenter.x() - pos2.x()), QEasingCurve::OutQuint, 10); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, handleCenter, 20); gifRecorder.waitForFinish(); } void tst_Gifs::sliderSnap_data() { QTest::addColumn<QString>("gifBaseName"); QTest::addColumn<int>("snapMode"); QTest::newRow("NoSnap") << "qtquickcontrols2-slider-nosnap" << 0; QTest::newRow("SnapAlways") << "qtquickcontrols2-slider-snapalways" << 1; QTest::newRow("SnapOnRelease") << "qtquickcontrols2-slider-snaponrelease" << 2; } void tst_Gifs::sliderSnap() { QFETCH(QString, gifBaseName); QFETCH(int, snapMode); GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(8); gifRecorder.setHighQuality(true); gifRecorder.setQmlFileName("qtquickcontrols2-slider-snap.qml"); gifRecorder.setOutputFileBaseName(gifBaseName); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *slider = window->property("slider").value<QQuickItem*>(); QVERIFY(slider); QVERIFY(slider->setProperty("snapMode", QVariant(snapMode))); QCOMPARE(slider->property("snapMode").toInt(), snapMode); QQuickItem *handle = slider->property("handle").value<QQuickItem*>(); QVERIFY(handle); const QPoint startPos(slider->property("leftPadding").toReal(), slider->height() / 2); const int trackWidth = slider->property("availableWidth").toReal(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, startPos, 200); QPoint pos1 = startPos + QPoint(trackWidth * 0.3, 0); moveSmoothly(window, startPos, pos1, pos1.x() - startPos.x(), QEasingCurve::OutQuint, 30); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, pos1, 0); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, startPos, 400); const QPoint pos2 = startPos + QPoint(trackWidth * 0.6, 0); moveSmoothly(window, pos1, pos2, pos2.x() - pos1.x(), QEasingCurve::OutQuint, 30); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, pos2, 0); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, pos2, 400); moveSmoothly(window, pos2, startPos, qAbs(startPos.x() - pos2.x()) / 2, QEasingCurve::OutQuint, 30); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, startPos, 0); gifRecorder.waitForFinish(); } void tst_Gifs::rangeSlider() { GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(7); gifRecorder.setHighQuality(true); gifRecorder.setQmlFileName("qtquickcontrols2-rangeslider.qml"); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *slider = window->property("slider").value<QQuickItem*>(); QVERIFY(slider); QObject *first = slider->property("first").value<QObject*>(); QVERIFY(first); QQuickItem *firstHandle = first->property("handle").value<QQuickItem*>(); QVERIFY(firstHandle); QObject *second = slider->property("second").value<QObject*>(); 
QVERIFY(second); QQuickItem *secondHandle = second->property("handle").value<QQuickItem*>(); QVERIFY(secondHandle); const QPoint firstCenter = firstHandle->mapToItem(slider, QPoint(firstHandle->width() / 2, firstHandle->height() / 2)).toPoint(); const QPoint secondCenter = secondHandle->mapToItem(slider, QPoint(secondHandle->width() / 2, secondHandle->height() / 2)).toPoint(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, firstCenter, 100); const QPoint firstTarget = firstCenter + QPoint(slider->width() * 0.25, 0); moveSmoothly(window, firstCenter, firstTarget, firstTarget.x() - firstCenter.x()); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, firstTarget, 20); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, secondCenter, 100); const QPoint secondTarget = secondCenter - QPoint(slider->width() * 0.25, 0); moveSmoothly(window, secondCenter, secondTarget, qAbs(secondTarget.x() - secondCenter.x())); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, secondTarget, 20); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, secondTarget, 100); moveSmoothly(window, secondTarget, secondCenter, qAbs(secondTarget.x() - secondCenter.x())); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, secondCenter, 20); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, firstTarget, 100); moveSmoothly(window, firstTarget, firstCenter, firstTarget.x() - firstCenter.x()); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, firstCenter, 20); gifRecorder.waitForFinish(); } void tst_Gifs::busyIndicator() { GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(6); gifRecorder.setHighQuality(true); gifRecorder.setQmlFileName("qtquickcontrols2-busyindicator.qml"); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); // Record nothing for a bit to make it smoother. QTest::qWait(800 * 2); QQuickItem *busyIndicator = window->property("busyIndicator").value<QQuickItem*>(); QVERIFY(busyIndicator); busyIndicator->setProperty("running", false); // 800 ms is the duration of one rotation animation cycle for BusyIndicator. QTest::qWait(800 * 2); busyIndicator->setProperty("running", true); gifRecorder.waitForFinish(); } void tst_Gifs::switchGif() { GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(3); gifRecorder.setQmlFileName("qtquickcontrols2-switch.qml"); gifRecorder.setHighQuality(true); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QTest::mouseClick(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() * 0.8, window->height() / 2), 0); QTest::mouseClick(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() * 0.2, window->height() / 2), 800); gifRecorder.waitForFinish(); } void tst_Gifs::button_data() { QTest::addColumn<QString>("qmlFileName"); QTest::newRow("button") << QString::fromLatin1("qtquickcontrols2-button.qml"); QTest::newRow("button-flat") << QString::fromLatin1("qtquickcontrols2-button-flat.qml"); QTest::newRow("button-highlighted") << QString::fromLatin1("qtquickcontrols2-button-highlighted.qml"); } void tst_Gifs::button() { QFETCH(QString, qmlFileName); GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(3); gifRecorder.setQmlFileName(qmlFileName); // Seems to be necessary to show the Default button background. 
gifRecorder.setHighQuality(true); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() / 2, window->height() / 2), 0); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() / 2, window->height() / 2), 700); gifRecorder.waitForFinish(); } void tst_Gifs::tabBar() { const QString qmlFileName = QStringLiteral("qtquickcontrols2-tabbar.qml"); GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(4); gifRecorder.setQmlFileName(qmlFileName); gifRecorder.setHighQuality(true); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() * 0.6, window->height() / 2), 0); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() * 0.6, window->height() / 2), 50); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() * 0.9, window->height() / 2), 400); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() * 0.9, window->height() / 2), 50); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() * 0.6, window->height() / 2), 800); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() * 0.6, window->height() / 2), 50); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() * 0.3, window->height() / 2), 400); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() * 0.3, window->height() / 2), 50); gifRecorder.waitForFinish(); } void tst_Gifs::menu() { const QString qmlFileName = QStringLiteral("qtquickcontrols2-menu.qml"); GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(3); gifRecorder.setQmlFileName(qmlFileName); gifRecorder.setHighQuality(true); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); const QQuickItem *fileButton = window->property("fileButton").value<QQuickItem*>(); QVERIFY(fileButton); const QPoint fileButtonCenter = fileButton->mapToScene(QPointF(fileButton->width() / 2, fileButton->height() / 2)).toPoint(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, fileButtonCenter, 0); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, fileButtonCenter, 200); const QObject *menu = window->property("menu").value<QObject*>(); QVERIFY(menu); const QQuickItem *menuContentItem = menu->property("contentItem").value<QQuickItem*>(); QVERIFY(menuContentItem); const QPoint lastItemPos = menuContentItem->mapToScene(QPointF(menuContentItem->width() / 2, menuContentItem->height() - 10)).toPoint(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, lastItemPos, 1000); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, lastItemPos, 300); gifRecorder.waitForFinish(); } void tst_Gifs::swipeView() { GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(8); gifRecorder.setQmlFileName(QStringLiteral("qtquickcontrols2-swipeview.qml")); gifRecorder.setHighQuality(true); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *swipeView = window->property("swipeView").value<QQuickItem*>(); QVERIFY(swipeView); QTest::qWait(1200); swipeView->setProperty("currentIndex", 1); QTest::qWait(2000); 
swipeView->setProperty("currentIndex", 2); QTest::qWait(2000); swipeView->setProperty("currentIndex", 0); gifRecorder.waitForFinish(); } void tst_Gifs::swipeDelegate_data() { QTest::addColumn<QString>("qmlFileName"); QTest::newRow("qtquickcontrols2-swipedelegate.qml") << QString::fromLatin1("qtquickcontrols2-swipedelegate.qml"); QTest::newRow("qtquickcontrols2-swipedelegate-leading-trailing.qml") << QString::fromLatin1("qtquickcontrols2-swipedelegate-leading-trailing.qml"); } void tst_Gifs::swipeDelegate() { QFETCH(QString, qmlFileName); GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(10); gifRecorder.setQmlFileName(qmlFileName); gifRecorder.setHighQuality(true); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *swipeDelegate = window->property("swipeDelegate").value<QQuickItem*>(); QVERIFY(swipeDelegate); // Show left item. const QPoint leftTarget = QPoint(swipeDelegate->width() * 0.2, 0); const QPoint rightTarget = QPoint(swipeDelegate->width() * 0.8, 0); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, leftTarget, 100); const int movements = rightTarget.x() - leftTarget.x(); moveSmoothly(window, leftTarget, rightTarget, movements, QEasingCurve::OutQuint, 5); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, rightTarget, 20); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, rightTarget, 1000); moveSmoothly(window, rightTarget, leftTarget, movements, QEasingCurve::OutQuint, 5); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, leftTarget, 20); QTest::qWait(1000); // Show right item. QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, rightTarget, 1000); moveSmoothly(window, rightTarget, leftTarget, movements, QEasingCurve::OutQuint, 5); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, leftTarget, 20); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, leftTarget, 1000); moveSmoothly(window, leftTarget, rightTarget, movements, QEasingCurve::OutQuint, 5); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, rightTarget, 20); gifRecorder.waitForFinish(); } void tst_Gifs::swipeDelegateBehind() { GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(14); gifRecorder.setQmlFileName(QStringLiteral("qtquickcontrols2-swipedelegate-behind.qml")); gifRecorder.setHighQuality(true); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *swipeDelegate = window->property("swipeDelegate").value<QQuickItem*>(); QVERIFY(swipeDelegate); // Show wrapping around left item. const QPoint leftTarget = QPoint(swipeDelegate->width() * 0.2, 0); const QPoint rightTarget = QPoint(swipeDelegate->width() * 0.8, 0); const int movements = rightTarget.x() - leftTarget.x(); for (int i = 0; i < 4; ++i) { QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, leftTarget, 100); moveSmoothly(window, leftTarget, rightTarget, movements, QEasingCurve::OutQuint, 5); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, rightTarget, 20); QTest::qWait(500); } QTest::qWait(1000); // Show wrapping around right item. 
for (int i = 0; i < 4; ++i) { QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, rightTarget, 100); moveSmoothly(window, rightTarget, leftTarget, movements, QEasingCurve::OutQuint, 5); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, leftTarget, 20); QTest::qWait(500); } gifRecorder.waitForFinish(); } void tst_Gifs::delegates_data() { QTest::addColumn<QString>("name"); QTest::addColumn<QList<int> >("pressIndices"); QTest::addColumn<int>("duration"); QTest::newRow("ItemDelegate") << "itemdelegate" << (QList<int> { 0, 1, 2 }) << 5; QTest::newRow("CheckDelegate") << "checkdelegate" << (QList<int> { 0, 0 }) << 5; QTest::newRow("RadioDelegate") << "radiodelegate" << (QList<int> { 1, 0 }) << 5; QTest::newRow("SwitchDelegate") << "switchdelegate" << (QList<int> { 0, 0 }) << 5; } void tst_Gifs::delegates() { QFETCH(QString, name); QFETCH(QList<int>, pressIndices); QFETCH(int, duration); GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(duration); gifRecorder.setQmlFileName(QString::fromLatin1("qtquickcontrols2-%1.qml").arg(name)); gifRecorder.setHighQuality(true); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *delegate = window->property("delegate").value<QQuickItem*>(); QVERIFY(delegate); for (int i = 0; i < pressIndices.size(); ++i) { const int pressIndex = pressIndices.at(i); const QPoint delegateCenter(delegate->mapToScene(QPointF( delegate->width() / 2, delegate->height() / 2 + delegate->height() * pressIndex)).toPoint()); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, delegateCenter, i == 0 ? 200 : 1000); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, delegateCenter, 400); } gifRecorder.waitForFinish(); } void tst_Gifs::dial_data() { QTest::addColumn<QString>("name"); QTest::newRow("dial-wrap") << "wrap"; QTest::newRow("dial-no-wrap") << "no-wrap"; } void tst_Gifs::dial() { QFETCH(QString, name); GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(10); gifRecorder.setQmlFileName(QString::fromLatin1("qtquickcontrols2-dial-%1.qml").arg(name)); gifRecorder.setHighQuality(false); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *dial = window->property("dial").value<QQuickItem*>(); QVERIFY(dial); const QPoint arcCenter = dial->mapToScene(QPoint(dial->width() / 2, dial->height() / 2)).toPoint(); const qreal distanceFromCenter = dial->height() * 0.25; // Go a bit past the actual min/max to ensure that we get the full range. const qreal minAngle = qDegreesToRadians(-170.0); const qreal maxAngle = qDegreesToRadians(170.0); // Drag from start to end. qreal startAngle = minAngle; qreal endAngle = maxAngle; QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, posAlongArc( arcCenter, startAngle, endAngle, distanceFromCenter, 0, QEasingCurve::InOutQuad), 30); moveSmoothlyAlongArc(window, arcCenter, distanceFromCenter, startAngle, endAngle, QEasingCurve::InOutQuad); // Come back from the end a bit. startAngle = endAngle; endAngle -= qDegreesToRadians(50.0); moveSmoothlyAlongArc(window, arcCenter, distanceFromCenter, startAngle, endAngle, QEasingCurve::InOutQuad); // Try to drag over max to show what happens with different wrap settings. 
startAngle = endAngle; endAngle = qDegreesToRadians(270.0); moveSmoothlyAlongArc(window, arcCenter, distanceFromCenter, startAngle, endAngle, QEasingCurve::InOutQuad); // Go back to the start so that it loops nicely. startAngle = endAngle; endAngle = minAngle; moveSmoothlyAlongArc(window, arcCenter, distanceFromCenter, startAngle, endAngle, QEasingCurve::InOutQuad); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, posAlongArc( arcCenter, startAngle, endAngle, distanceFromCenter, 1, QEasingCurve::InOutQuad), 30); gifRecorder.waitForFinish(); } void tst_Gifs::checkables_data() { QTest::addColumn<QString>("name"); QTest::addColumn<QList<int> >("pressIndices"); QTest::newRow("checkbox") << "checkbox" << (QList<int> { 1, 2, 2, 1 }); QTest::newRow("radiobutton") << "radiobutton" << (QList<int> { 1, 2, 1, 0 }); } void tst_Gifs::checkables() { QFETCH(QString, name); QFETCH(QList<int>, pressIndices); GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(6); gifRecorder.setQmlFileName(QString::fromLatin1("qtquickcontrols2-%1.qml").arg(name)); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); for (int i = 0; i < pressIndices.size(); ++i) { const int pressIndex = pressIndices.at(i); const QString controlId = QString::fromLatin1("control%1").arg(pressIndex + 1); QQuickItem *control = window->property(qPrintable(controlId)).value<QQuickItem*>(); QVERIFY(control); const QPoint pos = control->mapToScene(QPointF(control->width() / 2, control->height() / 2)).toPoint(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, pos, 800); QTest::mouseClick(window, Qt::LeftButton, Qt::NoModifier, pos, 300); } gifRecorder.waitForFinish(); } void tst_Gifs::comboBox() { GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(6); gifRecorder.setQmlFileName(QStringLiteral("qtquickcontrols2-combobox.qml")); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *comboBox = window->property("comboBox").value<QQuickItem*>(); QVERIFY(comboBox); // Open the popup. const QPoint center = comboBox->mapToScene( QPoint(comboBox->width() / 2, comboBox->height() / 2)).toPoint(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, center, 800); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, center, 80); // Select the third item. QObject *popup = comboBox->property("popup").value<QObject*>(); QVERIFY(popup); QQuickItem *popupContent = popup->property("contentItem").value<QQuickItem*>(); QVERIFY(popupContent); const QPoint lastItemPos = popupContent->mapToScene( QPoint(popupContent->width() / 2, popupContent->height() * 0.8)).toPoint(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, lastItemPos, 600); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, lastItemPos, 200); // Open the popup. QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, center, 1500); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, center, 80); // Select the first item. 
const QPoint firstItemPos = popupContent->mapToScene( QPoint(popupContent->width() / 2, popupContent->height() * 0.2)).toPoint(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, firstItemPos, 600); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, firstItemPos, 200); gifRecorder.waitForFinish(); } void tst_Gifs::triState_data() { QTest::addColumn<QString>("name"); QTest::newRow("checkbox-tristate") << "checkbox-tristate"; QTest::newRow("checkdelegate-tristate") << "checkdelegate-tristate"; } void tst_Gifs::triState() { QFETCH(QString, name); GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(6); gifRecorder.setQmlFileName(QString::fromLatin1("qtquickcontrols2-%1.qml").arg(name)); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *english = window->property("english").value<QQuickItem*>(); QVERIFY(english); QQuickItem *norwegian = window->property("norwegian").value<QQuickItem*>(); QVERIFY(norwegian); const QPoint englishCenter = english->mapToScene( QPointF(english->width() / 2, english->height() / 2)).toPoint(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, englishCenter, 1000); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, englishCenter, 300); const QPoint norwegianCenter = norwegian->mapToScene( QPointF(norwegian->width() / 2, norwegian->height() / 2)).toPoint(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, norwegianCenter, 1000); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, norwegianCenter, 300); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, norwegianCenter, 1000); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, norwegianCenter, 300); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, englishCenter, 1000); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, englishCenter, 300); gifRecorder.waitForFinish(); } void tst_Gifs::scrollBar() { GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(6); gifRecorder.setQmlFileName("qtquickcontrols2-scrollbar.qml"); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *scrollBar = window->property("scrollBar").value<QQuickItem*>(); QVERIFY(scrollBar); // Flick in the center of the screen to show that there's a scroll bar. const QPoint lhsWindowBottom = QPoint(0, window->height() - 1); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, lhsWindowBottom, 100); QTest::mouseMove(window, lhsWindowBottom - QPoint(0, 10), 30); QTest::mouseMove(window, lhsWindowBottom - QPoint(0, 30), 30); QTest::mouseMove(window, lhsWindowBottom - QPoint(0, 60), 30); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, lhsWindowBottom - QPoint(0, 100), 30); // Scroll with the scroll bar. 
const QPoint rhsWindowBottom = QPoint(window->width() - 1, window->height() - 1); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, rhsWindowBottom, 2000); const QPoint rhsWindowTop = QPoint(window->width() - 1, 1); moveSmoothly(window, rhsWindowBottom, rhsWindowTop, qAbs(rhsWindowTop.y() - rhsWindowBottom.y()), QEasingCurve::InCubic, 10); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, rhsWindowTop, 20); gifRecorder.waitForFinish(); } void tst_Gifs::scrollBarSnap_data() { QTest::addColumn<QString>("gifBaseName"); QTest::addColumn<int>("snapMode"); QTest::newRow("NoSnap") << "qtquickcontrols2-scrollbar-nosnap" << 0; QTest::newRow("SnapAlways") << "qtquickcontrols2-scrollbar-snapalways" << 1; QTest::newRow("SnapOnRelease") << "qtquickcontrols2-scrollbar-snaponrelease" << 2; } void tst_Gifs::scrollBarSnap() { QFETCH(QString, gifBaseName); QFETCH(int, snapMode); GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(8); gifRecorder.setHighQuality(true); gifRecorder.setQmlFileName("qtquickcontrols2-scrollbar-snap.qml"); gifRecorder.setOutputFileBaseName(gifBaseName); gifRecorder.start(); QQuickWindow *window = gifRecorder.window(); QQuickItem *scrollbar = window->property("scrollbar").value<QQuickItem*>(); QVERIFY(scrollbar); QVERIFY(scrollbar->setProperty("snapMode", QVariant(snapMode))); QCOMPARE(scrollbar->property("snapMode").toInt(), snapMode); const QPoint startPos(scrollbar->property("leftPadding").toReal(), scrollbar->y() + scrollbar->height() / 2); const int availableWidth = scrollbar->property("availableWidth").toReal(); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, startPos, 200); const QPoint pos1 = startPos + QPoint(availableWidth * 0.3, 0); moveSmoothly(window, startPos, pos1, pos1.x() - startPos.x(), QEasingCurve::OutQuint, 30); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, pos1, 0); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, pos1, 400); const QPoint pos2 = startPos + QPoint(availableWidth * 0.6, 0); moveSmoothly(window, pos1, pos2, pos2.x() - pos1.x(), QEasingCurve::OutQuint, 30); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, pos2, 0); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, pos2, 400); moveSmoothly(window, pos2, startPos, pos2.x() - startPos.x(), QEasingCurve::OutQuint, 30); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, startPos, 0); gifRecorder.waitForFinish(); } void tst_Gifs::scrollIndicator() { GifRecorder gifRecorder; gifRecorder.setDataDirPath(dataDirPath); gifRecorder.setOutputDir(outputDir); gifRecorder.setRecordingDuration(6); gifRecorder.setQmlFileName("qtquickcontrols2-scrollindicator.qml"); gifRecorder.start(); // Flick in the center of the screen to show that there's a scroll indicator. QQuickWindow *window = gifRecorder.window(); const QPoint windowBottom = QPoint(0, window->height() - 1); QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, windowBottom, 100); QTest::mouseMove(window, windowBottom - QPoint(0, 10), 30); QTest::mouseMove(window, windowBottom - QPoint(0, 30), 30); QTest::mouseMove(window, windowBottom - QPoint(0, 60), 30); QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, windowBottom - QPoint(0, 100), 30); // Scroll back down. 
    const QPoint windowTop = QPoint(0, 0);
    QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, windowTop, 2000);
    QTest::mouseMove(window, windowTop + QPoint(0, 10), 30);
    QTest::mouseMove(window, windowTop + QPoint(0, 30), 30);
    QTest::mouseMove(window, windowTop + QPoint(0, 60), 30);
    QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, windowTop + QPoint(0, 100), 30);

    gifRecorder.waitForFinish();
}

void tst_Gifs::progressBar_data()
{
    QTest::addColumn<bool>("indeterminate");
    QTest::newRow("indeterminate:false") << false;
    QTest::newRow("indeterminate:true") << true;
}

void tst_Gifs::progressBar()
{
    QFETCH(bool, indeterminate);

    GifRecorder gifRecorder;
    gifRecorder.setDataDirPath(dataDirPath);
    gifRecorder.setOutputDir(outputDir);
    gifRecorder.setRecordingDuration(4);
    gifRecorder.setQmlFileName(QString::fromLatin1("qtquickcontrols2-progressbar%1").arg(
        indeterminate ? QLatin1String("-indeterminate.qml") : QLatin1String(".qml")));

    gifRecorder.start();
    gifRecorder.waitForFinish();
}

void tst_Gifs::stackView_data()
{
    QTest::addColumn<QString>("name");
    QTest::addColumn<int>("duration");

    QTest::newRow("push") << "push" << 8;
    QTest::newRow("pop") << "pop" << 6;
    QTest::newRow("unwind") << "unwind" << 6;
    QTest::newRow("replace") << "replace" << 6;
}

void tst_Gifs::stackView()
{
    QFETCH(QString, name);
    QFETCH(int, duration);

    GifRecorder gifRecorder;
    gifRecorder.setDataDirPath(dataDirPath);
    gifRecorder.setOutputDir(outputDir);
    gifRecorder.setRecordingDuration(duration);
    gifRecorder.setHighQuality(true);
    gifRecorder.setQmlFileName(QString::fromLatin1("qtquickcontrols2-stackview-%1.qml").arg(name));

    gifRecorder.start();
    gifRecorder.waitForFinish();
}

void tst_Gifs::drawer()
{
    GifRecorder gifRecorder;
    gifRecorder.setDataDirPath(dataDirPath);
    gifRecorder.setOutputDir(outputDir);
    gifRecorder.setRecordingDuration(4);
    gifRecorder.setHighQuality(true);
    gifRecorder.setQmlFileName("qtquickcontrols2-drawer.qml");

    gifRecorder.start();

    QQuickWindow *window = gifRecorder.window();
    QObject *drawer = window->property("drawer").value<QObject*>();
    qreal width = drawer->property("width").toReal();

    QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(1, 1), 100);
    moveSmoothly(window, QPoint(1, 1), QPoint(width, 1), width, QEasingCurve::InOutBack, 1);
    QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(width, 1), 30);

    QTest::qWait(1000);
    QMetaObject::invokeMethod(drawer, "close");

    gifRecorder.waitForFinish();
}

void tst_Gifs::delayButton()
{
    GifRecorder gifRecorder;
    gifRecorder.setDataDirPath(dataDirPath);
    gifRecorder.setOutputDir(outputDir);
    gifRecorder.setRecordingDuration(9);
    gifRecorder.setQmlFileName("qtquickcontrols2-delaybutton.qml");

    gifRecorder.start();

    QQuickWindow *window = gifRecorder.window();
    QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() / 2, window->height() / 2), 0);
    QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() / 2, window->height() / 2), 1500);

    QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() / 2, window->height() / 2), 1000);
    QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() / 2, window->height() / 2), 200);

    QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() / 2, window->height() / 2), 1500);
    QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() / 2, window->height() / 2), 1730);

    QTest::mousePress(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() / 2, window->height() / 2), 1000);
    QTest::mouseRelease(window, Qt::LeftButton, Qt::NoModifier, QPoint(window->width() / 2, window->height() / 2), 2070); // 0.69 * 3000

    gifRecorder.waitForFinish();
}

QTEST_MAIN(tst_Gifs)

#include "tst_gifs.moc"
utf-8
1
LGPL-3 or GPL-2
2016-2021 The Qt Company Ltd.
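The drawer() test above calls a moveSmoothly() helper that is defined earlier in tst_gifs.cpp and not reproduced in this excerpt. A minimal sketch of such a helper, assuming only the call signature used above; the body is illustrative, not the project's actual implementation:

#include <QtTest>
#include <QEasingCurve>
#include <QPoint>
#include <QQuickWindow>

// Interpolate the cursor from 'from' to 'to' in 'movements' steps, shaped by
// an easing curve, waiting 'movementDelay' ms between synthesized move events.
static void moveSmoothly(QQuickWindow *window, const QPoint &from, const QPoint &to,
    int movements, const QEasingCurve &easingCurve, int movementDelay)
{
    for (int movement = 1; movement <= movements; ++movement) {
        const qreal progress = easingCurve.valueForProgress(movement / qreal(movements));
        const QPoint pos = from + (to - from) * progress;
        QTest::mouseMove(window, pos, movementDelay);
    }
}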
linux-5.16.7/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
/*
 * Copyright (C) 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>

#define BLK_ADDR_REG_OFFSET	0x1f
#define PLL_AFE1_100MHZ_BLK	0x2100
#define PLL_CLK_AMP_OFFSET	0x03
#define PLL_CLK_AMP_2P05V	0x2b18

static int ns2_pci_phy_init(struct phy *p)
{
	struct mdio_device *mdiodev = phy_get_drvdata(p);
	int rc;

	/* select the AFE 100MHz block page */
	rc = mdiodev_write(mdiodev, BLK_ADDR_REG_OFFSET, PLL_AFE1_100MHZ_BLK);
	if (rc)
		goto err;

	/* set the 100 MHz reference clock amplitude to 2.05 v */
	rc = mdiodev_write(mdiodev, PLL_CLK_AMP_OFFSET, PLL_CLK_AMP_2P05V);
	if (rc)
		goto err;

	return 0;

err:
	dev_err(&mdiodev->dev, "Error %d writing to phy\n", rc);
	return rc;
}

static const struct phy_ops ns2_pci_phy_ops = {
	.init = ns2_pci_phy_init,
	.owner = THIS_MODULE,
};

static int ns2_pci_phy_probe(struct mdio_device *mdiodev)
{
	struct device *dev = &mdiodev->dev;
	struct phy_provider *provider;
	struct phy *phy;

	phy = devm_phy_create(dev, dev->of_node, &ns2_pci_phy_ops);
	if (IS_ERR(phy)) {
		dev_err(dev, "failed to create Phy\n");
		return PTR_ERR(phy);
	}

	phy_set_drvdata(phy, mdiodev);

	provider = devm_of_phy_provider_register(&phy->dev,
						 of_phy_simple_xlate);
	if (IS_ERR(provider)) {
		dev_err(dev, "failed to register Phy provider\n");
		return PTR_ERR(provider);
	}

	dev_info(dev, "%s PHY registered\n", dev_name(dev));

	return 0;
}

static const struct of_device_id ns2_pci_phy_of_match[] = {
	{ .compatible = "brcm,ns2-pcie-phy", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ns2_pci_phy_of_match);

static struct mdio_driver ns2_pci_phy_driver = {
	.mdiodrv = {
		.driver = {
			.name = "phy-bcm-ns2-pci",
			.of_match_table = ns2_pci_phy_of_match,
		},
	},
	.probe = ns2_pci_phy_probe,
};
mdio_module_driver(ns2_pci_phy_driver);

MODULE_AUTHOR("Broadcom");
MODULE_DESCRIPTION("Broadcom Northstar2 PCI Phy driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:phy-bcm-ns2-pci");
utf-8
1
GPL-2
1991-2012 Linus Torvalds and many others
wvstreams-4.6.1/include/wvcont.h
/* -*- Mode: C++ -*- * Worldvisions Weaver Software: * Copyright (C) 1997-2002 Net Integration Technologies, Inc. * * FIXME: I was too lazy to templatize this properly, so we only support * WvCallback<void*,void*>. It should be possible to work with any kind * of return value and parameter, although it makes sense to limit things * to just one parameter (since it currently has to be returned by yield() * somehow). */ #ifndef __WVCONT_H #define __WVCONT_H #include "wvlinklist.h" #include "wvstreamsdebugger.h" #include "wvtr1.h" typedef wv::function<void*(void*)> WvContCallback; /** * WvCont provides "continuations", which are apparently also known as * semi-coroutines. You can wrap any WvCallback<void*,void*> in a WvCont * and make it a "continuable" callback - that is, you can yield() from it * and return a value. Next time someone calls your callback, it will be * as if yield() has returned (and the parameter to your function is returned * from yield()). */ class WvCont { struct Data; friend struct Data; typedef WvList<Data> DataList; private: /** * When we copy a WvCont, we increase the reference count of the 'data' * member rather than copying it. That makes it so every copy of a given * callback object still refers to the same WvTask. */ Data *data; static DataList *data_list; static Data *curdata; static int taskdepth; static void bouncer(void *userdata); /** * Actually call the callback inside its task, and enforce a call stack. * Doesn't do anything with arguments. Returns the return value. */ void *call() { return _call(data); } /** * Call the callback inside its task, but don't assume this WvCont will * still be around when we come back. */ static void *_call(Data *data); /** * Construct a WvCont given a pre-existing Data structure. This is * basically equivalent to using the copy constructor. */ WvCont(Data *data); public: /** * Construct a WvCont using an existing WvCallback. The WvCont object * can be used in place of that callback, and stored in a callback of * the same data type. */ WvCont(const WvContCallback &cb, unsigned long stacksize = 64*1024); /** Copy constructor. */ WvCont(const WvCont &cb); /** Destructor. */ ~WvCont(); /** * call the callback, making p1 the return value of yield() or the * parameter to the function, and returning Ret, the argument of yield() * or the return value of the function. */ void *operator() (void *p1 = 0); // the following are static because a function doesn't really know // which WvCont it belongs to, and only one WvCont can be the "current" // one globally in an application anyway. // // Unfortunately this prevents us from assert()ing that you're in the // context you think you are. /** * Get a copy of the current WvCont. */ static WvCont current(); /** * "return" from the current callback, giving value 'ret' to the person * who called us. Next time this callback is called, it's as if yield() * had returned, and the parameter to the callback is the value of * yield(). */ static void *yield(void *ret = 0); /** * Tell us if the current context is "okay", that is, not trying to * die. If !isok(), you shouldn't yield(), because the caller is just * going to keep calling you until you die. Return as soon as you can. */ static bool isok(); /** * A templated function that allows you to pass a WvCont wherever a * C-style function pointer of the form * R func(T, void *userdata) * is taken. It's your job to make sure the 'userdata' provided is * a pointer to the right WvCont. 
* * Example: * typedef bool MyFunc(Obj *obj, void *userdata); * WvCont cont; * MyFunc *func = &WvCont::c_bouncer<bool,Obj *>; * bool b = func(new Obj, &cont); */ template <typename R, typename T> static R c_bouncer(T t, void *_cont) { WvCont &cont = *(WvCont *)_cont; return (R)cont((T)t); } /** * A templated function that allows you to pass a WvCont wherever a * C-style function pointer of the form * R func(void *userdata) * is taken. It's your job to make sure the 'userdata' provided is * a pointer to the right WvCont. * * Example: * typedef bool MyFunc(void *userdata); * WvCont cont; * MyFunc *func = &WvCont::c_bouncer<bool>; * bool b = func(&cont); */ template <typename R> static R c_bouncer(void *_cont) { WvCont &cont = *(WvCont *)_cont; return (R)cont(0); } private: static WvString debugger_conts_run_cb(WvStringParm cmd, WvStringList &args, WvStreamsDebugger::ResultCallback result_cb, void *); }; #endif // __WVCONT_H
utf-8
1
unknown
unknown
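A minimal usage sketch for the WvCont class declared above, based only on its documented interface; the counter callback and the values passed are hypothetical:

#include "wvcont.h"

// A continuable callback: each call resumes after yield() and hands back n+1.
static void *counter(void *arg)
{
    long n = (long)arg;
    while (WvCont::isok())
        n = (long)WvCont::yield((void *)(n + 1));
    return NULL;
}

void example()
{
    WvCont cont(counter);
    void *a = cont((void *)10);   // enters counter(); yield(11) returns here, so a == 11
    void *b = cont(a);            // resumes at yield(); yield(12) returns here, so b == 12
}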
linux-5.16.7/arch/sh/include/mach-migor/mach/migor.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_MIGOR_H
#define __ASM_SH_MIGOR_H

#define PORT_MSELCRA 0xa4050180
#define PORT_MSELCRB 0xa4050182
#define BSC_CS4BCR 0xfec10010
#define BSC_CS6ABCR 0xfec1001c
#define BSC_CS4WCR 0xfec10030

#include <video/sh_mobile_lcdc.h>

int migor_lcd_qvga_setup(void *sys_ops_handle,
			 struct sh_mobile_lcdc_sys_bus_ops *sys_ops);

#endif /* __ASM_SH_MIGOR_H */
utf-8
1
GPL-2
1991-2012 Linus Torvalds and many others
grub2-2.06/grub-core/kern/i386/xen/pvh.c
/* * GRUB -- GRand Unified Bootloader * Copyright (C) 2018 Free Software Foundation, Inc. * * GRUB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GRUB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GRUB. If not, see <http://www.gnu.org/licenses/>. */ #include <grub/kernel.h> #include <grub/misc.h> #include <grub/memory.h> #include <grub/mm.h> #include <grub/i386/cpuid.h> #include <grub/i386/io.h> #include <grub/xen.h> #include <xen/hvm/start_info.h> #include <grub/i386/linux.h> #include <grub/machine/kernel.h> #include <grub/machine/memory.h> #include <xen/hvm/params.h> #include <xen/memory.h> #define XEN_MEMORY_MAP_SIZE 128 grub_uint64_t grub_rsdp_addr; static char hypercall_page[GRUB_XEN_PAGE_SIZE] __attribute__ ((aligned (GRUB_XEN_PAGE_SIZE))); static grub_uint32_t xen_cpuid_base; static struct start_info grub_xen_start_page; static struct grub_e820_mmap_entry map[XEN_MEMORY_MAP_SIZE]; static unsigned int nr_map_entries; static void grub_xen_cons_msg (const char *msg) { const char *c; for (c = msg; *c; c++) grub_outb (*c, XEN_HVM_DEBUGCONS_IOPORT); } static void grub_xen_panic (const char *msg) { grub_xen_cons_msg (msg); grub_xen_cons_msg ("System halted!\n"); asm volatile ("cli"); while (1) { asm volatile ("hlt"); } } static void grub_xen_cpuid_base (void) { grub_uint32_t base, eax, signature[3]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { grub_cpuid (base, eax, signature[0], signature[1], signature[2]); if (!grub_memcmp ("XenVMMXenVMM", signature, 12) && (eax - base) >= 2) { xen_cpuid_base = base; return; } } grub_xen_panic ("Found no Xen signature!\n"); } static void grub_xen_setup_hypercall_page (void) { grub_uint32_t msr, addr, eax, ebx, ecx, edx; /* Get base address of Xen-specific MSRs. */ grub_cpuid (xen_cpuid_base + 2, eax, ebx, ecx, edx); msr = ebx; addr = (grub_uint32_t) (&hypercall_page); /* Specify hypercall page address for Xen. 
*/ asm volatile ("wrmsr" : : "c" (msr), "a" (addr), "d" (0) : "memory"); } int grub_xen_hypercall (grub_uint32_t callno, grub_uint32_t a0, grub_uint32_t a1, grub_uint32_t a2, grub_uint32_t a3, grub_uint32_t a4, grub_uint32_t a5 __attribute__ ((unused))) { grub_uint32_t res; asm volatile ("call *%[callno]" : "=a" (res), "+b" (a0), "+c" (a1), "+d" (a2), "+S" (a3), "+D" (a4) : [callno] "a" (&hypercall_page[callno * 32]) : "memory"); return res; } static grub_uint32_t grub_xen_get_param (int idx) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = grub_xen_hypercall (__HYPERVISOR_hvm_op, HVMOP_get_param, (grub_uint32_t) (&xhv), 0, 0, 0, 0); if (r < 0) grub_xen_panic ("Could not get parameter from Xen!\n"); return xhv.value; } static void * grub_xen_add_physmap (unsigned int space, void *addr) { struct xen_add_to_physmap xatp; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = space; xatp.gpfn = (grub_addr_t) addr >> GRUB_XEN_LOG_PAGE_SIZE; if (grub_xen_hypercall (__HYPERVISOR_memory_op, XENMEM_add_to_physmap, (grub_uint32_t) (&xatp), 0, 0, 0, 0)) grub_xen_panic ("Memory_op hypercall failed!\n"); return addr; } static void grub_xen_sort_mmap (void) { grub_uint64_t from, to; unsigned int i; struct grub_e820_mmap_entry tmp; /* Align map entries to page boundaries. */ for (i = 0; i < nr_map_entries; i++) { from = map[i].addr; to = from + map[i].len; if (map[i].type == GRUB_MEMORY_AVAILABLE) { from = ALIGN_UP (from, GRUB_XEN_PAGE_SIZE); to = ALIGN_DOWN (to, GRUB_XEN_PAGE_SIZE); } else { from = ALIGN_DOWN (from, GRUB_XEN_PAGE_SIZE); to = ALIGN_UP (to, GRUB_XEN_PAGE_SIZE); } map[i].addr = from; map[i].len = to - from; } again: /* Sort entries by start address. */ for (i = 1; i < nr_map_entries; i++) { if (map[i].addr >= map[i - 1].addr) continue; tmp = map[i]; map[i] = map[i - 1]; map[i - 1] = tmp; i = 0; } /* Detect overlapping areas. */ for (i = 1; i < nr_map_entries; i++) { if (map[i].addr >= map[i - 1].addr + map[i - 1].len) continue; tmp = map[i - 1]; map[i - 1].len = map[i].addr - map[i - 1].addr; if (map[i].addr + map[i].len >= tmp.addr + tmp.len) continue; if (nr_map_entries < ARRAY_SIZE (map)) { map[nr_map_entries].addr = map[i].addr + map[i].len; map[nr_map_entries].len = tmp.addr + tmp.len - map[nr_map_entries].addr; map[nr_map_entries].type = tmp.type; nr_map_entries++; goto again; } } /* Merge adjacent entries. 
*/ for (i = 1; i < nr_map_entries; i++) { if (map[i].type == map[i - 1].type && map[i].addr == map[i - 1].addr + map[i - 1].len) { map[i - 1].len += map[i].len; map[i] = map[nr_map_entries - 1]; nr_map_entries--; goto again; } } } static void grub_xen_get_mmap (void) { struct xen_memory_map memmap; memmap.nr_entries = ARRAY_SIZE (map); set_xen_guest_handle (memmap.buffer, map); if (grub_xen_hypercall (__HYPERVISOR_memory_op, XENMEM_memory_map, (grub_uint32_t) (&memmap), 0, 0, 0, 0)) grub_xen_panic ("Could not get memory map from Xen!\n"); nr_map_entries = memmap.nr_entries; grub_xen_sort_mmap (); } static void grub_xen_set_mmap (void) { struct xen_foreign_memory_map memmap; memmap.domid = DOMID_SELF; memmap.map.nr_entries = nr_map_entries; set_xen_guest_handle (memmap.map.buffer, map); grub_xen_hypercall (__HYPERVISOR_memory_op, XENMEM_set_memory_map, (grub_uint32_t) (&memmap), 0, 0, 0, 0); } static void grub_xen_mm_init_regions (void) { grub_uint64_t modend, from, to; unsigned int i; modend = grub_modules_get_end (); for (i = 0; i < nr_map_entries; i++) { if (map[i].type != GRUB_MEMORY_AVAILABLE) continue; from = map[i].addr; to = from + map[i].len; if (from < modend) from = modend; if (from >= to || from >= (1ULL << 32)) continue; if (to > (1ULL << 32)) to = 1ULL << 32; grub_mm_init_region ((void *) (grub_addr_t) from, to - from); } } static grub_uint64_t grub_xen_find_page (grub_uint64_t start) { unsigned int i, j; grub_uint64_t last = start; /* * Try to find a e820 map hole below 4G. * Relies on page-aligned entries (addr and len) and input (start). */ for (i = 0; i < nr_map_entries; i++) { if (last > map[i].addr + map[i].len) continue; if (last < map[i].addr) return last; if ((map[i].addr >> 32) || ((map[i].addr + map[i].len) >> 32)) break; last = map[i].addr + map[i].len; } if (i == nr_map_entries) return last; /* No hole found, use the highest RAM page below 4G and reserve it. */ if (nr_map_entries == ARRAY_SIZE (map)) grub_xen_panic ("Memory map size limit reached!\n"); for (i = 0, j = 0; i < nr_map_entries; i++) { if (map[i].type != GRUB_MEMORY_AVAILABLE) continue; if (map[i].addr >> 32) break; j = i; if ((map[i].addr + map[i].len) >> 32) break; } if (map[j].type != GRUB_MEMORY_AVAILABLE) grub_xen_panic ("No free memory page found!\n"); if ((map[j].addr + map[j].len) >> 32) last = (1ULL << 32) - GRUB_XEN_PAGE_SIZE; else last = map[j].addr + map[j].len - GRUB_XEN_PAGE_SIZE; map[nr_map_entries].addr = last; map[nr_map_entries].len = GRUB_XEN_PAGE_SIZE; map[nr_map_entries].type = GRUB_MEMORY_RESERVED; nr_map_entries++; grub_xen_sort_mmap (); return last; } void grub_xen_setup_pvh (void) { grub_addr_t par; grub_xen_cpuid_base (); grub_xen_setup_hypercall_page (); grub_xen_get_mmap (); /* Setup Xen data. 
*/ grub_xen_start_page_addr = &grub_xen_start_page; par = grub_xen_get_param (HVM_PARAM_CONSOLE_PFN); grub_xen_start_page_addr->console.domU.mfn = par; grub_xen_xcons = (void *) (grub_addr_t) (par << GRUB_XEN_LOG_PAGE_SIZE); par = grub_xen_get_param (HVM_PARAM_CONSOLE_EVTCHN); grub_xen_start_page_addr->console.domU.evtchn = par; par = grub_xen_get_param (HVM_PARAM_STORE_PFN); grub_xen_start_page_addr->store_mfn = par; grub_xen_xenstore = (void *) (grub_addr_t) (par << GRUB_XEN_LOG_PAGE_SIZE); par = grub_xen_get_param (HVM_PARAM_STORE_EVTCHN); grub_xen_start_page_addr->store_evtchn = par; par = grub_xen_find_page (0); grub_xen_grant_table = grub_xen_add_physmap (XENMAPSPACE_grant_table, (void *) par); par = grub_xen_find_page (par + GRUB_XEN_PAGE_SIZE); grub_xen_shared_info = grub_xen_add_physmap (XENMAPSPACE_shared_info, (void *) par); grub_xen_set_mmap (); grub_xen_mm_init_regions (); grub_rsdp_addr = pvh_start_info->rsdp_paddr; } grub_err_t grub_machine_mmap_iterate (grub_memory_hook_t hook, void *hook_data) { unsigned int i; for (i = 0; i < nr_map_entries; i++) { if (map[i].len && hook (map[i].addr, map[i].len, map[i].type, hook_data)) break; } return GRUB_ERR_NONE; }
utf-8
1
GPL-3+
1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc
webkit2gtk-2.34.5/Source/WebCore/Modules/webauthn/fido/U2fCommandConstructor.h
// Copyright 2018 The Chromium Authors. All rights reserved. // Copyright (C) 2019 Apple Inc. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #pragma once #if ENABLE(WEB_AUTHN) #include <wtf/Forward.h> namespace WebCore { struct AuthenticationExtensionsClientInputs; struct PublicKeyCredentialCreationOptions; struct PublicKeyCredentialDescriptor; struct PublicKeyCredentialRequestOptions; } namespace fido { // Checks whether the request can be translated to valid U2F request // parameter. Namely, U2F request does not support resident key and // user verification, and ES256 algorithm must be used for public key // credential. // https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-client-to-authenticator-protocol-v2.0-id-20180227.html#u2f-authenticatorMakeCredential-interoperability WEBCORE_EXPORT bool isConvertibleToU2fRegisterCommand(const WebCore::PublicKeyCredentialCreationOptions&); // Checks whether user verification is not required and that allow list is // not empty. // https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-client-to-authenticator-protocol-v2.0-id-20180227.html#u2f-authenticatorGetAssertion-interoperability WEBCORE_EXPORT bool isConvertibleToU2fSignCommand(const WebCore::PublicKeyCredentialRequestOptions&); // Extracts APDU encoded U2F register command from PublicKeyCredentialCreationOptions. WEBCORE_EXPORT std::optional<Vector<uint8_t>> convertToU2fRegisterCommand(const Vector<uint8_t>& clientDataHash, const WebCore::PublicKeyCredentialCreationOptions&); // Extracts APDU encoded U2F check only sign command from // PublicKeyCredentialCreationOptions. Invoked when U2F register operation includes key // handles in exclude list. WEBCORE_EXPORT std::optional<Vector<uint8_t>> convertToU2fCheckOnlySignCommand(const Vector<uint8_t>& clientDataHash, const WebCore::PublicKeyCredentialCreationOptions&, const WebCore::PublicKeyCredentialDescriptor&); // Extracts APDU encoded U2F sign command from PublicKeyCredentialRequestOptions. 
WEBCORE_EXPORT std::optional<Vector<uint8_t>> convertToU2fSignCommand(const Vector<uint8_t>& clientDataHash, const WebCore::PublicKeyCredentialRequestOptions&, const Vector<uint8_t>& keyHandle, bool isAppId = false); WEBCORE_EXPORT Vector<uint8_t> constructBogusU2fRegistrationCommand(); // Returns "https://www.gstatic.com/securitykey/origins.json" as an AppID when googleLegacyAppidSupport is true. WEBCORE_EXPORT String processGoogleLegacyAppIdSupportExtension(const std::optional<WebCore::AuthenticationExtensionsClientInputs>&); } // namespace fido #endif // ENABLE(WEB_AUTHN)
utf-8
1
BSD-2-clause
1999 Antti Koivisto <koivisto@kde.org> 1999-2000 Lars Knoll <knoll@kde.org> 1999-2001 Harri Porten <porten@kde.org> 2001 Dirk Mueller <mueller@kde.org> 2002-2013 Vivek Thampi 2003-2021 Apple Inc 2004-2006 Rob Buis <buis@kde.org> 2004-2008 Nikolas Zimmermann <zimmermann@kde.org> 2005 Frerich Raabe <raabe@kde.org> 2005 Maksim Orlovich <maksim@kde.org> 2005, 2007-2013, 2015, 2017-2021 Google Inc 2005, 2008-2013 Nokia 2005-2006 Alexey Proskuryakov 2005-2006 Kimmo Kinnunen <kimmo.t.kinnunen@nokia.com> 2005-2008 Eric Seidel <eric@webkit.org> 2006 Alexander Kellett <lypanov@kde.org> 2006 Graham Dennis <graham.dennis@gmail.com> 2006 Michael Emmel mike.emmel@gmail.com 2006 Samuel Weinig <sam.weinig@gmail.com> 2006-2007 Alexey Proskuryakov <ap@nypop.com> 2006-2007 Alexey Proskuryakov <ap@webkit.org> 2007 Christian Dywan <christian@twotoasts.de> 2007 Henry Mason <hmason@mac.com> 2007 Holger Hans Peter Freyther <zecke@selfish.org> 2007 Justin Haygood <jhaygood@reaktix.com> 2007 Samuel Weinig <sam@webkit.org> 2007, 2009-2010 Holger Hans Peter Freyther 2007-2008 Alp Toker <alp@atoker.com> 2007-2009 Torch Mobile Inc 2008 Alex Mathews <possessedpenguinbob@gmail.com> 2008 Christian Dywan <christian@imendio.com> 2008 Collin Jackson <collinj@webkit.org> 2008 Dirk Schulze <vbs85@gmx.de> 2008 Kelvin W Sherlock <ksherlock@gmail.com> 2008 Nuanti Ltd 2008, 2010 The Android Open Source Project 2008, 2010-2011 Julien Chaffraix <jchaffraix@webkit.org> 2008, 2014 Collabora Ltd 2008-2009 Dirk Schulze <krit@webkit.org> 2009 Antonio Gomes <tonikitoo@webkit.org> 2009 Jeff Schiller <codedread@gmail.com> 2009 Joseph Pecoraro 2009-2010 Alex Milowski <alex@milowski.com> 2009-2011 Brent Fulgham <bfulgham@webkit.org> 2009-2015 University of Szeged 2009-2021 Igalia S.L. 2010 Andras Becsi <abecsi@inf.u-szeged.hu>, University of Szeged 2010 Mozilla Corporation 2010 Peter Varga <pvarga@inf.u-szeged.hu>, University of Szeged 2010 Renata Hodovan <hodovan@inf.u-szeged.hu> 2010 Sencha Inc 2010 Torch Mobile (Beijing) Co 2010 Zoltan Herczeg <zherczeg@webkit.org> 2010, 2012 MIPS Technologies Inc 2010, 2012-2013 Company 100 Inc 2010, 2012-2014 Patrick Gansterer <paroga@paroga.com> 2010-2011 Adam Barth 2010-2011 Zoltan Herczeg 2010-2012 Research In Motion Limited 2010-2013 Motorola Mobility 2011 Adam Barth <abarth@webkit.org> 2011 Andreas Kling <kling@webkit.org> 2011 Benjamin Poulain <benjamin@webkit.org> 2011 Daniel Bates <dbates@intudata.com> 2011 Felician Marton 2011 Gabor Loki <loki@webkit.org> 2011 Peter Varga <pvarga@webkit.org>, University of Szeged 2011 ProFUSION embedded systems 2011 Renata Hodovan <reni@webkit.org> 2011, 2014-2017 The Chromium Authors 2011-2012, 2014-2015 Ericsson AB 2011-2013 Intel Corporation 2011-2013 Samsung Electronics 2011-2014 Adobe Systems Inc 2012 David Barton <dbarton@mathscribe.com> 2012 Gabor Rapcsanyi 2012 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged 2012 Intel Inc 2012 Koji Ishii <kojiishi@gmail.com> 2012 Mathias Bynens <mathias@qiwi.be> 2012 Rik Cabanier <cabanier@adobe.com> 2012 Sony Network Entertainment 2012 Victor Carbune <victor@rosedu.org> 2012 Zan Dobersek <zandobersek@gmail.com> 2012, 2016 SoftAtHome 2012-2013 ChangSeok Oh <shivamidow@gmail.com> 2012-2013 Digia Plc 2012-2013 Michael Pruett <michael@68k.org> 2012-2015 University of Washington 2012-2016 Yann Collet 2013 Adenilson Cavalcanti <cavalcantii@gmail.com> 2013 Andrew Bortz 2013 Carlos Garnacho <carlosg@gnome.org> 2013 Gustavo Noronha Silva <gns@gnome.org> 2013 The MathJax Consortium 2013 Xidorn Quan 
<quanxunzhen@gmail.com> 2013-2014 Cable Television Labs Inc 2014 Antoine Quint 2014 Dhi Aurrahman <diorahman@rockybars.com> 2014 Gurpreet Kaur <k.gurpreet@samsung.com> 2014 Raspberry Pi Foundation 2014 Saam Barati. <saambarati1@gmail.com> 2014-2015 Frederic Wang <fred.wang@free.fr> 2014-2015 Saam Barati <saambarati1@gmail.com> 2014-2018 Yusuke Suzuki <utatane.tea@gmail.com> 2015 Dominic Szablewski <dominic@phoboslab.org> 2015 Electronic Arts Inc 2015 Jordan Harband 2015 Tobias Reiss <tobi+webkit@basecode.de> 2015, 2018 Andy VanWagoner <andy@vanwagoner.family> 2015-2016 Sukolsak Sakshuwong <sukolsak@gmail.com> 2015-2017 Canon Inc 2015-2020 Devin Rousso <webkit@devinrousso.com> 2016 Caitlin Potter <caitp@igalia.com> 2016 Konstantin Tokavev <annulen@yandex.ru> 2016 Red Hat Inc 2016 Yusuke Suzuki <yusuke.suzuki@sslab.ics.keio.ac.jp> 2016-2018 Akamai Technologies Inc 2016-2019 Oleksandr Skachkov <gskachkov@gmail.com> 2016-2021 Metrological Group B.V 2016-2021 Sony Interactive Entertainment 2017 Caio Lima <ticaiolima@gmail.com> 2017 Endless Mobile Inc 2017 Oleksandr Skachkov <gskackhov@gmail.com> 2018 Google LLC 2018 Yusuke Suzuki <yusukesuzuki@slowstart.org> 2018 mce sys Ltd 2019 Carlos Eduardo Ramalho <cadubentzen@gmail.com> 2019 the V8 project authors 2019-2021 Alexey Shvayka <shvaikalesh@gmail.com> 2020 Cloudinary Inc 2020 Darryl Pogue <darryl@dpogue.ca> 2020 Jan-Michael Brummer <jan.brummer@tabos.org> 2020 WikiMedia Foundation. All Rights Reserve 2021 Purism SPC 2021 Tyler Wilcock <twilco.o@protonmail.com>
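A hedged sketch of how the declarations above compose in a caller: check convertibility before building the APDU. The include form, the transport hook, and the surrounding setup are stand-ins, not WebCore API:

#include "U2fCommandConstructor.h"  // assumes a WebCore build with ENABLE(WEB_AUTHN)

void sendToAuthenticator(const Vector<uint8_t>&);  // hypothetical transport hook

void makeU2fRegistration(const Vector<uint8_t>& clientDataHash,
    const WebCore::PublicKeyCredentialCreationOptions& options)
{
    // U2F cannot express resident keys or user verification, so check first.
    if (!fido::isConvertibleToU2fRegisterCommand(options))
        return;
    if (auto apdu = fido::convertToU2fRegisterCommand(clientDataHash, options))
        sendToAuthenticator(*apdu);
}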
mir-1.8.2+dfsg/tests/unit-tests/platforms/mesa/kms/test_gbm_buffer.cpp
/* * Copyright © 2012 Canonical Ltd. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 or 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Authored by: Christopher James Halse Rogers <christopher.halse.rogers@canonical.com> */ #include "mir/graphics/display.h" #include "mir/test/doubles/mock_egl.h" #include "mir/test/doubles/mock_gl.h" #include "mir/test/doubles/mock_drm.h" #include "mir/test/doubles/mock_gbm.h" #include "mir_test_framework/udev_environment.h" #include "src/platforms/mesa/server/kms/platform.h" #include "src/platforms/mesa/server/gbm_buffer.h" #include "src/platforms/mesa/include/native_buffer.h" #include "src/platforms/mesa/server/buffer_allocator.h" #include "mir/graphics/buffer_properties.h" #include "mir/test/doubles/null_emergency_cleanup.h" #include "src/server/report/null_report_factory.h" #include "mir/test/doubles/stub_console_services.h" #include "mir/test/doubles/null_gl_config.h" #include "mir/test/doubles/null_display_configuration_policy.h" #include <gbm.h> #include <gtest/gtest.h> #include <gmock/gmock.h> #include <cstdint> #include <stdexcept> namespace mg=mir::graphics; namespace mgm=mir::graphics::mesa; namespace geom=mir::geometry; namespace mtd=mir::test::doubles; namespace mtf=mir_test_framework; class GBMBufferTest : public ::testing::Test { protected: virtual void SetUp() { using namespace testing; fake_devices.add_standard_device("standard-drm-devices"); size = geom::Size{300, 200}; pf = mir_pixel_format_argb_8888; stride = geom::Stride{4 * size.width.as_uint32_t()}; usage = mg::BufferUsage::hardware; buffer_properties = mg::BufferProperties{size, pf, usage}; ON_CALL(mock_egl, eglChooseConfig(_,_,_,1,_)) .WillByDefault(DoAll(SetArgPointee<2>(mock_egl.fake_configs[0]), SetArgPointee<4>(1), Return(EGL_TRUE))); ON_CALL(mock_egl, eglGetConfigAttrib(_, mock_egl.fake_configs[0], EGL_NATIVE_VISUAL_ID, _)) .WillByDefault( DoAll( SetArgPointee<3>(GBM_FORMAT_XRGB8888), Return(EGL_TRUE))); mock_egl.provide_egl_extensions(); mock_gl.provide_gles_extensions(); ON_CALL(mock_gbm, gbm_bo_get_width(_)) .WillByDefault(Return(size.width.as_uint32_t())); ON_CALL(mock_gbm, gbm_bo_get_height(_)) .WillByDefault(Return(size.height.as_uint32_t())); ON_CALL(mock_gbm, gbm_bo_get_format(_)) .WillByDefault(Return(GBM_BO_FORMAT_ARGB8888)); ON_CALL(mock_gbm, gbm_bo_get_stride(_)) .WillByDefault(Return(stride.as_uint32_t())); platform = std::make_shared<mgm::Platform>( mir::report::null_display_report(), std::make_shared<mtd::StubConsoleServices>(), *std::make_shared<mtd::NullEmergencyCleanup>(), mgm::BypassOption::allowed); auto const display = platform->create_display( std::make_shared<mtd::NullDisplayConfigurationPolicy>(), std::make_shared<mtd::NullGLConfig>()); allocator.reset(new mgm::BufferAllocator( *display, platform->gbm->device, mgm::BypassOption::allowed, mgm::BufferImportMethod::gbm_native_pixmap)); } mir::renderer::gl::TextureSource* as_texture_source(std::shared_ptr<mg::Buffer> const& buffer) { return dynamic_cast<mir::renderer::gl::TextureSource*>(buffer->native_buffer_base()); } 
::testing::NiceMock<mtd::MockDRM> mock_drm; ::testing::NiceMock<mtd::MockGBM> mock_gbm; ::testing::NiceMock<mtd::MockEGL> mock_egl; ::testing::NiceMock<mtd::MockGL> mock_gl; std::shared_ptr<mgm::Platform> platform; std::unique_ptr<mgm::BufferAllocator> allocator; // Defaults MirPixelFormat pf; geom::Size size; geom::Stride stride; mg::BufferUsage usage; mg::BufferProperties buffer_properties; mtf::UdevEnvironment fake_devices; }; TEST_F(GBMBufferTest, dimensions_test) { using namespace testing; EXPECT_CALL(mock_gbm, gbm_bo_create(_,_,_,_,_)); EXPECT_CALL(mock_gbm, gbm_bo_destroy(_)); auto buffer = allocator->alloc_buffer(buffer_properties); ASSERT_EQ(size, buffer->size()); } TEST_F(GBMBufferTest, buffer_has_expected_pixel_format) { using namespace testing; EXPECT_CALL(mock_gbm, gbm_bo_create(_,_,_,_,_)); EXPECT_CALL(mock_gbm, gbm_bo_destroy(_)); auto buffer(allocator->alloc_buffer(buffer_properties)); ASSERT_EQ(pf, buffer->pixel_format()); } TEST_F(GBMBufferTest, stride_has_sane_value) { using namespace testing; EXPECT_CALL(mock_gbm, gbm_bo_create(_,_,_,_,_)); EXPECT_CALL(mock_gbm, gbm_bo_destroy(_)); // RGBA 8888 cannot take less than 4 bytes // TODO: is there a *maximum* sane value for stride? geom::Stride minimum(size.width.as_uint32_t() * 4); auto buffer(allocator->alloc_buffer(buffer_properties)); auto native = std::dynamic_pointer_cast<mgm::NativeBuffer>(buffer->native_buffer_handle()); ASSERT_THAT(native, Ne(nullptr)); ASSERT_LE(minimum, geom::Stride{native->stride}); } TEST_F(GBMBufferTest, buffer_native_handle_has_correct_size) { using namespace testing; auto buffer = allocator->alloc_buffer(buffer_properties); auto native_handle = std::dynamic_pointer_cast<mgm::NativeBuffer>(buffer->native_buffer_handle()); ASSERT_THAT(native_handle, Ne(nullptr)); EXPECT_EQ(1, native_handle->fd_items); EXPECT_EQ(0, native_handle->data_items); } MATCHER_P(GEMFlinkHandleIs, value, "") { auto flink = reinterpret_cast<struct drm_gem_flink*>(arg); return flink->handle == value; } ACTION_P(SetGEMFlinkName, value) { auto flink = reinterpret_cast<struct drm_gem_flink*>(arg2); flink->name = value; } TEST_F(GBMBufferTest, buffer_native_handle_contains_correct_data) { using namespace testing; uint32_t prime_fd{0x77}; gbm_bo_handle mock_handle; mock_handle.u32 = 0xdeadbeef; EXPECT_CALL(mock_gbm, gbm_bo_get_handle(_)) .Times(Exactly(1)) .WillOnce(Return(mock_handle)); EXPECT_CALL(mock_drm, drmPrimeHandleToFD(_,mock_handle.u32,_,_)) .Times(Exactly(1)) .WillOnce(DoAll(SetArgPointee<3>(prime_fd), Return(0))); auto buffer = allocator->alloc_buffer(buffer_properties); auto handle = std::dynamic_pointer_cast<mgm::NativeBuffer>(buffer->native_buffer_handle()); ASSERT_THAT(handle, Ne(nullptr)); EXPECT_EQ(prime_fd, static_cast<unsigned int>(handle->fd[0])); EXPECT_EQ(stride.as_uint32_t(), static_cast<unsigned int>(handle->stride)); } TEST_F(GBMBufferTest, buffer_creation_throws_on_prime_fd_failure) { using namespace testing; EXPECT_CALL(mock_drm, drmPrimeHandleToFD(_,_,_,_)) .Times(Exactly(1)) .WillOnce(Return(-1)); EXPECT_THROW({ auto buffer = allocator->alloc_buffer(buffer_properties); }, std::runtime_error); } TEST_F(GBMBufferTest, gl_bind_to_texture_egl_image_creation_failed) { using namespace testing; ON_CALL(mock_egl, eglCreateImageKHR(_,_,_,_,_)) .WillByDefault(Return(EGL_NO_IMAGE_KHR)); EXPECT_THROW({ auto buffer = allocator->alloc_buffer(buffer_properties); as_texture_source(buffer)->gl_bind_to_texture(); }, std::runtime_error); } TEST_F(GBMBufferTest, gl_bind_to_texture_uses_egl_image) { using namespace 
testing; { InSequence seq; EXPECT_CALL(mock_egl, eglCreateImageKHR(_,_,_,_,_)) .Times(Exactly(1)); EXPECT_CALL(mock_egl, glEGLImageTargetTexture2DOES(_,mock_egl.fake_egl_image)) .Times(Exactly(1)); EXPECT_CALL(mock_egl, eglDestroyImageKHR(_,mock_egl.fake_egl_image)) .Times(Exactly(1)); } EXPECT_NO_THROW({ auto buffer = allocator->alloc_buffer(buffer_properties); as_texture_source(buffer)->gl_bind_to_texture(); }); }
utf-8
1
unknown
unknown
olive-editor-20200620/app/codec/encoder.h
/*** Olive - Non-Linear Video Editor Copyright (C) 2019 Olive Team This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ***/ #ifndef ENCODER_H #define ENCODER_H #include <memory> #include <QString> #include <QXmlStreamWriter> #include "codec/exportcodec.h" #include "codec/exportformat.h" #include "codec/frame.h" #include "common/timerange.h" #include "render/audioparams.h" #include "render/videoparams.h" OLIVE_NAMESPACE_ENTER class Encoder; using EncoderPtr = std::shared_ptr<Encoder>; class EncodingParams { public: EncodingParams(); void SetFilename(const QString& filename); void EnableVideo(const VideoParams& video_params, const ExportCodec::Codec& vcodec); void EnableAudio(const AudioParams& audio_params, const ExportCodec::Codec &acodec); void set_video_option(const QString& key, const QString& value); void set_video_bit_rate(const int64_t& rate); void set_video_max_bit_rate(const int64_t& rate); void set_video_buffer_size(const int64_t& sz); void set_video_threads(const int& threads); const QString& filename() const; bool video_enabled() const; const ExportCodec::Codec& video_codec() const; const VideoParams& video_params() const; const QHash<QString, QString>& video_opts() const; const int64_t& video_bit_rate() const; const int64_t& video_max_bit_rate() const; const int64_t& video_buffer_size() const; const int& video_threads() const; bool audio_enabled() const; const ExportCodec::Codec &audio_codec() const; const AudioParams& audio_params() const; const rational& GetExportLength() const; void SetExportLength(const rational& GetExportLength); virtual void Save(QXmlStreamWriter* writer) const; private: QString filename_; bool video_enabled_; ExportCodec::Codec video_codec_; VideoParams video_params_; QHash<QString, QString> video_opts_; int64_t video_bit_rate_; int64_t video_max_bit_rate_; int64_t video_buffer_size_; int video_threads_; bool audio_enabled_; ExportCodec::Codec audio_codec_; AudioParams audio_params_; rational export_length_; }; class Encoder : public QObject { Q_OBJECT public: Encoder(const EncodingParams& params); /** * @brief Create a Encoder instance using a Encoder ID * * @return * * A Encoder instance or nullptr if a Decoder with this ID does not exist */ static Encoder *CreateFromID(const QString& id, const EncodingParams &params); const EncodingParams& params() const; virtual bool Open() = 0; virtual bool WriteFrame(OLIVE_NAMESPACE::FramePtr frame, OLIVE_NAMESPACE::rational time) = 0; virtual void WriteAudio(OLIVE_NAMESPACE::AudioParams pcm_info, const QString& pcm_filename) = 0; virtual void Close() = 0; private: EncodingParams params_; }; OLIVE_NAMESPACE_EXIT #endif // ENCODER_H
utf-8
1
GPL-3+
2018-2020 Olive Team
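A sketch of driving the Encoder interface declared above. The "ffmpeg" ID and the codec enumerators are assumptions not confirmed by this header, and 'video_params', 'audio_params', 'frame', 'frame_time' and 'pcm_filename' are stand-ins for values produced by the render pipeline:

EncodingParams params;
params.SetFilename("out.mp4");
params.EnableVideo(video_params, ExportCodec::kH264);   // codec enumerator assumed
params.EnableAudio(audio_params, ExportCodec::kAAC);    // codec enumerator assumed

Encoder* encoder = Encoder::CreateFromID("ffmpeg", params);  // encoder ID assumed
if (encoder && encoder->Open()) {
    encoder->WriteFrame(frame, frame_time);            // once per rendered frame
    encoder->WriteAudio(audio_params, pcm_filename);   // PCM file written by the audio backend
    encoder->Close();
}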
nix-2.6.0+dfsg/src/libutil/url.cc
#include "url.hh" #include "url-parts.hh" #include "util.hh" namespace nix { std::regex refRegex(refRegexS, std::regex::ECMAScript); std::regex badGitRefRegex(badGitRefRegexS, std::regex::ECMAScript); std::regex revRegex(revRegexS, std::regex::ECMAScript); std::regex flakeIdRegex(flakeIdRegexS, std::regex::ECMAScript); ParsedURL parseURL(const std::string & url) { static std::regex uriRegex( "((" + schemeRegex + "):" + "(?:(?://(" + authorityRegex + ")(" + absPathRegex + "))|(/?" + pathRegex + ")))" + "(?:\\?(" + queryRegex + "))?" + "(?:#(" + queryRegex + "))?", std::regex::ECMAScript); std::smatch match; if (std::regex_match(url, match, uriRegex)) { auto & base = match[1]; std::string scheme = match[2]; auto authority = match[3].matched ? std::optional<std::string>(match[3]) : std::nullopt; std::string path = match[4].matched ? match[4] : match[5]; auto & query = match[6]; auto & fragment = match[7]; auto isFile = scheme.find("file") != std::string::npos; if (authority && *authority != "" && isFile) throw BadURL("file:// URL '%s' has unexpected authority '%s'", url, *authority); if (isFile && path.empty()) path = "/"; return ParsedURL{ .url = url, .base = base, .scheme = scheme, .authority = authority, .path = path, .query = decodeQuery(query), .fragment = percentDecode(std::string(fragment)) }; } else throw BadURL("'%s' is not a valid URL", url); } std::string percentDecode(std::string_view in) { std::string decoded; for (size_t i = 0; i < in.size(); ) { if (in[i] == '%') { if (i + 2 >= in.size()) throw BadURL("invalid URI parameter '%s'", in); try { decoded += std::stoul(std::string(in, i + 1, 2), 0, 16); i += 3; } catch (...) { throw BadURL("invalid URI parameter '%s'", in); } } else decoded += in[i++]; } return decoded; } std::map<std::string, std::string> decodeQuery(const std::string & query) { std::map<std::string, std::string> result; for (auto s : tokenizeString<Strings>(query, "&")) { auto e = s.find('='); if (e != std::string::npos) result.emplace( s.substr(0, e), percentDecode(std::string_view(s).substr(e + 1))); } return result; } std::string percentEncode(std::string_view s) { std::string res; for (auto & c : s) if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || strchr("-._~!$&'()*+,;=:@", c)) res += c; else res += fmt("%%%02x", (unsigned int) c); return res; } std::string encodeQuery(const std::map<std::string, std::string> & ss) { std::string res; bool first = true; for (auto & [name, value] : ss) { if (!first) res += '&'; first = false; res += percentEncode(name); res += '='; res += percentEncode(value); } return res; } std::string ParsedURL::to_string() const { return scheme + ":" + (authority ? "//" + *authority : "") + path + (query.empty() ? "" : "?" + encodeQuery(query)) + (fragment.empty() ? "" : "#" + percentEncode(fragment)); } bool ParsedURL::operator ==(const ParsedURL & other) const { return scheme == other.scheme && authority == other.authority && path == other.path && query == other.query && fragment == other.fragment; } }
utf-8
1
LGPL-2.1
2006-2020 Eelco Dolstra
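A round-trip sketch for the parser defined above, assuming url.hh is on the include path; the expected values in the comments follow directly from the code in this file:

#include "url.hh"
#include <cassert>
#include <iostream>

int main()
{
    auto u = nix::parseURL("https://example.org/path?x=1%202#frag");
    assert(u.scheme == "https");
    assert(u.path == "/path");
    assert(u.query.at("x") == "1 2");    // decodeQuery() percent-decodes values
    assert(u.fragment == "frag");
    std::cout << u.to_string() << "\n";  // query and fragment are re-encoded:
                                         // https://example.org/path?x=1%202#frag
}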
firefox-97.0/toolkit/xre/dllservices/mozglue/WindowsDllBlocklist.h
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef mozilla_windowsdllblocklist_h #define mozilla_windowsdllblocklist_h #if (defined(_MSC_VER) || defined(__MINGW32__)) && \ (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64)) # include <windows.h> # include "CrashAnnotations.h" # include "mozilla/Attributes.h" # include "mozilla/Types.h" # define HAS_DLL_BLOCKLIST enum DllBlocklistInitFlags { eDllBlocklistInitFlagDefault = 0, eDllBlocklistInitFlagIsChildProcess = 1, eDllBlocklistInitFlagWasBootstrapped = 2 }; // Only available from within firefox.exe # if !defined(IMPL_MFBT) && !defined(MOZILLA_INTERNAL_API) extern uint32_t gBlocklistInitFlags; # endif // !defined(IMPL_MFBT) && !defined(MOZILLA_INTERNAL_API) MFBT_API void DllBlocklist_Initialize( uint32_t aInitFlags = eDllBlocklistInitFlagDefault); MFBT_API void DllBlocklist_WriteNotes(CrashReporter::AnnotationWriter& aWriter); MFBT_API bool DllBlocklist_CheckStatus(); // This export intends to clean up after DllBlocklist_Initialize(). // It's disabled in release builds for performance and to limit callers' ability // to interfere with dll blocking. # ifdef DEBUG MFBT_API void DllBlocklist_Shutdown(); # endif // DEBUG namespace mozilla { namespace glue { namespace detail { // Forward declaration class DllServicesBase; template <size_t N> class WritableBuffer { char mBuffer[N]; size_t mLen; size_t Available() const { return sizeof(mBuffer) - mLen; } public: WritableBuffer() : mBuffer{0}, mLen(0) {} void Write(const char* aData, size_t aLen) { size_t writable_len = std::min(aLen, Available()); memcpy(mBuffer + mLen, aData, writable_len); mLen += writable_len; } size_t Length() const { return mLen; } const char* Data() const { return mBuffer; } }; } // namespace detail } // namespace glue } // namespace mozilla MFBT_API void DllBlocklist_SetFullDllServices( mozilla::glue::detail::DllServicesBase* aSvc); MFBT_API void DllBlocklist_SetBasicDllServices( mozilla::glue::detail::DllServicesBase* aSvc); #endif // defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)) #endif // mozilla_windowsdllblocklist_h
utf-8
1
unknown
unknown
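A minimal sketch of the WritableBuffer template declared above: writes past the fixed capacity are silently truncated rather than overflowing. The include assumes a build where the header's MSVC/MinGW x86/x64/arm64 guard is satisfied:

#include "WindowsDllBlocklist.h"
#include <cstdio>

void example()
{
    mozilla::glue::detail::WritableBuffer<8> buf;
    buf.Write("hello, world", 12);        // capacity is 8, so only 8 bytes are kept
    std::printf("%zu\n", buf.Length());   // prints 8
    // buf.Data() points at "hello, w"; note the buffer is not NUL-terminated.
}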
chromium-98.0.4758.102/chrome/browser/share/fake_share_history.cc
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/share/fake_share_history.h"

#include "base/bind.h"
#include "base/threading/sequenced_task_runner_handle.h"

namespace sharing {

FakeShareHistory::FakeShareHistory() = default;
FakeShareHistory::~FakeShareHistory() = default;

void FakeShareHistory::AddShareEntry(const std::string& component_name) {
  NOTIMPLEMENTED();
}

void FakeShareHistory::GetFlatShareHistory(GetFlatHistoryCallback callback,
                                           int window) {
  base::SequencedTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::BindOnce(std::move(callback), history_));
}

}  // namespace sharing
utf-8
1
BSD-3-clause
The Chromium Authors. All rights reserved.
thin-provisioning-tools-0.9.0/thin-provisioning/shared_library_emitter.cc
#include "thin-provisioning/shared_library_emitter.h" #include <dlfcn.h> #include <stdexcept> using namespace std; using namespace thin_provisioning; //---------------------------------------------------------------- emitter::ptr thin_provisioning::create_custom_emitter(string const &shared_lib, ostream &out) { emitter::ptr (*create_fn)(ostream &out); void *handle = dlopen(shared_lib.c_str(), RTLD_LAZY); if (!handle) throw runtime_error(dlerror()); dlerror(); // Clear any existing error create_fn = reinterpret_cast<emitter::ptr (*)(ostream &)>(dlsym(handle, "create_emitter")); char *error = dlerror(); if (error) throw runtime_error(error); return create_fn(out); } //----------------------------------------------------------------
utf-8
1
GPL-3+
2011-2013 Red Hat, Inc
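create_custom_emitter() above looks up the unmangled symbol name "create_emitter" via dlsym(), so a plugin must export that symbol with C linkage. A sketch of the plugin side; the emitter.h path and the my_emitter class are assumptions, with the pure-virtual overrides elided:

#include "thin-provisioning/emitter.h"  // assumed header providing thin_provisioning::emitter
#include <ostream>

using namespace thin_provisioning;

// Hypothetical emitter subclass; the emitter virtual overrides are elided here.
struct my_emitter : emitter {
    explicit my_emitter(std::ostream &out) : out_(out) {}
    // ... emitter virtual overrides go here ...
    std::ostream &out_;
};

// extern "C" suppresses C++ name mangling so the exported symbol is exactly
// the "create_emitter" string that dlsym() looks up above.
extern "C" emitter::ptr create_emitter(std::ostream &out) {
    return emitter::ptr(new my_emitter(out));
}

// Build as a shared object, e.g.: g++ -shared -fPIC my_emitter.cc -o my_emitter.so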
lynx-2.9.0dev.10/WWW/Library/Implementation/HTPlain.c
/* * $LynxId: HTPlain.c,v 1.61 2020/01/21 22:05:46 tom Exp $ * * Plain text object HTWrite.c * ================= * * This version of the stream object just writes to a socket. * The socket is assumed open and left open. * * Bugs: * strings written must be less than buffer size. */ #define HTSTREAM_INTERNAL 1 #include <HTUtils.h> #include <LYCharVals.h> /* S/390 -- gil -- 0288 */ #include <HTPlain.h> #include <HTChunk.h> #include <HText.h> #include <HTStyle.h> #define Lynx_HTML_Handler #include <HTML.h> /* styles[] */ #define BUFFER_SIZE 4096; /* Tradeoff */ #include <HTMLDTD.h> #include <HTCJK.h> #include <UCMap.h> #include <UCDefs.h> #include <UCAux.h> #include <LYCharSets.h> #include <LYStrings.h> #include <LYLeaks.h> static int HTPlain_lastraw = -1; static int HTPlain_bs_pending = 0; /* 1:bs 2:underline 3:underline+bs - kw */ /* HTML Object * ----------- */ struct _HTStream { const HTStreamClass *isa; HText *text; /* * The node_anchor UCInfo and handle for the input (PARSER) stage. - FM */ LYUCcharset *inUCI; int inUCLYhndl; /* * The node_anchor UCInfo and handle for the output (HTEXT) stage. - FM */ LYUCcharset *outUCI; int outUCLYhndl; UTFDecodeState U; UCTransParams T; }; static char replace_buf[64]; /* buffer for replacement strings */ static void HTPlain_getChartransInfo(HTStream *me, HTParentAnchor *anchor) { if (me->inUCLYhndl < 0) { HTAnchor_copyUCInfoStage(anchor, UCT_STAGE_PARSER, UCT_STAGE_MIME, UCT_SETBY_PARSER); me->inUCLYhndl = HTAnchor_getUCLYhndl(anchor, UCT_STAGE_PARSER); } if (me->outUCLYhndl < 0) { int chndl = HTAnchor_getUCLYhndl(anchor, UCT_STAGE_HTEXT); if (chndl < 0) { chndl = current_char_set; HTAnchor_setUCInfoStage(anchor, chndl, UCT_STAGE_HTEXT, UCT_SETBY_DEFAULT); } HTAnchor_setUCInfoStage(anchor, chndl, UCT_STAGE_HTEXT, UCT_SETBY_DEFAULT); me->outUCLYhndl = HTAnchor_getUCLYhndl(anchor, UCT_STAGE_HTEXT); } me->inUCI = HTAnchor_getUCInfoStage(anchor, UCT_STAGE_PARSER); me->outUCI = HTAnchor_getUCInfoStage(anchor, UCT_STAGE_HTEXT); } /* Write the buffer out to the socket * ---------------------------------- */ /*_________________________________________________________________________ * * A C T I O N R O U T I N E S */ static void HTPlain_write(HTStream *me, const char *s, int l); /* Character handling * ------------------ */ static void HTPlain_put_character(HTStream *me, int c) { #ifdef REMOVE_CR_ONLY /* * Throw away \r's. */ if (c != '\r') { HText_appendCharacter(me->text, c); } #else /* * See HTPlain_write() for explanations of the following code (we've been * called via HTPlain_put_string() to do for each character of a terminated * string what HTPlain_write() does via a while loop for each character in * a stream of given length). 
- FM */ if ((HTPlain_lastraw == '\r') && c == '\n') { HTPlain_lastraw = -1; return; } if (c == '\b' || c == '_' || HTPlain_bs_pending) { char temp[1]; temp[0] = (char) c; HTPlain_write(me, temp, 1); return; } HTPlain_lastraw = UCH(c); if (c == '\r') { HText_appendCharacter(me->text, '\n'); } else if (TOASCII(UCH(c)) >= 127) { /* S/390 -- gil -- 0305 */ char temp[1]; temp[0] = (char) c; /* * For now, don't repeat everything here that has been done below - KW */ HTPlain_write(me, temp, 1); } else if (IS_CJK_TTY) { HText_appendCharacter(me->text, c); } else if (TOASCII(UCH(c)) >= 127 && TOASCII(UCH(c)) < 161 && HTPassHighCtrlRaw) { HText_appendCharacter(me->text, c); #if CH_NBSP < 127 } else if (UCH(c) == CH_NBSP) { /* S/390 -- gil -- 0341 */ HText_appendCharacter(me->text, ' '); #endif #if CH_SHY < 127 } else if (UCH(c) == CH_SHY) { return; #endif } else if ((UCH(c) >= ' ' && TOASCII(UCH(c)) < 127) || c == '\n' || c == '\t') { HText_appendCharacter(me->text, c); } #endif /* REMOVE_CR_ONLY */ } /* String handling * --------------- * */ static void HTPlain_put_string(HTStream *me, const char *s) { #ifdef REMOVE_CR_ONLY HText_appendText(me->text, s); #else const char *p; if (s == NULL) return; for (p = s; *p; p++) { HTPlain_put_character(me, *p); } #endif /* REMOVE_CR_ONLY */ } /* * Entry function for displayed text/plain and WWW_SOURCE strings. - FM * --------------------------------------------------------------- */ static void HTPlain_write(HTStream *me, const char *s, int l) { const char *p; const char *e = s + l; int c; unsigned c_unsign; BOOL chk; UCode_t code, uck = -1; int saved_char_in = '\0'; for (p = s; p < e; p++) { #ifdef REMOVE_CR_ONLY /* * Append the whole string, but remove any \r's. - FM */ if (*p != '\r') { HText_appendCharacter(me->text, *p); } #else if (*p == '\b') { if (HTPlain_lastraw >= UCH(' ') && HTPlain_lastraw != '\r' && HTPlain_lastraw != '\n') { if (!HTPlain_bs_pending) { HTPlain_bs_pending = 1; continue; } else if (HTPlain_bs_pending == 2) { HTPlain_bs_pending = 3; continue; } } if (HTPlain_bs_pending >= 2) HText_appendCharacter(me->text, '_'); HTPlain_bs_pending = 0; } else if (*p == '_') { if (!HTPlain_bs_pending) { HTPlain_bs_pending = 2; HTPlain_lastraw = UCH(*p); continue; } } /* * Try to handle lone LFs, CRLFs and lone CRs as newline, and to deal * with control, ASCII, and 8-bit characters based on best guesses of * what's appropriate. - FM */ if ((HTPlain_lastraw == '\r') && *p == '\n') { HTPlain_lastraw = -1; continue; } if (HTPlain_bs_pending && !(UCH(*p) >= ' ' && *p != '\r' && *p != '\n' && (HTPlain_lastraw == UCH(*p) || HTPlain_lastraw == UCH('_') || *p == '_'))) { if (HTPlain_bs_pending >= 2) HText_appendCharacter(me->text, '_'); HTPlain_bs_pending = 0; } else if (HTPlain_bs_pending == 1) { HTPlain_bs_pending = 0; continue; /* ignore last two of "X\bX" or "X\b_" - kw */ } else if (HTPlain_bs_pending == 3) { if (*p == '_') { HTPlain_bs_pending = 2; continue; /* ignore last two of "_\b_" - kw */ } else { HTPlain_bs_pending = 0; /* ignore first two of "_\bX" - kw */ } } else if (HTPlain_bs_pending == 2) { HText_appendCharacter(me->text, '_'); if (*p == '_') continue; /* keep second of "__" pending - kw */ HTPlain_bs_pending = 0; } else { HTPlain_bs_pending = 0; } HTPlain_lastraw = UCH(*p); if (*p == '\r') { HText_appendCharacter(me->text, '\n'); continue; } /* * Make sure the character is handled as Unicode whenever that's * appropriate. 
	 - FM */
	c = *p;
	c_unsign = UCH(c);
	code = (UCode_t) c_unsign;
	saved_char_in = '\0';
	/*
	 * Combine any UTF-8 multibytes into Unicode to check for special
	 * characters. - FM, TD
	 */
	if (me->T.decode_utf8) {
	    switch (HTDecodeUTF8(&(me->U), &c, &code)) {
	    case dUTF8_ok:
		if (code < 256) {
		    c = FROMASCII((char) code);
		    c_unsign = UCH(c);
		}
		break;
	    case dUTF8_err:
		code = UCS_REPL;
		strcpy(me->U.utf_buf, "\357\277\275");
		me->U.utf_buf_p = (me->U.utf_buf + 3);
		break;
	    case dUTF8_more:
		continue;
	    }
	}
	/*
	 * Convert characters from non-UTF-8 charsets to Unicode (if
	 * appropriate). - FM
	 */
	if (!(me->T.decode_utf8 && UCH(*p) > 127)) {
	    if (me->T.trans_to_uni &&
		(TOASCII(code) >= LYlowest_eightbit[me->inUCLYhndl] ||	/* S/390 -- gil -- 0389 */
		 (code < ' ' && code != 0 &&
		  me->T.trans_C0_to_uni))) {
		/*
		 * Convert the octet to Unicode. - FM
		 */
		code = (UCode_t) UCTransToUni(c, me->inUCLYhndl);
		if (code > 0) {
		    saved_char_in = c;
		    if (code < 256) {
			c = FROMASCII((char) code);
			c_unsign = UCH(c);
		    }
		}
	    } else if (code < 32 && code != 0 &&
		       me->T.trans_C0_to_uni) {
		/*
		 * Quote from SGML.c:
		 * "This else if may be too ugly to keep. - KW"
		 */
		if (me->T.trans_from_uni &&
		    (((code = UCTransToUni(c, me->inUCLYhndl)) >= 32) ||
		     (me->T.transp &&
		      (code = UCTransToUni(c, me->inUCLYhndl)) > 0))) {
		    saved_char_in = c;
		    if (code < 256) {
			c = FROMASCII((char) code);
			c_unsign = UCH(c);
		    }
		} else {
		    uck = -1;
		    if (me->T.transp) {
			uck = UCTransCharStr(replace_buf, 60, c,
					     me->inUCLYhndl,
					     me->inUCLYhndl, NO);
		    }
		    if (!me->T.transp || uck < 0) {
			uck = UCTransCharStr(replace_buf, 60, c,
					     me->inUCLYhndl,
					     me->outUCLYhndl, YES);
		    }
		    if (uck == 0) {
			continue;
		    } else if (uck < 0) {
			me->U.utf_buf[0] = '\0';
		    } else {
			c = replace_buf[0];
			if (c && replace_buf[1]) {
			    HText_appendText(me->text, replace_buf);
			    continue;
			}
		    }
		    me->U.utf_buf[0] = '\0';
		    code = UCH(c);
		}		/* Next line end of ugly stuff for C0. - KW */
	    } else {
		me->U.utf_buf[0] = '\0';
		code = UCH(c);
	    }
	}
	/*
	 * At this point we have either code in Unicode (and c in latin1 if
	 * code is in the latin1 range), or code and c will have to be passed
	 * raw.
	 */

	/*
	 * If CJK mode is on, we'll assume the document matches the user's
	 * display character set, and if not, the user should toggle off
	 * raw/CJK mode to reload. - FM
	 */
	if (IS_CJK_TTY) {
	    HText_appendCharacter(me->text, c);

#define PASSHICTRL (me->T.transp || \
		    code >= LYlowest_eightbit[me->inUCLYhndl])
#define PASS8859SPECL me->T.pass_160_173_raw
#define PASSHI8BIT (HTPassEightBitRaw || \
		    (me->T.do_8bitraw && !me->T.trans_from_uni))

	    /*
	     * If HTPassHighCtrlRaw is set (e.g., for KOI8-R) assume the
	     * document matches and pass 127-160 8-bit characters.  If it
	     * doesn't match, the user should toggle raw/CJK mode off. - FM
	     */
	} else if (TOASCII(code) >= 127 && TOASCII(code) < 161 &&	/* S/390 -- gil -- 0427 */
		   PASSHICTRL && PASS8859SPECL) {
	    HText_appendCharacter(me->text, c);
	} else if (code == CH_SHY && PASS8859SPECL) {
	    HText_appendCharacter(me->text, c);

	    /*
	     * If neither HTPassHighCtrlRaw nor CJK is set, play it safe and
	     * treat 160 (nbsp) as an ASCII space (32). - FM
	     */
	} else if (code == CH_NBSP) {
	    HText_appendCharacter(me->text, ' ');

	    /*
	     * If neither HTPassHighCtrlRaw nor CJK is set, play it safe and
	     * ignore 173 (shy). - FM
	     * Now only ignore it for color style, which doesn't handle it
	     * anyway.  Otherwise pass it on as LY_SOFT_HYPHEN and let HText
	     * deal with it.  It should be either ignored, or displayed as a
	     * hyphen if it was indeed at the end of a line.  Well it should.
	     * - kw
	     */
	} else if (code == CH_SHY) {
#ifndef USE_COLOR_STYLE
	    HText_appendCharacter(me->text, LY_SOFT_HYPHEN);
#endif
	    continue;

	    /*
	     * If we get to here, pass the displayable ASCII characters. - FM
	     */
	} else if ((code >= ' ' && code != UCS_REPL && TOASCII(code) < 127) ||
		   (PASSHI8BIT &&
		    c >= LYlowest_eightbit[me->outUCLYhndl]) ||
		   *p == '\n' || *p == '\t') {
	    HText_appendCharacter(me->text, c);

	    /*
	     * Use an ASCII space (32) for ensp, emsp or thinsp. - FM
	     */
	} else if (code == 8194 || code == 8195 || code == 8201) {
	    HText_appendCharacter(me->text, ' ');

	    /*
	     * If we want the raw character, pass it now. - FM
	     */
	} else if (me->T.use_raw_char_in && saved_char_in) {
	    HText_appendCharacter(me->text, saved_char_in);

	    /******************************************************************
	     * I. LATIN-1 OR UCS2 TO DISPLAY CHARSET
	     ******************************************************************/
	} else if ((chk = (BOOL) (me->T.trans_from_uni && code >= 160)) &&
		   (uck = UCTransUniChar(code, me->outUCLYhndl)) >= ' ' &&	/* S/390 -- gil -- 0464 */
		   uck < 256) {
	    CTRACE((tfp, "UCTransUniChar returned 0x%.2" PRI_UCode_t ":'%c'.\n",
		    uck, FROMASCII(UCH(uck))));
	    HText_appendCharacter(me->text, ((char) (uck & 0xff)));
	} else if (chk &&
		   (uck == -4 ||
		    (me->T.repl_translated_C0 &&
		     uck > 0 && uck < ' ')) &&	/* S/390 -- gil -- 0481 */
	    /*
	     * Not found; look for replacement string.
	     */
		   (uck = UCTransUniCharStr(replace_buf, 60, code,
					    me->outUCLYhndl, 0) >= 0)) {
	    /*
	     * No further tests for validity - assume that whoever defined
	     * replacement strings knew what she was doing.
	     */
	    HText_appendText(me->text, replace_buf);

	    /*
	     * If we get to here, and should have translated, translation has
	     * failed so far.
	     */
	} else if (chk && TOASCII(code) > 127 && me->T.output_utf8) {	/* S/390 -- gil -- 0498 */
	    /*
	     * We want UTF-8 output, so do it now. - FM
	     */
	    if (*me->U.utf_buf) {
		HText_appendText(me->text, me->U.utf_buf);
		me->U.utf_buf[0] = '\0';
		me->U.utf_buf_p = me->U.utf_buf;
	    } else if (UCConvertUniToUtf8(code, replace_buf)) {
		HText_appendText(me->text, replace_buf);
	    } else {
		/*
		 * Out of luck, so use the UHHH notation (ugh). - gil
		 */
		/* S/390 -- gil -- 0517 */
		sprintf(replace_buf, "U%.2lX", (unsigned long) TOASCII(code));
		HText_appendText(me->text, replace_buf);
	    }

	    /*
	     * If we don't actually want the character, make it safe and output
	     * that now. - FM
	     */
	} else if ((c_unsign > 0 &&
		    (int) c_unsign < LYlowest_eightbit[me->outUCLYhndl]) ||
		   (me->T.trans_from_uni && !HTPassEightBitRaw)) {
	    /*
	     * If we do not have the "7-bit approximations" as our output
	     * character set (in which case we did it already) seek a
	     * translation for that.  Otherwise, or if the translation fails,
	     * use UHHH notation. - FM
	     */
	    if ((chk = (BOOL) (me->outUCLYhndl !=
			       UCGetLYhndl_byMIME("us-ascii"))) &&
		(uck = UCTransUniChar(code,
				      UCGetLYhndl_byMIME("us-ascii"))) >= ' ' &&
		TOASCII(uck) < 127) {	/* S/390 -- gil -- 0535 */
		/*
		 * Got an ASCII character (yippey). - FM
		 */
		c = FROMASCII((char) uck);
		HText_appendCharacter(me->text, c);
	    } else if ((chk && uck == -4) &&
		       (uck = UCTransUniCharStr(replace_buf, 60, code,
						UCGetLYhndl_byMIME("us-ascii"),
						0) >= 0)) {
		/*
		 * Got a replacement string (yippey). - FM
		 */
		HText_appendText(me->text, replace_buf);
	    } else if (code == 8204 || code == 8205) {
		/*
		 * Ignore 8204 (zwnj) or 8205 (zwj), if we get to here. - FM
		 */
		CTRACE((tfp, "HTPlain_write: Ignoring '%" PRI_UCode_t "'.\n",
			code));
	    } else if (code == 8206 || code == 8207) {
		/*
		 * Ignore 8206 (lrm) or 8207 (rlm), if we get to here.
		 * - FM
		 */
		CTRACE((tfp, "HTPlain_write: Ignoring '%" PRI_UCode_t "'.\n",
			code));
	    } else {
		/*
		 * Out of luck, so use the UHHH notation (ugh). - FM
		 */
		/* do not print UHHH for now
		   sprintf(replace_buf, "U%.2lX", code);
		   HText_appendText(me->text, replace_buf);
		 */
	    }

	    /*
	     * If we get to here and have a monobyte character, pass it. - FM
	     */
	} else if (c_unsign != 0 && c_unsign < 256) {
	    HText_appendCharacter(me->text, c);
	}
#endif /* REMOVE_CR_ONLY */
    }
}

/*	Free an HTML object
 *	-------------------
 *
 * Note that the SGML parsing context is freed, but the created object is
 * not, as it takes on an existence of its own unless explicitly freed.
 */
static void HTPlain_free(HTStream *me)
{
    if (HTPlain_bs_pending >= 2)
	HText_appendCharacter(me->text, '_');
    FREE(me);
}

/*	End writing
*/
static void HTPlain_abort(HTStream *me, HTError e GCC_UNUSED)
{
    HTPlain_free(me);
}

/*	Structured Object Class
 *	-----------------------
 */
static const HTStreamClass HTPlain =
{
    "PlainPresenter",
    HTPlain_free,
    HTPlain_abort,
    HTPlain_put_character,
    HTPlain_put_string,
    HTPlain_write,
};

/*	New object
 *	----------
 */
HTStream *HTPlainPresent(HTPresentation *pres GCC_UNUSED,
			 HTParentAnchor *anchor,
			 HTStream *sink GCC_UNUSED)
{
    HTStream *me = (HTStream *) malloc(sizeof(*me));

    if (me == NULL)
	outofmem(__FILE__, "HTPlain_new");

    me->isa = &HTPlain;
    HTPlain_lastraw = -1;

    me->U.utf_count = 0;
    me->U.utf_char = 0;
    me->U.utf_buf[0] = me->U.utf_buf[6] = me->U.utf_buf[7] = '\0';
    me->U.utf_buf_p = me->U.utf_buf;
    me->outUCLYhndl = HTAnchor_getUCLYhndl(anchor, UCT_STAGE_HTEXT);
    me->inUCLYhndl = HTAnchor_getUCLYhndl(anchor, UCT_STAGE_PARSER);
    HTPlain_getChartransInfo(me, anchor);
    UCSetTransParams(&me->T,
		     me->inUCLYhndl, me->inUCI,
		     me->outUCLYhndl,
		     HTAnchor_getUCInfoStage(anchor, UCT_STAGE_HTEXT));

    me->text = HText_new(anchor);
    HText_setStyle(me->text, LYstyles(HTML_XMP));
    HText_beginAppend(me->text);

    return (HTStream *) me;
}
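/*
 * Editorial sketch (not part of HTPlain.c): the translation cascade in
 * HTPlain_write() above is easier to see in isolation.  This minimal,
 * self-contained program mirrors the same fallback order -- displayable
 * ASCII first, the nbsp/ensp/emsp/thinsp and shy special cases next,
 * UTF-8 output when enabled, and the "UHH" hex notation as the last
 * resort.  The emit_code_point() helper and the stdio output are
 * assumptions made for this illustration only.
 */
#include <stdio.h>

static void emit_code_point(unsigned long code, int output_utf8)
{
    if (code >= ' ' && code < 127) {		/* displayable ASCII */
	putchar((int) code);
    } else if (code == 160 || code == 8194 ||
	       code == 8195 || code == 8201) {	/* nbsp/ensp/emsp/thinsp */
	putchar(' ');
    } else if (code == 173) {			/* shy: drop it here */
	;
    } else if (output_utf8 && code < 0x800) {	/* 2-byte UTF-8 only, for brevity */
	putchar((int) (0xC0 | (code >> 6)));
	putchar((int) (0x80 | (code & 0x3F)));
    } else {					/* last resort: UHH notation */
	printf("U%.2lX", code);
    }
}

int main(void)
{
    emit_code_point('A', 1);	/* prints "A" */
    emit_code_point(160, 1);	/* prints " " */
    emit_code_point(0xE9, 1);	/* prints U+00E9 as UTF-8 (0xC3 0xA9) */
    emit_code_point(0xE9, 0);	/* prints "UE9" */
    putchar('\n');
    return 0;
}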
utf-8
1
GPL-2
1997-2015, Thomas E. Dickey <dickey@invisible-island.net>
vala-0.54.6/codegen/valagtkmodule.c
/* valagtkmodule.c generated by valac, the Vala compiler * generated from valagtkmodule.vala, do not modify */ /* valagtkmodule.vala * * Copyright (C) 2013 Jürg Billeter * Copyright (C) 2013-2014 Luca Bruno * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * Author: * Luca Bruno <lucabru@src.gnome.org> */ #include "valacodegen.h" #include <valagee.h> #include <glib-object.h> #include <stdlib.h> #include <string.h> #include <glib.h> #include <vala.h> #include <glib/gstdio.h> #include <valaccode.h> #define _vala_map_unref0(var) ((var == NULL) ? NULL : (var = (vala_map_unref (var), NULL))) #define _vala_iterable_unref0(var) ((var == NULL) ? NULL : (var = (vala_iterable_unref (var), NULL))) #define _g_free0(var) (var = (g_free (var), NULL)) #define _vala_code_node_unref0(var) ((var == NULL) ? NULL : (var = (vala_code_node_unref (var), NULL))) #define _vala_markup_reader_unref0(var) ((var == NULL) ? NULL : (var = (vala_markup_reader_unref (var), NULL))) #define _g_regex_unref0(var) ((var == NULL) ? NULL : (var = (g_regex_unref (var), NULL))) #define _vala_ccode_node_unref0(var) ((var == NULL) ? 
NULL : (var = (vala_ccode_node_unref (var), NULL))) struct _ValaGtkModulePrivate { ValaHashMap* type_id_to_vala_map; ValaHashMap* cclass_to_vala_map; ValaHashMap* gresource_to_file_map; ValaHashMap* handler_map; ValaHashMap* current_handler_to_property_map; ValaHashMap* current_handler_to_signal_map; ValaHashMap* current_child_to_class_map; ValaList* current_required_app_classes; }; static gint ValaGtkModule_private_offset; static gpointer vala_gtk_module_parent_class = NULL; static void vala_gtk_module_ensure_type_id_to_vala_map (ValaGtkModule* self); static void vala_gtk_module_recurse_type_id_to_vala_map (ValaGtkModule* self, ValaNamespace* ns); static void vala_gtk_module_ensure_cclass_to_vala_map (ValaGtkModule* self); static void vala_gtk_module_recurse_cclass_to_vala_map (ValaGtkModule* self, ValaNamespace* ns); static void vala_gtk_module_ensure_gresource_to_file_map (ValaGtkModule* self); static void vala_gtk_module_process_current_ui_resource (ValaGtkModule* self, const gchar* ui_resource, ValaCodeNode* node); static gboolean vala_gtk_module_is_gtk_template (ValaGtkModule* self, ValaClass* cl); static void vala_gtk_module_real_generate_class_init (ValaGTypeModule* base, ValaClass* cl); static void vala_gtk_module_real_visit_property (ValaCodeVisitor* base, ValaProperty* prop); static void vala_gtk_module_real_visit_field (ValaCodeVisitor* base, ValaField* f); static void vala_gtk_module_real_visit_method (ValaCodeVisitor* base, ValaMethod* m); static void vala_gtk_module_real_end_instance_init (ValaGTypeModule* base, ValaClass* cl); static void vala_gtk_module_finalize (ValaCodeVisitor * obj); static GType vala_gtk_module_get_type_once (void); static inline gpointer vala_gtk_module_get_instance_private (ValaGtkModule* self) { return G_STRUCT_MEMBER_P (self, ValaGtkModule_private_offset); } static void vala_gtk_module_ensure_type_id_to_vala_map (ValaGtkModule* self) { ValaHashMap* _tmp0_; GHashFunc _tmp1_; GEqualFunc _tmp2_; GEqualFunc _tmp3_; ValaHashMap* _tmp4_; ValaCodeContext* _tmp5_; ValaCodeContext* _tmp6_; ValaNamespace* _tmp7_; ValaNamespace* _tmp8_; g_return_if_fail (self != NULL); _tmp0_ = self->priv->type_id_to_vala_map; if (_tmp0_ != NULL) { return; } _tmp1_ = g_str_hash; _tmp2_ = g_str_equal; _tmp3_ = g_direct_equal; _tmp4_ = vala_hash_map_new (G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, VALA_TYPE_CLASS, (GBoxedCopyFunc) vala_code_node_ref, (GDestroyNotify) vala_code_node_unref, _tmp1_, _tmp2_, _tmp3_); _vala_map_unref0 (self->priv->type_id_to_vala_map); self->priv->type_id_to_vala_map = _tmp4_; _tmp5_ = vala_ccode_base_module_get_context ((ValaCCodeBaseModule*) self); _tmp6_ = _tmp5_; _tmp7_ = vala_code_context_get_root (_tmp6_); _tmp8_ = _tmp7_; vala_gtk_module_recurse_type_id_to_vala_map (self, _tmp8_); } static gint string_index_of_char (const gchar* self, gunichar c, gint start_index) { gchar* _result_ = NULL; gchar* _tmp0_; gchar* _tmp1_; gint result = 0; g_return_val_if_fail (self != NULL, 0); _tmp0_ = g_utf8_strchr (((gchar*) self) + start_index, (gssize) -1, c); _result_ = _tmp0_; _tmp1_ = _result_; if (_tmp1_ != NULL) { gchar* _tmp2_; _tmp2_ = _result_; result = (gint) (_tmp2_ - ((gchar*) self)); return result; } else { result = -1; return result; } } static glong string_strnlen (gchar* str, glong maxlen) { gchar* end = NULL; gchar* _tmp0_; gchar* _tmp1_; glong result = 0L; _tmp0_ = memchr (str, 0, (gsize) maxlen); end = _tmp0_; _tmp1_ = end; if (_tmp1_ == NULL) { result = maxlen; return result; } else { gchar* _tmp2_; _tmp2_ = end; 
result = (glong) (_tmp2_ - str); return result; } } static gchar* string_substring (const gchar* self, glong offset, glong len) { glong string_length = 0L; gboolean _tmp0_ = FALSE; gchar* _tmp3_; gchar* result = NULL; g_return_val_if_fail (self != NULL, NULL); if (offset >= ((glong) 0)) { _tmp0_ = len >= ((glong) 0); } else { _tmp0_ = FALSE; } if (_tmp0_) { string_length = string_strnlen ((gchar*) self, offset + len); } else { gint _tmp1_; gint _tmp2_; _tmp1_ = strlen (self); _tmp2_ = _tmp1_; string_length = (glong) _tmp2_; } if (offset < ((glong) 0)) { offset = string_length + offset; g_return_val_if_fail (offset >= ((glong) 0), NULL); } else { g_return_val_if_fail (offset <= string_length, NULL); } if (len < ((glong) 0)) { len = string_length - offset; } g_return_val_if_fail ((offset + len) <= string_length, NULL); _tmp3_ = g_strndup (((gchar*) self) + offset, (gsize) len); result = _tmp3_; return result; } static gchar* string_strip (const gchar* self) { gchar* _result_ = NULL; gchar* _tmp0_; gchar* result = NULL; g_return_val_if_fail (self != NULL, NULL); _tmp0_ = g_strdup (self); _result_ = _tmp0_; g_strstrip (_result_); result = _result_; return result; } static void vala_gtk_module_recurse_type_id_to_vala_map (ValaGtkModule* self, ValaNamespace* ns) { g_return_if_fail (self != NULL); g_return_if_fail (ns != NULL); { ValaList* _cl_list = NULL; ValaList* _tmp0_; gint _cl_size = 0; ValaList* _tmp1_; gint _tmp2_; gint _tmp3_; gint _cl_index = 0; _tmp0_ = vala_namespace_get_classes (ns); _cl_list = _tmp0_; _tmp1_ = _cl_list; _tmp2_ = vala_collection_get_size ((ValaCollection*) _tmp1_); _tmp3_ = _tmp2_; _cl_size = _tmp3_; _cl_index = -1; while (TRUE) { gint _tmp4_; gint _tmp5_; ValaClass* cl = NULL; ValaList* _tmp6_; gpointer _tmp7_; ValaClass* _tmp8_; gboolean _tmp9_; gboolean _tmp10_; _cl_index = _cl_index + 1; _tmp4_ = _cl_index; _tmp5_ = _cl_size; if (!(_tmp4_ < _tmp5_)) { break; } _tmp6_ = _cl_list; _tmp7_ = vala_list_get (_tmp6_, _cl_index); cl = (ValaClass*) _tmp7_; _tmp8_ = cl; _tmp9_ = vala_class_get_is_compact (_tmp8_); _tmp10_ = _tmp9_; if (!_tmp10_) { gchar* type_id = NULL; ValaClass* _tmp11_; gchar* _tmp12_; const gchar* _tmp13_; gint i = 0; const gchar* _tmp14_; ValaHashMap* _tmp21_; const gchar* _tmp22_; ValaClass* _tmp23_; _tmp11_ = cl; _tmp12_ = vala_get_ccode_type_id ((ValaCodeNode*) _tmp11_); type_id = _tmp12_; _tmp13_ = type_id; if (_tmp13_ == NULL) { _g_free0 (type_id); _vala_code_node_unref0 (cl); continue; } _tmp14_ = type_id; i = string_index_of_char (_tmp14_, (gunichar) '(', 0); if (i > 0) { const gchar* _tmp15_; gchar* _tmp16_; gchar* _tmp17_; gchar* _tmp18_; _tmp15_ = type_id; _tmp16_ = string_substring (_tmp15_, (glong) 0, (glong) (i - 1)); _tmp17_ = _tmp16_; _tmp18_ = string_strip (_tmp17_); _g_free0 (type_id); type_id = _tmp18_; _g_free0 (_tmp17_); } else { const gchar* _tmp19_; gchar* _tmp20_; _tmp19_ = type_id; _tmp20_ = string_strip (_tmp19_); _g_free0 (type_id); type_id = _tmp20_; } _tmp21_ = self->priv->type_id_to_vala_map; _tmp22_ = type_id; _tmp23_ = cl; vala_map_set ((ValaMap*) _tmp21_, _tmp22_, _tmp23_); _g_free0 (type_id); } _vala_code_node_unref0 (cl); } } { ValaList* _inner_list = NULL; ValaList* _tmp24_; gint _inner_size = 0; ValaList* _tmp25_; gint _tmp26_; gint _tmp27_; gint _inner_index = 0; _tmp24_ = vala_namespace_get_namespaces (ns); _inner_list = _tmp24_; _tmp25_ = _inner_list; _tmp26_ = vala_collection_get_size ((ValaCollection*) _tmp25_); _tmp27_ = _tmp26_; _inner_size = _tmp27_; _inner_index = -1; while (TRUE) { gint _tmp28_; gint 
_tmp29_; ValaNamespace* inner = NULL; ValaList* _tmp30_; gpointer _tmp31_; ValaNamespace* _tmp32_; _inner_index = _inner_index + 1; _tmp28_ = _inner_index; _tmp29_ = _inner_size; if (!(_tmp28_ < _tmp29_)) { break; } _tmp30_ = _inner_list; _tmp31_ = vala_list_get (_tmp30_, _inner_index); inner = (ValaNamespace*) _tmp31_; _tmp32_ = inner; vala_gtk_module_recurse_type_id_to_vala_map (self, _tmp32_); _vala_code_node_unref0 (inner); } } } static void vala_gtk_module_ensure_cclass_to_vala_map (ValaGtkModule* self) { ValaHashMap* _tmp0_; GHashFunc _tmp1_; GEqualFunc _tmp2_; GEqualFunc _tmp3_; ValaHashMap* _tmp4_; ValaCodeContext* _tmp5_; ValaCodeContext* _tmp6_; ValaNamespace* _tmp7_; ValaNamespace* _tmp8_; g_return_if_fail (self != NULL); _tmp0_ = self->priv->cclass_to_vala_map; if (_tmp0_ != NULL) { return; } _tmp1_ = g_str_hash; _tmp2_ = g_str_equal; _tmp3_ = g_direct_equal; _tmp4_ = vala_hash_map_new (G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, VALA_TYPE_CLASS, (GBoxedCopyFunc) vala_code_node_ref, (GDestroyNotify) vala_code_node_unref, _tmp1_, _tmp2_, _tmp3_); _vala_map_unref0 (self->priv->cclass_to_vala_map); self->priv->cclass_to_vala_map = _tmp4_; _tmp5_ = vala_ccode_base_module_get_context ((ValaCCodeBaseModule*) self); _tmp6_ = _tmp5_; _tmp7_ = vala_code_context_get_root (_tmp6_); _tmp8_ = _tmp7_; vala_gtk_module_recurse_cclass_to_vala_map (self, _tmp8_); } static void vala_gtk_module_recurse_cclass_to_vala_map (ValaGtkModule* self, ValaNamespace* ns) { g_return_if_fail (self != NULL); g_return_if_fail (ns != NULL); { ValaList* _cl_list = NULL; ValaList* _tmp0_; gint _cl_size = 0; ValaList* _tmp1_; gint _tmp2_; gint _tmp3_; gint _cl_index = 0; _tmp0_ = vala_namespace_get_classes (ns); _cl_list = _tmp0_; _tmp1_ = _cl_list; _tmp2_ = vala_collection_get_size ((ValaCollection*) _tmp1_); _tmp3_ = _tmp2_; _cl_size = _tmp3_; _cl_index = -1; while (TRUE) { gint _tmp4_; gint _tmp5_; ValaClass* cl = NULL; ValaList* _tmp6_; gpointer _tmp7_; ValaClass* _tmp8_; gboolean _tmp9_; gboolean _tmp10_; _cl_index = _cl_index + 1; _tmp4_ = _cl_index; _tmp5_ = _cl_size; if (!(_tmp4_ < _tmp5_)) { break; } _tmp6_ = _cl_list; _tmp7_ = vala_list_get (_tmp6_, _cl_index); cl = (ValaClass*) _tmp7_; _tmp8_ = cl; _tmp9_ = vala_class_get_is_compact (_tmp8_); _tmp10_ = _tmp9_; if (!_tmp10_) { ValaHashMap* _tmp11_; ValaClass* _tmp12_; gchar* _tmp13_; gchar* _tmp14_; ValaClass* _tmp15_; _tmp11_ = self->priv->cclass_to_vala_map; _tmp12_ = cl; _tmp13_ = vala_get_ccode_name ((ValaCodeNode*) _tmp12_); _tmp14_ = _tmp13_; _tmp15_ = cl; vala_map_set ((ValaMap*) _tmp11_, _tmp14_, _tmp15_); _g_free0 (_tmp14_); } _vala_code_node_unref0 (cl); } } { ValaList* _inner_list = NULL; ValaList* _tmp16_; gint _inner_size = 0; ValaList* _tmp17_; gint _tmp18_; gint _tmp19_; gint _inner_index = 0; _tmp16_ = vala_namespace_get_namespaces (ns); _inner_list = _tmp16_; _tmp17_ = _inner_list; _tmp18_ = vala_collection_get_size ((ValaCollection*) _tmp17_); _tmp19_ = _tmp18_; _inner_size = _tmp19_; _inner_index = -1; while (TRUE) { gint _tmp20_; gint _tmp21_; ValaNamespace* inner = NULL; ValaList* _tmp22_; gpointer _tmp23_; ValaNamespace* _tmp24_; _inner_index = _inner_index + 1; _tmp20_ = _inner_index; _tmp21_ = _inner_size; if (!(_tmp20_ < _tmp21_)) { break; } _tmp22_ = _inner_list; _tmp23_ = vala_list_get (_tmp22_, _inner_index); inner = (ValaNamespace*) _tmp23_; _tmp24_ = inner; vala_gtk_module_recurse_cclass_to_vala_map (self, _tmp24_); _vala_code_node_unref0 (inner); } } } static void 
vala_gtk_module_ensure_gresource_to_file_map (ValaGtkModule* self) { ValaHashMap* _tmp0_; GHashFunc _tmp1_; GEqualFunc _tmp2_; GEqualFunc _tmp3_; ValaHashMap* _tmp4_; ValaCodeContext* _tmp5_; ValaCodeContext* _tmp6_; gchar** _tmp7_; gint _tmp7__length1; gint _tmp8_ = 0; gchar** _tmp9_; gint _tmp9__length1; g_return_if_fail (self != NULL); _tmp0_ = self->priv->gresource_to_file_map; if (_tmp0_ != NULL) { return; } _tmp1_ = g_str_hash; _tmp2_ = g_str_equal; _tmp3_ = g_direct_equal; _tmp4_ = vala_hash_map_new (G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, _tmp1_, _tmp2_, _tmp3_); _vala_map_unref0 (self->priv->gresource_to_file_map); self->priv->gresource_to_file_map = _tmp4_; _tmp5_ = vala_ccode_base_module_get_context ((ValaCCodeBaseModule*) self); _tmp6_ = _tmp5_; _tmp7_ = vala_code_context_get_gresources (_tmp6_, &_tmp8_); _tmp7__length1 = _tmp8_; _tmp9_ = _tmp7_; _tmp9__length1 = _tmp7__length1; { gchar** gresource_collection = NULL; gint gresource_collection_length1 = 0; gint _gresource_collection_size_ = 0; gint gresource_it = 0; gresource_collection = _tmp9_; gresource_collection_length1 = _tmp9__length1; for (gresource_it = 0; gresource_it < gresource_collection_length1; gresource_it = gresource_it + 1) { gchar* _tmp10_; gchar* gresource = NULL; _tmp10_ = g_strdup (gresource_collection[gresource_it]); gresource = _tmp10_; { const gchar* _tmp11_; ValaMarkupReader* reader = NULL; const gchar* _tmp13_; ValaMarkupReader* _tmp14_; gint state = 0; gchar* prefix = NULL; gchar* alias = NULL; ValaMarkupTokenType current_token = 0; ValaMarkupReader* _tmp15_; _tmp11_ = gresource; if (!g_file_test (_tmp11_, G_FILE_TEST_EXISTS)) { const gchar* _tmp12_; _tmp12_ = gresource; vala_report_error (NULL, "GResources file `%s' does not exist", _tmp12_); _g_free0 (gresource); continue; } _tmp13_ = gresource; _tmp14_ = vala_markup_reader_new (_tmp13_); reader = _tmp14_; state = 0; prefix = NULL; alias = NULL; _tmp15_ = reader; current_token = vala_markup_reader_read_token (_tmp15_, NULL, NULL); while (TRUE) { gboolean _tmp16_ = FALSE; ValaMarkupReader* _tmp51_; if (!(current_token != VALA_MARKUP_TOKEN_TYPE_EOF)) { break; } if (current_token == VALA_MARKUP_TOKEN_TYPE_START_ELEMENT) { ValaMarkupReader* _tmp17_; const gchar* _tmp18_; const gchar* _tmp19_; _tmp17_ = reader; _tmp18_ = vala_markup_reader_get_name (_tmp17_); _tmp19_ = _tmp18_; _tmp16_ = g_strcmp0 (_tmp19_, "gresource") == 0; } else { _tmp16_ = FALSE; } if (_tmp16_) { ValaMarkupReader* _tmp20_; gchar* _tmp21_; _tmp20_ = reader; _tmp21_ = vala_markup_reader_get_attribute (_tmp20_, "prefix"); _g_free0 (prefix); prefix = _tmp21_; } else { gboolean _tmp22_ = FALSE; if (current_token == VALA_MARKUP_TOKEN_TYPE_START_ELEMENT) { ValaMarkupReader* _tmp23_; const gchar* _tmp24_; const gchar* _tmp25_; _tmp23_ = reader; _tmp24_ = vala_markup_reader_get_name (_tmp23_); _tmp25_ = _tmp24_; _tmp22_ = g_strcmp0 (_tmp25_, "file") == 0; } else { _tmp22_ = FALSE; } if (_tmp22_) { ValaMarkupReader* _tmp26_; gchar* _tmp27_; _tmp26_ = reader; _tmp27_ = vala_markup_reader_get_attribute (_tmp26_, "alias"); _g_free0 (alias); alias = _tmp27_; state = 1; } else { gboolean _tmp28_ = FALSE; if (state == 1) { _tmp28_ = current_token == VALA_MARKUP_TOKEN_TYPE_TEXT; } else { _tmp28_ = FALSE; } if (_tmp28_) { gchar* name = NULL; ValaMarkupReader* _tmp29_; const gchar* _tmp30_; const gchar* _tmp31_; gchar* _tmp32_; gchar* filename = NULL; ValaCodeContext* _tmp33_; ValaCodeContext* _tmp34_; const 
gchar* _tmp35_; const gchar* _tmp36_; gchar* _tmp37_; const gchar* _tmp38_; ValaHashMap* _tmp45_; const gchar* _tmp46_; const gchar* _tmp47_; gchar* _tmp48_; gchar* _tmp49_; const gchar* _tmp50_; _tmp29_ = reader; _tmp30_ = vala_markup_reader_get_content (_tmp29_); _tmp31_ = _tmp30_; _tmp32_ = g_strdup (_tmp31_); name = _tmp32_; _tmp33_ = vala_ccode_base_module_get_context ((ValaCCodeBaseModule*) self); _tmp34_ = _tmp33_; _tmp35_ = gresource; _tmp36_ = name; _tmp37_ = vala_code_context_get_gresource_path (_tmp34_, _tmp35_, _tmp36_); filename = _tmp37_; _tmp38_ = alias; if (_tmp38_ != NULL) { ValaHashMap* _tmp39_; const gchar* _tmp40_; const gchar* _tmp41_; gchar* _tmp42_; gchar* _tmp43_; const gchar* _tmp44_; _tmp39_ = self->priv->gresource_to_file_map; _tmp40_ = prefix; _tmp41_ = alias; _tmp42_ = g_build_filename (_tmp40_, _tmp41_, NULL); _tmp43_ = _tmp42_; _tmp44_ = filename; vala_map_set ((ValaMap*) _tmp39_, _tmp43_, _tmp44_); _g_free0 (_tmp43_); } _tmp45_ = self->priv->gresource_to_file_map; _tmp46_ = prefix; _tmp47_ = name; _tmp48_ = g_build_filename (_tmp46_, _tmp47_, NULL); _tmp49_ = _tmp48_; _tmp50_ = filename; vala_map_set ((ValaMap*) _tmp45_, _tmp49_, _tmp50_); _g_free0 (_tmp49_); state = 0; _g_free0 (filename); _g_free0 (name); } } } _tmp51_ = reader; current_token = vala_markup_reader_read_token (_tmp51_, NULL, NULL); } _g_free0 (alias); _g_free0 (prefix); _vala_markup_reader_unref0 (reader); _g_free0 (gresource); } } } } static gint string_index_of (const gchar* self, const gchar* needle, gint start_index) { gchar* _result_ = NULL; gchar* _tmp0_; gchar* _tmp1_; gint result = 0; g_return_val_if_fail (self != NULL, 0); g_return_val_if_fail (needle != NULL, 0); _tmp0_ = strstr (((gchar*) self) + start_index, (gchar*) needle); _result_ = _tmp0_; _tmp1_ = _result_; if (_tmp1_ != NULL) { gchar* _tmp2_; _tmp2_ = _result_; result = (gint) (_tmp2_ - ((gchar*) self)); return result; } else { result = -1; return result; } } static gchar* string_replace (const gchar* self, const gchar* old, const gchar* replacement) { gboolean _tmp0_ = FALSE; gboolean _tmp1_ = FALSE; GError* _inner_error0_ = NULL; gchar* result = NULL; g_return_val_if_fail (self != NULL, NULL); g_return_val_if_fail (old != NULL, NULL); g_return_val_if_fail (replacement != NULL, NULL); if ((*((gchar*) self)) == '\0') { _tmp1_ = TRUE; } else { _tmp1_ = (*((gchar*) old)) == '\0'; } if (_tmp1_) { _tmp0_ = TRUE; } else { _tmp0_ = g_strcmp0 (old, replacement) == 0; } if (_tmp0_) { gchar* _tmp2_; _tmp2_ = g_strdup (self); result = _tmp2_; return result; } { GRegex* regex = NULL; gchar* _tmp3_; gchar* _tmp4_; GRegex* _tmp5_; GRegex* _tmp6_; gchar* _tmp7_ = NULL; GRegex* _tmp8_; gchar* _tmp9_; gchar* _tmp10_; _tmp3_ = g_regex_escape_string (old, -1); _tmp4_ = _tmp3_; _tmp5_ = g_regex_new (_tmp4_, 0, 0, &_inner_error0_); _tmp6_ = _tmp5_; _g_free0 (_tmp4_); regex = _tmp6_; if (G_UNLIKELY (_inner_error0_ != NULL)) { if (_inner_error0_->domain == G_REGEX_ERROR) { goto __catch0_g_regex_error; } g_critical ("file %s: line %d: unexpected error: %s (%s, %d)", __FILE__, __LINE__, _inner_error0_->message, g_quark_to_string (_inner_error0_->domain), _inner_error0_->code); g_clear_error (&_inner_error0_); return NULL; } _tmp8_ = regex; _tmp9_ = g_regex_replace_literal (_tmp8_, self, (gssize) -1, 0, replacement, 0, &_inner_error0_); _tmp7_ = _tmp9_; if (G_UNLIKELY (_inner_error0_ != NULL)) { _g_regex_unref0 (regex); if (_inner_error0_->domain == G_REGEX_ERROR) { goto __catch0_g_regex_error; } g_critical ("file %s: line %d: unexpected error: %s 
(%s, %d)", __FILE__, __LINE__, _inner_error0_->message, g_quark_to_string (_inner_error0_->domain), _inner_error0_->code); g_clear_error (&_inner_error0_); return NULL; } _tmp10_ = _tmp7_; _tmp7_ = NULL; result = _tmp10_; _g_free0 (_tmp7_); _g_regex_unref0 (regex); return result; } goto __finally0; __catch0_g_regex_error: { g_clear_error (&_inner_error0_); g_assert_not_reached (); } __finally0: g_critical ("file %s: line %d: uncaught error: %s (%s, %d)", __FILE__, __LINE__, _inner_error0_->message, g_quark_to_string (_inner_error0_->domain), _inner_error0_->code); g_clear_error (&_inner_error0_); return NULL; } static void vala_gtk_module_process_current_ui_resource (ValaGtkModule* self, const gchar* ui_resource, ValaCodeNode* node) { gchar* ui_file = NULL; ValaHashMap* _tmp0_; gpointer _tmp1_; gboolean _tmp2_ = FALSE; const gchar* _tmp3_; GHashFunc _tmp7_; GEqualFunc _tmp8_; GEqualFunc _tmp9_; ValaHashMap* _tmp10_; GHashFunc _tmp11_; GEqualFunc _tmp12_; GEqualFunc _tmp13_; ValaHashMap* _tmp14_; GHashFunc _tmp15_; GEqualFunc _tmp16_; GEqualFunc _tmp17_; ValaHashMap* _tmp18_; ValaMarkupReader* reader = NULL; const gchar* _tmp19_; ValaMarkupReader* _tmp20_; ValaClass* current_class = NULL; ValaProperty* current_property = NULL; gchar* current_handler = NULL; gboolean template_tag_found = FALSE; ValaMarkupTokenType current_token = 0; ValaMarkupReader* _tmp21_; g_return_if_fail (self != NULL); g_return_if_fail (ui_resource != NULL); g_return_if_fail (node != NULL); vala_gtk_module_ensure_type_id_to_vala_map (self); vala_gtk_module_ensure_cclass_to_vala_map (self); vala_gtk_module_ensure_gresource_to_file_map (self); _vala_map_unref0 (self->priv->current_handler_to_signal_map); self->priv->current_handler_to_signal_map = NULL; _vala_map_unref0 (self->priv->current_child_to_class_map); self->priv->current_child_to_class_map = NULL; _tmp0_ = self->priv->gresource_to_file_map; _tmp1_ = vala_map_get ((ValaMap*) _tmp0_, ui_resource); ui_file = (gchar*) _tmp1_; _tmp3_ = ui_file; if (_tmp3_ == NULL) { _tmp2_ = TRUE; } else { const gchar* _tmp4_; _tmp4_ = ui_file; _tmp2_ = !g_file_test (_tmp4_, G_FILE_TEST_EXISTS); } if (_tmp2_) { ValaSourceReference* _tmp5_; ValaSourceReference* _tmp6_; vala_code_node_set_error (node, TRUE); _tmp5_ = vala_code_node_get_source_reference (node); _tmp6_ = _tmp5_; vala_report_error (_tmp6_, "UI resource not found: `%s'. 
Please make sure to specify the proper GR" \ "esources xml files with --gresources and alternative search locations " \ "with --gresourcesdir.", ui_resource); _g_free0 (ui_file); return; } _tmp7_ = g_str_hash; _tmp8_ = g_str_equal; _tmp9_ = g_direct_equal; _tmp10_ = vala_hash_map_new (G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, _tmp7_, _tmp8_, _tmp9_); _vala_map_unref0 (self->priv->handler_map); self->priv->handler_map = _tmp10_; _tmp11_ = g_str_hash; _tmp12_ = g_str_equal; _tmp13_ = g_direct_equal; _tmp14_ = vala_hash_map_new (G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, VALA_TYPE_SIGNAL, (GBoxedCopyFunc) vala_code_node_ref, (GDestroyNotify) vala_code_node_unref, _tmp11_, _tmp12_, _tmp13_); _vala_map_unref0 (self->priv->current_handler_to_signal_map); self->priv->current_handler_to_signal_map = _tmp14_; _tmp15_ = g_str_hash; _tmp16_ = g_str_equal; _tmp17_ = g_direct_equal; _tmp18_ = vala_hash_map_new (G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, VALA_TYPE_CLASS, (GBoxedCopyFunc) vala_code_node_ref, (GDestroyNotify) vala_code_node_unref, _tmp15_, _tmp16_, _tmp17_); _vala_map_unref0 (self->priv->current_child_to_class_map); self->priv->current_child_to_class_map = _tmp18_; _tmp19_ = ui_file; _tmp20_ = vala_markup_reader_new (_tmp19_); reader = _tmp20_; current_class = NULL; current_property = NULL; current_handler = NULL; template_tag_found = FALSE; _tmp21_ = reader; current_token = vala_markup_reader_read_token (_tmp21_, NULL, NULL); while (TRUE) { const gchar* current_name = NULL; ValaMarkupReader* _tmp22_; const gchar* _tmp23_; const gchar* _tmp24_; gboolean _tmp25_ = FALSE; ValaMarkupReader* _tmp144_; if (!(current_token != VALA_MARKUP_TOKEN_TYPE_EOF)) { break; } _tmp22_ = reader; _tmp23_ = vala_markup_reader_get_name (_tmp22_); _tmp24_ = _tmp23_; current_name = _tmp24_; if (current_token == VALA_MARKUP_TOKEN_TYPE_START_ELEMENT) { gboolean _tmp26_ = FALSE; const gchar* _tmp27_; _tmp27_ = current_name; if (g_strcmp0 (_tmp27_, "object") == 0) { _tmp26_ = TRUE; } else { const gchar* _tmp28_; _tmp28_ = current_name; _tmp26_ = g_strcmp0 (_tmp28_, "template") == 0; } _tmp25_ = _tmp26_; } else { _tmp25_ = FALSE; } if (_tmp25_) { const gchar* _tmp29_; ValaClass* _tmp37_; ValaClass* _tmp49_; _vala_code_node_unref0 (current_class); current_class = NULL; _tmp29_ = current_name; if (g_strcmp0 (_tmp29_, "object") == 0) { gchar* type_id = NULL; ValaMarkupReader* _tmp30_; gchar* _tmp31_; const gchar* _tmp32_; _tmp30_ = reader; _tmp31_ = vala_markup_reader_get_attribute (_tmp30_, "type-func"); type_id = _tmp31_; _tmp32_ = type_id; if (_tmp32_ != NULL) { ValaHashMap* _tmp33_; const gchar* _tmp34_; gpointer _tmp35_; _tmp33_ = self->priv->type_id_to_vala_map; _tmp34_ = type_id; _tmp35_ = vala_map_get ((ValaMap*) _tmp33_, _tmp34_); _vala_code_node_unref0 (current_class); current_class = (ValaClass*) _tmp35_; } _g_free0 (type_id); } else { const gchar* _tmp36_; _tmp36_ = current_name; if (g_strcmp0 (_tmp36_, "template") == 0) { template_tag_found = TRUE; } } _tmp37_ = current_class; if (_tmp37_ == NULL) { gchar* class_name = NULL; ValaMarkupReader* _tmp38_; gchar* _tmp39_; const gchar* _tmp40_; ValaHashMap* _tmp46_; const gchar* _tmp47_; gpointer _tmp48_; _tmp38_ = reader; _tmp39_ = vala_markup_reader_get_attribute (_tmp38_, "class"); class_name = _tmp39_; _tmp40_ = class_name; if (_tmp40_ == NULL) { ValaSourceReference* _tmp41_; ValaSourceReference* _tmp42_; const gchar* 
_tmp43_; const gchar* _tmp44_; ValaMarkupReader* _tmp45_; _tmp41_ = vala_code_node_get_source_reference (node); _tmp42_ = _tmp41_; _tmp43_ = current_name; _tmp44_ = ui_file; vala_report_error (_tmp42_, "Invalid %s in ui file `%s'", _tmp43_, _tmp44_); _tmp45_ = reader; current_token = vala_markup_reader_read_token (_tmp45_, NULL, NULL); _g_free0 (class_name); continue; } _tmp46_ = self->priv->cclass_to_vala_map; _tmp47_ = class_name; _tmp48_ = vala_map_get ((ValaMap*) _tmp46_, _tmp47_); _vala_code_node_unref0 (current_class); current_class = (ValaClass*) _tmp48_; _g_free0 (class_name); } _tmp49_ = current_class; if (_tmp49_ != NULL) { gchar* child_name = NULL; ValaMarkupReader* _tmp50_; gchar* _tmp51_; const gchar* _tmp52_; _tmp50_ = reader; _tmp51_ = vala_markup_reader_get_attribute (_tmp50_, "id"); child_name = _tmp51_; _tmp52_ = child_name; if (_tmp52_ != NULL) { ValaHashMap* _tmp53_; const gchar* _tmp54_; ValaClass* _tmp55_; _tmp53_ = self->priv->current_child_to_class_map; _tmp54_ = child_name; _tmp55_ = current_class; vala_map_set ((ValaMap*) _tmp53_, _tmp54_, _tmp55_); } _g_free0 (child_name); } } else { gboolean _tmp56_ = FALSE; gboolean _tmp57_ = FALSE; ValaClass* _tmp58_; _tmp58_ = current_class; if (_tmp58_ != NULL) { _tmp57_ = current_token == VALA_MARKUP_TOKEN_TYPE_START_ELEMENT; } else { _tmp57_ = FALSE; } if (_tmp57_) { const gchar* _tmp59_; _tmp59_ = current_name; _tmp56_ = g_strcmp0 (_tmp59_, "signal") == 0; } else { _tmp56_ = FALSE; } if (_tmp56_) { gchar* signal_name = NULL; ValaMarkupReader* _tmp60_; gchar* _tmp61_; gchar* handler_name = NULL; ValaMarkupReader* _tmp62_; gchar* _tmp63_; ValaClass* _tmp64_; _tmp60_ = reader; _tmp61_ = vala_markup_reader_get_attribute (_tmp60_, "name"); signal_name = _tmp61_; _tmp62_ = reader; _tmp63_ = vala_markup_reader_get_attribute (_tmp62_, "handler"); handler_name = _tmp63_; _tmp64_ = current_class; if (_tmp64_ != NULL) { gboolean _tmp65_ = FALSE; const gchar* _tmp66_; gint sep_idx = 0; const gchar* _tmp81_; ValaSignal* sig = NULL; ValaClass* _tmp84_; const gchar* _tmp85_; gchar* _tmp86_; gchar* _tmp87_; ValaSymbol* _tmp88_; ValaSignal* _tmp89_; ValaSignal* _tmp90_; ValaSignal* _tmp91_; _tmp66_ = signal_name; if (_tmp66_ == NULL) { _tmp65_ = TRUE; } else { const gchar* _tmp67_; _tmp67_ = handler_name; _tmp65_ = _tmp67_ == NULL; } if (_tmp65_) { const gchar* _tmp68_; ValaMarkupReader* _tmp80_; _tmp68_ = signal_name; if (_tmp68_ != NULL) { ValaSourceReference* _tmp69_; ValaSourceReference* _tmp70_; const gchar* _tmp71_; const gchar* _tmp72_; _tmp69_ = vala_code_node_get_source_reference (node); _tmp70_ = _tmp69_; _tmp71_ = signal_name; _tmp72_ = ui_file; vala_report_error (_tmp70_, "Invalid signal `%s' without handler in ui file `%s'", _tmp71_, _tmp72_); } else { const gchar* _tmp73_; _tmp73_ = handler_name; if (_tmp73_ != NULL) { ValaSourceReference* _tmp74_; ValaSourceReference* _tmp75_; const gchar* _tmp76_; _tmp74_ = vala_code_node_get_source_reference (node); _tmp75_ = _tmp74_; _tmp76_ = ui_file; vala_report_error (_tmp75_, "Invalid signal without name in ui file `%s'", _tmp76_); } else { ValaSourceReference* _tmp77_; ValaSourceReference* _tmp78_; const gchar* _tmp79_; _tmp77_ = vala_code_node_get_source_reference (node); _tmp78_ = _tmp77_; _tmp79_ = ui_file; vala_report_error (_tmp78_, "Invalid signal without name and handler in ui file `%s'", _tmp79_); } } _tmp80_ = reader; current_token = vala_markup_reader_read_token (_tmp80_, NULL, NULL); _g_free0 (handler_name); _g_free0 (signal_name); continue; } _tmp81_ = signal_name; 
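/* Editorial note: a <signal> name in a GtkBuilder .ui file may carry a
 * detail suffix (e.g. "notify::some-property"); the statements below strip
 * everything from "::" onward and map '-' to '_' before resolving the
 * signal symbol on the current class via the inherited symbol lookup. */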
sep_idx = string_index_of (_tmp81_, "::", 0); if (sep_idx >= 0) { const gchar* _tmp82_; gchar* _tmp83_; _tmp82_ = signal_name; _tmp83_ = string_substring (_tmp82_, (glong) 0, (glong) sep_idx); _g_free0 (signal_name); signal_name = _tmp83_; } _tmp84_ = current_class; _tmp85_ = signal_name; _tmp86_ = string_replace (_tmp85_, "-", "_"); _tmp87_ = _tmp86_; _tmp88_ = vala_semantic_analyzer_symbol_lookup_inherited ((ValaSymbol*) _tmp84_, _tmp87_); _tmp89_ = G_TYPE_CHECK_INSTANCE_TYPE (_tmp88_, VALA_TYPE_SIGNAL) ? ((ValaSignal*) _tmp88_) : NULL; if (_tmp89_ == NULL) { _vala_code_node_unref0 (_tmp88_); } _tmp90_ = _tmp89_; _g_free0 (_tmp87_); sig = _tmp90_; _tmp91_ = sig; if (_tmp91_ != NULL) { ValaHashMap* _tmp92_; const gchar* _tmp93_; ValaSignal* _tmp94_; _tmp92_ = self->priv->current_handler_to_signal_map; _tmp93_ = handler_name; _tmp94_ = sig; vala_map_set ((ValaMap*) _tmp92_, _tmp93_, _tmp94_); } _vala_code_node_unref0 (sig); } _g_free0 (handler_name); _g_free0 (signal_name); } else { gboolean _tmp95_ = FALSE; gboolean _tmp96_ = FALSE; ValaClass* _tmp97_; _tmp97_ = current_class; if (_tmp97_ != NULL) { _tmp96_ = current_token == VALA_MARKUP_TOKEN_TYPE_START_ELEMENT; } else { _tmp96_ = FALSE; } if (_tmp96_) { const gchar* _tmp98_; _tmp98_ = current_name; _tmp95_ = g_strcmp0 (_tmp98_, "binding") == 0; } else { _tmp95_ = FALSE; } if (_tmp95_) { gchar* property_name = NULL; ValaMarkupReader* _tmp99_; gchar* _tmp100_; const gchar* _tmp101_; const gchar* _tmp106_; gchar* _tmp107_; ValaClass* _tmp108_; const gchar* _tmp109_; ValaSymbol* _tmp110_; ValaProperty* _tmp111_; ValaProperty* _tmp112_; _tmp99_ = reader; _tmp100_ = vala_markup_reader_get_attribute (_tmp99_, "name"); property_name = _tmp100_; _tmp101_ = property_name; if (_tmp101_ == NULL) { ValaSourceReference* _tmp102_; ValaSourceReference* _tmp103_; const gchar* _tmp104_; ValaMarkupReader* _tmp105_; _tmp102_ = vala_code_node_get_source_reference (node); _tmp103_ = _tmp102_; _tmp104_ = ui_file; vala_report_error (_tmp103_, "Invalid binding in ui file `%s'", _tmp104_); _tmp105_ = reader; current_token = vala_markup_reader_read_token (_tmp105_, NULL, NULL); _g_free0 (property_name); continue; } _tmp106_ = property_name; _tmp107_ = string_replace (_tmp106_, "-", "_"); _g_free0 (property_name); property_name = _tmp107_; _tmp108_ = current_class; _tmp109_ = property_name; _tmp110_ = vala_semantic_analyzer_symbol_lookup_inherited ((ValaSymbol*) _tmp108_, _tmp109_); _tmp111_ = G_TYPE_CHECK_INSTANCE_TYPE (_tmp110_, VALA_TYPE_PROPERTY) ? 
((ValaProperty*) _tmp110_) : NULL; if (_tmp111_ == NULL) { _vala_code_node_unref0 (_tmp110_); } _vala_code_node_unref0 (current_property); current_property = _tmp111_; _tmp112_ = current_property; if (_tmp112_ == NULL) { ValaSourceReference* _tmp113_; ValaSourceReference* _tmp114_; ValaClass* _tmp115_; gchar* _tmp116_; gchar* _tmp117_; const gchar* _tmp118_; const gchar* _tmp119_; ValaMarkupReader* _tmp120_; _tmp113_ = vala_code_node_get_source_reference (node); _tmp114_ = _tmp113_; _tmp115_ = current_class; _tmp116_ = vala_symbol_get_full_name ((ValaSymbol*) _tmp115_); _tmp117_ = _tmp116_; _tmp118_ = property_name; _tmp119_ = ui_file; vala_report_error (_tmp114_, "Unknown property `%s:%s' for binding in ui file `%s'", _tmp117_, _tmp118_, _tmp119_); _g_free0 (_tmp117_); _tmp120_ = reader; current_token = vala_markup_reader_read_token (_tmp120_, NULL, NULL); _g_free0 (property_name); continue; } _g_free0 (property_name); } else { gboolean _tmp121_ = FALSE; gboolean _tmp122_ = FALSE; ValaClass* _tmp123_; _tmp123_ = current_class; if (_tmp123_ != NULL) { _tmp122_ = current_token == VALA_MARKUP_TOKEN_TYPE_START_ELEMENT; } else { _tmp122_ = FALSE; } if (_tmp122_) { const gchar* _tmp124_; _tmp124_ = current_name; _tmp121_ = g_strcmp0 (_tmp124_, "closure") == 0; } else { _tmp121_ = FALSE; } if (_tmp121_) { gchar* handler_name = NULL; ValaMarkupReader* _tmp125_; gchar* _tmp126_; ValaProperty* _tmp127_; _tmp125_ = reader; _tmp126_ = vala_markup_reader_get_attribute (_tmp125_, "function"); handler_name = _tmp126_; _tmp127_ = current_property; if (_tmp127_ != NULL) { const gchar* _tmp128_; ValaHashMap* _tmp133_; const gchar* _tmp134_; ValaProperty* _tmp135_; const gchar* _tmp136_; gchar* _tmp137_; _tmp128_ = handler_name; if (_tmp128_ == NULL) { ValaSourceReference* _tmp129_; ValaSourceReference* _tmp130_; const gchar* _tmp131_; ValaMarkupReader* _tmp132_; _tmp129_ = vala_code_node_get_source_reference (node); _tmp130_ = _tmp129_; _tmp131_ = ui_file; vala_report_error (_tmp130_, "Invalid closure in ui file `%s'", _tmp131_); _tmp132_ = reader; current_token = vala_markup_reader_read_token (_tmp132_, NULL, NULL); _g_free0 (handler_name); continue; } _tmp133_ = self->priv->current_handler_to_property_map; _tmp134_ = handler_name; _tmp135_ = current_property; vala_map_set ((ValaMap*) _tmp133_, _tmp134_, _tmp135_); _tmp136_ = handler_name; _tmp137_ = g_strdup (_tmp136_); _g_free0 (current_handler); current_handler = _tmp137_; _vala_code_node_unref0 (current_property); current_property = NULL; } else { const gchar* _tmp138_; _tmp138_ = current_handler; if (_tmp138_ != NULL) { ValaHashMap* _tmp139_; const gchar* _tmp140_; const gchar* _tmp141_; const gchar* _tmp142_; gchar* _tmp143_; _tmp139_ = self->priv->handler_map; _tmp140_ = handler_name; _tmp141_ = current_handler; vala_map_set ((ValaMap*) _tmp139_, _tmp140_, _tmp141_); _tmp142_ = handler_name; _tmp143_ = g_strdup (_tmp142_); _g_free0 (current_handler); current_handler = _tmp143_; } } _g_free0 (handler_name); } } } } _tmp144_ = reader; current_token = vala_markup_reader_read_token (_tmp144_, NULL, NULL); } if (!template_tag_found) { ValaSourceReference* _tmp145_; ValaSourceReference* _tmp146_; _tmp145_ = vala_code_node_get_source_reference (node); _tmp146_ = _tmp145_; vala_report_error (_tmp146_, "ui resource `%s' does not describe a valid composite template", ui_resource); } _g_free0 (current_handler); _vala_code_node_unref0 (current_property); _vala_code_node_unref0 (current_class); _vala_markup_reader_unref0 (reader); _g_free0 (ui_file); } static 
gpointer _vala_code_node_ref0 (gpointer self) { return self ? vala_code_node_ref (self) : NULL; } static gboolean vala_gtk_module_is_gtk_template (ValaGtkModule* self, ValaClass* cl) { ValaAttribute* attr = NULL; ValaAttribute* _tmp0_; ValaAttribute* _tmp1_; ValaAttribute* _tmp2_; gboolean result = FALSE; g_return_val_if_fail (self != NULL, FALSE); g_return_val_if_fail (cl != NULL, FALSE); _tmp0_ = vala_code_node_get_attribute ((ValaCodeNode*) cl, "GtkTemplate"); _tmp1_ = _vala_code_node_ref0 (_tmp0_); attr = _tmp1_; _tmp2_ = attr; if (_tmp2_ != NULL) { gboolean _tmp3_ = FALSE; ValaClass* _tmp4_; _tmp4_ = ((ValaCCodeBaseModule*) self)->gtk_widget_type; if (_tmp4_ == NULL) { _tmp3_ = TRUE; } else { ValaClass* _tmp5_; _tmp5_ = ((ValaCCodeBaseModule*) self)->gtk_widget_type; _tmp3_ = !vala_typesymbol_is_subtype_of ((ValaTypeSymbol*) cl, (ValaTypeSymbol*) _tmp5_); } if (_tmp3_) { gboolean _tmp6_; gboolean _tmp7_; _tmp6_ = vala_code_node_get_error ((ValaCodeNode*) cl); _tmp7_ = _tmp6_; if (!_tmp7_) { ValaAttribute* _tmp8_; ValaSourceReference* _tmp9_; ValaSourceReference* _tmp10_; _tmp8_ = attr; _tmp9_ = vala_code_node_get_source_reference ((ValaCodeNode*) _tmp8_); _tmp10_ = _tmp9_; vala_report_error (_tmp10_, "subclassing Gtk.Widget is required for using Gtk templates"); vala_code_node_set_error ((ValaCodeNode*) cl, TRUE); } result = FALSE; _vala_code_node_unref0 (attr); return result; } result = TRUE; _vala_code_node_unref0 (attr); return result; } result = FALSE; _vala_code_node_unref0 (attr); return result; } static void vala_gtk_module_real_generate_class_init (ValaGTypeModule* base, ValaClass* cl) { ValaGtkModule * self; gboolean _tmp0_ = FALSE; gboolean _tmp1_; gboolean _tmp2_; gchar* ui = NULL; gchar* _tmp3_; const gchar* _tmp4_; const gchar* _tmp7_; ValaCCodeFunctionCall* call = NULL; ValaCCodeIdentifier* _tmp8_; ValaCCodeIdentifier* _tmp9_; ValaCCodeFunctionCall* _tmp10_; ValaCCodeFunctionCall* _tmp11_; ValaCCodeFunctionCall* _tmp12_; ValaCCodeIdentifier* _tmp13_; ValaCCodeIdentifier* _tmp14_; ValaCCodeFunctionCall* _tmp15_; const gchar* _tmp16_; gchar* _tmp17_; gchar* _tmp18_; ValaCCodeConstant* _tmp19_; ValaCCodeConstant* _tmp20_; ValaCCodeFunction* _tmp21_; ValaCCodeFunction* _tmp22_; ValaCCodeFunctionCall* _tmp23_; ValaList* _tmp24_; self = (ValaGtkModule*) base; g_return_if_fail (cl != NULL); VALA_GTYPE_MODULE_CLASS (vala_gtk_module_parent_class)->generate_class_init ((ValaGTypeModule*) G_TYPE_CHECK_INSTANCE_CAST (self, VALA_TYPE_GSIGNAL_MODULE, ValaGSignalModule), cl); _tmp1_ = vala_code_node_get_error ((ValaCodeNode*) cl); _tmp2_ = _tmp1_; if (_tmp2_) { _tmp0_ = TRUE; } else { _tmp0_ = !vala_gtk_module_is_gtk_template (self, cl); } if (_tmp0_) { return; } _tmp3_ = vala_code_node_get_attribute_string ((ValaCodeNode*) cl, "GtkTemplate", "ui", NULL); ui = _tmp3_; _tmp4_ = ui; if (_tmp4_ == NULL) { ValaSourceReference* _tmp5_; ValaSourceReference* _tmp6_; _tmp5_ = vala_code_node_get_source_reference ((ValaCodeNode*) cl); _tmp6_ = _tmp5_; vala_report_error (_tmp6_, "empty ui resource declaration for Gtk widget template"); vala_code_node_set_error ((ValaCodeNode*) cl, TRUE); _g_free0 (ui); return; } _tmp7_ = ui; vala_gtk_module_process_current_ui_resource (self, _tmp7_, (ValaCodeNode*) cl); _tmp8_ = vala_ccode_identifier_new ("gtk_widget_class_set_template_from_resource"); _tmp9_ = _tmp8_; _tmp10_ = vala_ccode_function_call_new ((ValaCCodeExpression*) _tmp9_); _tmp11_ = _tmp10_; _vala_ccode_node_unref0 (_tmp9_); call = _tmp11_; _tmp12_ = call; _tmp13_ = vala_ccode_identifier_new 
("GTK_WIDGET_CLASS (klass)"); _tmp14_ = _tmp13_; vala_ccode_function_call_add_argument (_tmp12_, (ValaCCodeExpression*) _tmp14_); _vala_ccode_node_unref0 (_tmp14_); _tmp15_ = call; _tmp16_ = ui; _tmp17_ = g_strdup_printf ("\"%s\"", _tmp16_); _tmp18_ = _tmp17_; _tmp19_ = vala_ccode_constant_new (_tmp18_); _tmp20_ = _tmp19_; vala_ccode_function_call_add_argument (_tmp15_, (ValaCCodeExpression*) _tmp20_); _vala_ccode_node_unref0 (_tmp20_); _g_free0 (_tmp18_); _tmp21_ = vala_ccode_base_module_get_ccode ((ValaCCodeBaseModule*) self); _tmp22_ = _tmp21_; _tmp23_ = call; vala_ccode_function_add_expression (_tmp22_, (ValaCCodeExpression*) _tmp23_); _tmp24_ = self->priv->current_required_app_classes; vala_collection_clear ((ValaCollection*) _tmp24_); _vala_ccode_node_unref0 (call); _g_free0 (ui); } static void vala_gtk_module_real_visit_property (ValaCodeVisitor* base, ValaProperty* prop) { ValaGtkModule * self; gboolean _tmp0_ = FALSE; ValaAttribute* _tmp1_; self = (ValaGtkModule*) base; g_return_if_fail (prop != NULL); _tmp1_ = vala_code_node_get_attribute ((ValaCodeNode*) prop, "GtkChild"); if (_tmp1_ != NULL) { ValaField* _tmp2_; ValaField* _tmp3_; _tmp2_ = vala_property_get_field (prop); _tmp3_ = _tmp2_; _tmp0_ = _tmp3_ == NULL; } else { _tmp0_ = FALSE; } if (_tmp0_) { ValaSourceReference* _tmp4_; ValaSourceReference* _tmp5_; _tmp4_ = vala_code_node_get_source_reference ((ValaCodeNode*) prop); _tmp5_ = _tmp4_; vala_report_error (_tmp5_, "[GtkChild] is only allowed on automatic properties"); } VALA_CODE_VISITOR_CLASS (vala_gtk_module_parent_class)->visit_property ((ValaCodeVisitor*) G_TYPE_CHECK_INSTANCE_CAST (self, VALA_TYPE_GSIGNAL_MODULE, ValaGSignalModule), prop); } static gpointer _vala_ccode_node_ref0 (gpointer self) { return self ? vala_ccode_node_ref (self) : NULL; } static void vala_gtk_module_real_visit_field (ValaCodeVisitor* base, ValaField* f) { ValaGtkModule * self; ValaClass* cl = NULL; ValaClass* _tmp0_; ValaClass* _tmp1_; ValaClass* _tmp2_; gboolean _tmp3_ = FALSE; ValaClass* _tmp4_; gboolean _tmp8_ = FALSE; ValaMemberBinding _tmp9_; ValaMemberBinding _tmp10_; ValaClass* _tmp12_; ValaCCodeBaseModuleEmitContext* _tmp15_; gchar* gtk_name = NULL; const gchar* _tmp16_; const gchar* _tmp17_; gchar* _tmp18_; ValaClass* child_class = NULL; ValaHashMap* _tmp19_; const gchar* _tmp20_; gpointer _tmp21_; ValaClass* _tmp22_; ValaClass* field_class = NULL; ValaDataType* _tmp26_; ValaDataType* _tmp27_; ValaTypeSymbol* _tmp28_; ValaTypeSymbol* _tmp29_; gboolean _tmp30_ = FALSE; ValaClass* _tmp31_; gboolean internal_child = FALSE; ValaCCodeExpression* offset = NULL; ValaCCodeFunctionCall* call = NULL; ValaCCodeIdentifier* _tmp85_; ValaCCodeIdentifier* _tmp86_; ValaCCodeFunctionCall* _tmp87_; ValaCCodeFunctionCall* _tmp88_; ValaCCodeFunctionCall* _tmp89_; ValaCCodeIdentifier* _tmp90_; ValaCCodeIdentifier* _tmp91_; ValaCCodeFunctionCall* _tmp92_; const gchar* _tmp93_; gchar* _tmp94_; gchar* _tmp95_; ValaCCodeConstant* _tmp96_; ValaCCodeConstant* _tmp97_; const gchar* _tmp98_ = NULL; ValaCCodeFunctionCall* _tmp99_; ValaCCodeConstant* _tmp100_; ValaCCodeConstant* _tmp101_; ValaCCodeFunctionCall* _tmp102_; ValaCCodeExpression* _tmp103_; ValaCCodeFunction* _tmp104_; ValaCCodeFunction* _tmp105_; ValaCCodeFunctionCall* _tmp106_; gboolean _tmp107_ = FALSE; ValaClass* _tmp108_; gboolean _tmp109_; gboolean _tmp110_; self = (ValaGtkModule*) base; g_return_if_fail (f != NULL); VALA_CODE_VISITOR_CLASS (vala_gtk_module_parent_class)->visit_field ((ValaCodeVisitor*) G_TYPE_CHECK_INSTANCE_CAST (self, 
VALA_TYPE_GSIGNAL_MODULE, ValaGSignalModule), f); _tmp0_ = vala_ccode_base_module_get_current_class ((ValaCCodeBaseModule*) self); _tmp1_ = _tmp0_; _tmp2_ = _vala_code_node_ref0 (_tmp1_); cl = _tmp2_; _tmp4_ = cl; if (_tmp4_ == NULL) { _tmp3_ = TRUE; } else { ValaClass* _tmp5_; gboolean _tmp6_; gboolean _tmp7_; _tmp5_ = cl; _tmp6_ = vala_code_node_get_error ((ValaCodeNode*) _tmp5_); _tmp7_ = _tmp6_; _tmp3_ = _tmp7_; } if (_tmp3_) { _vala_code_node_unref0 (cl); return; } _tmp9_ = vala_field_get_binding (f); _tmp10_ = _tmp9_; if (_tmp10_ != VALA_MEMBER_BINDING_INSTANCE) { _tmp8_ = TRUE; } else { ValaAttribute* _tmp11_; _tmp11_ = vala_code_node_get_attribute ((ValaCodeNode*) f, "GtkChild"); _tmp8_ = _tmp11_ == NULL; } if (_tmp8_) { _vala_code_node_unref0 (cl); return; } _tmp12_ = cl; if (!vala_gtk_module_is_gtk_template (self, _tmp12_)) { ValaSourceReference* _tmp13_; ValaSourceReference* _tmp14_; _tmp13_ = vala_code_node_get_source_reference ((ValaCodeNode*) f); _tmp14_ = _tmp13_; vala_report_error (_tmp14_, "[GtkChild] is only allowed in classes with a [GtkTemplate] attribute"); _vala_code_node_unref0 (cl); return; } _tmp15_ = ((ValaCCodeBaseModule*) self)->class_init_context; vala_ccode_base_module_push_context ((ValaCCodeBaseModule*) self, _tmp15_); _tmp16_ = vala_symbol_get_name ((ValaSymbol*) f); _tmp17_ = _tmp16_; _tmp18_ = vala_code_node_get_attribute_string ((ValaCodeNode*) f, "GtkChild", "name", _tmp17_); gtk_name = _tmp18_; _tmp19_ = self->priv->current_child_to_class_map; _tmp20_ = gtk_name; _tmp21_ = vala_map_get ((ValaMap*) _tmp19_, _tmp20_); child_class = (ValaClass*) _tmp21_; _tmp22_ = child_class; if (_tmp22_ == NULL) { ValaSourceReference* _tmp23_; ValaSourceReference* _tmp24_; const gchar* _tmp25_; _tmp23_ = vala_code_node_get_source_reference ((ValaCodeNode*) f); _tmp24_ = _tmp23_; _tmp25_ = gtk_name; vala_report_error (_tmp24_, "could not find child `%s'", _tmp25_); _vala_code_node_unref0 (child_class); _g_free0 (gtk_name); _vala_code_node_unref0 (cl); return; } _tmp26_ = vala_variable_get_variable_type ((ValaVariable*) f); _tmp27_ = _tmp26_; _tmp28_ = vala_data_type_get_type_symbol (_tmp27_); _tmp29_ = _tmp28_; field_class = G_TYPE_CHECK_INSTANCE_TYPE (_tmp29_, VALA_TYPE_CLASS) ? 
((ValaClass*) _tmp29_) : NULL; _tmp31_ = field_class; if (_tmp31_ == NULL) { _tmp30_ = TRUE; } else { ValaClass* _tmp32_; ValaClass* _tmp33_; _tmp32_ = child_class; _tmp33_ = field_class; _tmp30_ = !vala_typesymbol_is_subtype_of ((ValaTypeSymbol*) _tmp32_, (ValaTypeSymbol*) _tmp33_); } if (_tmp30_) { ValaSourceReference* _tmp34_; ValaSourceReference* _tmp35_; ValaClass* _tmp36_; gchar* _tmp37_; gchar* _tmp38_; ValaClass* _tmp39_; gchar* _tmp40_; gchar* _tmp41_; _tmp34_ = vala_code_node_get_source_reference ((ValaCodeNode*) f); _tmp35_ = _tmp34_; _tmp36_ = child_class; _tmp37_ = vala_symbol_get_full_name ((ValaSymbol*) _tmp36_); _tmp38_ = _tmp37_; _tmp39_ = field_class; _tmp40_ = vala_symbol_get_full_name ((ValaSymbol*) _tmp39_); _tmp41_ = _tmp40_; vala_report_error (_tmp35_, "cannot convert from Gtk child type `%s' to `%s'", _tmp38_, _tmp41_); _g_free0 (_tmp41_); _g_free0 (_tmp38_); _vala_code_node_unref0 (child_class); _g_free0 (gtk_name); _vala_code_node_unref0 (cl); return; } internal_child = vala_code_node_get_attribute_bool ((ValaCodeNode*) f, "GtkChild", "internal", FALSE); if (vala_symbol_is_private_symbol ((ValaSymbol*) f)) { ValaCCodeFunctionCall* private_field_offset = NULL; ValaCCodeIdentifier* _tmp42_; ValaCCodeIdentifier* _tmp43_; ValaCCodeFunctionCall* _tmp44_; ValaCCodeFunctionCall* _tmp45_; ValaCCodeFunctionCall* _tmp46_; ValaClass* _tmp47_; gchar* _tmp48_; gchar* _tmp49_; gchar* _tmp50_; gchar* _tmp51_; ValaCCodeIdentifier* _tmp52_; ValaCCodeIdentifier* _tmp53_; ValaCCodeFunctionCall* _tmp54_; gchar* _tmp55_; gchar* _tmp56_; ValaCCodeIdentifier* _tmp57_; ValaCCodeIdentifier* _tmp58_; ValaClass* _tmp59_; gchar* _tmp60_; gchar* _tmp61_; gchar* _tmp62_; gchar* _tmp63_; ValaCCodeIdentifier* _tmp64_; ValaCCodeIdentifier* _tmp65_; ValaCCodeFunctionCall* _tmp66_; ValaCCodeBinaryExpression* _tmp67_; _tmp42_ = vala_ccode_identifier_new ("G_STRUCT_OFFSET"); _tmp43_ = _tmp42_; _tmp44_ = vala_ccode_function_call_new ((ValaCCodeExpression*) _tmp43_); _tmp45_ = _tmp44_; _vala_ccode_node_unref0 (_tmp43_); private_field_offset = _tmp45_; _tmp46_ = private_field_offset; _tmp47_ = cl; _tmp48_ = vala_get_ccode_name ((ValaCodeNode*) _tmp47_); _tmp49_ = _tmp48_; _tmp50_ = g_strdup_printf ("%sPrivate", _tmp49_); _tmp51_ = _tmp50_; _tmp52_ = vala_ccode_identifier_new (_tmp51_); _tmp53_ = _tmp52_; vala_ccode_function_call_add_argument (_tmp46_, (ValaCCodeExpression*) _tmp53_); _vala_ccode_node_unref0 (_tmp53_); _g_free0 (_tmp51_); _g_free0 (_tmp49_); _tmp54_ = private_field_offset; _tmp55_ = vala_get_ccode_name ((ValaCodeNode*) f); _tmp56_ = _tmp55_; _tmp57_ = vala_ccode_identifier_new (_tmp56_); _tmp58_ = _tmp57_; vala_ccode_function_call_add_argument (_tmp54_, (ValaCCodeExpression*) _tmp58_); _vala_ccode_node_unref0 (_tmp58_); _g_free0 (_tmp56_); _tmp59_ = cl; _tmp60_ = vala_get_ccode_name ((ValaCodeNode*) _tmp59_); _tmp61_ = _tmp60_; _tmp62_ = g_strdup_printf ("%s_private_offset", _tmp61_); _tmp63_ = _tmp62_; _tmp64_ = vala_ccode_identifier_new (_tmp63_); _tmp65_ = _tmp64_; _tmp66_ = private_field_offset; _tmp67_ = vala_ccode_binary_expression_new (VALA_CCODE_BINARY_OPERATOR_PLUS, (ValaCCodeExpression*) _tmp65_, (ValaCCodeExpression*) _tmp66_); _vala_ccode_node_unref0 (offset); offset = (ValaCCodeExpression*) _tmp67_; _vala_ccode_node_unref0 (_tmp65_); _g_free0 (_tmp63_); _g_free0 (_tmp61_); _vala_ccode_node_unref0 (private_field_offset); } else { ValaCCodeFunctionCall* offset_call = NULL; ValaCCodeIdentifier* _tmp68_; ValaCCodeIdentifier* _tmp69_; ValaCCodeFunctionCall* _tmp70_; 
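/* Editorial note: this else-branch covers public [GtkChild] fields, whose
 * template-child offset is simply G_STRUCT_OFFSET (<Class>, <field>); the
 * branch above handled private fields, whose offset is G_STRUCT_OFFSET into
 * the <Class>Private struct plus the class's runtime private offset. */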
ValaCCodeFunctionCall* _tmp71_; ValaCCodeFunctionCall* _tmp72_; ValaClass* _tmp73_; gchar* _tmp74_; gchar* _tmp75_; ValaCCodeIdentifier* _tmp76_; ValaCCodeIdentifier* _tmp77_; ValaCCodeFunctionCall* _tmp78_; gchar* _tmp79_; gchar* _tmp80_; ValaCCodeIdentifier* _tmp81_; ValaCCodeIdentifier* _tmp82_; ValaCCodeFunctionCall* _tmp83_; ValaCCodeExpression* _tmp84_; _tmp68_ = vala_ccode_identifier_new ("G_STRUCT_OFFSET"); _tmp69_ = _tmp68_; _tmp70_ = vala_ccode_function_call_new ((ValaCCodeExpression*) _tmp69_); _tmp71_ = _tmp70_; _vala_ccode_node_unref0 (_tmp69_); offset_call = _tmp71_; _tmp72_ = offset_call; _tmp73_ = cl; _tmp74_ = vala_get_ccode_name ((ValaCodeNode*) _tmp73_); _tmp75_ = _tmp74_; _tmp76_ = vala_ccode_identifier_new (_tmp75_); _tmp77_ = _tmp76_; vala_ccode_function_call_add_argument (_tmp72_, (ValaCCodeExpression*) _tmp77_); _vala_ccode_node_unref0 (_tmp77_); _g_free0 (_tmp75_); _tmp78_ = offset_call; _tmp79_ = vala_get_ccode_name ((ValaCodeNode*) f); _tmp80_ = _tmp79_; _tmp81_ = vala_ccode_identifier_new (_tmp80_); _tmp82_ = _tmp81_; vala_ccode_function_call_add_argument (_tmp78_, (ValaCCodeExpression*) _tmp82_); _vala_ccode_node_unref0 (_tmp82_); _g_free0 (_tmp80_); _tmp83_ = offset_call; _tmp84_ = _vala_ccode_node_ref0 ((ValaCCodeExpression*) _tmp83_); _vala_ccode_node_unref0 (offset); offset = _tmp84_; _vala_ccode_node_unref0 (offset_call); } _tmp85_ = vala_ccode_identifier_new ("gtk_widget_class_bind_template_child_full"); _tmp86_ = _tmp85_; _tmp87_ = vala_ccode_function_call_new ((ValaCCodeExpression*) _tmp86_); _tmp88_ = _tmp87_; _vala_ccode_node_unref0 (_tmp86_); call = _tmp88_; _tmp89_ = call; _tmp90_ = vala_ccode_identifier_new ("GTK_WIDGET_CLASS (klass)"); _tmp91_ = _tmp90_; vala_ccode_function_call_add_argument (_tmp89_, (ValaCCodeExpression*) _tmp91_); _vala_ccode_node_unref0 (_tmp91_); _tmp92_ = call; _tmp93_ = gtk_name; _tmp94_ = g_strdup_printf ("\"%s\"", _tmp93_); _tmp95_ = _tmp94_; _tmp96_ = vala_ccode_constant_new (_tmp95_); _tmp97_ = _tmp96_; vala_ccode_function_call_add_argument (_tmp92_, (ValaCCodeExpression*) _tmp97_); _vala_ccode_node_unref0 (_tmp97_); _g_free0 (_tmp95_); if (internal_child) { _tmp98_ = "TRUE"; } else { _tmp98_ = "FALSE"; } _tmp99_ = call; _tmp100_ = vala_ccode_constant_new (_tmp98_); _tmp101_ = _tmp100_; vala_ccode_function_call_add_argument (_tmp99_, (ValaCCodeExpression*) _tmp101_); _vala_ccode_node_unref0 (_tmp101_); _tmp102_ = call; _tmp103_ = offset; vala_ccode_function_call_add_argument (_tmp102_, _tmp103_); _tmp104_ = vala_ccode_base_module_get_ccode ((ValaCCodeBaseModule*) self); _tmp105_ = _tmp104_; _tmp106_ = call; vala_ccode_function_add_expression (_tmp105_, (ValaCCodeExpression*) _tmp106_); vala_ccode_base_module_pop_context ((ValaCCodeBaseModule*) self); _tmp108_ = field_class; _tmp109_ = vala_symbol_get_external ((ValaSymbol*) _tmp108_); _tmp110_ = _tmp109_; if (!_tmp110_) { ValaClass* _tmp111_; gboolean _tmp112_; gboolean _tmp113_; _tmp111_ = field_class; _tmp112_ = vala_symbol_get_external_package ((ValaSymbol*) _tmp111_); _tmp113_ = _tmp112_; _tmp107_ = !_tmp113_; } else { _tmp107_ = FALSE; } if (_tmp107_) { ValaList* _tmp114_; ValaClass* _tmp115_; _tmp114_ = self->priv->current_required_app_classes; _tmp115_ = field_class; vala_collection_add ((ValaCollection*) _tmp114_, _tmp115_); } _vala_ccode_node_unref0 (call); _vala_ccode_node_unref0 (offset); _vala_code_node_unref0 (child_class); _g_free0 (gtk_name); _vala_code_node_unref0 (cl); } static void vala_gtk_module_real_visit_method (ValaCodeVisitor* base, ValaMethod* 
m) { ValaGtkModule * self; ValaClass* cl = NULL; ValaClass* _tmp0_; ValaClass* _tmp1_; ValaClass* _tmp2_; gboolean _tmp3_ = FALSE; gboolean _tmp4_ = FALSE; ValaClass* _tmp5_; gboolean _tmp10_ = FALSE; ValaMemberBinding _tmp11_; ValaMemberBinding _tmp12_; gchar* handler_name = NULL; const gchar* _tmp14_; const gchar* _tmp15_; gchar* _tmp16_; gchar* callback = NULL; ValaHashMap* _tmp17_; const gchar* _tmp18_; gpointer _tmp19_; ValaSignal* sig = NULL; ValaHashMap* _tmp20_; const gchar* _tmp21_; gpointer _tmp22_; ValaProperty* prop = NULL; ValaHashMap* _tmp23_; const gchar* _tmp24_; gpointer _tmp25_; gboolean _tmp26_ = FALSE; gboolean _tmp27_ = FALSE; const gchar* _tmp28_; ValaCCodeBaseModuleEmitContext* _tmp34_; ValaSignal* _tmp35_; gboolean _tmp86_ = FALSE; ValaProperty* _tmp87_; self = (ValaGtkModule*) base; g_return_if_fail (m != NULL); VALA_CODE_VISITOR_CLASS (vala_gtk_module_parent_class)->visit_method ((ValaCodeVisitor*) G_TYPE_CHECK_INSTANCE_CAST (self, VALA_TYPE_GSIGNAL_MODULE, ValaGSignalModule), m); _tmp0_ = vala_ccode_base_module_get_current_class ((ValaCCodeBaseModule*) self); _tmp1_ = _tmp0_; _tmp2_ = _vala_code_node_ref0 (_tmp1_); cl = _tmp2_; _tmp5_ = cl; if (_tmp5_ == NULL) { _tmp4_ = TRUE; } else { ValaClass* _tmp6_; gboolean _tmp7_; gboolean _tmp8_; _tmp6_ = cl; _tmp7_ = vala_code_node_get_error ((ValaCodeNode*) _tmp6_); _tmp8_ = _tmp7_; _tmp4_ = _tmp8_; } if (_tmp4_) { _tmp3_ = TRUE; } else { ValaClass* _tmp9_; _tmp9_ = cl; _tmp3_ = !vala_gtk_module_is_gtk_template (self, _tmp9_); } if (_tmp3_) { _vala_code_node_unref0 (cl); return; } _tmp11_ = vala_method_get_binding (m); _tmp12_ = _tmp11_; if (_tmp12_ != VALA_MEMBER_BINDING_INSTANCE) { _tmp10_ = TRUE; } else { ValaAttribute* _tmp13_; _tmp13_ = vala_code_node_get_attribute ((ValaCodeNode*) m, "GtkCallback"); _tmp10_ = _tmp13_ == NULL; } if (_tmp10_) { _vala_code_node_unref0 (cl); return; } _tmp14_ = vala_symbol_get_name ((ValaSymbol*) m); _tmp15_ = _tmp14_; _tmp16_ = vala_code_node_get_attribute_string ((ValaCodeNode*) m, "GtkCallback", "name", _tmp15_); handler_name = _tmp16_; _tmp17_ = self->priv->handler_map; _tmp18_ = handler_name; _tmp19_ = vala_map_get ((ValaMap*) _tmp17_, _tmp18_); callback = (gchar*) _tmp19_; _tmp20_ = self->priv->current_handler_to_signal_map; _tmp21_ = handler_name; _tmp22_ = vala_map_get ((ValaMap*) _tmp20_, _tmp21_); sig = (ValaSignal*) _tmp22_; _tmp23_ = self->priv->current_handler_to_property_map; _tmp24_ = handler_name; _tmp25_ = vala_map_get ((ValaMap*) _tmp23_, _tmp24_); prop = (ValaProperty*) _tmp25_; _tmp28_ = callback; if (_tmp28_ == NULL) { ValaSignal* _tmp29_; _tmp29_ = sig; _tmp27_ = _tmp29_ == NULL; } else { _tmp27_ = FALSE; } if (_tmp27_) { ValaProperty* _tmp30_; _tmp30_ = prop; _tmp26_ = _tmp30_ == NULL; } else { _tmp26_ = FALSE; } if (_tmp26_) { ValaSourceReference* _tmp31_; ValaSourceReference* _tmp32_; const gchar* _tmp33_; _tmp31_ = vala_code_node_get_source_reference ((ValaCodeNode*) m); _tmp32_ = _tmp31_; _tmp33_ = handler_name; vala_report_error (_tmp32_, "could not find signal or property for handler `%s'", _tmp33_); _vala_code_node_unref0 (prop); _vala_code_node_unref0 (sig); _g_free0 (callback); _g_free0 (handler_name); _vala_code_node_unref0 (cl); return; } _tmp34_ = ((ValaCCodeBaseModule*) self)->class_init_context; vala_ccode_base_module_push_context ((ValaCCodeBaseModule*) self, _tmp34_); _tmp35_ = sig; if (_tmp35_ != NULL) { ValaSignal* _tmp36_; ValaCodeContext* _tmp37_; ValaCodeContext* _tmp38_; ValaMethodType* method_type = NULL; ValaMethodType* _tmp39_; 
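/* Editorial note: when the .ui handler resolves to a signal, the
 * [GtkCallback] method's type is checked for compatibility against the
 * signal's handler delegate type; on success a generated delegate wrapper
 * is registered via gtk_widget_class_bind_template_callback_full() below. */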
ValaSignalType* signal_type = NULL; ValaSignal* _tmp40_; ValaSignalType* _tmp41_; ValaDelegateType* delegate_type = NULL; ValaSignalType* _tmp42_; ValaDelegateType* _tmp43_; ValaMethodType* _tmp44_; ValaDelegateType* _tmp45_; _tmp36_ = sig; _tmp37_ = vala_ccode_base_module_get_context ((ValaCCodeBaseModule*) self); _tmp38_ = _tmp37_; vala_code_node_check ((ValaCodeNode*) _tmp36_, _tmp38_); _tmp39_ = vala_method_type_new (m); method_type = _tmp39_; _tmp40_ = sig; _tmp41_ = vala_signal_type_new (_tmp40_); signal_type = _tmp41_; _tmp42_ = signal_type; _tmp43_ = vala_signal_type_get_handler_type (_tmp42_); delegate_type = _tmp43_; _tmp44_ = method_type; _tmp45_ = delegate_type; if (!vala_data_type_compatible ((ValaDataType*) _tmp44_, (ValaDataType*) _tmp45_)) { ValaSourceReference* _tmp46_; ValaSourceReference* _tmp47_; ValaMethodType* _tmp48_; gchar* _tmp49_; gchar* _tmp50_; ValaDelegateType* _tmp51_; gchar* _tmp52_; gchar* _tmp53_; ValaDelegateType* _tmp54_; const gchar* _tmp55_; const gchar* _tmp56_; gchar* _tmp57_; gchar* _tmp58_; _tmp46_ = vala_code_node_get_source_reference ((ValaCodeNode*) m); _tmp47_ = _tmp46_; _tmp48_ = method_type; _tmp49_ = vala_code_node_to_string ((ValaCodeNode*) _tmp48_); _tmp50_ = _tmp49_; _tmp51_ = delegate_type; _tmp52_ = vala_code_node_to_string ((ValaCodeNode*) _tmp51_); _tmp53_ = _tmp52_; _tmp54_ = delegate_type; _tmp55_ = vala_symbol_get_name ((ValaSymbol*) m); _tmp56_ = _tmp55_; _tmp57_ = vala_data_type_to_prototype_string ((ValaDataType*) _tmp54_, _tmp56_); _tmp58_ = _tmp57_; vala_report_error (_tmp47_, "method `%s' is incompatible with signal `%s', expected `%s'", _tmp50_, _tmp53_, _tmp58_); _g_free0 (_tmp58_); _g_free0 (_tmp53_); _g_free0 (_tmp50_); } else { gchar* wrapper = NULL; ValaSignalType* _tmp59_; ValaDelegateType* _tmp60_; ValaDelegateType* _tmp61_; gchar* _tmp62_; gchar* _tmp63_; ValaCCodeFunctionCall* call = NULL; ValaCCodeIdentifier* _tmp64_; ValaCCodeIdentifier* _tmp65_; ValaCCodeFunctionCall* _tmp66_; ValaCCodeFunctionCall* _tmp67_; ValaCCodeFunctionCall* _tmp68_; ValaCCodeIdentifier* _tmp69_; ValaCCodeIdentifier* _tmp70_; ValaCCodeFunctionCall* _tmp71_; const gchar* _tmp72_; gchar* _tmp73_; gchar* _tmp74_; ValaCCodeConstant* _tmp75_; ValaCCodeConstant* _tmp76_; ValaCCodeFunctionCall* _tmp77_; const gchar* _tmp78_; gchar* _tmp79_; gchar* _tmp80_; ValaCCodeIdentifier* _tmp81_; ValaCCodeIdentifier* _tmp82_; ValaCCodeFunction* _tmp83_; ValaCCodeFunction* _tmp84_; ValaCCodeFunctionCall* _tmp85_; _tmp59_ = signal_type; _tmp60_ = vala_signal_type_get_handler_type (_tmp59_); _tmp61_ = _tmp60_; _tmp62_ = vala_ccode_delegate_module_generate_delegate_wrapper ((ValaCCodeDelegateModule*) self, m, _tmp61_, (ValaCodeNode*) m); _tmp63_ = _tmp62_; _vala_code_node_unref0 (_tmp61_); wrapper = _tmp63_; _tmp64_ = vala_ccode_identifier_new ("gtk_widget_class_bind_template_callback_full"); _tmp65_ = _tmp64_; _tmp66_ = vala_ccode_function_call_new ((ValaCCodeExpression*) _tmp65_); _tmp67_ = _tmp66_; _vala_ccode_node_unref0 (_tmp65_); call = _tmp67_; _tmp68_ = call; _tmp69_ = vala_ccode_identifier_new ("GTK_WIDGET_CLASS (klass)"); _tmp70_ = _tmp69_; vala_ccode_function_call_add_argument (_tmp68_, (ValaCCodeExpression*) _tmp70_); _vala_ccode_node_unref0 (_tmp70_); _tmp71_ = call; _tmp72_ = handler_name; _tmp73_ = g_strdup_printf ("\"%s\"", _tmp72_); _tmp74_ = _tmp73_; _tmp75_ = vala_ccode_constant_new (_tmp74_); _tmp76_ = _tmp75_; vala_ccode_function_call_add_argument (_tmp71_, (ValaCCodeExpression*) _tmp76_); _vala_ccode_node_unref0 (_tmp76_); _g_free0 
(_tmp74_); _tmp77_ = call; _tmp78_ = wrapper; _tmp79_ = g_strdup_printf ("G_CALLBACK(%s)", _tmp78_); _tmp80_ = _tmp79_; _tmp81_ = vala_ccode_identifier_new (_tmp80_); _tmp82_ = _tmp81_; vala_ccode_function_call_add_argument (_tmp77_, (ValaCCodeExpression*) _tmp82_); _vala_ccode_node_unref0 (_tmp82_); _g_free0 (_tmp80_); _tmp83_ = vala_ccode_base_module_get_ccode ((ValaCCodeBaseModule*) self); _tmp84_ = _tmp83_; _tmp85_ = call; vala_ccode_function_add_expression (_tmp84_, (ValaCCodeExpression*) _tmp85_); _vala_ccode_node_unref0 (call); _g_free0 (wrapper); } _vala_code_node_unref0 (delegate_type); _vala_code_node_unref0 (signal_type); _vala_code_node_unref0 (method_type); } _tmp87_ = prop; if (_tmp87_ != NULL) { _tmp86_ = TRUE; } else { const gchar* _tmp88_; _tmp88_ = callback; _tmp86_ = _tmp88_ != NULL; } if (_tmp86_) { ValaProperty* _tmp89_; ValaCCodeFunctionCall* call = NULL; ValaCCodeIdentifier* _tmp93_; ValaCCodeIdentifier* _tmp94_; ValaCCodeFunctionCall* _tmp95_; ValaCCodeFunctionCall* _tmp96_; ValaCCodeFunctionCall* _tmp97_; ValaCCodeIdentifier* _tmp98_; ValaCCodeIdentifier* _tmp99_; ValaCCodeFunctionCall* _tmp100_; const gchar* _tmp101_; gchar* _tmp102_; gchar* _tmp103_; ValaCCodeConstant* _tmp104_; ValaCCodeConstant* _tmp105_; ValaCCodeFunctionCall* _tmp106_; gchar* _tmp107_; gchar* _tmp108_; gchar* _tmp109_; gchar* _tmp110_; ValaCCodeIdentifier* _tmp111_; ValaCCodeIdentifier* _tmp112_; ValaCCodeFunction* _tmp113_; ValaCCodeFunction* _tmp114_; ValaCCodeFunctionCall* _tmp115_; _tmp89_ = prop; if (_tmp89_ != NULL) { ValaProperty* _tmp90_; ValaCodeContext* _tmp91_; ValaCodeContext* _tmp92_; _tmp90_ = prop; _tmp91_ = vala_ccode_base_module_get_context ((ValaCCodeBaseModule*) self); _tmp92_ = _tmp91_; vala_code_node_check ((ValaCodeNode*) _tmp90_, _tmp92_); } _tmp93_ = vala_ccode_identifier_new ("gtk_widget_class_bind_template_callback_full"); _tmp94_ = _tmp93_; _tmp95_ = vala_ccode_function_call_new ((ValaCCodeExpression*) _tmp94_); _tmp96_ = _tmp95_; _vala_ccode_node_unref0 (_tmp94_); call = _tmp96_; _tmp97_ = call; _tmp98_ = vala_ccode_identifier_new ("GTK_WIDGET_CLASS (klass)"); _tmp99_ = _tmp98_; vala_ccode_function_call_add_argument (_tmp97_, (ValaCCodeExpression*) _tmp99_); _vala_ccode_node_unref0 (_tmp99_); _tmp100_ = call; _tmp101_ = handler_name; _tmp102_ = g_strdup_printf ("\"%s\"", _tmp101_); _tmp103_ = _tmp102_; _tmp104_ = vala_ccode_constant_new (_tmp103_); _tmp105_ = _tmp104_; vala_ccode_function_call_add_argument (_tmp100_, (ValaCCodeExpression*) _tmp105_); _vala_ccode_node_unref0 (_tmp105_); _g_free0 (_tmp103_); _tmp106_ = call; _tmp107_ = vala_get_ccode_name ((ValaCodeNode*) m); _tmp108_ = _tmp107_; _tmp109_ = g_strdup_printf ("G_CALLBACK(%s)", _tmp108_); _tmp110_ = _tmp109_; _tmp111_ = vala_ccode_identifier_new (_tmp110_); _tmp112_ = _tmp111_; vala_ccode_function_call_add_argument (_tmp106_, (ValaCCodeExpression*) _tmp112_); _vala_ccode_node_unref0 (_tmp112_); _g_free0 (_tmp110_); _g_free0 (_tmp108_); _tmp113_ = vala_ccode_base_module_get_ccode ((ValaCCodeBaseModule*) self); _tmp114_ = _tmp113_; _tmp115_ = call; vala_ccode_function_add_expression (_tmp114_, (ValaCCodeExpression*) _tmp115_); _vala_ccode_node_unref0 (call); } vala_ccode_base_module_pop_context ((ValaCCodeBaseModule*) self); _vala_code_node_unref0 (prop); _vala_code_node_unref0 (sig); _g_free0 (callback); _g_free0 (handler_name); _vala_code_node_unref0 (cl); } static void vala_gtk_module_real_end_instance_init (ValaGTypeModule* base, ValaClass* cl) { ValaGtkModule * self; gboolean _tmp0_ = FALSE; gboolean 
_tmp1_ = FALSE; ValaCCodeFunctionCall* call = NULL; ValaCCodeIdentifier* _tmp25_; ValaCCodeIdentifier* _tmp26_; ValaCCodeFunctionCall* _tmp27_; ValaCCodeFunctionCall* _tmp28_; ValaCCodeFunctionCall* _tmp29_; ValaCCodeIdentifier* _tmp30_; ValaCCodeIdentifier* _tmp31_; ValaCCodeFunction* _tmp32_; ValaCCodeFunction* _tmp33_; ValaCCodeFunctionCall* _tmp34_; self = (ValaGtkModule*) base; g_return_if_fail (cl != NULL); if (cl == NULL) { _tmp1_ = TRUE; } else { gboolean _tmp2_; gboolean _tmp3_; _tmp2_ = vala_code_node_get_error ((ValaCodeNode*) cl); _tmp3_ = _tmp2_; _tmp1_ = _tmp3_; } if (_tmp1_) { _tmp0_ = TRUE; } else { _tmp0_ = !vala_gtk_module_is_gtk_template (self, cl); } if (_tmp0_) { return; } { ValaList* _req_list = NULL; ValaList* _tmp4_; gint _req_size = 0; ValaList* _tmp5_; gint _tmp6_; gint _tmp7_; gint _req_index = 0; _tmp4_ = self->priv->current_required_app_classes; _req_list = _tmp4_; _tmp5_ = _req_list; _tmp6_ = vala_collection_get_size ((ValaCollection*) _tmp5_); _tmp7_ = _tmp6_; _req_size = _tmp7_; _req_index = -1; while (TRUE) { gint _tmp8_; gint _tmp9_; ValaClass* req = NULL; ValaList* _tmp10_; gpointer _tmp11_; ValaCCodeFunctionCall* call = NULL; ValaCCodeIdentifier* _tmp12_; ValaCCodeIdentifier* _tmp13_; ValaCCodeFunctionCall* _tmp14_; ValaCCodeFunctionCall* _tmp15_; ValaCCodeFunctionCall* _tmp16_; ValaClass* _tmp17_; ValaDataType* _tmp18_; ValaDataType* _tmp19_; ValaCCodeExpression* _tmp20_; ValaCCodeExpression* _tmp21_; ValaCCodeFunction* _tmp22_; ValaCCodeFunction* _tmp23_; ValaCCodeFunctionCall* _tmp24_; _req_index = _req_index + 1; _tmp8_ = _req_index; _tmp9_ = _req_size; if (!(_tmp8_ < _tmp9_)) { break; } _tmp10_ = _req_list; _tmp11_ = vala_list_get (_tmp10_, _req_index); req = (ValaClass*) _tmp11_; _tmp12_ = vala_ccode_identifier_new ("g_type_ensure"); _tmp13_ = _tmp12_; _tmp14_ = vala_ccode_function_call_new ((ValaCCodeExpression*) _tmp13_); _tmp15_ = _tmp14_; _vala_ccode_node_unref0 (_tmp13_); call = _tmp15_; _tmp16_ = call; _tmp17_ = req; _tmp18_ = vala_semantic_analyzer_get_data_type_for_symbol ((ValaSymbol*) _tmp17_); _tmp19_ = _tmp18_; _tmp20_ = vala_ccode_base_module_get_type_id_expression ((ValaCCodeBaseModule*) self, _tmp19_, FALSE); _tmp21_ = _tmp20_; vala_ccode_function_call_add_argument (_tmp16_, _tmp21_); _vala_ccode_node_unref0 (_tmp21_); _vala_code_node_unref0 (_tmp19_); _tmp22_ = vala_ccode_base_module_get_ccode ((ValaCCodeBaseModule*) self); _tmp23_ = _tmp22_; _tmp24_ = call; vala_ccode_function_add_expression (_tmp23_, (ValaCCodeExpression*) _tmp24_); _vala_ccode_node_unref0 (call); _vala_code_node_unref0 (req); } } _tmp25_ = vala_ccode_identifier_new ("gtk_widget_init_template"); _tmp26_ = _tmp25_; _tmp27_ = vala_ccode_function_call_new ((ValaCCodeExpression*) _tmp26_); _tmp28_ = _tmp27_; _vala_ccode_node_unref0 (_tmp26_); call = _tmp28_; _tmp29_ = call; _tmp30_ = vala_ccode_identifier_new ("GTK_WIDGET (self)"); _tmp31_ = _tmp30_; vala_ccode_function_call_add_argument (_tmp29_, (ValaCCodeExpression*) _tmp31_); _vala_ccode_node_unref0 (_tmp31_); _tmp32_ = vala_ccode_base_module_get_ccode ((ValaCCodeBaseModule*) self); _tmp33_ = _tmp32_; _tmp34_ = call; vala_ccode_function_add_expression (_tmp33_, (ValaCCodeExpression*) _tmp34_); _vala_ccode_node_unref0 (call); } ValaGtkModule* vala_gtk_module_construct (GType object_type) { ValaGtkModule* self = NULL; self = (ValaGtkModule*) vala_gsignal_module_construct (object_type); return self; } ValaGtkModule* vala_gtk_module_new (void) { return vala_gtk_module_construct (VALA_TYPE_GTK_MODULE); } static void 
vala_gtk_module_class_init (ValaGtkModuleClass * klass, gpointer klass_data) { vala_gtk_module_parent_class = g_type_class_peek_parent (klass); ((ValaCodeVisitorClass *) klass)->finalize = vala_gtk_module_finalize; g_type_class_adjust_private_offset (klass, &ValaGtkModule_private_offset); ((ValaGTypeModuleClass *) klass)->generate_class_init = (void (*) (ValaGTypeModule*, ValaClass*)) vala_gtk_module_real_generate_class_init; ((ValaCodeVisitorClass *) klass)->visit_property = (void (*) (ValaCodeVisitor*, ValaProperty*)) vala_gtk_module_real_visit_property; ((ValaCodeVisitorClass *) klass)->visit_field = (void (*) (ValaCodeVisitor*, ValaField*)) vala_gtk_module_real_visit_field; ((ValaCodeVisitorClass *) klass)->visit_method = (void (*) (ValaCodeVisitor*, ValaMethod*)) vala_gtk_module_real_visit_method; ((ValaGTypeModuleClass *) klass)->end_instance_init = (void (*) (ValaGTypeModule*, ValaClass*)) vala_gtk_module_real_end_instance_init; } static void vala_gtk_module_instance_init (ValaGtkModule * self, gpointer klass) { GHashFunc _tmp0_; GEqualFunc _tmp1_; GEqualFunc _tmp2_; ValaHashMap* _tmp3_; GHashFunc _tmp4_; GEqualFunc _tmp5_; GEqualFunc _tmp6_; ValaHashMap* _tmp7_; GHashFunc _tmp8_; GEqualFunc _tmp9_; GEqualFunc _tmp10_; ValaHashMap* _tmp11_; GHashFunc _tmp12_; GEqualFunc _tmp13_; GEqualFunc _tmp14_; ValaHashMap* _tmp15_; GEqualFunc _tmp16_; ValaArrayList* _tmp17_; self->priv = vala_gtk_module_get_instance_private (self); self->priv->type_id_to_vala_map = NULL; self->priv->cclass_to_vala_map = NULL; self->priv->gresource_to_file_map = NULL; _tmp0_ = g_str_hash; _tmp1_ = g_str_equal; _tmp2_ = g_direct_equal; _tmp3_ = vala_hash_map_new (G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, _tmp0_, _tmp1_, _tmp2_); self->priv->handler_map = _tmp3_; _tmp4_ = g_str_hash; _tmp5_ = g_str_equal; _tmp6_ = g_direct_equal; _tmp7_ = vala_hash_map_new (G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, VALA_TYPE_PROPERTY, (GBoxedCopyFunc) vala_code_node_ref, (GDestroyNotify) vala_code_node_unref, _tmp4_, _tmp5_, _tmp6_); self->priv->current_handler_to_property_map = _tmp7_; _tmp8_ = g_str_hash; _tmp9_ = g_str_equal; _tmp10_ = g_direct_equal; _tmp11_ = vala_hash_map_new (G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, VALA_TYPE_SIGNAL, (GBoxedCopyFunc) vala_code_node_ref, (GDestroyNotify) vala_code_node_unref, _tmp8_, _tmp9_, _tmp10_); self->priv->current_handler_to_signal_map = _tmp11_; _tmp12_ = g_str_hash; _tmp13_ = g_str_equal; _tmp14_ = g_direct_equal; _tmp15_ = vala_hash_map_new (G_TYPE_STRING, (GBoxedCopyFunc) g_strdup, (GDestroyNotify) g_free, VALA_TYPE_CLASS, (GBoxedCopyFunc) vala_code_node_ref, (GDestroyNotify) vala_code_node_unref, _tmp12_, _tmp13_, _tmp14_); self->priv->current_child_to_class_map = _tmp15_; _tmp16_ = g_direct_equal; _tmp17_ = vala_array_list_new (VALA_TYPE_CLASS, (GBoxedCopyFunc) vala_code_node_ref, (GDestroyNotify) vala_code_node_unref, _tmp16_); self->priv->current_required_app_classes = (ValaList*) _tmp17_; } static void vala_gtk_module_finalize (ValaCodeVisitor * obj) { ValaGtkModule * self; self = G_TYPE_CHECK_INSTANCE_CAST (obj, VALA_TYPE_GTK_MODULE, ValaGtkModule); _vala_map_unref0 (self->priv->type_id_to_vala_map); _vala_map_unref0 (self->priv->cclass_to_vala_map); _vala_map_unref0 (self->priv->gresource_to_file_map); _vala_map_unref0 (self->priv->handler_map); _vala_map_unref0 (self->priv->current_handler_to_property_map); _vala_map_unref0 
(self->priv->current_handler_to_signal_map); _vala_map_unref0 (self->priv->current_child_to_class_map); _vala_iterable_unref0 (self->priv->current_required_app_classes); VALA_CODE_VISITOR_CLASS (vala_gtk_module_parent_class)->finalize (obj); } static GType vala_gtk_module_get_type_once (void) { static const GTypeInfo g_define_type_info = { sizeof (ValaGtkModuleClass), (GBaseInitFunc) NULL, (GBaseFinalizeFunc) NULL, (GClassInitFunc) vala_gtk_module_class_init, (GClassFinalizeFunc) NULL, NULL, sizeof (ValaGtkModule), 0, (GInstanceInitFunc) vala_gtk_module_instance_init, NULL }; GType vala_gtk_module_type_id; vala_gtk_module_type_id = g_type_register_static (VALA_TYPE_GSIGNAL_MODULE, "ValaGtkModule", &g_define_type_info, 0); ValaGtkModule_private_offset = g_type_add_instance_private (vala_gtk_module_type_id, sizeof (ValaGtkModulePrivate)); return vala_gtk_module_type_id; } GType vala_gtk_module_get_type (void) { static volatile gsize vala_gtk_module_type_id__volatile = 0; if (g_once_init_enter (&vala_gtk_module_type_id__volatile)) { GType vala_gtk_module_type_id; vala_gtk_module_type_id = vala_gtk_module_get_type_once (); g_once_init_leave (&vala_gtk_module_type_id__volatile, vala_gtk_module_type_id); } return vala_gtk_module_type_id__volatile; }
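For orientation, the machine-generated binding code above reduces to a short sequence of GtkWidget template calls. A minimal hand-written sketch follows; MyWidget, the .ui resource path, and on_row_activated are hypothetical names, but the GTK entry points (gtk_widget_class_bind_template_child_full, gtk_widget_class_bind_template_callback_full, g_type_ensure, gtk_widget_init_template) are the ones the generated code actually invokes.

/* Hand-written sketch of the binding sequence emitted above; names marked
 * "hypothetical" are illustrative only. Compiles as C or C++. */
#include <gtk/gtk.h>

typedef struct {
    GtkWidget parent_instance;
    GtkWidget *header;                    /* a [GtkChild]-style template child */
} MyWidget;

typedef struct {
    GtkWidgetClass parent_class;
} MyWidgetClass;

static void
on_row_activated (MyWidget *self)         /* a [GtkCallback]-style handler */
{
    (void) self;
}

static void
my_widget_class_init (MyWidgetClass *klass)
{
    gtk_widget_class_set_template_from_resource (GTK_WIDGET_CLASS (klass),
                                                 "/org/example/my_widget.ui");
    /* one call per bound child: name in the .ui file, internal_child flag,
     * struct offset -- mirrors the G_STRUCT_OFFSET pair generated above */
    gtk_widget_class_bind_template_child_full (GTK_WIDGET_CLASS (klass),
                                               "header", FALSE,
                                               G_STRUCT_OFFSET (MyWidget, header));
    /* one call per handler -- mirrors the calls generated for [GtkCallback] */
    gtk_widget_class_bind_template_callback_full (GTK_WIDGET_CLASS (klass),
                                                  "on_row_activated",
                                                  G_CALLBACK (on_row_activated));
}

static void
my_widget_instance_init (MyWidget *self)
{
    g_type_ensure (GTK_TYPE_BOX);                 /* required classes, as in the
                                                     g_type_ensure loop above */
    gtk_widget_init_template (GTK_WIDGET (self)); /* mirrors end_instance_init */
}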
utf-8
1
LGPL-2.1+
2016-2017 Rico Tzschichholz 2008-2015 Florian Brosch 2010-2011 Luca Bruno 2010 Marc-Andre Lureau 2006-2012 Jürg Billeter 2006-2009 Raffaele Sandrini 2006-2007 Michael Lawrence 2007 Alberto Ruiz 2003-2005 Novell, Inc. 2005 Matthias Clasen 2005 David Waite 2001-2003 Mike Kestner 2003 Martin Willemoes Hansen
eclipse-cdt-9.9.0/core/org.eclipse.cdt.ui.tests/resources/constalign/rightUnchanged/After.cpp
void f(int const * const);

void f(int const * const) {
}

int main(int argc, char **argv) {
	int const &dsa { 2 };
	int const j { 8 };
	int const * const klz;
	int const l { 2 };
	bool yes = false;
	int const k { 42 };
	int const volatile m = 21;
	using volatile_int = int;
	volatile_int const volatile v = 89;
	using const_int = int;
	const_int const w = 73;
}
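For contrast, here is a hypothetical left-const spelling of a few of the same declarations — the form that CDT's "align const to the right" formatter option would rewrite into the text above. This is illustrative only, not the actual Before.cpp fixture of this test.

const int j { 8 };
const int *const klz;
const volatile int m = 21;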
utf-8
1
EPL-2.0
2013 AdaCore and others 2009-2016 Alena Laskavaia 2009-2016 Alena Laskavaia and others 2009, 2015 Alena Laskavaia, Tomasz Wesolowski 2008-2009, 2011-2013, 2015-2016 Andrew Gvozdev 2008-2019 Andrew Gvozdev and others 2009, 2013 Andrew Gvozdev (Quoin Inc.) 2005, 2009-2013 Andrew Gvozdev (Quoin Inc.) and others 2011-2012, 2014-2015 Anton Gorenkov 2011-2013, 2015 Anton Gorenkov and others 2007-2009, 2012, 2015-2016 ARM Limited and others 2010, 2013 Atmel Corporation and others 2010, 2011 Axel Mueller and others 2006, 2017 Ben Konrath <ben@bagu.org> and others 2003, 2015 Berthold Daum and others 2004, 2011, 2016 BitMethods Inc and others 2014 BlackBerry Limited and others 2008-2013, 2015-2017 Broadcom Corporation and others 2010-2012, 2014 CodeSourcery and others 2015 Colin Leitner 2016 COSEDA Technologies GmbH 2016 COSEDA Technologies GmbH and others 2007, 2010 Dakshinamurthy Karra, IBM Corporation and others 2010 Eclipse CDT Project and others 2000, 2010, 2012, 2016 Eclipse contributors and others 2015 Ericson and others 2008-2009, 2011, 2013-2017 Ericsson 2007, 2013-2016 Ericsson AB and others 2006-2018 Ericsson and others 2015, 2017 Ericsson, EfficiOS Inc. and others 2009, 2011, 2013, 2014, Ericsson, Inc. and others 2003-2011 Fabrizio Giustina 2018 Felix Morgner 2005, 2007, 2010, 2012, 2015, 2016 Freescale Semiconductor, Inc 2007-2016 Freescale Semiconductor, Inc. and others 2010-2011, 2013, 2015 Gil Barash 2011 Google Inc 2007-2017 Google, Inc and others 2016, 2018 IAR Systems AB 2000-2019 IBM Corp. and others 2000-2001, 2005-2006, 2012-2015 IBM Corporation 2000, 2012 IBM Corporation, Freescale Semiconductor and others 2000, 2003, 2009, 2011, 2013 IBM Corporation, QNX Software Systems, and others 2008, 2009, 2014 IBM Wind River Systems, Inc. and others 2016 Ingenico 2005, 2011 Innoopract Informationssysteme GmbH and others 2014, 2017 Institute for Software 2006-2019 Institute for Software, HSR Hochschule fuer Technik 2005-2007 Intel Corporation 2004-2019 Intel Corporation and others 2007-2010, 2012, 2015 Intel Corporation, QNX Software Systems, and others 2011 Jeff Johnston (Red Hat Inc.) and others 2011-2012, 2014 Jens Elmenthaler and others 2018 Kichwa Coders 2016 Kichwa Coders AB and others 2015-2019 Kichwa Coders and others 2019 Kichwa Coders Canada and others 2011, 2014, 2017 Kichwa Coders Ltd and others 2018 Manish Khurana , Nathan Ridge and others 2011-2012, 2018-2019 Marc-Andre Laperle 2010-2016, 2018-2019 Marc-Andre Laperle and others 2012 Marc-Andre Laperle, Inc. and others 2019 Marco Stornelli 2012, 2014, 2016 Mathias Kunter and others 2010-2012 Meisam Fathi and others 2011-2016, 2018-2019 Mentor Graphics and others 2015 Mentor Graphics Corporation 2010, 2012, 2015, 2016 Mentor Graphics Corporation and others 2008-2009, 2015 MontaVista Software, Inc. and others 2012-2017 Nathan Ridge 2013-2018 Nathan Ridge and others 2006-2016, 2018-2019 Nokia and others 2005, 2008-2009, 2011-2012, 2015-2016, Nokia Corporation 2008, 2015 NOKIA Inc 2010 Nokia, Inc. and others 2010-2016, Nokia Siemens Networks Oyj, Finland 2010, 2013 Nokia Siemens Networks Oyj, Finland and others 2006, 2008, 2014, 2016 Norbert Ploett and others 2011 Obeo 2006-2007, 2009-2010, 2012, 2016, 2018 PalmSource, Inc. and others 2006, 2010 PalmSource, Inc., Wind River Systems, Inc. 
and others 2015 Patrick Hofer 2011-2012, 2015 Patrick Hofer and others 2017 Pavel Marek 2000, 2002-2019 QNX Software Systems and others 2010, 2015 QNX Software Systems, Freescale Semiconductor and others 2002-2003, 2005-2006, 2009-2016 QNX Software Systems Ltd. 2002-2004, 2010-2011, 2016 Rational Software Corporation and others 2006-2018 Red Hat, Inc 2006-2019 Red Hat Inc. and others 2014, 2017 Renesas Electronics and others 2010, 2012 Sage Electronic Engineering and others 2012, 2014 Sage Electronic Engineering, LLC. and others 2013 Sebastian Bauer and others 2013 Serge Beauchamp and others 2010, 2013 Severin Gehwolf 2006, 2010-2011, 2013, Siemens AG 2006, 2012, 2017 Siemens AG and others 2017 Simeon Andreev and others 2000, 2004-2005, 2019 Space Codesign Systems and others 2011 Stefan Ghiaus 2011, 2017 Stefan Ghiaus and others 2017, 2018 STMicroelectronics and others 2006, 2015 Symbian Corporation and others 2005-2012 Symbian Ltd and others 2006, 2012-2013 Symbian Software and others 2005-2008, 2014 Symbian Software Limited 2005, 2007-2017 Symbian Software Limited and others 2006-2017 Symbian Software Systems and others 2011 Tensilica and others 2009, 2012 Texas Instruments 2010, 2015-2016 Texas Instruments, Freescale Semiconductor and others 2005-2006, 2009-2012, 2014-2016 Texas Instruments Inc. and others 2009, 2011-2016 Tilera Corporation and others 2004, 2006, 2010-2012, 2015, 2017 TimeSys Corporation and others 2010, 2012-2013, 2017 Tomasz Wesolowski 2010-2013, 2015-2016 Tomasz Wesolowski and others 2010, 2017 TUBITAK BILGEM-ITI and others 2010, 2011 University of Applied Sciences Rapperswil (HSR) 2010 University of Florida 2010-2011 University of Florida and others 2012 Veaceslav Bacu (Freescale Semiconductor Inc.) and others 2010-2011 Verigy and others 2018 Vlad Ivanov 2014 vogella GmbH and others 2015 Wei Li 2004-2006, 2008-2009, 2012, 2014-2015, 2018, Wind River Systems, Inc 2002, 2004-2018 Wind River Systems, Inc. and others 2006, 2015 Wind River Systems, Nokia and others 2009, 2014 Zeligsoft Limited and others
clhep-2.1.4.1+dfsg/Matrix/src/SymMatrix.cc
// -*- C++ -*- // --------------------------------------------------------------------------- // // This file is a part of the CLHEP - a Class Library for High Energy Physics. // #ifdef GNUPRAGMA #pragma implementation #endif #include <string.h> #include <float.h> // for DBL_EPSILON #include "CLHEP/Matrix/defs.h" #include "CLHEP/Random/Random.h" #include "CLHEP/Matrix/SymMatrix.h" #include "CLHEP/Matrix/Matrix.h" #include "CLHEP/Matrix/DiagMatrix.h" #include "CLHEP/Matrix/Vector.h" #ifdef HEP_DEBUG_INLINE #include "CLHEP/Matrix/SymMatrix.icc" #endif namespace CLHEP { // Simple operation for all elements #define SIMPLE_UOP(OPER) \ HepMatrix::mIter a=m.begin(); \ HepMatrix::mIter e=m.begin()+num_size(); \ for(;a<e; a++) (*a) OPER t; #define SIMPLE_BOP(OPER) \ HepMatrix::mIter a=m.begin(); \ HepMatrix::mcIter b=hm2.m.begin(); \ HepMatrix::mcIter e=m.begin()+num_size(); \ for(;a<e; a++, b++) (*a) OPER (*b); #define SIMPLE_TOP(OPER) \ HepMatrix::mcIter a=hm1.m.begin(); \ HepMatrix::mcIter b=hm2.m.begin(); \ HepMatrix::mIter t=mret.m.begin(); \ HepMatrix::mcIter e=hm1.m.begin()+hm1.num_size(); \ for( ;a<e; a++, b++, t++) (*t) = (*a) OPER (*b); #define CHK_DIM_2(r1,r2,c1,c2,fun) \ if (r1!=r2 || c1!=c2) { \ HepGenMatrix::error("Range error in SymMatrix function " #fun "(1)."); \ } #define CHK_DIM_1(c1,r2,fun) \ if (c1!=r2) { \ HepGenMatrix::error("Range error in SymMatrix function " #fun "(2)."); \ } // Constructors. (Default constructors are inlined and in .icc file) HepSymMatrix::HepSymMatrix(int p) : m(p*(p+1)/2), nrow(p) { size_ = nrow * (nrow+1) / 2; m.assign(size_,0); } HepSymMatrix::HepSymMatrix(int p, int init) : m(p*(p+1)/2), nrow(p) { size_ = nrow * (nrow+1) / 2; m.assign(size_,0); switch(init) { case 0: break; case 1: { HepMatrix::mIter a; for(int i=0;i<nrow;++i) { a = m.begin() + (i+1)*i/2 + i; *a = 1.0; } break; } default: error("SymMatrix: initialization must be either 0 or 1."); } } HepSymMatrix::HepSymMatrix(int p, HepRandom &r) : m(p*(p+1)/2), nrow(p) { size_ = nrow * (nrow+1) / 2; HepMatrix::mIter a = m.begin(); HepMatrix::mIter b = m.begin() + size_; for(;a<b;a++) *a = r(); } // // Destructor // HepSymMatrix::~HepSymMatrix() { } HepSymMatrix::HepSymMatrix(const HepSymMatrix &hm1) : HepGenMatrix(hm1), m(hm1.size_), nrow(hm1.nrow), size_(hm1.size_) { m = hm1.m; } HepSymMatrix::HepSymMatrix(const HepDiagMatrix &hm1) : m(hm1.nrow*(hm1.nrow+1)/2), nrow(hm1.nrow) { size_ = nrow * (nrow+1) / 2; int n = num_row(); m.assign(size_,0); HepMatrix::mIter mrr = m.begin(); HepMatrix::mcIter mr = hm1.m.begin(); for(int r=1;r<=n;r++) { *mrr = *(mr++); if(r<n) mrr += (r+1); } } // // // Sub matrix // // HepSymMatrix HepSymMatrix::sub(int min_row, int max_row) const #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(max_row-min_row+1); { #else { HepSymMatrix mret(max_row-min_row+1); #endif if(max_row > num_row()) error("HepSymMatrix::sub: Index out of range"); HepMatrix::mIter a = mret.m.begin(); HepMatrix::mcIter b1 = m.begin() + (min_row+2)*(min_row-1)/2; int rowsize=mret.num_row(); for(int irow=1; irow<=rowsize; irow++) { HepMatrix::mcIter b = b1; for(int icol=0; icol<irow; ++icol) { *(a++) = *(b++); } if(irow<rowsize) b1 += irow+min_row-1; } return mret; } HepSymMatrix HepSymMatrix::sub(int min_row, int max_row) { HepSymMatrix mret(max_row-min_row+1); if(max_row > num_row()) error("HepSymMatrix::sub: Index out of range"); HepMatrix::mIter a = mret.m.begin(); HepMatrix::mIter b1 = m.begin() + (min_row+2)*(min_row-1)/2; int rowsize=mret.num_row(); for(int irow=1; irow<=rowsize; irow++) { 
HepMatrix::mIter b = b1; for(int icol=0; icol<irow; ++icol) { *(a++) = *(b++); } if(irow<rowsize) b1 += irow+min_row-1; } return mret; } void HepSymMatrix::sub(int row,const HepSymMatrix &hm1) { if(row <1 || row+hm1.num_row()-1 > num_row() ) error("HepSymMatrix::sub: Index out of range"); HepMatrix::mcIter a = hm1.m.begin(); HepMatrix::mIter b1 = m.begin() + (row+2)*(row-1)/2; int rowsize=hm1.num_row(); for(int irow=1; irow<=rowsize; ++irow) { HepMatrix::mIter b = b1; for(int icol=0; icol<irow; ++icol) { *(b++) = *(a++); } if(irow<rowsize) b1 += irow+row-1; } } // // Direct sum of two matricies // HepSymMatrix dsum(const HepSymMatrix &hm1, const HepSymMatrix &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1.num_row() + hm2.num_row(), 0); { #else { HepSymMatrix mret(hm1.num_row() + hm2.num_row(), 0); #endif mret.sub(1,hm1); mret.sub(hm1.num_row()+1,hm2); return mret; } /* ----------------------------------------------------------------------- This section contains support routines for matrix.h. This section contains The two argument functions +,-. They call the copy constructor and +=,-=. ----------------------------------------------------------------------- */ HepSymMatrix HepSymMatrix::operator- () const #ifdef HEP_GNU_OPTIMIZED_RETURN return hm2(nrow); { #else { HepSymMatrix hm2(nrow); #endif HepMatrix::mcIter a=m.begin(); HepMatrix::mIter b=hm2.m.begin(); HepMatrix::mcIter e=m.begin()+num_size(); for(;a<e; a++, b++) (*b) = -(*a); return hm2; } HepMatrix operator+(const HepMatrix &hm1,const HepSymMatrix &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1); { #else { HepMatrix mret(hm1); #endif CHK_DIM_2(hm1.num_row(),hm2.num_row(), hm1.num_col(),hm2.num_col(),+); mret += hm2; return mret; } HepMatrix operator+(const HepSymMatrix &hm1,const HepMatrix &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm2); { #else { HepMatrix mret(hm2); #endif CHK_DIM_2(hm1.num_row(),hm2.num_row(),hm1.num_col(),hm2.num_col(),+); mret += hm1; return mret; } HepSymMatrix operator+(const HepSymMatrix &hm1,const HepSymMatrix &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1.nrow); { #else { HepSymMatrix mret(hm1.nrow); #endif CHK_DIM_1(hm1.nrow, hm2.nrow,+); SIMPLE_TOP(+) return mret; } // // operator - // HepMatrix operator-(const HepMatrix &hm1,const HepSymMatrix &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1); { #else { HepMatrix mret(hm1); #endif CHK_DIM_2(hm1.num_row(),hm2.num_row(), hm1.num_col(),hm2.num_col(),-); mret -= hm2; return mret; } HepMatrix operator-(const HepSymMatrix &hm1,const HepMatrix &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1); { #else { HepMatrix mret(hm1); #endif CHK_DIM_2(hm1.num_row(),hm2.num_row(), hm1.num_col(),hm2.num_col(),-); mret -= hm2; return mret; } HepSymMatrix operator-(const HepSymMatrix &hm1,const HepSymMatrix &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1.num_row()); { #else { HepSymMatrix mret(hm1.num_row()); #endif CHK_DIM_1(hm1.num_row(),hm2.num_row(),-); SIMPLE_TOP(-) return mret; } /* ----------------------------------------------------------------------- This section contains support routines for matrix.h. This file contains The two argument functions *,/. They call copy constructor and then /=,*=. 
----------------------------------------------------------------------- */ HepSymMatrix operator/( const HepSymMatrix &hm1,double t) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1); { #else { HepSymMatrix mret(hm1); #endif mret /= t; return mret; } HepSymMatrix operator*(const HepSymMatrix &hm1,double t) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1); { #else { HepSymMatrix mret(hm1); #endif mret *= t; return mret; } HepSymMatrix operator*(double t,const HepSymMatrix &hm1) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1); { #else { HepSymMatrix mret(hm1); #endif mret *= t; return mret; } HepMatrix operator*(const HepMatrix &hm1,const HepSymMatrix &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1.num_row(),hm2.num_col()); { #else { HepMatrix mret(hm1.num_row(),hm2.num_col()); #endif CHK_DIM_1(hm1.num_col(),hm2.num_row(),*); HepMatrix::mcIter mit1, mit2, sp,snp; //mit2=0 double temp; HepMatrix::mIter mir=mret.m.begin(); for(mit1=hm1.m.begin(); mit1<hm1.m.begin()+hm1.num_row()*hm1.num_col(); mit1 = mit2) { snp=hm2.m.begin(); for(int step=1;step<=hm2.num_row();++step) { mit2=mit1; sp=snp; snp+=step; temp=0; while(sp<snp) temp+=*(sp++)*(*(mit2++)); if( step<hm2.num_row() ) { // only if we aren't on the last row sp+=step-1; for(int stept=step+1;stept<=hm2.num_row();stept++) { temp+=*sp*(*(mit2++)); if(stept<hm2.num_row()) sp+=stept; } } // if(step *(mir++)=temp; } // for(step } // for(mit1 return mret; } HepMatrix operator*(const HepSymMatrix &hm1,const HepMatrix &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1.num_row(),hm2.num_col()); { #else { HepMatrix mret(hm1.num_row(),hm2.num_col()); #endif CHK_DIM_1(hm1.num_col(),hm2.num_row(),*); int step,stept; HepMatrix::mcIter mit1,mit2,sp,snp; double temp; HepMatrix::mIter mir=mret.m.begin(); for(step=1,snp=hm1.m.begin();step<=hm1.num_row();snp+=step++) for(mit1=hm2.m.begin();mit1<hm2.m.begin()+hm2.num_col();mit1++) { mit2=mit1; sp=snp; temp=0; while(sp<snp+step) { temp+=*mit2*(*(sp++)); if( hm2.num_size()-(mit2-hm2.m.begin())>hm2.num_col() ){ mit2+=hm2.num_col(); } } if(step<hm1.num_row()) { // only if we aren't on the last row sp+=step-1; for(stept=step+1;stept<=hm1.num_row();stept++) { temp+=*mit2*(*sp); if(stept<hm1.num_row()) { mit2+=hm2.num_col(); sp+=stept; } } } // if(step *(mir++)=temp; } // for(mit1 return mret; } HepMatrix operator*(const HepSymMatrix &hm1,const HepSymMatrix &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1.num_row(),hm1.num_row()); { #else { HepMatrix mret(hm1.num_row(),hm1.num_row()); #endif CHK_DIM_1(hm1.num_col(),hm2.num_row(),*); int step1,stept1,step2,stept2; HepMatrix::mcIter snp1,sp1,snp2,sp2; double temp; HepMatrix::mIter mr = mret.m.begin(); snp1=hm1.m.begin(); for(step1=1;step1<=hm1.num_row();++step1) { snp2=hm2.m.begin(); for(step2=1;step2<=hm2.num_row();++step2) { sp1=snp1; sp2=snp2; snp2+=step2; temp=0; if(step1<step2) { while(sp1<snp1+step1) { temp+=(*(sp1++))*(*(sp2++)); } sp1+=step1-1; for(stept1=step1+1;stept1!=step2+1;++stept1) { temp+=(*sp1)*(*(sp2++)); if(stept1<hm2.num_row()) sp1+=stept1; } if(step2<hm2.num_row()) { // only if we aren't on the last row sp2+=step2-1; for(stept2=step2+1;stept2<=hm2.num_row();stept1++,stept2++) { temp+=(*sp1)*(*sp2); if(stept2<hm2.num_row()) { sp1+=stept1; sp2+=stept2; } } // for(stept2 } // if(step2 } // step1<step2 else { while(sp2<snp2) { temp+=(*(sp1++))*(*(sp2++)); } if(step2<hm2.num_row()) { // only if we aren't on the last row sp2+=step2-1; for(stept2=step2+1;stept2!=step1+1;stept2++) { temp+=(*(sp1++))*(*sp2); if(stept2<hm1.num_row()) 
sp2+=stept2; } if(step1<hm1.num_row()) { // only if we aren't on the last row sp1+=step1-1; for(stept1=step1+1;stept1<=hm1.num_row();stept1++,stept2++) { temp+=(*sp1)*(*sp2); if(stept1<hm1.num_row()) { sp1+=stept1; sp2+=stept2; } } // for(stept1 } // if(step1 } // if(step2 } // else *(mr++)=temp; } // for(step2 if(step1<hm1.num_row()) snp1+=step1; } // for(step1 return mret; } HepVector operator*(const HepSymMatrix &hm1,const HepVector &hm2) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1.num_row()); { #else { HepVector mret(hm1.num_row()); #endif CHK_DIM_1(hm1.num_col(),hm2.num_row(),*); HepMatrix::mcIter sp,snp,vpt; double temp; int step,stept; HepMatrix::mIter vrp=mret.m.begin(); for(step=1,snp=hm1.m.begin();step<=hm1.num_row();++step) { sp=snp; vpt=hm2.m.begin(); snp+=step; temp=0; while(sp<snp) temp+=*(sp++)*(*(vpt++)); if(step<hm1.num_row()) sp+=step-1; for(stept=step+1;stept<=hm1.num_row();stept++) { temp+=*sp*(*(vpt++)); if(stept<hm1.num_row()) sp+=stept; } *(vrp++)=temp; } // for(step return mret; } HepSymMatrix vT_times_v(const HepVector &v) #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(v.num_row()); { #else { HepSymMatrix mret(v.num_row()); #endif HepMatrix::mIter mr=mret.m.begin(); HepMatrix::mcIter vt1,vt2; for(vt1=v.m.begin();vt1<v.m.begin()+v.num_row();vt1++) for(vt2=v.m.begin();vt2<=vt1;vt2++) *(mr++)=(*vt1)*(*vt2); return mret; } /* ----------------------------------------------------------------------- This section contains the assignment and inplace operators =,+=,-=,*=,/=. ----------------------------------------------------------------------- */ HepMatrix & HepMatrix::operator+=(const HepSymMatrix &hm2) { CHK_DIM_2(num_row(),hm2.num_row(),num_col(),hm2.num_col(),+=); HepMatrix::mcIter sjk = hm2.m.begin(); // j >= k for(int j=0; j!=nrow; ++j) { for(int k=0; k<=j; ++k) { m[j*ncol+k] += *sjk; // make sure this is not a diagonal element if(k!=j) m[k*nrow+j] += *sjk; ++sjk; } } return (*this); } HepSymMatrix & HepSymMatrix::operator+=(const HepSymMatrix &hm2) { CHK_DIM_2(num_row(),hm2.num_row(),num_col(),hm2.num_col(),+=); SIMPLE_BOP(+=) return (*this); } HepMatrix & HepMatrix::operator-=(const HepSymMatrix &hm2) { CHK_DIM_2(num_row(),hm2.num_row(),num_col(),hm2.num_col(),-=); HepMatrix::mcIter sjk = hm2.m.begin(); // j >= k for(int j=0; j!=nrow; ++j) { for(int k=0; k<=j; ++k) { m[j*ncol+k] -= *sjk; // make sure this is not a diagonal element if(k!=j) m[k*nrow+j] -= *sjk; ++sjk; } } return (*this); } HepSymMatrix & HepSymMatrix::operator-=(const HepSymMatrix &hm2) { CHK_DIM_2(num_row(),hm2.num_row(),num_col(),hm2.num_col(),-=); SIMPLE_BOP(-=) return (*this); } HepSymMatrix & HepSymMatrix::operator/=(double t) { SIMPLE_UOP(/=) return (*this); } HepSymMatrix & HepSymMatrix::operator*=(double t) { SIMPLE_UOP(*=) return (*this); } HepMatrix & HepMatrix::operator=(const HepSymMatrix &hm1) { // define size, rows, and columns of *this nrow = ncol = hm1.nrow; if(nrow*ncol != size_) { size_ = nrow*ncol; m.resize(size_); } // begin copy mcIter sjk = hm1.m.begin(); // j >= k for(int j=0; j!=nrow; ++j) { for(int k=0; k<=j; ++k) { m[j*ncol+k] = *sjk; // we could copy the diagonal element twice or check // doing the check may be a tiny bit faster, // so we choose that option for now if(k!=j) m[k*nrow+j] = *sjk; ++sjk; } } return (*this); } HepSymMatrix & HepSymMatrix::operator=(const HepSymMatrix &hm1) { if(hm1.nrow != nrow) { nrow = hm1.nrow; size_ = hm1.size_; m.resize(size_); } m = hm1.m; return (*this); } HepSymMatrix & HepSymMatrix::operator=(const HepDiagMatrix &hm1) { if(hm1.nrow != 
nrow) { nrow = hm1.nrow; size_ = nrow * (nrow+1) / 2; m.resize(size_); } m.assign(size_,0); HepMatrix::mIter mrr = m.begin(); HepMatrix::mcIter mr = hm1.m.begin(); for(int r=1; r<=nrow; r++) { *mrr = *(mr++); if(r<nrow) mrr += (r+1); } return (*this); } // Print the Matrix. std::ostream& operator<<(std::ostream &os, const HepSymMatrix &q) { os << std::endl; /* Fixed format needs 3 extra characters for field, while scientific needs 7 */ int width; if(os.flags() & std::ios::fixed) width = os.precision()+3; else width = os.precision()+7; for(int irow = 1; irow<= q.num_row(); irow++) { for(int icol = 1; icol <= q.num_col(); icol++) { os.width(width); os << q(irow,icol) << " "; } os << std::endl; } return os; } HepSymMatrix HepSymMatrix:: apply(double (*f)(double, int, int)) const #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(num_row()); { #else { HepSymMatrix mret(num_row()); #endif HepMatrix::mcIter a = m.begin(); HepMatrix::mIter b = mret.m.begin(); for(int ir=1;ir<=num_row();ir++) { for(int ic=1;ic<=ir;ic++) { *(b++) = (*f)(*(a++), ir, ic); } } return mret; } void HepSymMatrix::assign (const HepMatrix &hm1) { if(hm1.nrow != nrow) { nrow = hm1.nrow; size_ = nrow * (nrow+1) / 2; m.resize(size_); } HepMatrix::mcIter a = hm1.m.begin(); HepMatrix::mIter b = m.begin(); for(int r=1;r<=nrow;r++) { HepMatrix::mcIter d = a; for(int c=1;c<=r;c++) { *(b++) = *(d++); } if(r<nrow) a += nrow; } } HepSymMatrix HepSymMatrix::similarity(const HepMatrix &hm1) const #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1.num_row()); { #else { HepSymMatrix mret(hm1.num_row()); #endif HepMatrix temp = hm1*(*this); // If hm1*(*this) has correct dimensions, then so will the hm1.T multiplication. // So there is no need to check dimensions again. int n = hm1.num_col(); HepMatrix::mIter mr = mret.m.begin(); HepMatrix::mIter tempr1 = temp.m.begin(); for(int r=1;r<=mret.num_row();r++) { HepMatrix::mcIter hm1c1 = hm1.m.begin(); for(int c=1;c<=r;c++) { register double tmp = 0.0; HepMatrix::mIter tempri = tempr1; HepMatrix::mcIter hm1ci = hm1c1; for(int i=1;i<=hm1.num_col();i++) { tmp+=(*(tempri++))*(*(hm1ci++)); } *(mr++) = tmp; hm1c1 += n; } tempr1 += n; } return mret; } HepSymMatrix HepSymMatrix::similarity(const HepSymMatrix &hm1) const #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1.num_row()); { #else { HepSymMatrix mret(hm1.num_row()); #endif HepMatrix temp = hm1*(*this); int n = hm1.num_col(); HepMatrix::mIter mr = mret.m.begin(); HepMatrix::mIter tempr1 = temp.m.begin(); for(int r=1;r<=mret.num_row();r++) { HepMatrix::mcIter hm1c1 = hm1.m.begin(); int c; for(c=1;c<=r;c++) { register double tmp = 0.0; HepMatrix::mIter tempri = tempr1; HepMatrix::mcIter hm1ci = hm1c1; int i; for(i=1;i<c;i++) { tmp+=(*(tempri++))*(*(hm1ci++)); } for(i=c;i<=hm1.num_col();i++) { tmp+=(*(tempri++))*(*(hm1ci)); if(i<hm1.num_col()) hm1ci += i; } *(mr++) = tmp; hm1c1 += c; } tempr1 += n; } return mret; } double HepSymMatrix::similarity(const HepVector &hm1) const { register double mret = 0.0; HepVector temp = (*this) *hm1; // If hm1*(*this) has correct dimensions, then so will the hm1.T multiplication. // So there is no need to check dimensions again. 
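  // Descriptive note: the loop below accumulates the scalar quadratic form
  //   mret = hm1^T * (*this) * hm1
  // as the dot product of hm1 with the product vector `temp` computed above.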
HepMatrix::mIter a=temp.m.begin(); HepMatrix::mcIter b=hm1.m.begin(); HepMatrix::mIter e=a+hm1.num_row(); for(;a<e;) mret += (*(a++)) * (*(b++)); return mret; } HepSymMatrix HepSymMatrix::similarityT(const HepMatrix &hm1) const #ifdef HEP_GNU_OPTIMIZED_RETURN return mret(hm1.num_col()); { #else { HepSymMatrix mret(hm1.num_col()); #endif HepMatrix temp = (*this)*hm1; int n = hm1.num_col(); HepMatrix::mIter mrc = mret.m.begin(); HepMatrix::mIter temp1r = temp.m.begin(); for(int r=1;r<=mret.num_row();r++) { HepMatrix::mcIter m11c = hm1.m.begin(); for(int c=1;c<=r;c++) { register double tmp = 0.0; for(int i=1;i<=hm1.num_row();i++) { HepMatrix::mIter tempir = temp1r + n*(i-1); HepMatrix::mcIter hm1ic = m11c + n*(i-1); tmp+=(*(tempir))*(*(hm1ic)); } *(mrc++) = tmp; m11c++; } temp1r++; } return mret; } void HepSymMatrix::invert(int &ifail) { ifail = 0; switch(nrow) { case 3: { double det, temp; double t1, t2, t3; double c11,c12,c13,c22,c23,c33; c11 = (*(m.begin()+2)) * (*(m.begin()+5)) - (*(m.begin()+4)) * (*(m.begin()+4)); c12 = (*(m.begin()+4)) * (*(m.begin()+3)) - (*(m.begin()+1)) * (*(m.begin()+5)); c13 = (*(m.begin()+1)) * (*(m.begin()+4)) - (*(m.begin()+2)) * (*(m.begin()+3)); c22 = (*(m.begin()+5)) * (*m.begin()) - (*(m.begin()+3)) * (*(m.begin()+3)); c23 = (*(m.begin()+3)) * (*(m.begin()+1)) - (*(m.begin()+4)) * (*m.begin()); c33 = (*m.begin()) * (*(m.begin()+2)) - (*(m.begin()+1)) * (*(m.begin()+1)); t1 = fabs(*m.begin()); t2 = fabs(*(m.begin()+1)); t3 = fabs(*(m.begin()+3)); if (t1 >= t2) { if (t3 >= t1) { temp = *(m.begin()+3); det = c23*c12-c22*c13; } else { temp = *m.begin(); det = c22*c33-c23*c23; } } else if (t3 >= t2) { temp = *(m.begin()+3); det = c23*c12-c22*c13; } else { temp = *(m.begin()+1); det = c13*c23-c12*c33; } if (det==0) { ifail = 1; return; } { double ds = temp/det; HepMatrix::mIter hmm = m.begin(); *(hmm++) = ds*c11; *(hmm++) = ds*c12; *(hmm++) = ds*c22; *(hmm++) = ds*c13; *(hmm++) = ds*c23; *(hmm) = ds*c33; } } break; case 2: { double det, temp, ds; det = (*m.begin())*(*(m.begin()+2)) - (*(m.begin()+1))*(*(m.begin()+1)); if (det==0) { ifail = 1; return; } ds = 1.0/det; *(m.begin()+1) *= -ds; temp = ds*(*(m.begin()+2)); *(m.begin()+2) = ds*(*m.begin()); *m.begin() = temp; break; } case 1: { if ((*m.begin())==0) { ifail = 1; return; } *m.begin() = 1.0/(*m.begin()); break; } case 5: { invert5(ifail); return; } case 6: { invert6(ifail); return; } case 4: { invert4(ifail); return; } default: { invertBunchKaufman(ifail); return; } } return; // inversion successful } double HepSymMatrix::determinant() const { static const int max_array = 20; // ir must point to an array which is ***1 longer than*** nrow static std::vector<int> ir_vec (max_array+1); if (ir_vec.size() <= static_cast<unsigned int>(nrow)) ir_vec.resize(nrow+1); int * ir = &ir_vec[0]; double det; HepMatrix mt(*this); int i = mt.dfact_matrix(det, ir); if(i==0) return det; return 0.0; } double HepSymMatrix::trace() const { double t = 0.0; for (int i=0; i<nrow; i++) t += *(m.begin() + (i+3)*i/2); return t; } void HepSymMatrix::invertBunchKaufman(int &ifail) { // Bunch-Kaufman diagonal pivoting method // It is decribed in J.R. Bunch, L. Kaufman (1977). // "Some Stable Methods for Calculating Inertia and Solving Symmetric // Linear Systems", Math. Comp. 31, p. 162-179. or in Gene H. Golub, // Charles F. van Loan, "Matrix Computations" (the second edition // has a bug.) 
and implemented in "lapack" // Mario Stanke, 09/97 int i, j, k, is; int pivrow; // Establish the two working-space arrays needed: x and piv are // used as pointers to arrays of doubles and ints respectively, each // of length nrow. We do not want to reallocate each time through // unless the size needs to grow. We do not want to leak memory, even // by having a new without a delete that is only done once. static const int max_array = 25; #ifdef DISABLE_ALLOC static std::vector<double> xvec (max_array); static std::vector<int> pivv (max_array); typedef std::vector<int>::iterator pivIter; #else static std::vector<double,Alloc<double,25> > xvec (max_array); static std::vector<int, Alloc<int, 25> > pivv (max_array); typedef std::vector<int,Alloc<int,25> >::iterator pivIter; #endif if (xvec.size() < static_cast<unsigned int>(nrow)) xvec.resize(nrow); if (pivv.size() < static_cast<unsigned int>(nrow)) pivv.resize(nrow); // Note - resize shuld do nothing if the size is already larger than nrow, // but on VC++ there are indications that it does so we check. // Note - the data elements in a vector are guaranteed to be contiguous, // so x[i] and piv[i] are optimally fast. mIter x = xvec.begin(); // x[i] is used as helper storage, needs to have at least size nrow. pivIter piv = pivv.begin(); // piv[i] is used to store details of exchanges double temp1, temp2; HepMatrix::mIter ip, mjj, iq; double lambda, sigma; const double alpha = .6404; // = (1+sqrt(17))/8 const double epsilon = 32*DBL_EPSILON; // whenever a sum of two doubles is below or equal to epsilon // it is set to zero. // this constant could be set to zero but then the algorithm // doesn't neccessarily detect that a matrix is singular for (i = 0; i < nrow; ++i) piv[i] = i+1; ifail = 0; // compute the factorization P*A*P^T = L * D * L^T // L is unit lower triangular, D is direct sum of 1x1 and 2x2 matrices // L and D^-1 are stored in A = *this, P is stored in piv[] for (j=1; j < nrow; j+=is) // main loop over columns { mjj = m.begin() + j*(j-1)/2 + j-1; lambda = 0; // compute lambda = max of A(j+1:n,j) pivrow = j+1; //ip = m.begin() + (j+1)*j/2 + j-1; for (i=j+1; i <= nrow ; ++i) { // calculate ip to avoid going off end of storage array ip = m.begin() + (i-1)*i/2 + j-1; if (fabs(*ip) > lambda) { lambda = fabs(*ip); pivrow = i; } } // for i if (lambda == 0 ) { if (*mjj == 0) { ifail = 1; return; } is=1; *mjj = 1./ *mjj; } else { // lambda == 0 if (fabs(*mjj) >= lambda*alpha) { is=1; pivrow=j; } else { // fabs(*mjj) >= lambda*alpha sigma = 0; // compute sigma = max A(pivrow, j:pivrow-1) ip = m.begin() + pivrow*(pivrow-1)/2+j-1; for (k=j; k < pivrow; k++) { if (fabs(*ip) > sigma) sigma = fabs(*ip); ip++; } // for k if (sigma * fabs(*mjj) >= alpha * lambda * lambda) { is=1; pivrow = j; } else if (fabs(*(m.begin()+pivrow*(pivrow-1)/2+pivrow-1)) >= alpha * sigma) { is=1; } else { is=2; } // if sigma... 
} // fabs(*mjj) >= lambda*alpha if (pivrow == j) { // no permutation neccessary piv[j-1] = pivrow; if (*mjj == 0) { ifail=1; return; } temp2 = *mjj = 1./ *mjj; // invert D(j,j) // update A(j+1:n, j+1,n) for (i=j+1; i <= nrow; i++) { temp1 = *(m.begin() + i*(i-1)/2 + j-1) * temp2; ip = m.begin()+i*(i-1)/2+j; for (k=j+1; k<=i; k++) { *ip -= temp1 * *(m.begin() + k*(k-1)/2 + j-1); if (fabs(*ip) <= epsilon) *ip=0; ip++; } } // for i // update L //ip = m.begin() + (j+1)*j/2 + j-1; for (i=j+1; i <= nrow; ++i) { // calculate ip to avoid going off end of storage array ip = m.begin() + (i-1)*i/2 + j-1; *ip *= temp2; } } else if (is==1) { // 1x1 pivot piv[j-1] = pivrow; // interchange rows and columns j and pivrow in // submatrix (j:n,j:n) ip = m.begin() + pivrow*(pivrow-1)/2 + j; for (i=j+1; i < pivrow; i++, ip++) { temp1 = *(m.begin() + i*(i-1)/2 + j-1); *(m.begin() + i*(i-1)/2 + j-1)= *ip; *ip = temp1; } // for i temp1 = *mjj; *mjj = *(m.begin()+pivrow*(pivrow-1)/2+pivrow-1); *(m.begin()+pivrow*(pivrow-1)/2+pivrow-1) = temp1; ip = m.begin() + (pivrow+1)*pivrow/2 + j-1; iq = ip + pivrow-j; for (i = pivrow+1; i <= nrow; ip += i, iq += i++) { temp1 = *iq; *iq = *ip; *ip = temp1; } // for i if (*mjj == 0) { ifail = 1; return; } // *mjj == 0 temp2 = *mjj = 1./ *mjj; // invert D(j,j) // update A(j+1:n, j+1:n) for (i = j+1; i <= nrow; i++) { temp1 = *(m.begin() + i*(i-1)/2 + j-1) * temp2; ip = m.begin()+i*(i-1)/2+j; for (k=j+1; k<=i; k++) { *ip -= temp1 * *(m.begin() + k*(k-1)/2 + j-1); if (fabs(*ip) <= epsilon) *ip=0; ip++; } // for k } // for i // update L //ip = m.begin() + (j+1)*j/2 + j-1; for (i=j+1; i <= nrow; ++i) { // calculate ip to avoid going off end of storage array ip = m.begin() + (i-1)*i/2 + j-1; *ip *= temp2; } } else { // is=2, ie use a 2x2 pivot piv[j-1] = -pivrow; piv[j] = 0; // that means this is the second row of a 2x2 pivot if (j+1 != pivrow) { // interchange rows and columns j+1 and pivrow in // submatrix (j:n,j:n) ip = m.begin() + pivrow*(pivrow-1)/2 + j+1; for (i=j+2; i < pivrow; i++, ip++) { temp1 = *(m.begin() + i*(i-1)/2 + j); *(m.begin() + i*(i-1)/2 + j) = *ip; *ip = temp1; } // for i temp1 = *(mjj + j + 1); *(mjj + j + 1) = *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1); *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1) = temp1; temp1 = *(mjj + j); *(mjj + j) = *(m.begin() + pivrow*(pivrow-1)/2 + j-1); *(m.begin() + pivrow*(pivrow-1)/2 + j-1) = temp1; ip = m.begin() + (pivrow+1)*pivrow/2 + j; iq = ip + pivrow-(j+1); for (i = pivrow+1; i <= nrow; ip += i, iq += i++) { temp1 = *iq; *iq = *ip; *ip = temp1; } // for i } // j+1 != pivrow // invert D(j:j+1,j:j+1) temp2 = *mjj * *(mjj + j + 1) - *(mjj + j) * *(mjj + j); if (temp2 == 0) { std::cerr << "SymMatrix::bunch_invert: error in pivot choice" << std::endl; } temp2 = 1. 
/ temp2; // this quotient is guaranteed to exist by the choice // of the pivot temp1 = *mjj; *mjj = *(mjj + j + 1) * temp2; *(mjj + j + 1) = temp1 * temp2; *(mjj + j) = - *(mjj + j) * temp2; if (j < nrow-1) { // otherwise do nothing // update A(j+2:n, j+2:n) for (i=j+2; i <= nrow ; i++) { ip = m.begin() + i*(i-1)/2 + j-1; temp1 = *ip * *mjj + *(ip + 1) * *(mjj + j); if (fabs(temp1 ) <= epsilon) temp1 = 0; temp2 = *ip * *(mjj + j) + *(ip + 1) * *(mjj + j + 1); if (fabs(temp2 ) <= epsilon) temp2 = 0; for (k = j+2; k <= i ; k++) { ip = m.begin() + i*(i-1)/2 + k-1; iq = m.begin() + k*(k-1)/2 + j-1; *ip -= temp1 * *iq + temp2 * *(iq+1); if (fabs(*ip) <= epsilon) *ip = 0; } // for k } // for i // update L for (i=j+2; i <= nrow ; i++) { ip = m.begin() + i*(i-1)/2 + j-1; temp1 = *ip * *mjj + *(ip+1) * *(mjj + j); if (fabs(temp1) <= epsilon) temp1 = 0; *(ip+1) = *ip * *(mjj + j) + *(ip+1) * *(mjj + j + 1); if (fabs(*(ip+1)) <= epsilon) *(ip+1) = 0; *ip = temp1; } // for k } // j < nrow-1 } } } // end of main loop over columns if (j == nrow) { // the the last pivot is 1x1 mjj = m.begin() + j*(j-1)/2 + j-1; if (*mjj == 0) { ifail = 1; return; } else { *mjj = 1. / *mjj; } } // end of last pivot code // computing the inverse from the factorization for (j = nrow ; j >= 1 ; j -= is) // loop over columns { mjj = m.begin() + j*(j-1)/2 + j-1; if (piv[j-1] > 0) { // 1x1 pivot, compute column j of inverse is = 1; if (j < nrow) { //ip = m.begin() + (j+1)*j/2 + j-1; //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip; ip = m.begin() + (j+1)*j/2 - 1; for (i=0; i < nrow-j; ++i) { ip += i + j; x[i] = *ip; } for (i=j+1; i<=nrow ; i++) { temp2=0; ip = m.begin() + i*(i-1)/2 + j; for (k=0; k <= i-j-1; k++) temp2 += *ip++ * x[k]; // avoid setting ip outside the bounds of the storage array ip -= 1; // using the value of k from the previous loop for ( ; k < nrow-j; ++k) { ip += j+k; temp2 += *ip * x[k]; } *(m.begin()+ i*(i-1)/2 + j-1) = -temp2; } // for i temp2 = 0; //ip = m.begin() + (j+1)*j/2 + j-1; //for (k=0; k < nrow-j; ip += 1+j+k++) //temp2 += x[k] * *ip; ip = m.begin() + (j+1)*j/2 - 1; for (k=0; k < nrow-j; ++k) { ip += j+k; temp2 += x[k] * *ip; } *mjj -= temp2; } // j < nrow } else { //2x2 pivot, compute columns j and j-1 of the inverse if (piv[j-1] != 0) std::cerr << "error in piv" << piv[j-1] << std::endl; is=2; if (j < nrow) { //ip = m.begin() + (j+1)*j/2 + j-1; //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip; ip = m.begin() + (j+1)*j/2 - 1; for (i=0; i < nrow-j; ++i) { ip += i + j; x[i] = *ip; } for (i=j+1; i<=nrow ; i++) { temp2 = 0; ip = m.begin() + i*(i-1)/2 + j; for (k=0; k <= i-j-1; k++) temp2 += *ip++ * x[k]; for (ip += i-1; k < nrow-j; ip += 1+j+k++) temp2 += *ip * x[k]; *(m.begin()+ i*(i-1)/2 + j-1) = -temp2; } // for i temp2 = 0; //ip = m.begin() + (j+1)*j/2 + j-1; //for (k=0; k < nrow-j; ip += 1+j+k++) temp2 += x[k] * *ip; ip = m.begin() + (j+1)*j/2 - 1; for (k=0; k < nrow-j; ++k) { ip += k + j; temp2 += x[k] * *ip; } *mjj -= temp2; temp2 = 0; //ip = m.begin() + (j+1)*j/2 + j-2; //for (i=j+1; i <= nrow; ip += i++) temp2 += *ip * *(ip+1); ip = m.begin() + (j+1)*j/2 - 2; for (i=j+1; i <= nrow; ++i) { ip += i - 1; temp2 += *ip * *(ip+1); } *(mjj-1) -= temp2; //ip = m.begin() + (j+1)*j/2 + j-2; //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip; ip = m.begin() + (j+1)*j/2 - 2; for (i=0; i < nrow-j; ++i) { ip += i + j; x[i] = *ip; } for (i=j+1; i <= nrow ; i++) { temp2 = 0; ip = m.begin() + i*(i-1)/2 + j; for (k=0; k <= i-j-1; k++) temp2 += *ip++ * x[k]; for (ip += i-1; k < nrow-j; ip += 1+j+k++) temp2 
+= *ip * x[k]; *(m.begin()+ i*(i-1)/2 + j-2)= -temp2; } // for i temp2 = 0; //ip = m.begin() + (j+1)*j/2 + j-2; //for (k=0; k < nrow-j; ip += 1+j+k++) // temp2 += x[k] * *ip; ip = m.begin() + (j+1)*j/2 - 2; for (k=0; k < nrow-j; ++k) { ip += k + j; temp2 += x[k] * *ip; } *(mjj-j) -= temp2; } // j < nrow } // else piv[j-1] > 0 // interchange rows and columns j and piv[j-1] // or rows and columns j and -piv[j-2] pivrow = (piv[j-1]==0)? -piv[j-2] : piv[j-1]; ip = m.begin() + pivrow*(pivrow-1)/2 + j; for (i=j+1;i < pivrow; i++, ip++) { temp1 = *(m.begin() + i*(i-1)/2 + j-1); *(m.begin() + i*(i-1)/2 + j-1) = *ip; *ip = temp1; } // for i temp1 = *mjj; *mjj = *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1); *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1) = temp1; if (is==2) { temp1 = *(mjj-1); *(mjj-1) = *( m.begin() + pivrow*(pivrow-1)/2 + j-2); *( m.begin() + pivrow*(pivrow-1)/2 + j-2) = temp1; } // is==2 // problem right here if( pivrow < nrow ) { ip = m.begin() + (pivrow+1)*pivrow/2 + j-1; // &A(i,j) // adding parenthesis for VC++ iq = ip + (pivrow-j); for (i = pivrow+1; i <= nrow; i++) { temp1 = *iq; *iq = *ip; *ip = temp1; if( i < nrow ) { ip += i; iq += i; } } // for i } // pivrow < nrow } // end of loop over columns (in computing inverse from factorization) return; // inversion successful } } // namespace CLHEP
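For orientation, a minimal usage sketch of the HepSymMatrix interfaces defined above (indexing is 1-based, as in the operator<< loops; the usual CLHEP headers and link flags are assumed):

#include <iostream>
#include "CLHEP/Matrix/SymMatrix.h"
#include "CLHEP/Matrix/Vector.h"

int main() {
  CLHEP::HepSymMatrix s(3, 1);   // 3x3, initialized to the identity
  s(1, 2) = 0.5;                 // symmetric storage: (2,1) is the same element
  s(3, 3) = 4.0;

  CLHEP::HepSymMatrix sinv(s);
  int ifail = 0;
  sinv.invert(ifail);            // ifail != 0 reports a singular matrix

  CLHEP::HepVector v(3);
  v(1) = 1.0; v(2) = 2.0; v(3) = 3.0;

  std::cout << "det = "    << s.determinant()
            << ", trace = " << s.trace()
            << ", vT*s*v = " << s.similarity(v) << std::endl;
  return ifail;
}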
utf-8
1
unknown
unknown
paraview-5.10.0~rc1/VTK/ThirdParty/fides/vtkfides/thirdparty/rapidjson/fidesrapidjson/include/fidesrapidjson/rapidjson.h
// Tencent is pleased to support the open source community by making RapidJSON available. // // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. // // Licensed under the MIT License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://opensource.org/licenses/MIT // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #ifndef RAPIDJSON_RAPIDJSON_H_ #define RAPIDJSON_RAPIDJSON_H_ /*!\file rapidjson.h \brief common definitions and configuration \see RAPIDJSON_CONFIG */ /*! \defgroup RAPIDJSON_CONFIG RapidJSON configuration \brief Configuration macros for library features Some RapidJSON features are configurable to adapt the library to a wide variety of platforms, environments and usage scenarios. Most of the features can be configured in terms of overridden or predefined preprocessor macros at compile-time. Some additional customization is available in the \ref RAPIDJSON_ERRORS APIs. \note These macros should be given on the compiler command-line (where applicable) to avoid inconsistent values when compiling different translation units of a single application. */ #include <cstdlib> // malloc(), realloc(), free(), size_t #include <cstring> // memset(), memcpy(), memmove(), memcmp() /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_VERSION_STRING // // ALWAYS synchronize the following 3 macros with corresponding variables in /CMakeLists.txt. // //!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN // token stringification #define RAPIDJSON_STRINGIFY(x) RAPIDJSON_DO_STRINGIFY(x) #define RAPIDJSON_DO_STRINGIFY(x) #x // token concatenation #define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y) #define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y) #define RAPIDJSON_DO_JOIN2(X, Y) X##Y //!@endcond /*! \def RAPIDJSON_MAJOR_VERSION \ingroup RAPIDJSON_CONFIG \brief Major version of RapidJSON in integer. */ /*! \def RAPIDJSON_MINOR_VERSION \ingroup RAPIDJSON_CONFIG \brief Minor version of RapidJSON in integer. */ /*! \def RAPIDJSON_PATCH_VERSION \ingroup RAPIDJSON_CONFIG \brief Patch version of RapidJSON in integer. */ /*! \def RAPIDJSON_VERSION_STRING \ingroup RAPIDJSON_CONFIG \brief Version of RapidJSON in "<major>.<minor>.<patch>" string format. */ #define RAPIDJSON_MAJOR_VERSION 1 #define RAPIDJSON_MINOR_VERSION 1 #define RAPIDJSON_PATCH_VERSION 0 #define RAPIDJSON_VERSION_STRING \ RAPIDJSON_STRINGIFY(RAPIDJSON_MAJOR_VERSION.RAPIDJSON_MINOR_VERSION.RAPIDJSON_PATCH_VERSION) /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_NAMESPACE_(BEGIN|END) /*! \def RAPIDJSON_NAMESPACE \ingroup RAPIDJSON_CONFIG \brief provide custom rapidjson namespace In order to avoid symbol clashes and/or "One Definition Rule" errors between multiple inclusions of (different versions of) RapidJSON in a single binary, users can customize the name of the main RapidJSON namespace. In case of a single nesting level, defining \c RAPIDJSON_NAMESPACE to a custom name (e.g. \c MyRapidJSON) is sufficient. 
If multiple levels are needed, both \ref RAPIDJSON_NAMESPACE_BEGIN and \ref RAPIDJSON_NAMESPACE_END need to be defined as well: \code // in some .cpp file #define RAPIDJSON_NAMESPACE my::rapidjson #define RAPIDJSON_NAMESPACE_BEGIN namespace my { namespace rapidjson { #define RAPIDJSON_NAMESPACE_END } } #include "rapidjson/..." \endcode \see rapidjson */ /*! \def RAPIDJSON_NAMESPACE_BEGIN \ingroup RAPIDJSON_CONFIG \brief provide custom rapidjson namespace (opening expression) \see RAPIDJSON_NAMESPACE */ /*! \def RAPIDJSON_NAMESPACE_END \ingroup RAPIDJSON_CONFIG \brief provide custom rapidjson namespace (closing expression) \see RAPIDJSON_NAMESPACE */ #ifndef RAPIDJSON_NAMESPACE #define RAPIDJSON_NAMESPACE rapidjson #endif #ifndef RAPIDJSON_NAMESPACE_BEGIN #define RAPIDJSON_NAMESPACE_BEGIN namespace RAPIDJSON_NAMESPACE { #endif #ifndef RAPIDJSON_NAMESPACE_END #define RAPIDJSON_NAMESPACE_END } #endif /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_HAS_STDSTRING #ifndef RAPIDJSON_HAS_STDSTRING #ifdef RAPIDJSON_DOXYGEN_RUNNING #define RAPIDJSON_HAS_STDSTRING 1 // force generation of documentation #else #define RAPIDJSON_HAS_STDSTRING 0 // no std::string support by default #endif /*! \def RAPIDJSON_HAS_STDSTRING \ingroup RAPIDJSON_CONFIG \brief Enable RapidJSON support for \c std::string By defining this preprocessor symbol to \c 1, several convenience functions for using \ref rapidjson::GenericValue with \c std::string are enabled, especially for construction and comparison. \hideinitializer */ #endif // !defined(RAPIDJSON_HAS_STDSTRING) #if RAPIDJSON_HAS_STDSTRING #include <string> #endif // RAPIDJSON_HAS_STDSTRING /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_NO_INT64DEFINE /*! \def RAPIDJSON_NO_INT64DEFINE \ingroup RAPIDJSON_CONFIG \brief Use external 64-bit integer types. RapidJSON requires the 64-bit integer types \c int64_t and \c uint64_t types to be available at global scope. If users have their own definition, define RAPIDJSON_NO_INT64DEFINE to prevent RapidJSON from defining its own types. */ #ifndef RAPIDJSON_NO_INT64DEFINE //!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN #if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013 #include "msinttypes/stdint.h" #include "msinttypes/inttypes.h" #else // Other compilers should have this. #include <stdint.h> #include <inttypes.h> #endif //!@endcond #ifdef RAPIDJSON_DOXYGEN_RUNNING #define RAPIDJSON_NO_INT64DEFINE #endif #endif // RAPIDJSON_NO_INT64TYPEDEF /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_FORCEINLINE #ifndef RAPIDJSON_FORCEINLINE //!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN #if defined(_MSC_VER) && defined(NDEBUG) #define RAPIDJSON_FORCEINLINE __forceinline #elif defined(__GNUC__) && __GNUC__ >= 4 && defined(NDEBUG) #define RAPIDJSON_FORCEINLINE __attribute__((always_inline)) #else #define RAPIDJSON_FORCEINLINE #endif //!@endcond #endif // RAPIDJSON_FORCEINLINE /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_ENDIAN #define RAPIDJSON_LITTLEENDIAN 0 //!< Little endian machine #define RAPIDJSON_BIGENDIAN 1 //!< Big endian machine //! Endianness of the machine. /*! \def RAPIDJSON_ENDIAN \ingroup RAPIDJSON_CONFIG GCC 4.6 provided macro for detecting endianness of the target machine. But other compilers may not have this. User can define RAPIDJSON_ENDIAN to either \ref RAPIDJSON_LITTLEENDIAN or \ref RAPIDJSON_BIGENDIAN. 
Default detection is implemented with reference to \li https://gcc.gnu.org/onlinedocs/gcc-4.6.0/cpp/Common-Predefined-Macros.html \li http://www.boost.org/doc/libs/1_42_0/boost/detail/endian.hpp */ #ifndef RAPIDJSON_ENDIAN // Detect with GCC 4.6's macro # ifdef __BYTE_ORDER__ # if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ # define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN # elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN # else # error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN. # endif // __BYTE_ORDER__ // Detect with GLIBC's endian.h # elif defined(__GLIBC__) # include <endian.h> # if (__BYTE_ORDER == __LITTLE_ENDIAN) # define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN # elif (__BYTE_ORDER == __BIG_ENDIAN) # define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN # else # error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN. # endif // __GLIBC__ // Detect with _LITTLE_ENDIAN and _BIG_ENDIAN macro # elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) # define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN # elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) # define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN // Detect with architecture macros # elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(__s390__) # define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN # elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__) # define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN # elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64)) # define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN # elif defined(RAPIDJSON_DOXYGEN_RUNNING) # define RAPIDJSON_ENDIAN # else # error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN. # endif #endif // RAPIDJSON_ENDIAN /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_64BIT //! Whether using 64-bit architecture #ifndef RAPIDJSON_64BIT #if defined(__LP64__) || (defined(__x86_64__) && defined(__ILP32__)) || defined(_WIN64) || defined(__EMSCRIPTEN__) #define RAPIDJSON_64BIT 1 #else #define RAPIDJSON_64BIT 0 #endif #endif // RAPIDJSON_64BIT /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_ALIGN //! Data alignment of the machine. /*! \ingroup RAPIDJSON_CONFIG \param x size (in bytes) to align Some machines require strict data alignment. The default is 8 bytes. Users can customize it by defining the RAPIDJSON_ALIGN function macro. */ #ifndef RAPIDJSON_ALIGN #define RAPIDJSON_ALIGN(x) (((x) + static_cast<size_t>(7u)) & ~static_cast<size_t>(7u)) #endif /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_UINT64_C2 //! Construct a 64-bit literal by a pair of 32-bit integers. /*! A 64-bit literal with or without the ULL suffix is prone to compiler warnings, and UINT64_C() is a C macro which can cause compilation problems. Use this macro to define 64-bit constants by a pair of 32-bit integers.
*/ #ifndef RAPIDJSON_UINT64_C2 #define RAPIDJSON_UINT64_C2(high32, low32) ((static_cast<uint64_t>(high32) << 32) | static_cast<uint64_t>(low32)) #endif /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_48BITPOINTER_OPTIMIZATION //! Use only the lower 48 bits of an address for some pointers. /*! \ingroup RAPIDJSON_CONFIG This optimization uses the fact that the current x86-64 architecture only implements a 48-bit virtual address space: the upper 16 bits can be used for storing other data. \c GenericValue uses this optimization to reduce its size from 24 bytes to 16 bytes on 64-bit architectures. */ #ifndef RAPIDJSON_48BITPOINTER_OPTIMIZATION #if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) #define RAPIDJSON_48BITPOINTER_OPTIMIZATION 1 #else #define RAPIDJSON_48BITPOINTER_OPTIMIZATION 0 #endif #endif // RAPIDJSON_48BITPOINTER_OPTIMIZATION #if RAPIDJSON_48BITPOINTER_OPTIMIZATION == 1 #if RAPIDJSON_64BIT != 1 #error RAPIDJSON_48BITPOINTER_OPTIMIZATION can only be set to 1 when RAPIDJSON_64BIT=1 #endif #define RAPIDJSON_SETPOINTER(type, p, x) (p = reinterpret_cast<type *>((reinterpret_cast<uintptr_t>(p) & static_cast<uintptr_t>(RAPIDJSON_UINT64_C2(0xFFFF0000, 0x00000000))) | reinterpret_cast<uintptr_t>(reinterpret_cast<const void*>(x)))) #define RAPIDJSON_GETPOINTER(type, p) (reinterpret_cast<type *>(reinterpret_cast<uintptr_t>(p) & static_cast<uintptr_t>(RAPIDJSON_UINT64_C2(0x0000FFFF, 0xFFFFFFFF)))) #else #define RAPIDJSON_SETPOINTER(type, p, x) (p = (x)) #define RAPIDJSON_GETPOINTER(type, p) (p) #endif /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_NEON/RAPIDJSON_SIMD /*! \def RAPIDJSON_SIMD \ingroup RAPIDJSON_CONFIG \brief Enable SSE2/SSE4.2/Neon optimization. RapidJSON supports optimized implementations for some parsing operations based on the SSE2, SSE4.2 or Neon SIMD extensions on modern Intel or ARM compatible processors. To enable these optimizations, three different symbols can be defined: \code // Enable SSE2 optimization. #define RAPIDJSON_SSE2 // Enable SSE4.2 optimization. #define RAPIDJSON_SSE42 // Enable ARM Neon optimization. #define RAPIDJSON_NEON \endcode \c RAPIDJSON_SSE42 takes precedence over SSE2, if both are defined. If any of these symbols is defined, RapidJSON defines the macro \c RAPIDJSON_SIMD to indicate the availability of the optimized code. */ #if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) \ || defined(RAPIDJSON_NEON) || defined(RAPIDJSON_DOXYGEN_RUNNING) #define RAPIDJSON_SIMD #endif /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_NO_SIZETYPEDEFINE #ifndef RAPIDJSON_NO_SIZETYPEDEFINE /*! \def RAPIDJSON_NO_SIZETYPEDEFINE \ingroup RAPIDJSON_CONFIG \brief User-provided \c SizeType definition. In order to avoid using 32-bit size types for indexing strings and arrays, define this preprocessor symbol and provide the type rapidjson::SizeType before including RapidJSON: \code #define RAPIDJSON_NO_SIZETYPEDEFINE namespace rapidjson { typedef ::std::size_t SizeType; } #include "rapidjson/..." \endcode \see rapidjson::SizeType */ #ifdef RAPIDJSON_DOXYGEN_RUNNING #define RAPIDJSON_NO_SIZETYPEDEFINE #endif RAPIDJSON_NAMESPACE_BEGIN //! Size type (for string lengths, array sizes, etc.) /*! RapidJSON uses 32-bit array/string indices even on 64-bit platforms, instead of using \c size_t.
Users may override the SizeType by defining \ref RAPIDJSON_NO_SIZETYPEDEFINE. */ typedef unsigned SizeType; RAPIDJSON_NAMESPACE_END #endif // always import std::size_t to rapidjson namespace RAPIDJSON_NAMESPACE_BEGIN using std::size_t; RAPIDJSON_NAMESPACE_END /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_ASSERT //! Assertion. /*! \ingroup RAPIDJSON_CONFIG By default, rapidjson uses C \c assert() for internal assertions. User can override it by defining RAPIDJSON_ASSERT(x) macro. \note Parsing errors are handled and can be customized by the \ref RAPIDJSON_ERRORS APIs. */ #ifndef RAPIDJSON_ASSERT #include <cassert> #define RAPIDJSON_ASSERT(x) assert(x) #endif // RAPIDJSON_ASSERT /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_STATIC_ASSERT // Prefer C++11 static_assert, if available #ifndef RAPIDJSON_STATIC_ASSERT #if __cplusplus >= 201103L || ( defined(_MSC_VER) && _MSC_VER >= 1800 ) #define RAPIDJSON_STATIC_ASSERT(x) \ static_assert(x, RAPIDJSON_STRINGIFY(x)) #endif // C++11 #endif // RAPIDJSON_STATIC_ASSERT // Adopt C++03 implementation from boost #ifndef RAPIDJSON_STATIC_ASSERT #ifndef __clang__ //!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN #endif RAPIDJSON_NAMESPACE_BEGIN template <bool x> struct STATIC_ASSERTION_FAILURE; template <> struct STATIC_ASSERTION_FAILURE<true> { enum { value = 1 }; }; template <size_t x> struct StaticAssertTest {}; RAPIDJSON_NAMESPACE_END #if defined(__GNUC__) || defined(__clang__) #define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused)) #else #define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE #endif #ifndef __clang__ //!@endcond #endif /*! \def RAPIDJSON_STATIC_ASSERT \brief (Internal) macro to check for conditions at compile-time \param x compile-time condition \hideinitializer */ #define RAPIDJSON_STATIC_ASSERT(x) \ typedef ::RAPIDJSON_NAMESPACE::StaticAssertTest< \ sizeof(::RAPIDJSON_NAMESPACE::STATIC_ASSERTION_FAILURE<bool(x) >)> \ RAPIDJSON_JOIN(StaticAssertTypedef, __LINE__) RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE #endif // RAPIDJSON_STATIC_ASSERT /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_LIKELY, RAPIDJSON_UNLIKELY //! Compiler branching hint for expression with high probability to be true. /*! \ingroup RAPIDJSON_CONFIG \param x Boolean expression likely to be true. */ #ifndef RAPIDJSON_LIKELY #if defined(__GNUC__) || defined(__clang__) #define RAPIDJSON_LIKELY(x) __builtin_expect(!!(x), 1) #else #define RAPIDJSON_LIKELY(x) (x) #endif #endif //! Compiler branching hint for expression with low probability to be true. /*! \ingroup RAPIDJSON_CONFIG \param x Boolean expression unlikely to be true. 
*/ #ifndef RAPIDJSON_UNLIKELY #if defined(__GNUC__) || defined(__clang__) #define RAPIDJSON_UNLIKELY(x) __builtin_expect(!!(x), 0) #else #define RAPIDJSON_UNLIKELY(x) (x) #endif #endif /////////////////////////////////////////////////////////////////////////////// // Helpers //!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN #define RAPIDJSON_MULTILINEMACRO_BEGIN do { #define RAPIDJSON_MULTILINEMACRO_END \ } while((void)0, 0) // adopted from Boost #define RAPIDJSON_VERSION_CODE(x,y,z) \ (((x)*100000) + ((y)*100) + (z)) #if defined(__has_builtin) #define RAPIDJSON_HAS_BUILTIN(x) __has_builtin(x) #else #define RAPIDJSON_HAS_BUILTIN(x) 0 #endif /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_DIAG_PUSH/POP, RAPIDJSON_DIAG_OFF #if defined(__GNUC__) #define RAPIDJSON_GNUC \ RAPIDJSON_VERSION_CODE(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) #endif #if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,2,0)) #define RAPIDJSON_PRAGMA(x) _Pragma(RAPIDJSON_STRINGIFY(x)) #define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(GCC diagnostic x) #define RAPIDJSON_DIAG_OFF(x) \ RAPIDJSON_DIAG_PRAGMA(ignored RAPIDJSON_STRINGIFY(RAPIDJSON_JOIN(-W,x))) // push/pop support in Clang and GCC>=4.6 #if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) #define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push) #define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop) #else // GCC >= 4.2, < 4.6 #define RAPIDJSON_DIAG_PUSH /* ignored */ #define RAPIDJSON_DIAG_POP /* ignored */ #endif #elif defined(_MSC_VER) // pragma (MSVC specific) #define RAPIDJSON_PRAGMA(x) __pragma(x) #define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(warning(x)) #define RAPIDJSON_DIAG_OFF(x) RAPIDJSON_DIAG_PRAGMA(disable: x) #define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push) #define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop) #else #define RAPIDJSON_DIAG_OFF(x) /* ignored */ #define RAPIDJSON_DIAG_PUSH /* ignored */ #define RAPIDJSON_DIAG_POP /* ignored */ #endif // RAPIDJSON_DIAG_* /////////////////////////////////////////////////////////////////////////////// // C++11 features #ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS #if defined(__clang__) #if __has_feature(cxx_rvalue_references) && \ (defined(_MSC_VER) || defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306) #define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 #else #define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 #endif #elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ (defined(_MSC_VER) && _MSC_VER >= 1600) || \ (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__)) #define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 #else #define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 #endif #endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS #ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT #if defined(__clang__) #define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept) #elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ (defined(_MSC_VER) && _MSC_VER >= 1900) || \ (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__)) #define RAPIDJSON_HAS_CXX11_NOEXCEPT 1 #else #define RAPIDJSON_HAS_CXX11_NOEXCEPT 0 #endif #endif #if RAPIDJSON_HAS_CXX11_NOEXCEPT #define RAPIDJSON_NOEXCEPT noexcept #else #define RAPIDJSON_NOEXCEPT /* noexcept */ #endif // RAPIDJSON_HAS_CXX11_NOEXCEPT // no automatic detection, yet 
#ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS #if (defined(_MSC_VER) && _MSC_VER >= 1700) #define RAPIDJSON_HAS_CXX11_TYPETRAITS 1 #else #define RAPIDJSON_HAS_CXX11_TYPETRAITS 0 #endif #endif #ifndef RAPIDJSON_HAS_CXX11_RANGE_FOR #if defined(__clang__) #define RAPIDJSON_HAS_CXX11_RANGE_FOR __has_feature(cxx_range_for) #elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ (defined(_MSC_VER) && _MSC_VER >= 1700) || \ (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__)) #define RAPIDJSON_HAS_CXX11_RANGE_FOR 1 #else #define RAPIDJSON_HAS_CXX11_RANGE_FOR 0 #endif #endif // RAPIDJSON_HAS_CXX11_RANGE_FOR /////////////////////////////////////////////////////////////////////////////// // C++17 features #if defined(__has_cpp_attribute) # if __has_cpp_attribute(fallthrough) # define RAPIDJSON_DELIBERATE_FALLTHROUGH [[fallthrough]] # else # define RAPIDJSON_DELIBERATE_FALLTHROUGH # endif #else # define RAPIDJSON_DELIBERATE_FALLTHROUGH #endif //!@endcond //! Assertion (in non-throwing contexts). /*! \ingroup RAPIDJSON_CONFIG Some functions provide a \c noexcept guarantee, if the compiler supports it. In these cases, the \ref RAPIDJSON_ASSERT macro cannot be overridden to throw an exception. This macro adds a separate customization point for such cases. Defaults to C \c assert() (as \ref RAPIDJSON_ASSERT), if \c noexcept is supported, and to \ref RAPIDJSON_ASSERT otherwise. */ /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_NOEXCEPT_ASSERT #ifndef RAPIDJSON_NOEXCEPT_ASSERT #ifdef RAPIDJSON_ASSERT_THROWS #if RAPIDJSON_HAS_CXX11_NOEXCEPT #define RAPIDJSON_NOEXCEPT_ASSERT(x) #else #include <cassert> #define RAPIDJSON_NOEXCEPT_ASSERT(x) assert(x) #endif // RAPIDJSON_HAS_CXX11_NOEXCEPT #else #define RAPIDJSON_NOEXCEPT_ASSERT(x) RAPIDJSON_ASSERT(x) #endif // RAPIDJSON_ASSERT_THROWS #endif // RAPIDJSON_NOEXCEPT_ASSERT /////////////////////////////////////////////////////////////////////////////// // new/delete #ifndef RAPIDJSON_NEW ///! customization point for global \c new #define RAPIDJSON_NEW(TypeName) new TypeName #endif #ifndef RAPIDJSON_DELETE ///! customization point for global \c delete #define RAPIDJSON_DELETE(x) delete x #endif /////////////////////////////////////////////////////////////////////////////// // Type /*! \namespace rapidjson \brief main RapidJSON namespace \see RAPIDJSON_NAMESPACE */ RAPIDJSON_NAMESPACE_BEGIN //! Type of JSON value enum Type { kNullType = 0, //!< null kFalseType = 1, //!< false kTrueType = 2, //!< true kObjectType = 3, //!< object kArrayType = 4, //!< array kStringType = 5, //!< string kNumberType = 6 //!< number }; RAPIDJSON_NAMESPACE_END #endif // RAPIDJSON_RAPIDJSON_H_
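A minimal compile-time sketch (not part of the original header) of how a few of the configuration macros above compose. It assumes a C++11 compiler, the default macro definitions given in this file, the usual "rapidjson/rapidjson.h" include path, and the mainstream convention that unsigned is 32 bits wide:

#include <cstdint>
#include "rapidjson/rapidjson.h" // assumed include path

// RAPIDJSON_UINT64_C2 splices two 32-bit halves into one 64-bit constant.
static_assert(RAPIDJSON_UINT64_C2(0xDEADBEEF, 0x12345678) ==
              UINT64_C(0xDEADBEEF12345678),
              "high/low halves land in the expected bit positions");

// RAPIDJSON_ALIGN rounds a byte count up to the default 8-byte boundary.
static_assert(RAPIDJSON_ALIGN(1) == 8 &&
              RAPIDJSON_ALIGN(8) == 8 &&
              RAPIDJSON_ALIGN(9) == 16,
              "sizes round up to the next multiple of 8");

// The default SizeType stays 32-bit even on 64-bit platforms
// (assumes unsigned is a 32-bit type, as on mainstream LP64/ILP32 ABIs).
static_assert(sizeof(RAPIDJSON_NAMESPACE::SizeType) == 4,
              "default SizeType is a 32-bit unsigned type");

int main() { return 0; } // nothing to do at run time; all checks are static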
utf-8
1
BSD-3-clause
2000-2005 Kitware Inc. 28 Corporate Drive, Suite 204, Clifton Park, NY, 12065, USA.
foot-1.11.0/csi.c
#include "csi.h" #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #if defined(_DEBUG) #include <stdio.h> #endif #include <sys/timerfd.h> #define LOG_MODULE "csi" #define LOG_ENABLE_DBG 0 #include "log.h" #include "char32.h" #include "config.h" #include "debug.h" #include "grid.h" #include "selection.h" #include "sixel.h" #include "util.h" #include "version.h" #include "vt.h" #include "xmalloc.h" #include "xsnprintf.h" #define UNHANDLED() LOG_DBG("unhandled: %s", csi_as_string(term, final, -1)) #define UNHANDLED_SGR(idx) LOG_DBG("unhandled: %s", csi_as_string(term, 'm', idx)) static void sgr_reset(struct terminal *term) { memset(&term->vt.attrs, 0, sizeof(term->vt.attrs)); } static const char * csi_as_string(struct terminal *term, uint8_t final, int idx) { static char msg[1024]; int c = snprintf(msg, sizeof(msg), "CSI: "); for (size_t i = idx >= 0 ? idx : 0; i < (idx >= 0 ? idx + 1 : term->vt.params.idx); i++) { c += snprintf(&msg[c], sizeof(msg) - c, "%u", term->vt.params.v[i].value); for (size_t j = 0; j < term->vt.params.v[i].sub.idx; j++) { c += snprintf(&msg[c], sizeof(msg) - c, ":%u", term->vt.params.v[i].sub.value[j]); } c += snprintf(&msg[c], sizeof(msg) - c, "%s", i == term->vt.params.idx - 1 ? "" : ";"); } for (size_t i = 0; i < sizeof(term->vt.private); i++) { char value = (term->vt.private >> (i * 8)) & 0xff; if (value == 0) break; c += snprintf(&msg[c], sizeof(msg) - c, "%c", value); } snprintf(&msg[c], sizeof(msg) - c, "%c (%u parameters)", final, idx >= 0 ? 1 : term->vt.params.idx); return msg; } static void csi_sgr(struct terminal *term) { if (term->vt.params.idx == 0) { sgr_reset(term); return; } for (size_t i = 0; i < term->vt.params.idx; i++) { const int param = term->vt.params.v[i].value; switch (param) { case 0: sgr_reset(term); break; case 1: term->vt.attrs.bold = true; break; case 2: term->vt.attrs.dim = true; break; case 3: term->vt.attrs.italic = true; break; case 4: term->vt.attrs.underline = true; break; case 5: term->vt.attrs.blink = true; break; case 6: LOG_WARN("ignored: rapid blink"); break; case 7: term->vt.attrs.reverse = true; break; case 8: term->vt.attrs.conceal = true; break; case 9: term->vt.attrs.strikethrough = true; break; case 21: break; /* double-underline, not implemented */ case 22: term->vt.attrs.bold = term->vt.attrs.dim = false; break; case 23: term->vt.attrs.italic = false; break; case 24: term->vt.attrs.underline = false; break; case 25: term->vt.attrs.blink = false; break; case 26: break; /* rapid blink, ignored */ case 27: term->vt.attrs.reverse = false; break; case 28: term->vt.attrs.conceal = false; break; case 29: term->vt.attrs.strikethrough = false; break; /* Regular foreground colors */ case 30: case 31: case 32: case 33: case 34: case 35: case 36: case 37: term->vt.attrs.fg_src = COLOR_BASE16; term->vt.attrs.fg = param - 30; break; case 38: { /* Indexed: 38;5;<idx> */ if (term->vt.params.idx - i - 1 >= 2 && term->vt.params.v[i + 1].value == 5) { term->vt.attrs.fg_src = COLOR_BASE256; term->vt.attrs.fg = term->vt.params.v[i + 2].value; i += 2; } /* RGB: 38;2;<r>;<g>;<b> */ else if (term->vt.params.idx - i - 1 >= 4 && term->vt.params.v[i + 1].value == 2) { uint8_t r = term->vt.params.v[i + 2].value; uint8_t g = term->vt.params.v[i + 3].value; uint8_t b = term->vt.params.v[i + 4].value; term->vt.attrs.fg_src = COLOR_RGB; term->vt.attrs.fg = r << 16 | g << 8 | b; i += 4; } /* Indexed: 38:5:<idx> */ else if (term->vt.params.v[i].sub.idx >= 2 && term->vt.params.v[i].sub.value[0] == 5) { const struct vt_param 
*param = &term->vt.params.v[i]; term->vt.attrs.fg_src = COLOR_BASE256; term->vt.attrs.fg = param->sub.value[1]; } /* * RGB: 38:2:<color-space>:r:g:b[:ignored:tolerance:tolerance-color-space] * RGB: 38:2:r:g:b * * The second version is a "bastard" version - many * programs "forget" the color space ID * parameter... *sigh* */ else if (term->vt.params.v[i].sub.idx >= 4 && term->vt.params.v[i].sub.value[0] == 2) { const struct vt_param *param = &term->vt.params.v[i]; bool have_color_space_id = param->sub.idx >= 5; /* 0 - color space (ignored) */ int r_idx = 2 - !have_color_space_id; int g_idx = 3 - !have_color_space_id; int b_idx = 4 - !have_color_space_id; /* 5 - unused */ /* 6 - CS tolerance */ /* 7 - color space associated with tolerance */ uint8_t r = param->sub.value[r_idx]; uint8_t g = param->sub.value[g_idx]; uint8_t b = param->sub.value[b_idx]; term->vt.attrs.fg_src = COLOR_RGB; term->vt.attrs.fg = r << 16 | g << 8 | b; } /* Transparent: 38:1 */ /* CMY: 38:3:<color-space>:c:m:y[:tolerance:tolerance-color-space] */ /* CMYK: 38:4:<color-space>:c:m:y:k[:tolerance:tolerance-color-space] */ /* Unrecognized */ else UNHANDLED_SGR(i); break; } case 39: term->vt.attrs.fg_src = COLOR_DEFAULT; break; /* Regular background colors */ case 40: case 41: case 42: case 43: case 44: case 45: case 46: case 47: term->vt.attrs.bg_src = COLOR_BASE16; term->vt.attrs.bg = param - 40; break; case 48: { /* Indexed: 48;5;<idx> */ if (term->vt.params.idx - i - 1 >= 2 && term->vt.params.v[i + 1].value == 5) { term->vt.attrs.bg_src = COLOR_BASE256; term->vt.attrs.bg = term->vt.params.v[i + 2].value; i += 2; } /* RGB: 48;2;<r>;<g>;<b> */ else if (term->vt.params.idx - i - 1 >= 4 && term->vt.params.v[i + 1].value == 2) { uint8_t r = term->vt.params.v[i + 2].value; uint8_t g = term->vt.params.v[i + 3].value; uint8_t b = term->vt.params.v[i + 4].value; term->vt.attrs.bg_src = COLOR_RGB; term->vt.attrs.bg = r << 16 | g << 8 | b; i += 4; } /* Indexed: 48:5:<idx> */ else if (term->vt.params.v[i].sub.idx >= 2 && term->vt.params.v[i].sub.value[0] == 5) { const struct vt_param *param = &term->vt.params.v[i]; term->vt.attrs.bg_src = COLOR_BASE256; term->vt.attrs.bg = param->sub.value[1]; } /* * RGB: 48:2:<color-space>:r:g:b[:ignored:tolerance:tolerance-color-space] * RGB: 48:2:r:g:b * * The second version is a "bastard" version - many * programs "forget" the color space ID * parameter... 
*sigh* */ else if (term->vt.params.v[i].sub.idx >= 4 && term->vt.params.v[i].sub.value[0] == 2) { const struct vt_param *param = &term->vt.params.v[i]; bool have_color_space_id = param->sub.idx >= 5; /* 0 - color space (ignored) */ int r_idx = 2 - !have_color_space_id; int g_idx = 3 - !have_color_space_id; int b_idx = 4 - !have_color_space_id; /* 5 - unused */ /* 6 - CS tolerance */ /* 7 - color space associated with tolerance */ uint8_t r = param->sub.value[r_idx]; uint8_t g = param->sub.value[g_idx]; uint8_t b = param->sub.value[b_idx]; term->vt.attrs.bg_src = COLOR_RGB; term->vt.attrs.bg = r << 16 | g << 8 | b; } /* Transparent: 48:1 */ /* CMY: 48:3:<color-space>:c:m:y[:tolerance:tolerance-color-space] */ /* CMYK: 48:4:<color-space>:c:m:y:k[:tolerance:tolerance-color-space] */ else UNHANDLED_SGR(i); break; } case 49: term->vt.attrs.bg_src = COLOR_DEFAULT; break; /* Bright foreground colors */ case 90: case 91: case 92: case 93: case 94: case 95: case 96: case 97: term->vt.attrs.fg_src = COLOR_BASE16; term->vt.attrs.fg = param - 90 + 8; break; /* Bright background colors */ case 100: case 101: case 102: case 103: case 104: case 105: case 106: case 107: term->vt.attrs.bg_src = COLOR_BASE16; term->vt.attrs.bg = param - 100 + 8; break; default: UNHANDLED_SGR(i); break; } } } static void decset_decrst(struct terminal *term, unsigned param, bool enable) { #if defined(_DEBUG) /* For UNHANDLED() */ int UNUSED final = enable ? 'h' : 'l'; #endif /* Note: update XTSAVE/XTRESTORE if adding/removing things here */ switch (param) { case 1: /* DECCKM */ term->cursor_keys_mode = enable ? CURSOR_KEYS_APPLICATION : CURSOR_KEYS_NORMAL; break; case 3: /* DECCOLM */ if (enable) LOG_WARN("unimplemented: 132 column mode (DECCOLM)"); term_erase(term, 0, 0, term->rows - 1, term->cols - 1); term_cursor_home(term); break; case 4: /* DECSCLM - Smooth scroll */ if (enable) LOG_WARN("unimplemented: Smooth (Slow) Scroll (DECSCLM)"); break; case 5: /* DECSCNM */ term->reverse = enable; term_damage_all(term); term_damage_margins(term); break; case 6: { /* DECOM */ term->origin = enable ? 
ORIGIN_RELATIVE : ORIGIN_ABSOLUTE; term_cursor_home(term); break; } case 7: /* DECAWM */ term->auto_margin = enable; term->grid->cursor.lcf = false; break; case 9: if (enable) LOG_WARN("unimplemented: X10 mouse tracking mode"); #if 0 else if (term->mouse_tracking == MOUSE_X10) term->mouse_tracking = MOUSE_NONE; #endif break; case 12: term->cursor_blink.decset = enable; term_cursor_blink_update(term); break; case 25: /* DECTCEM */ term->hide_cursor = !enable; break; case 45: term->reverse_wrap = enable; break; case 80: term->sixel.scrolling = !enable; break; case 1000: if (enable) term->mouse_tracking = MOUSE_CLICK; else if (term->mouse_tracking == MOUSE_CLICK) term->mouse_tracking = MOUSE_NONE; term_xcursor_update(term); break; case 1001: if (enable) LOG_WARN("unimplemented: highlight mouse tracking"); break; case 1002: if (enable) term->mouse_tracking = MOUSE_DRAG; else if (term->mouse_tracking == MOUSE_DRAG) term->mouse_tracking = MOUSE_NONE; term_xcursor_update(term); break; case 1003: if (enable) term->mouse_tracking = MOUSE_MOTION; else if (term->mouse_tracking == MOUSE_MOTION) term->mouse_tracking = MOUSE_NONE; term_xcursor_update(term); break; case 1004: term->focus_events = enable; break; case 1005: if (enable) LOG_WARN("unimplemented: mouse reporting mode: UTF-8"); #if 0 else if (term->mouse_reporting == MOUSE_UTF8) term->mouse_reporting = MOUSE_NONE; #endif break; case 1006: if (enable) term->mouse_reporting = MOUSE_SGR; else if (term->mouse_reporting == MOUSE_SGR) term->mouse_reporting = MOUSE_NORMAL; break; case 1007: term->alt_scrolling = enable; break; case 1015: if (enable) term->mouse_reporting = MOUSE_URXVT; else if (term->mouse_reporting == MOUSE_URXVT) term->mouse_reporting = MOUSE_NORMAL; break; case 1016: if (enable) term->mouse_reporting = MOUSE_SGR_PIXELS; else if (term->mouse_reporting == MOUSE_SGR_PIXELS) term->mouse_reporting = MOUSE_NORMAL; break; case 1034: /* smm */ LOG_DBG("%s 8-bit meta mode", enable ? "enabling" : "disabling"); term->meta.eight_bit = enable; break; case 1035: /* numLock */ LOG_DBG("%s Num Lock modifier", enable ? "enabling" : "disabling"); term->num_lock_modifier = enable; break; case 1036: /* metaSendsEscape */ LOG_DBG("%s meta-sends-escape", enable ? 
"enabling" : "disabling"); term->meta.esc_prefix = enable; break; case 1042: term->bell_action_enabled = enable; break; #if 0 case 1043: LOG_WARN("unimplemented: raise window on ctrl-g"); break; #endif case 1048: if (enable) term_save_cursor(term); else term_restore_cursor(term, &term->grid->saved_cursor); break; case 47: case 1047: case 1049: if (enable && term->grid != &term->alt) { selection_cancel(term); if (param == 1049) term_save_cursor(term); term->grid = &term->alt; /* Cursor retains its position from the normal grid */ term_cursor_to( term, min(term->normal.cursor.point.row, term->rows - 1), min(term->normal.cursor.point.col, term->cols - 1)); tll_free(term->normal.scroll_damage); term_erase(term, 0, 0, term->rows - 1, term->cols - 1); } else if (!enable && term->grid == &term->alt) { selection_cancel(term); term->grid = &term->normal; /* Cursor retains its position from the alt grid */ term_cursor_to( term, min(term->alt.cursor.point.row, term->rows - 1), min(term->alt.cursor.point.col, term->cols - 1)); if (param == 1049) term_restore_cursor(term, &term->grid->saved_cursor); /* Delete all sixel images on the alt screen */ tll_foreach(term->alt.sixel_images, it) { sixel_destroy(&it->item); tll_remove(term->alt.sixel_images, it); } tll_free(term->alt.scroll_damage); term_damage_all(term); } term_update_ascii_printer(term); break; case 1070: term->sixel.use_private_palette = enable; break; case 2004: term->bracketed_paste = enable; break; case 2026: if (enable) term_enable_app_sync_updates(term); else term_disable_app_sync_updates(term); break; case 8452: term->sixel.cursor_right_of_graphics = enable; break; case 27127: term->modify_escape_key = enable; break; case 737769: if (enable) term_ime_enable(term); else term_ime_disable(term); break; default: UNHANDLED(); break; } } static void decset(struct terminal *term, unsigned param) { decset_decrst(term, param, true); } static void decrst(struct terminal *term, unsigned param) { decset_decrst(term, param, false); } static bool decrqm(const struct terminal *term, unsigned param, bool *enabled) { switch (param) { case 1: *enabled = term->cursor_keys_mode == CURSOR_KEYS_APPLICATION; return true; case 3: *enabled = false; return true; case 4: *enabled = false; return true; case 5: *enabled = term->reverse; return true; case 6: *enabled = term->origin; return true; case 7: *enabled = term->auto_margin; return true; case 9: *enabled = false; /* term->mouse_tracking == MOUSE_X10; */ return true; case 12: *enabled = term->cursor_blink.decset; return true; case 25: *enabled = !term->hide_cursor; return true; case 45: *enabled = term->reverse_wrap; return true; case 80: *enabled = !term->sixel.scrolling; return true; case 1000: *enabled = term->mouse_tracking == MOUSE_CLICK; return true; case 1001: *enabled = false; return true; case 1002: *enabled = term->mouse_tracking == MOUSE_DRAG; return true; case 1003: *enabled = term->mouse_tracking == MOUSE_MOTION; return true; case 1004: *enabled = term->focus_events; return true; case 1005: *enabled = false; /* term->mouse_reporting == MOUSE_UTF8; */ return true; case 1006: *enabled = term->mouse_reporting == MOUSE_SGR; return true; case 1007: *enabled = term->alt_scrolling; return true; case 1015: *enabled = term->mouse_reporting == MOUSE_URXVT; return true; case 1016: *enabled = term->mouse_reporting == MOUSE_SGR_PIXELS; return true; case 1034: *enabled = term->meta.eight_bit; return true; case 1035: *enabled = term->num_lock_modifier; return true; case 1036: *enabled = term->meta.esc_prefix; 
return true; case 1042: *enabled = term->bell_action_enabled; return true; case 47: /* FALLTHROUGH */ case 1047: /* FALLTHROUGH */ case 1049: *enabled = term->grid == &term->alt; return true; case 1070: *enabled = term->sixel.use_private_palette; return true; case 2004: *enabled = term->bracketed_paste; return true; case 2026: *enabled = term->render.app_sync_updates.enabled; return true; case 8452: *enabled = term->sixel.cursor_right_of_graphics; return true; case 27127: *enabled = term->modify_escape_key; return true; case 737769: *enabled = term_ime_is_enabled(term); return true; } return false; } static void xtsave(struct terminal *term, unsigned param) { switch (param) { case 1: term->xtsave.application_cursor_keys = term->cursor_keys_mode == CURSOR_KEYS_APPLICATION; break; case 3: break; case 4: break; case 5: term->xtsave.reverse = term->reverse; break; case 6: term->xtsave.origin = term->origin; break; case 7: term->xtsave.auto_margin = term->auto_margin; break; case 9: /* term->xtsave.mouse_x10 = term->mouse_tracking == MOUSE_X10; */ break; case 12: term->xtsave.cursor_blink = term->cursor_blink.decset; break; case 25: term->xtsave.show_cursor = !term->hide_cursor; break; case 45: term->xtsave.reverse_wrap = term->reverse_wrap; break; case 47: term->xtsave.alt_screen = term->grid == &term->alt; break; case 80: term->xtsave.sixel_display_mode = !term->sixel.scrolling; break; case 1000: term->xtsave.mouse_click = term->mouse_tracking == MOUSE_CLICK; break; case 1001: break; case 1002: term->xtsave.mouse_drag = term->mouse_tracking == MOUSE_DRAG; break; case 1003: term->xtsave.mouse_motion = term->mouse_tracking == MOUSE_MOTION; break; case 1004: term->xtsave.focus_events = term->focus_events; break; case 1005: /* term->xtsave.mouse_utf8 = term->mouse_reporting == MOUSE_UTF8; */ break; case 1006: term->xtsave.mouse_sgr = term->mouse_reporting == MOUSE_SGR; break; case 1007: term->xtsave.alt_scrolling = term->alt_scrolling; break; case 1015: term->xtsave.mouse_urxvt = term->mouse_reporting == MOUSE_URXVT; break; case 1016: term->xtsave.mouse_sgr_pixels = term->mouse_reporting == MOUSE_SGR_PIXELS; break; case 1034: term->xtsave.meta_eight_bit = term->meta.eight_bit; break; case 1035: term->xtsave.num_lock_modifier = term->num_lock_modifier; break; case 1036: term->xtsave.meta_esc_prefix = term->meta.esc_prefix; break; case 1042: term->xtsave.bell_action_enabled = term->bell_action_enabled; break; case 1047: term->xtsave.alt_screen = term->grid == &term->alt; break; case 1048: term_save_cursor(term); break; case 1049: term->xtsave.alt_screen = term->grid == &term->alt; break; case 1070: term->xtsave.sixel_private_palette = term->sixel.use_private_palette; break; case 2004: term->xtsave.bracketed_paste = term->bracketed_paste; break; case 2026: term->xtsave.app_sync_updates = term->render.app_sync_updates.enabled; break; case 8452: term->xtsave.sixel_cursor_right_of_graphics = term->sixel.cursor_right_of_graphics; break; case 27127: term->xtsave.modify_escape_key = term->modify_escape_key; break; case 737769: term->xtsave.ime = term_ime_is_enabled(term); break; } } static void xtrestore(struct terminal *term, unsigned param) { bool enable; switch (param) { case 1: enable = term->xtsave.application_cursor_keys; break; case 3: return; case 4: return; case 5: enable = term->xtsave.reverse; break; case 6: enable = term->xtsave.origin; break; case 7: enable = term->xtsave.auto_margin; break; case 9: /* enable = term->xtsave.mouse_x10; break; */ return; case 12: enable =
term->xtsave.cursor_blink; break; case 25: enable = term->xtsave.show_cursor; break; case 45: enable = term->xtsave.reverse_wrap; break; case 47: enable = term->xtsave.alt_screen; break; case 80: enable = term->xtsave.sixel_display_mode; break; case 1000: enable = term->xtsave.mouse_click; break; case 1001: return; case 1002: enable = term->xtsave.mouse_drag; break; case 1003: enable = term->xtsave.mouse_motion; break; case 1004: enable = term->xtsave.focus_events; break; case 1005: /* enable = term->xtsave.mouse_utf8; break; */ return; case 1006: enable = term->xtsave.mouse_sgr; break; case 1007: enable = term->xtsave.alt_scrolling; break; case 1015: enable = term->xtsave.mouse_urxvt; break; case 1016: enable = term->xtsave.mouse_sgr_pixels; break; case 1034: enable = term->xtsave.meta_eight_bit; break; case 1035: enable = term->xtsave.num_lock_modifier; break; case 1036: enable = term->xtsave.meta_esc_prefix; break; case 1042: enable = term->xtsave.bell_action_enabled; break; case 1047: enable = term->xtsave.alt_screen; break; case 1048: enable = true; break; case 1049: enable = term->xtsave.alt_screen; break; case 1070: enable = term->xtsave.sixel_private_palette; break; case 2004: enable = term->xtsave.bracketed_paste; break; case 2026: enable = term->xtsave.app_sync_updates; break; case 8452: enable = term->xtsave.sixel_cursor_right_of_graphics; break; case 27127: enable = term->xtsave.modify_escape_key; break; case 737769: enable = term->xtsave.ime; break; default: return; } decset_decrst(term, param, enable); } void csi_dispatch(struct terminal *term, uint8_t final) { LOG_DBG("%s (%08x)", csi_as_string(term, final, -1), term->vt.private); switch (term->vt.private) { case 0: { switch (final) { case 'b': if (term->vt.last_printed != 0) { /* * Note: we never reset 'last-printed'. According to * ECMA-48, the behaviour is undefined if REP was * _not_ preceded by a graphical character. */ int count = vt_param_get(term, 0, 1); LOG_DBG("REP: '%lc' %d times", (wint_t)term->vt.last_printed, count); const int width = c32width(term->vt.last_printed); if (width > 0) { for (int i = 0; i < count; i++) term_print(term, term->vt.last_printed, width); } } break; case 'c': { if (vt_param_get(term, 0, 0) != 0) { UNHANDLED(); break; } /* Send Device Attributes (Primary DA) */ /* * Responses: * - CSI?1;2c vt100 with advanced video option * - CSI?1;0c vt101 with no options * - CSI?6c vt102 * - CSI?62;<Ps>c vt220 * - CSI?63;<Ps>c vt320 * - CSI?64;<Ps>c vt420 * * Ps (response may contain multiple): * - 1 132 columns * - 2 Printer. * - 3 ReGIS graphics. * - 4 Sixel graphics. * - 6 Selective erase. * - 8 User-defined keys. * - 9 National Replacement Character sets. * - 15 Technical characters. * - 16 Locator port. * - 17 Terminal state interrogation. * - 18 User windows. * - 21 Horizontal scrolling. * - 22 ANSI color, e.g., VT525. * - 28 Rectangular editing. * - 29 ANSI text locator (i.e., DEC Locator mode). * * Note: we report ourselves as a VT220, mainly to be able * to pass parameters, to indicate we support sixel, and * ANSI colors. * * The VT level must be synchronized with the secondary DA * response. * * Note: tertiary DA responds with "FOOT". 
*/ static const char reply[] = "\033[?62;4;22c"; term_to_slave(term, reply, sizeof(reply) - 1); break; } case 'd': { /* VPA - vertical line position absolute */ int rel_row = vt_param_get(term, 0, 1) - 1; int row = term_row_rel_to_abs(term, rel_row); term_cursor_to(term, row, term->grid->cursor.point.col); break; } case 'm': csi_sgr(term); break; case 'A': term_cursor_up(term, vt_param_get(term, 0, 1)); break; case 'e': case 'B': term_cursor_down(term, vt_param_get(term, 0, 1)); break; case 'a': case 'C': term_cursor_right(term, vt_param_get(term, 0, 1)); break; case 'D': term_cursor_left(term, vt_param_get(term, 0, 1)); break; case 'E': /* CNL - Cursor Next Line */ term_cursor_down(term, vt_param_get(term, 0, 1)); term_cursor_left(term, term->grid->cursor.point.col); break; case 'F': /* CPL - Cursor Previous Line */ term_cursor_up(term, vt_param_get(term, 0, 1)); term_cursor_left(term, term->grid->cursor.point.col); break; case 'g': { int param = vt_param_get(term, 0, 0); switch (param) { case 0: /* Clear tab stop at *current* column */ tll_foreach(term->tab_stops, it) { if (it->item == term->grid->cursor.point.col) tll_remove(term->tab_stops, it); else if (it->item > term->grid->cursor.point.col) break; } break; case 3: /* Clear *all* tabs */ tll_free(term->tab_stops); break; default: UNHANDLED(); break; } break; } case '`': case 'G': { /* Cursor horizontal absolute */ int col = min(vt_param_get(term, 0, 1), term->cols) - 1; term_cursor_to(term, term->grid->cursor.point.row, col); break; } case 'f': case 'H': { /* Move cursor */ int rel_row = vt_param_get(term, 0, 1) - 1; int row = term_row_rel_to_abs(term, rel_row); int col = min(vt_param_get(term, 1, 1), term->cols) - 1; term_cursor_to(term, row, col); break; } case 'J': { /* Erase screen */ int param = vt_param_get(term, 0, 0); switch (param) { case 0: { /* From cursor to end of screen */ const struct coord *cursor = &term->grid->cursor.point; term_erase( term, cursor->row, cursor->col, term->rows - 1, term->cols - 1); term->grid->cursor.lcf = false; break; } case 1: { /* From start of screen to cursor */ const struct coord *cursor = &term->grid->cursor.point; term_erase(term, 0, 0, cursor->row, cursor->col); term->grid->cursor.lcf = false; break; } case 2: /* Erase entire screen */ term_erase(term, 0, 0, term->rows - 1, term->cols - 1); term->grid->cursor.lcf = false; break; case 3: { /* Erase scrollback */ term_erase_scrollback(term); break; } default: UNHANDLED(); break; } break; } case 'K': { /* Erase line */ int param = vt_param_get(term, 0, 0); switch (param) { case 0: { /* From cursor to end of line */ const struct coord *cursor = &term->grid->cursor.point; term_erase( term, cursor->row, cursor->col, cursor->row, term->cols - 1); term->grid->cursor.lcf = false; break; } case 1: { /* From start of line to cursor */ const struct coord *cursor = &term->grid->cursor.point; term_erase(term, cursor->row, 0, cursor->row, cursor->col); term->grid->cursor.lcf = false; break; } case 2: { /* Entire line */ const struct coord *cursor = &term->grid->cursor.point; term_erase(term, cursor->row, 0, cursor->row, term->cols - 1); term->grid->cursor.lcf = false; break; } default: UNHANDLED(); break; } break; } case 'L': { if (term->grid->cursor.point.row < term->scroll_region.start || term->grid->cursor.point.row >= term->scroll_region.end) break; int count = min( vt_param_get(term, 0, 1), term->scroll_region.end - term->grid->cursor.point.row); term_scroll_reverse_partial( term, (struct scroll_region){ .start = term->grid->cursor.point.row, .end 
= term->scroll_region.end}, count); term->grid->cursor.lcf = false; term->grid->cursor.point.col = 0; break; } case 'M': { if (term->grid->cursor.point.row < term->scroll_region.start || term->grid->cursor.point.row >= term->scroll_region.end) break; int count = min( vt_param_get(term, 0, 1), term->scroll_region.end - term->grid->cursor.point.row); term_scroll_partial( term, (struct scroll_region){ .start = term->grid->cursor.point.row, .end = term->scroll_region.end}, count); term->grid->cursor.lcf = false; term->grid->cursor.point.col = 0; break; } case 'P': { /* DCH: Delete character(s) */ /* Number of characters to delete */ int count = min( vt_param_get(term, 0, 1), term->cols - term->grid->cursor.point.col); /* Number of characters left after deletion (on current line) */ int remaining = term->cols - (term->grid->cursor.point.col + count); /* 'Delete' characters by moving the remaining ones */ memmove(&term->grid->cur_row->cells[term->grid->cursor.point.col], &term->grid->cur_row->cells[term->grid->cursor.point.col + count], remaining * sizeof(term->grid->cur_row->cells[0])); for (size_t c = 0; c < remaining; c++) term->grid->cur_row->cells[term->grid->cursor.point.col + c].attrs.clean = 0; term->grid->cur_row->dirty = true; /* Erase the remainder of the line */ const struct coord *cursor = &term->grid->cursor.point; term_erase( term, cursor->row, cursor->col + remaining, cursor->row, term->cols - 1); term->grid->cursor.lcf = false; break; } case '@': { /* ICH: insert character(s) */ /* Number of characters to insert */ int count = min( vt_param_get(term, 0, 1), term->cols - term->grid->cursor.point.col); /* Characters to move */ int remaining = term->cols - (term->grid->cursor.point.col + count); /* Push existing characters */ memmove(&term->grid->cur_row->cells[term->grid->cursor.point.col + count], &term->grid->cur_row->cells[term->grid->cursor.point.col], remaining * sizeof(term->grid->cur_row->cells[0])); for (size_t c = 0; c < remaining; c++) term->grid->cur_row->cells[term->grid->cursor.point.col + count + c].attrs.clean = 0; term->grid->cur_row->dirty = true; /* Erase (insert space characters) */ const struct coord *cursor = &term->grid->cursor.point; term_erase( term, cursor->row, cursor->col, cursor->row, cursor->col + count - 1); term->grid->cursor.lcf = false; break; } case 'S': { const struct scroll_region *r = &term->scroll_region; int amount = min(vt_param_get(term, 0, 1), r->end - r->start); term_scroll(term, amount); break; } case 'T': { const struct scroll_region *r = &term->scroll_region; int amount = min(vt_param_get(term, 0, 1), r->end - r->start); term_scroll_reverse(term, amount); break; } case 'X': { /* Erase chars */ int count = min( vt_param_get(term, 0, 1), term->cols - term->grid->cursor.point.col); const struct coord *cursor = &term->grid->cursor.point; term_erase( term, cursor->row, cursor->col, cursor->row, cursor->col + count - 1); term->grid->cursor.lcf = false; break; } case 'I': { /* CHT - Tab Forward (param is number of tab stops to move through) */ for (int i = 0; i < vt_param_get(term, 0, 1); i++) { int new_col = term->cols - 1; tll_foreach(term->tab_stops, it) { if (it->item > term->grid->cursor.point.col) { new_col = it->item; break; } } xassert(new_col >= term->grid->cursor.point.col); bool lcf = term->grid->cursor.lcf; term_cursor_right(term, new_col - term->grid->cursor.point.col); term->grid->cursor.lcf = lcf; } break; } case 'Z': /* CBT - Back tab (param is number of tab stops to move back through) */ for (int i = 0; i < vt_param_get(term, 
0, 1); i++) { int new_col = 0; tll_rforeach(term->tab_stops, it) { if (it->item < term->grid->cursor.point.col) { new_col = it->item; break; } } xassert(term->grid->cursor.point.col >= new_col); term_cursor_left(term, term->grid->cursor.point.col - new_col); } break; case 'h': /* Set mode */ switch (vt_param_get(term, 0, 0)) { case 2: /* Keyboard Action Mode - AM */ LOG_WARN("unimplemented: keyboard action mode (AM)"); break; case 4: /* Insert Mode - IRM */ term->insert_mode = true; term_update_ascii_printer(term); break; case 12: /* Send/receive Mode - SRM */ LOG_WARN("unimplemented: send/receive mode (SRM)"); break; case 20: /* Automatic Newline Mode - LNM */ /* TODO: would be easy to implemented; when active * term_linefeed() would _also_ do a * term_carriage_return() */ LOG_WARN("unimplemented: automatic newline mode (LNM)"); break; } break; case 'l': /* Reset mode */ switch (vt_param_get(term, 0, 0)) { case 4: /* Insert Mode - IRM */ term->insert_mode = false; term_update_ascii_printer(term); break; case 2: /* Keyboard Action Mode - AM */ case 12: /* Send/receive Mode - SRM */ case 20: /* Automatic Newline Mode - LNM */ break; } break; case 'r': { int start = vt_param_get(term, 0, 1); int end = min(vt_param_get(term, 1, term->rows), term->rows); if (end > start) { /* 1-based */ term->scroll_region.start = start - 1; term->scroll_region.end = end; term_cursor_home(term); LOG_DBG("scroll region: %d-%d", term->scroll_region.start, term->scroll_region.end); } break; } case 's': term_save_cursor(term); break; case 'u': term_restore_cursor(term, &term->grid->saved_cursor); break; case 't': { /* * Window operations */ const unsigned param = vt_param_get(term, 0, 0); switch (param) { case 1: LOG_WARN("unimplemented: de-iconify"); break; case 2: LOG_WARN("unimplemented: iconify"); break; case 3: LOG_WARN("unimplemented: move window to pixel position"); break; case 4: LOG_WARN("unimplemented: resize window in pixels"); break; case 5: LOG_WARN("unimplemented: raise window to front of stack"); break; case 6: LOG_WARN("unimplemented: raise window to back of stack"); break; case 7: LOG_WARN("unimplemented: refresh window"); break; case 8: LOG_WARN("unimplemented: resize window in chars"); break; case 9: LOG_WARN("unimplemented: maximize/unmaximize window"); break; case 10: LOG_WARN("unimplemented: to/from full screen"); break; case 20: LOG_WARN("unimplemented: report icon label"); break; case 21: LOG_WARN("unimplemented: report window title"); break; case 24: LOG_WARN("unimplemented: resize window (DECSLPP)"); break; case 11: /* report if window is iconified */ /* We don't know - always report *not* iconified */ /* 1=not iconified, 2=iconified */ term_to_slave(term, "\033[1t", 4); break; case 13: { /* report window position */ /* We don't know our position - always report (0,0) */ static const char reply[] = "\033[3;0;0t"; switch (vt_param_get(term, 1, 0)) { case 0: /* window position */ case 2: /* text area position */ term_to_slave(term, reply, sizeof(reply) - 1); break; default: UNHANDLED(); break; } break; } case 14: { /* report window size in pixels */ int width = -1; int height = -1; switch (vt_param_get(term, 1, 0)) { case 0: /* text area size */ width = term->width - term->margins.left - term->margins.right; height = term->height - term->margins.top - term->margins.bottom; break; case 2: /* window size */ width = term->width; height = term->height; break; default: UNHANDLED(); break; } if (width >= 0 && height >= 0) { char reply[64]; size_t n = xsnprintf(reply, sizeof(reply), 
"\033[4;%d;%dt", height / term->scale, width / term->scale); term_to_slave(term, reply, n); } break; } case 15: /* report screen size in pixels */ tll_foreach(term->window->on_outputs, it) { char reply[64]; size_t n = xsnprintf(reply, sizeof(reply), "\033[5;%d;%dt", it->item->dim.px_scaled.height, it->item->dim.px_scaled.width); term_to_slave(term, reply, n); break; } if (tll_length(term->window->on_outputs) == 0) term_to_slave(term, "\033[5;0;0t", 8); break; case 16: { /* report cell size in pixels */ char reply[64]; size_t n = xsnprintf(reply, sizeof(reply), "\033[6;%d;%dt", term->cell_height / term->scale, term->cell_width / term->scale); term_to_slave(term, reply, n); break; } case 18: { /* text area size in chars */ char reply[64]; size_t n = xsnprintf(reply, sizeof(reply), "\033[8;%d;%dt", term->rows, term->cols); term_to_slave(term, reply, n); break; } case 19: { /* report screen size in chars */ tll_foreach(term->window->on_outputs, it) { char reply[64]; size_t n = xsnprintf(reply, sizeof(reply), "\033[9;%d;%dt", it->item->dim.px_real.height / term->cell_height / term->scale, it->item->dim.px_real.width / term->cell_width / term->scale); term_to_slave(term, reply, n); break; } if (tll_length(term->window->on_outputs) == 0) term_to_slave(term, "\033[9;0;0t", 8); break; } case 22: { /* push window title */ /* 0 - icon + title, 1 - icon, 2 - title */ unsigned what = vt_param_get(term, 1, 0); if (what == 0 || what == 2) { tll_push_back( term->window_title_stack, xstrdup(term->window_title)); } break; } case 23: { /* pop window title */ /* 0 - icon + title, 1 - icon, 2 - title */ unsigned what = vt_param_get(term, 1, 0); if (what == 0 || what == 2) { if (tll_length(term->window_title_stack) > 0) { char *title = tll_pop_back(term->window_title_stack); term_set_window_title(term, title); free(title); } } break; } case 1001: { } default: LOG_DBG("ignoring %s", csi_as_string(term, final, -1)); break; } break; } case 'n': { if (term->vt.params.idx > 0) { int param = vt_param_get(term, 0, 0); switch (param) { case 5: /* Query device status */ term_to_slave(term, "\x1b[0n", 4); /* "Device OK" */ break; case 6: { /* u7 - cursor position query */ int row = term->origin == ORIGIN_ABSOLUTE ? 
term->grid->cursor.point.row : term->grid->cursor.point.row - term->scroll_region.start; /* TODO: we use 0-based position, while the xterm * terminfo says the receiver of the reply should * decrement, hence we must add 1 */ char reply[64]; size_t n = xsnprintf(reply, sizeof(reply), "\x1b[%d;%dR", row + 1, term->grid->cursor.point.col + 1); term_to_slave(term, reply, n); break; } default: UNHANDLED(); break; } } else UNHANDLED(); break; } default: UNHANDLED(); break; } break; /* private[0] == 0 */ } case '?': { switch (final) { case 'h': /* DECSET - DEC private mode set */ for (size_t i = 0; i < term->vt.params.idx; i++) decset(term, term->vt.params.v[i].value); break; case 'l': /* DECRST - DEC private mode reset */ for (size_t i = 0; i < term->vt.params.idx; i++) decrst(term, term->vt.params.v[i].value); break; case 's': for (size_t i = 0; i < term->vt.params.idx; i++) xtsave(term, term->vt.params.v[i].value); break; case 'r': for (size_t i = 0; i < term->vt.params.idx; i++) xtrestore(term, term->vt.params.v[i].value); break; case 'S': { unsigned target = vt_param_get(term, 0, 0); unsigned operation = vt_param_get(term, 1, 0); switch (target) { case 1: switch (operation) { case 1: sixel_colors_report_current(term); break; case 2: sixel_colors_reset(term); break; case 3: sixel_colors_set(term, vt_param_get(term, 2, 0)); break; case 4: sixel_colors_report_max(term); break; default: UNHANDLED(); break; } break; case 2: switch (operation) { case 1: sixel_geometry_report_current(term); break; case 2: sixel_geometry_reset(term); break; case 3: sixel_geometry_set(term, vt_param_get(term, 2, 0), vt_param_get(term, 3, 0)); break; case 4: sixel_geometry_report_max(term); break; default: UNHANDLED(); break; } break; default: UNHANDLED(); break; } break; } case 'u': { enum kitty_kbd_flags flags = term->grid->kitty_kbd.flags[term->grid->kitty_kbd.idx]; char reply[8]; int chars = snprintf(reply, sizeof(reply), "\033[?%uu", flags); term_to_slave(term, reply, chars); break; } default: UNHANDLED(); break; } break; /* private[0] == '?' */ } case '>': { switch (final) { case 'c': /* Send Device Attributes (Secondary DA) */ if (vt_param_get(term, 0, 0) != 0) { UNHANDLED(); break; } /* * Param 1 - terminal type: * 0 - vt100 * 1 - vt220 * 2 - vt240 * 18 - vt330 * 19 - vt340 * 24 - vt320 * 41 - vt420 * 61 - vt510 * 64 - vt520 * 65 - vt525 * * Param 2 - firmware version * xterm uses its version number. We use an xterm * version number too, since e.g. Emacs uses this to * determine level of support. * * We report ourselves as a VT220. This must be * synchronized with the primary DA response. * * Note: tertiary DA replies with "FOOT". */ static_assert(FOOT_MAJOR < 100, "Major version must not exceed 99"); static_assert(FOOT_MINOR < 100, "Minor version must not exceed 99"); static_assert(FOOT_PATCH < 100, "Patch version must not exceed 99"); char reply[64]; size_t n = xsnprintf(reply, sizeof(reply), "\033[>1;%02u%02u%02u;0c", FOOT_MAJOR, FOOT_MINOR, FOOT_PATCH); term_to_slave(term, reply, n); break; case 'm': if (term->vt.params.idx == 0) { /* Reset all */ } else { int resource = vt_param_get(term, 0, 0); int value = vt_param_get(term, 1, -1); switch (resource) { case 0: /* modifyKeyboard */ break; case 1: /* modifyCursorKeys */ case 2: /* modifyFunctionKeys */ /* Ignored, we always report modifiers */ if (value != 2 && value != -1) { LOG_WARN( "unimplemented: %s = %d", resource == 1 ? "modifyCursorKeys" : resource == 2 ? 
"modifyFunctionKeys" : "<invalid>", value); } break; case 4: /* modifyOtherKeys */ term->modify_other_keys_2 = value == 2; LOG_DBG("modifyOtherKeys=%d", value); break; default: LOG_WARN("invalid resource %d in %s", resource, csi_as_string(term, final, -1)); break; } } break; /* final == 'm' */ case 'u': { int flags = vt_param_get(term, 0, 0) & KITTY_KBD_SUPPORTED; struct grid *grid = term->grid; uint8_t idx = grid->kitty_kbd.idx; if (idx + 1 >= ALEN(grid->kitty_kbd.flags)) { /* Stack full, evict oldest by wrapping around */ idx = 0; } else idx++; grid->kitty_kbd.flags[idx] = flags; grid->kitty_kbd.idx = idx; LOG_DBG("kitty kbd: pushed new flags: 0x%03x", flags); break; } case 'q': { /* XTVERSION */ if (vt_param_get(term, 0, 0) != 0) { UNHANDLED(); break; } char reply[64]; size_t n = xsnprintf( reply, sizeof(reply), "\033P>|foot(%u.%u.%u%s%s)\033\\", FOOT_MAJOR, FOOT_MINOR, FOOT_PATCH, FOOT_EXTRA[0] != '\0' ? "-" : "", FOOT_EXTRA); term_to_slave(term, reply, n); break; } default: UNHANDLED(); break; } break; /* private[0] == '>' */ } case '<': { switch (final) { case 'u': { int count = vt_param_get(term, 0, 1); LOG_DBG("kitty kbd: popping %d levels of flags", count); struct grid *grid = term->grid; uint8_t idx = grid->kitty_kbd.idx; for (int i = 0; i < count; i++) { /* Reset flags. This ensures we get flags=0 when * over-popping */ grid->kitty_kbd.flags[idx] = 0; if (idx == 0) idx = ALEN(grid->kitty_kbd.flags) - 1; else idx--; } grid->kitty_kbd.idx = idx; LOG_DBG("kitty kbd: flags after pop: 0x%03x", term->grid->kitty_kbd.flags[idx]); break; } } break; /* private[0] == ‘<’ */ } case ' ': { switch (final) { case 'q': { int param = vt_param_get(term, 0, 0); switch (param) { case 0: /* blinking block, but we use it to reset to configured default */ term->cursor_style = term->conf->cursor.style; term->cursor_blink.deccsusr = term->conf->cursor.blink; term_cursor_blink_update(term); break; case 1: /* blinking block */ case 2: /* steady block */ term->cursor_style = CURSOR_BLOCK; break; case 3: /* blinking underline */ case 4: /* steady underline */ term->cursor_style = CURSOR_UNDERLINE; break; case 5: /* blinking bar */ case 6: /* steady bar */ term->cursor_style = CURSOR_BEAM; break; default: UNHANDLED(); break; } if (param > 0 && param <= 6) { term->cursor_blink.deccsusr = param & 1; term_cursor_blink_update(term); } break; } default: UNHANDLED(); break; } break; /* private[0] == ' ' */ } case '!': { if (final == 'p') { term_reset(term, false); break; } UNHANDLED(); break; /* private[0] == '!' */ } case '=': { switch (final) { case 'c': if (vt_param_get(term, 0, 0) != 0) { UNHANDLED(); break; } /* * Send Device Attributes (Tertiary DA) * * Reply format is "DCS ! | DDDDDDDD ST" * * D..D is the unit ID of the terminal, consisting of four * hexadecimal pairs. The first pair represents the * manufacturing site code. This code can be any * hexadecimal value from 00 through FF. 
*/ term_to_slave(term, "\033P!|464f4f54\033\\", 14); /* FOOT */ break; case 'u': { int flag_set = vt_param_get(term, 0, 0) & KITTY_KBD_SUPPORTED; int mode = vt_param_get(term, 1, 1); struct grid *grid = term->grid; uint8_t idx = grid->kitty_kbd.idx; switch (mode) { case 1: /* set bits are set, unset bits are reset */ grid->kitty_kbd.flags[idx] = flag_set; break; case 2: /* set bits are set, unset bits are left unchanged */ grid->kitty_kbd.flags[idx] |= flag_set; break; case 3: /* set bits are reset, unset bits are left unchanged */ grid->kitty_kbd.flags[idx] &= ~flag_set; break; default: UNHANDLED(); break; } LOG_DBG("kitty kbd: flags after update: 0x%03x", grid->kitty_kbd.flags[idx]); break; } default: UNHANDLED(); break; } break; /* private[0] == '=' */ } case 0x243f: /* ?$ */ switch (final) { case 'p': { unsigned param = vt_param_get(term, 0, 0); /* * Request DEC private mode (DECRQM) * Reply: * 0 - not recognized * 1 - set * 2 - reset * 3 - permanently set * 4 - permanently reset */ bool enabled; unsigned value; if (decrqm(term, param, &enabled)) value = enabled ? 1 : 2; else value = 0; char reply[32]; size_t n = xsnprintf(reply, sizeof(reply), "\033[?%u;%u$y", param, value); term_to_slave(term, reply, n); break; } default: UNHANDLED(); break; } break; /* private[0] == '?' && private[1] == '$' */ default: UNHANDLED(); break; } }
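A minimal standalone sketch (mine, not part of foot) exercising the two truecolor SGR encodings that csi_sgr() above accepts: the legacy semicolon form 38;2;r;g;b and the ITU-T T.416 colon sub-parameter form 38:2::r:g:b, where the empty sub-parameter is the (ignored) color-space ID. Run it inside any terminal that implements the same rules and both lines should render in the same orange:

#include <stdio.h>

int main(void)
{
    const unsigned r = 255, g = 128, b = 0;

    /* Legacy semicolon-separated parameters: the "38;2;<r>;<g>;<b>"
     * branch in csi_sgr(). */
    printf("\033[38;2;%u;%u;%um%s\033[0m\n", r, g, b, "semicolon form");

    /* Colon-separated sub-parameters with an empty color-space ID:
     * the branch guarded by "have_color_space_id" above. */
    printf("\033[38:2::%u:%u:%um%s\033[0m\n", r, g, b, "colon form");

    return 0;
}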
utf-8
1
Expat
Copyright (c) 2018 Daniel Eklöf
phonon-4.11.1/phonon/abstractmediastream.h
/* This file is part of the KDE project Copyright (C) 2007 Matthias Kretz <kretz@kde.org> This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) version 3, or any later version accepted by the membership of KDE e.V. (or its successor approved by the membership of KDE e.V.), Nokia Corporation (or its successors, if any) and the KDE Free Qt Foundation, which shall act as a proxy defined in Section 6 of version 3 of the license. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library. If not, see <http://www.gnu.org/licenses/>. */ #ifndef PHONON_ABSTRACTMEDIASTREAM_H #define PHONON_ABSTRACTMEDIASTREAM_H #include "phonon_export.h" #include "phononnamespace.h" #include <QtCore/QObject> class QByteArray; #ifndef QT_NO_PHONON_ABSTRACTMEDIASTREAM namespace Phonon { class MediaObject; class AbstractMediaStreamPrivate; /** \class AbstractMediaStream abstractmediastream.h phonon/AbstractMediaStream * \brief Base class for custom media data streams. * * Implement this class to provide a custom data stream to the backend. The class supports both the * push and the pull model. * * Push: * \code * PushStream::PushStream(QObject *parent) * : AbstractMediaStream(parent), m_timer(new QTimer(this)) * { * setStreamSize(getMediaStreamSize()); * * connect(m_timer, SIGNAL(timeout()), SLOT(moreData())); * m_timer->setInterval(0); * } * * void PushStream::moreData() * { * const QByteArray data = getMediaData(); * if (data.isEmpty()) { * endOfData(); * } else { * writeData(data); * } * } * * void PushStream::needData() * { * m_timer->start(); * moreData(); * } * * void PushStream::enoughData() * { * m_timer->stop(); * } * \endcode * * Pull: * \code * PullStream::PullStream(QObject *parent) * : AbstractMediaStream(parent) * { * setStreamSize(getMediaStreamSize()); * } * * void PullStream::needData() * { * const QByteArray data = getMediaData(); * if (data.isEmpty()) { * endOfData(); * } else { * writeData(data); * } * } * \endcode * * \ingroup Playback * \author Matthias Kretz <kretz@kde.org> */ class PHONON_EXPORT AbstractMediaStream : public QObject { Q_OBJECT Q_DECLARE_PRIVATE(AbstractMediaStream) friend class MediaObject; friend class MediaObjectPrivate; friend class StreamInterface; public: virtual ~AbstractMediaStream(); protected: /** * Constructs an AbstractMediaStream object with a \p parent. */ explicit AbstractMediaStream(QObject *parent = 0); /** * Returns the stream size that was set with \ref setStreamSize. * * A negative value means that the length of the stream cannot be known. * * Defaults to \c 0. */ qint64 streamSize() const; /** * Sets the size of the stream in number of bytes. * * A negative value means that the length of the stream cannot be known. * * Defaults to 0. * * This function has to be called. A backend will not call \ref needData() until the * stream size is set. */ void setStreamSize(qint64); /** * Returns whether your data stream is set as seekable. * * Defaults to \c false. */ bool streamSeekable() const; /** * Sets whether your data stream is seekable. * * Defaults to \c false.
* * If you set this to \c true you have to implement the \ref seekStream function. */ void setStreamSeekable(bool); /** * Sends the media \p data to the backend for decoding. * * \warning Don't call this function before the first needData() is emitted. */ void writeData(const QByteArray &data); /** * Tells the backend that the media data stream is at its end. * * \warning Don't call this function before the first needData() is emitted. */ void endOfData(); /** * If an I/O error occurs you should call this function to make MediaObject go into * ErrorState. * * \see MediaObject::errorType() * \see MediaObject::errorString() */ void error(Phonon::ErrorType errorType, const QString &errorString); /** * Reimplement this function to reset the stream. Subsequent calls to writeData should start * from the first position of the data unless a seek is requested. * * The function is necessary for the case where a non-seekable MediaStream is * played more than once. For a seekable stream the implementation can simply call * \code * seekStream(0); * \endcode. */ Q_INVOKABLE virtual void reset() = 0; /** * Reimplement this function to be notified when the backend needs data. * * When this function is called you should try to call writeData or endOfData before * returning. */ Q_INVOKABLE virtual void needData() = 0; /** * Reimplement this function to be notified when the backend has enough data and your stream * object may take a break. This method is important for pushing data to the backend in * order to not fill the backend buffer unnecessarily. */ Q_INVOKABLE virtual void enoughData(); /** * Reimplement this function if your stream is seekable. * * When this function is called the next call to writeData has to be at the requested \p * offset. * * \warning Do not call the parent implementation. */ Q_INVOKABLE virtual void seekStream(qint64 offset); AbstractMediaStream(AbstractMediaStreamPrivate &dd, QObject *parent); QScopedPointer<AbstractMediaStreamPrivate> d_ptr; }; } // namespace Phonon #endif //QT_NO_PHONON_ABSTRACTMEDIASTREAM #endif // PHONON_ABSTRACTMEDIASTREAM_H
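The reset()/seekStream() contract documented above suggests one more variant worth spelling out: a seekable pull stream. Here is a minimal sketch assuming a QFile-backed source; the class name FileStream and the 4096-byte chunk size are illustrative, not part of Phonon, and error handling is omitted:

#include <QtCore/QFile>
#include <phonon/AbstractMediaStream>

// Q_OBJECT is omitted: no new signals/slots are declared; the base class's
// Q_INVOKABLE virtuals dispatch to these overrides.
class FileStream : public Phonon::AbstractMediaStream
{
public:
    explicit FileStream(const QString &path, QObject *parent = 0)
        : Phonon::AbstractMediaStream(parent), m_file(path)
    {
        m_file.open(QIODevice::ReadOnly);
        setStreamSize(m_file.size()); // backend will not call needData() before this
        setStreamSeekable(true);      // we implement seekStream() below
    }

protected:
    void needData()
    {
        const QByteArray data = m_file.read(4096);
        if (data.isEmpty())
            endOfData();
        else
            writeData(data);
    }

    void reset()
    {
        seekStream(0);                // seekable stream: rewinding is just a seek
    }

    void seekStream(qint64 offset)
    {
        m_file.seek(offset);          // next writeData() starts at the requested offset
    }

private:
    QFile m_file;
};

Because the stream is seekable, reset() degenerates to seekStream(0), exactly as the reset() documentation above suggests.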
utf-8
1
LGPL-2.1+3+KDEeV
2010-2011, Casian Andrei <skeletk13@gmail.com> 2009-2010, Colin Guthrie <cguthrie@mandriva.org> 2010, Harald Sitter <apachelogger@ubuntu.com> 2011-2015, Harald Sitter <sitter@kde.org> 2008, Ian Monroe <ian@monroe.nu> 2011, Jakub Spiewak <jmspiewak@gmail.com> 2010, Jean-Baptiste Kempf <jb@videolan.org> 2004-2008, Matthias Kretz <kretz@kde.org> 2010, Nokia Corporation and/or its subsidiary(-ies) 2009, Nokia Corporation and/or its subsidiary(-ies) <thierry.bastian@trolltech.com> 2006-2008, Ricardo Villalba <rvm@escomposlinux.org> 2010-2012, Trever Fischer <tdfischer@fedoraproject.org> 2011, Trever Fischer <tdfischer@kde.org> 2009, Nokia Corporation and/or its subsidiary(-ies)
mono-6.8.0.105+dfsg/external/boringssl/include/openssl/cpu.h
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). */ #ifndef OPENSSL_HEADER_CPU_H #define OPENSSL_HEADER_CPU_H #include <openssl/base.h> #if defined(__cplusplus) extern "C" { #endif /* Runtime CPU feature support */ #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) /* OPENSSL_ia32cap_P contains the Intel CPUID bits when running on an x86 or * x86-64 system. 
* * Index 0: * EDX for CPUID where EAX = 1 * Bit 20 is always zero * Bit 28 is adjusted to reflect whether the data cache is shared between * multiple logical cores * Bit 30 is used to indicate an Intel CPU * Index 1: * ECX for CPUID where EAX = 1 * Bit 11 is used to indicate AMD XOP support, not SDBG * Index 2: * EBX for CPUID where EAX = 7 * Index 3 is set to zero. * * Note: the CPUID bits are pre-adjusted for the OSXSAVE bit and the YMM and XMM * bits in XCR0, so it is not necessary to check those. */ extern uint32_t OPENSSL_ia32cap_P[4]; #endif #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) #if defined(OPENSSL_APPLE) /* iOS builds use the static ARM configuration. */ #define OPENSSL_STATIC_ARMCAP #endif #if !defined(OPENSSL_STATIC_ARMCAP) /* CRYPTO_is_NEON_capable_at_runtime returns true if the current CPU has a NEON * unit. Note that |OPENSSL_armcap_P| also exists and contains the same * information in a form that's easier for assembly to use. */ OPENSSL_EXPORT char CRYPTO_is_NEON_capable_at_runtime(void); /* CRYPTO_is_NEON_capable returns true if the current CPU has a NEON unit. If * this is known statically then it returns one immediately. */ static inline int CRYPTO_is_NEON_capable(void) { /* Only statically skip the runtime lookup on aarch64. On arm, one CPU is * known to have a broken NEON unit which is known to fail on some * hand-written NEON assembly. For now, continue to apply the workaround even * when the compiler is instructed to freely emit NEON code. See * https://crbug.com/341598 and https://crbug.com/606629. */ #if defined(__ARM_NEON__) && !defined(OPENSSL_ARM) return 1; #else return CRYPTO_is_NEON_capable_at_runtime(); #endif } #if defined(OPENSSL_ARM) /* CRYPTO_has_broken_NEON returns one if the current CPU is known to have a * broken NEON unit. See https://crbug.com/341598. */ OPENSSL_EXPORT int CRYPTO_has_broken_NEON(void); #endif /* CRYPTO_is_ARMv8_AES_capable returns true if the current CPU supports the * ARMv8 AES instruction. */ int CRYPTO_is_ARMv8_AES_capable(void); /* CRYPTO_is_ARMv8_PMULL_capable returns true if the current CPU supports the * ARMv8 PMULL instruction. */ int CRYPTO_is_ARMv8_PMULL_capable(void); #else static inline int CRYPTO_is_NEON_capable(void) { #if defined(OPENSSL_STATIC_ARMCAP_NEON) || defined(__ARM_NEON__) return 1; #else return 0; #endif } static inline int CRYPTO_is_ARMv8_AES_capable(void) { #if defined(OPENSSL_STATIC_ARMCAP_AES) return 1; #else return 0; #endif } static inline int CRYPTO_is_ARMv8_PMULL_capable(void) { #if defined(OPENSSL_STATIC_ARMCAP_PMULL) return 1; #else return 0; #endif } #endif /* OPENSSL_STATIC_ARMCAP */ #endif /* OPENSSL_ARM || OPENSSL_AARCH64 */ #if defined(__cplusplus) } /* extern C */ #endif #endif /* OPENSSL_HEADER_CPU_H */
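To show how this header is meant to be consumed, here is a minimal sketch (not from BoringSSL) that guards a NEON code path with CRYPTO_is_NEON_capable(); the guard macros mirror the header, since the function is only declared for ARM and AArch64 builds, and the printed strings are placeholders for real vector/scalar implementations:

#include <stdio.h>
#include <openssl/cpu.h>

int main(void)
{
#if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
    /* Pick the vectorized path only when the CPU (or the static build
     * configuration) reports a usable NEON unit. */
    if (CRYPTO_is_NEON_capable())
        printf("using NEON path\n");
    else
        printf("using scalar fallback\n");
#else
    /* On x86/x86-64 builds the NEON helpers are not declared at all. */
    printf("not an ARM build\n");
#endif
    return 0;
}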
utf-8
1
MIT
© 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald © 2000 Intel Corporation. All rights reserved. © 2001 Andreas Jonsson © 2001 Andrew Sutton © 2001 Bob Smith. http://www.thestuff.net © 2001 Chris Hynes © 2001 Christopher Podurgiel © 2001 Daniel Weber © 2001 David Dawkins © 2001 Derek Holden (dholden@draper.com) © 2001 Garrett Rooney (rooneg@electricjellyfish.net) © 2001 John Barnette © 2001 John R. Hicks (angryjohn69@nc.rr.com) © 2001 Krister Hansson © 2001 Mads Pultz © 2001 Marcel Narings © 2001 Martin Weindel © 2001 Matthew S. Ford © 2001 Michael Lambert, All Rights Reserved © 2001 Moonlight Enterprises, All Rights Reserved © 2001 Phillip Pearson (http://www.myelin.co.nz) © 2001 Radek Doulik © 2001 Ricardo Fernández Pascual © 2001 Scott Sanders © 2001 Wictor Wilén (wictor@iBizkit.se) © 2001-2002 Jason Diamond http://injektilo.org/ © 2001-2002 Marcin Szczepanski © 2001-2002 Mike Kestner © 2001-2002 Nick Drochak II © 2001-2002 Southern Storm Software, Pty Ltd. © 2001-2002 Vladimir Vukicevic (vladimir@pobox.com) © 2001-2002 Wild West Software © 2001-2003 Ximian, Inc. http://www.ximian.com © 2002 Alejandro Sánchez Acosta <raciel@es.gnu.org> © 2002 Ameya Sailesh Gargesh (ameya_13@yahoo.com) © 2002 Brian Ritchie © 2002 Chew Keong TAN © 2002 Chris J Breisch © 2002 Dan Lewis © 2002 Daniel Stodden <stodden@in.tum.de> © 2002 Duco Fijma © 2002 Franklin Wise © 2002 Free Software Foundation © 2002 Gaurav Vaish © 2002 Jaime Anguiano Olarra © 2002 John Donagher © 2002 Jon Guymon © 2002 Kevin Winchester © 2002 Kral Ferch © 2002 Lawrence Pit © 2002 Martin Adoue © 2002 Martin Baulig © 2002 Matt Hunter © 2002 Miguel de Icaza © 2002 Owen Brady (Ocean at owenbrady dot net) © 2002 Piers Haken © 2002 Rodrigo Moya © 2002 Stuart Caborn © 2002 Ulrich Kunitz © 2002-2003 Dave Bettin © 2002-2003 Eduardo Garcia Cebollero <kiwnix@yahoo.es> © 2002-2003 Greg Reinacker, Reinacker & Associates, Inc. All rights reserved © 2002-2003 Jackson Harper, All rights reserved © 2002-2003 Sergey Chaban (serge@wildwestsoftware.com) © 2002-2003 Ville Palo © 2002-2004 Motus Technologies Inc. (http://www.motus.com) © 2002-2004 Neale Ferguson © 2002-2004 Tim Coleman © 2002-2005 Cesar Lopez Nataren © 2002-2005 Lluis Sanchez Gual © 2002-2005 Rafael Teixeira © 2002-2008 Daniel Morgan © 2002-2008 Mainsoft Corporation. © 2002-2011 Novell, Inc (http://www.novell.com) © 2003 Aleksey Sanin (aleksey@aleksey.com) © 2003 Alexandre Pigolkine (pigolkine@gmx.de) © 2003 Brad Fitzpatrick © 2003 Dominik Fretz © 2003 Duncan Mak, Ximian Inc. 
© 2003 Eric Glass © 2003 Erik LeBel © 2003 Gilles Freart © 2003 Ian MacLean © 2003 Jean-Marc André <jean-marc.andre@polymtl.ca> © 2003 Jochen Wezel (CompuMaster GmbH) © 2003 Johannes Roith <johannes@jroith.de> © 2003 Joshua Tauberer © 2003 Latitude Geographics Group, All rights reserved © 2003 Lee Mallabone <gnome@fonicmonkey.net> © 2003 Martin Willemoes Hansen © 2003 Oleg Tkachenko © 2003 Patrick Kalkman © 2003 Patrik Torstensson © 2003 Pedro Martínez Juliá <yoros@wanadoo.es> © 2003 Peter Van Isacker © 2003 Phillip Jerkins © 2003 PT Cakram Datalingga Duaribu http://www.cdl2000.com © 2003 Stefan Görling © 2003 The Mentalis.org Team (http://www.mentalis.org/) © 2003 Thong (Tum) Nguyen [tum@veridicus.com] © 2003-2004 Andreas Nahr © 2003-2004 Atsushi Enomoto © 2003-2004 Ben Maurer © 2003-2004 Bernie Solomon © 2003-2004 Carlos Guzman Alvarez © 2003-2004 Todd Berman © 2003-2007 Alp Toker <alp@atoker.com> © 2003-2007 Juraj Skripsky © 2003-2008 Jonathan Pryor © 2003-2008 Niels Kokholm © 2003-2008 Peter Sestoft © 2004 Edd Dumbill © 2004 Everaldo Canuto © 2004 IT+ A/S (http://www.itplus.dk) © 2004 Ivan Hamilton © 2004 Luca Barbieri © 2004 Matthijs ter Woord © 2004 Punit Todi © 2004-2005 Geoff Norton. © 2004-2006 Jaroslaw Kowalski © 2004-2006 John Luke © 2004-2008 Gert Driesen © 2004-2008 HotFeet GmbH (http://www.hotfeet.ch) © 2004-2013 Charlie Poole © 2005 akiramei (mei@work.email.ne.jp) © 2005 Carlo Kok (ck@carlo-kok.com) © 2005 David Waite (mass@akuma.org) © 2005 Hubert FONGARNAND © 2005 Iain McCoy © 2005 Senganal T © 2005 Sharif FarsiWeb, Inc. (http://www.farsiweb.info) © 2005 Voelcker Informatik AG © 2005-2007 Marek Sieradzki © 2005-2008 Jiri George Moudry © 2005-2008 Kornél Pál © 2005-2015 Jb Evain (http://evain.net) © 2006 Alexander Olk © 2006 Broadcom © 2006 Bruno Haible © 2006 Evaluant RC S.A © 2006 Forcom (http://www.forcom.com.pl/) © 2006 Marek Habersack © 2006 Matt Hargett © 2006 Patrick Earl © 2006 Sergey Tikhonov (tsv@solvo.ru) © 2006 Sridhar Kulkarni © 2006-2007 Dmitry S. Kataev © 2006-2009 Daniel Nauck © 2006-2009 Jonathan Chambers © 2007 Andreas Noever © 2007 Dean Brettle © 2007 Marcos Cobena (http://www.youcannoteatbits.org/) © 2007 Randolph Chung © 2007-2008 Andreas Faerber © 2007-2008 Andreia Gaita © 2007-2008 Ivan N. Zlatev © 2007-2008 Pascal Craponne © 2007-2008 Stefan Klinger © 2008 Andy Hume © 2008 db4objects, Inc. (http://www.db4o.com) © 2008 Eric Butler © 2008 George Giolfan © 2008 James Fitzsimons © 2008 Michael Barker © 2008 Realtime Worlds Ltd © 2008-2009 Jérémie "Garuma" Laval © 2009 Aaron Bockover © 2009 Craig Sutherland © 2009 Eric Maupin (http://www.ermau.com) © 2009 Federico Di Gregorio © 2009 leppie (http://xacc.wordpress.com/) © 2009 Olivier Dufour olivier(dot)duff(at)gmail(dot)com © 2011 Rodrigo Kumpera © 2011-2018 Xamarin Inc (http://www.xamarin.com) © 2015 Steffen Kiess © 2015-2016 Quamotion (http://quamotion.mobi) © 2015-2018 .NET Foundation © 2016 Unity Technologies (https://www.unity3d.com) © 2016 Dot net foundation. © 2016-2018 Microsoft
icom-20120228/.pc/fix-compile.patch/icom.c
/* * Program to control ICOM radios * * Main program */ #include "icom.h" #include <ctype.h> #ifndef MSDOS /* include for Unix */ #include <sys/stat.h> #include <fcntl.h> #include <termios.h> #include <time.h> #endif /* MSDOS */ /* * Module definitions */ #define ARGMAX 20 /* maximum number of command args */ #define DICOM "/dev/icom" /* CI-V serial port device */ /* * External functions */ extern FILE *fopen(); extern char *strtok(), *strcpy(); extern char *optarg; extern int optind, opterr; extern double freqdouble(u_char *, int); /* * Local function prototypes */ static int getline(char *); static int argchan(struct icom *, struct chan *, char *); static int argbank(struct icom *, struct chan *, char *); static int setswitch(struct icom *, struct cmdtable *, int); static int setswitch2(struct icom *, struct cmdtable *, int); static int setswitch3(struct icom *, struct cmdtable *, int, int); static int sw_keypad(void); static int sw_keybd(void); char *capname(int, struct cmdtable *); static char *capdescr(char *, struct cmdtable *); int capkey(char *, struct cmdtable *); double fabs(double); static void printch(struct icom *, char *); static int readmeter(struct icom *, struct cmdtable *, int, char *); static void perr(int); static int qqsv(struct icom *, struct cmdtable *); static void banner(struct icom *); /* * Global variables */ int fd_icom; /* CI-V device */ struct icom *rptr = NULL; /* radio structure pointer */ int flags; /* radio flags */ int pflags; /* program flags */ static char defarg[LINMAX]; /* default arguments */ static char args[LINMAX]; /* command line */ static char *argv[ARGMAX]; /* command line args */ static int argn; /* number of command line args */ static int argi; /* command args index */ static FILE *fp_cmd[FPMAX] = {NULL}; /* include file stack */ static int fp = 0; /* include file stack index */ static char *updown = " +- "; /* channel dinkle */ #ifndef MSDOS static int fd; /* terminal control file descriptor */ static struct termios terma, termb; /* Unix terminal interface */ extern int errno; #endif /* MSDOS */ /* * Main program */ int main( int argc, /* number of arguments */ char **argcv /* vector of argument pointers */ ) { char chr; /* char temp */ char *ptr; /* fiddles */ int i, temp; /* int temps */ double freq; /* double temp */ struct icom *rp; /* radio structure pointer */ struct chan *cp; /* memory channel pointer */ char s1[LINMAX]; /* string buffer */ FILE *fp_temp; /* * Initialize */ flags = pflags = 0; *defarg = '\0'; rptr = NULL; #ifndef MSDOS /* * Process command-line arguments */ if ((fd_icom = open(DICOM, O_RDWR, 0777)) < 0) { printf("*** Unable to open serial port %s\n", DICOM); exit(1); } while ((temp = getopt(argc, argcv, "c:df:g:km:o:r:")) != -1) { switch (temp) { /* * -d - debug trace */ case 'd': pflags |= P_TRACE | P_ERMSG; continue; /* * -f <file> - open command file */ case 'f': if ((fp_temp = fopen(optarg, "r")) == NULL) { perr(R_IO); exit(R_IO); } fp++; fp_cmd[fp] = fp_temp; continue; /* * -k - select keypad mode */ case 'k': pflags |= P_PAD; continue; /* * -r <radio> - select radio. Use default bit rate. */ case 'r': temp = capkey(optarg, identab); if (temp < 0) { perr(temp); exit(temp); } rptr = select_radio(temp, 0); if (rptr == NULL) { perr(R_RAD); exit(R_RAD); } continue; } /* * The remaining options are valid only after a radio * has been selected. If any are selected, the program * exits after executing the command line options.
*/ if (rptr == NULL) { perr(R_DEF); exit(R_DEF); } rp = rptr; cp = &rp->chan; switch (temp) { /* * -c <chan> - set bank, channel * * This is the same as the chan keyboard command. */ case 'c': pflags |= P_EXIT; temp = argchan(rp, cp, optarg); if (temp < 0) { perr(temp); continue; } temp = readchan(rp); if (temp < 0) perr(temp); continue; /* * -g <freq> - set frequency * * This is the same as the default frequency keyboard * command. */ case 'g': pflags |= P_EXIT; if (sscanf(optarg, "%lf", &freq) != 1) { perr(R_FMT); continue; } if (freq > 1000) freq /= 1000; temp = loadfreq(rp, freq); if (temp < 0) perr(temp); continue; /* * -m <mode> - set mode * * This is the same as the mode keyboard command. Note, * this option must precede the -g option for older * radios. */ case 'm': pflags |= P_EXIT; temp = capkey(optarg, rp->modetab); if (temp < 0) { perr(temp); continue; } temp = loadmode(rp->ident, temp); if (temp < 0) perr(temp); continue; } } /* * If a radio was found, initialize it. If its settings were * changed and a command file is not open, assume this is run * from a script and nothing more needs to be done. */ if (pflags & P_EXIT) exit(0); if (pflags & P_PAD) { if (sw_keypad()) pflags &= ~P_PAD; } #endif /* MSDOS */ /* * Main loop */ while (1) { flags &= ~F_CACHE; pflags &= ~(P_DISP | P_DSPCH | P_DSPST | P_KEYP | P_ESC); if (pflags & P_PAD) { /* * Keypad mode. Keypad commands begin with a * sequence of digits and special characters and * end with a single letter, ANSI escape * sequence or '\n', which specifies the * function. Escape sequences consist of ESC * followed by '[' and either a letter or * sequence of digits followed by '~', which is * not part of the command. Help '?' displays a * list of command names and descriptions. */ printf(">"); ptr = s1; *ptr = *args = '\0'; while (1) { chr = (char)getchar(); if (chr == KILL) { printf(" XXX\n>"); ptr = s1; *ptr = *args = '\0'; continue; } if (chr == ESC) { pflags |= P_ESC; *ptr = '\0'; ptr = args; continue; } if (pflags & P_ESC) { if (chr == '~' || chr == '\n') break; *ptr++ = chr; if (isalpha(chr)) break; continue; } if (!isalpha(chr) && !iscntrl(chr) && chr != '?' && chr != ' ') { pflags |= P_KEYP; *ptr++ = chr; putchar(chr); continue; } *ptr = '\0'; ptr = args; if (chr != '\n') { pflags |= P_KEYP; *ptr++ = chr; putchar(chr); } if (pflags & P_KEYP) putchar('\n'); break; } /* * Rewrite the line with the command letter * first followed by the argument, then pretend * it's a keyboard command. */ *ptr = '\0'; strcat(args, " "); strcat(args, s1); argn = getline(args); argi = 0; temp = command(rptr, key); } else { /* * Keyboard mode. Get the next command line and * parse the tokens separated by whitespace. * Ignore '#' and the rest of the line. This is * handy for command script annotations. */ if (fp_cmd[fp] != NULL) { if (fgets(args, LINMAX, fp_cmd[fp]) == NULL) { fclose(fp_cmd[fp]); fp--; continue; } else { printf("%s", args); } } else { printf("icom>"); if (gets(args) == NULL) exit(0); } if (*args == '#') continue; argn = getline(args); argi = 0; temp = command(rptr, cmd); } perr(temp); if (rptr == NULL) continue; /* * Update chan, freq, mode. */ rp = rptr; cp = &rp->chan; if (pflags & (P_DISP | P_DSPCH | P_DSPST)) { printch(rp, s1); printf("%s\n", s1); } } } /* * Process each command in the line in turn.
*/ int command( struct icom *rp, /* radio pointer */ struct cmdtable *cmdop /* command table pointer */ ) { int rval; rval = R_OK; if (argn == 0) { pflags |= P_DISP; return (rval); } while (argn - argi > 0) { rval = qqsv(rptr, cmdop); if (rval < 0) break; argi++; } return (rval); } /* * Execute command * * This routine executes a command consisting of a single line beginning * with a command token followed by argument tokens. Some commands cause * this routine to be invoked recursively. In each case the recursive * call points to a command token. */ int qqsv( struct icom *rp, /* radio pointer */ struct cmdtable *cmdop /* command table pointer */ ) { FILE *fp_in; /* data input file */ FILE *fp_out; /* data output file */ char s1[LINMAX]; /* string buffer */ char s2[LINMAX]; /* string buffer */ u_char rsp[BMAX]; /* radio response */ u_char *ptr; /* fiddles */ int i, temp, sw, rval; /* int temps */ FILE *fp_temp; double freq, step, dtemp; /* double temps */ struct chan *cp; /* memory channel pointer */ u_char cmdempty[] = {V_EMPTY, FI}; u_char cmdctl[] = {0, 0, 0, 0, FI}; u_char cmdvfom[] = {V_VFOM, FI}; u_char cmdwrite[] = {V_WRITE, FI}; u_char cmdtx[] = {V_TX, 0x00, 0x01, FI}; u_char cmdrx[] = {V_TX, 0x00, 0x00, FI}; u_char cmdvfoa[] = {V_SVFO, 0x00, FI}; u_char cmdvfob[] = {V_SVFO, 0x01, FI}; u_char cmdsplit[] = {V_SPLIT, 0x00, FI}; u_char cmdswap[] = {V_SVFO, 0xb0, FI}; struct timeval tv; struct tm *tm; /* * For the 756, 7000 and 7800 time commands */ u_char year7000[] = {V_SETW, 0x05, 0x00, 0x39, FI, 0x00, FI}; u_char date7000[] = {V_SETW, 0x05, 0x00, 0x40, FI, 0x00, FI}; u_char time7000[] = {V_SETW, 0x05, 0x00, 0x41, FI, 0x00, FI}; u_char date7800[] = {V_SETW, 0x05, 0x00, 0x59, FI, 0x00, 0x00, 0x00, FI}; u_char time7800[] = {V_SETW, 0x05, 0x00, 0x60, FI, 0x00, FI}; u_char time756[] = {V_SETW, 0x05, 0x16, FI, 0x00, FI}; /* * Ignore selected noise strings. */ rval = R_OK; if (strcmp(argv[argi], "MHz") == 0 || strcmp(argv[argi], "kHz") == 0 || strcmp(argv[argi], "dB") == 0) return(rval); sw = capkey(argv[argi], cmdop); switch (sw) { /* * radio [ <name> [ <baud> ]] * * Select the <name> radio and CI-V bit rate <baud> for further * commands and display its description and band limits. If * <baud> is missing, use the default from tables. If <name> is * missing and the radio has not been previously defined, the * bus is probed for all known radios, which takes some time. If * previously defined, its description and band limits are * displayed. */ case C_RADIO: if (argn - argi < 2) { if (rp != NULL) { banner(rp); break; } temp = R_NOR; for (i = 0; name[i].name[0] != '\0'; i++) { rp = select_radio(name[i].ident, 0); if (rp != NULL) banner(rp); } } else { temp = capkey(argv[++argi], identab); if (temp < 0) return (temp); if (argn - argi < 2) i = 0; else i = capkey(argv[++argi], baud); if (i < 0) return(i); rp = select_radio(temp, i); if (rp != NULL) banner(rp); } if (rptr == NULL) return (R_RAD); pflags |= P_DSPCH; break; /* * include * * Include command file.
*/ case C_INCLD: if (argn - argi < 2) { rval = R_ARG; break; } if ((fp_temp = fopen(argv[++argi], "r")) == NULL) { rval = (R_IO); break; } fp++; fp_cmd[fp] = fp_temp; break; /* * quit * * Quit the dance */ case C_QUIT: exit(0); /* * verbose off | on * * Set verbose mode */ case C_VERB: if (argn - argi < 2) return (R_ARG); temp = capkey(argv[++argi], verbx); if (temp < 0) rval = temp; else pflags = (pflags & ~P_VERB) | temp; return (rval); /* * trace [ all | none | bus | pkt ] * * Set debug flags */ case C_DEBUG: if (argn - argi < 2) return (R_ARG); temp = capkey(argv[++argi], dbx); if (temp < 0) rval = temp; else pflags = (pflags & ~(P_TRACE | P_ERMSG)) | temp; return (rval); /* * pad * * Switch to keypad mode. */ case C_KEYPAD: if (!sw_keypad()) pflags |= P_PAD; return (rval); /* * / (keypad mode) * * Erase input */ case R_ERR: case C_ERASE: return (rval); /* * q (keypad mode) * * Switch to keyboard mode. */ case C_KEYBD: if (!sw_keybd()) pflags &= ~P_PAD; return (rval); } /* * The remaining commands are valid only after a radio has been * selected. */ if (rp == NULL) return (R_DEF); cp = &rp->chan; switch (sw) { /* * dump vfo (debug) */ case C_DUMP: printf("flags %x pflags %x vfo", flags, pflags); ptr = (u_char *)&cp->vfo; for (i = 0; i < sizeof(struct vfo7000); i++) printf(" %02x", *ptr++ & 0xff); printf("\nsplit %f step %02x pstep %02x %02x atten %02x scan %02x\n", cp->split, cp->aux.step, cp->aux.pstep[0], cp->aux.pstep[1], cp->aux.atten, cp->aux.scan); break; /* * default * * Concatenate remaining tokens as default string for restore. */ case C_DEFLT: if (argn - argi < 2) { printf("default:%s\n", defarg); break; } *defarg = '\0'; while (argn - argi > 1) { strcat(defarg, " "); strcat(defarg, argv[++argi]); } break; /* * time [ set ] * * Set date and time and display for readback. This works for * the 7000 and 756 and maybe works for the 7800. 
*/ case C_TIME: gettimeofday(&tv, NULL); tm = gmtime((time_t *)&tv.tv_sec); /* * 7000 yyyy mm/dd hhmm */ if (rp->ident == 0x70) { if (argn - argi > 1) { ++argi; dtohex(tm->tm_year + 1900, &year7000[4]); rval = setcmda(rp->ident, year7000, rsp); if (rval < 0) break; dtohex((tm->tm_mon + 1) * 100 + tm->tm_mday, &date7000[4]); setcmda(rp->ident, date7000, rsp); dtohex(tm->tm_hour * 100 + tm->tm_min, &time7000[4]); setcmda(rp->ident, time7000, rsp); } year7000[4] = FI; setcmda(rp->ident, year7000, rsp); sprintf(s1, "%02x%02x ", rsp[4], rsp[5]); date7000[4] = FI; setcmda(rp->ident, date7000, rsp); sprintf(s2, "%x/%x ", rsp[4], rsp[5]); strcat(s1, s2); time7000[4] = FI; setcmda(rp->ident, time7000, rsp); sprintf(s2, "%02x%02x UTC", rsp[4], rsp[5]); strcat(s1, s2); /* * 7800 yyyy mm/dd hhmm */ } else if (rp->ident == 0x6a) { if (argn - argi > 1) { ++argi; dtohex(tm->tm_year + 1900, &date7800[4]); dtohex((tm->tm_mon + 1) * 100 + tm->tm_mday, &date7800[6]); rval = setcmda(rp->ident, date7800, rsp); if (rval < 0) break; dtohex(tm->tm_hour * 100 + tm->tm_min, &time7800[4]); setcmda(rp->ident, time7800, rsp); } date7800[4] = FI; setcmda(rp->ident, date7800, rsp); sprintf(s1, "%02x%02x ", rsp[4], rsp[5]); sprintf(s2, "%x/%x ", rsp[6], rsp[7]); strcat(s1, s2); time7800[4] = FI; setcmda(rp->ident, time7800, rsp); sprintf(s2, "%02x%02x UTC", rsp[4], rsp[5]); strcat(s1, s2); /* * 756 hhmm */ } else { if (argn - argi > 1) { ++argi; dtohex(tm->tm_hour * 100 + tm->tm_min, &time756[3]); rval = setcmda(rp->ident, time756, rsp); if (rval < 0) break; } time756[3] = FI; setcmda(rp->ident, time756, rsp); sprintf(s1, "%02x%02x UTC", rsp[3], rsp[4]); } printf("%s\n", s1); break; /* * (command not found) * * We get here if the token matches no valid command name. If it * has valid floating-point format, set the frequency as given. * If it is a valid mode name, then set the mode as given. * Otherwise, declare an error. */ case C_FREQX: if (sscanf(argv[argi], "%lf", &freq) == 1) { if (freq > 1000.) freq /= 1000; rval = loadfreq(rp, freq); if (rval < 0) break; pflags |= P_DISP; break; } temp = capkey(argv[argi], rp->modetab); if (temp < 0) { rval = temp; break; } rval = loadmode(rp->ident, temp); if (rval < 0) break; pflags |= P_DISP; break; /* * mode <mode> * * The only reason this command is here is to provide help with * the valid mode combinations. The following radios support the * mode variants listed (1 narrow, 2 medium, 3 wide). * * usb/lsb cw/rtty am fm wfm * 706g 2, 3 1, 2, 3 2, 3 2, 3 1 * 756 1, 2, 3 1, 2, 3 1, 2, 3 2, 3 no * 7000 1, 2, 3 1, 2, 3 1, 2, 3 1, 2, 3 1 * R8500 1, 2, 3 1, 2, 3 1, 2, 3 1, 2 1 */ case C_MODE: if (argn - argi < 2) { pflags |= P_DISP; break; } temp = capkey(argv[++argi], rp->modetab); if (temp < 0) { rval = temp; break; } rval = loadmode(rp->ident, temp); if (rval < 0) break; pflags |= P_DISP; break; /* *************************************************************** * * * These commands are the bread and butter for most operators. * * They can be used to enter and display frequency and mode * * data and, when available, filter configuration. Note the * * difference between the freq and chan commands; the freq * * command operates directly on the VFO, while the chan * * command retrieves the entire channel contents, including in * * the 7000 both VFOs and channel name. 
* * * *************************************************************** */ /* *************************************************************** * * * The save and restore commands are used to save a single * * channel or block of channels to a file and restore them * * from a file. They can also be used to clone data between * * compatible radios. The read, write and empty commands * * operate on a single channel or block of channels. * * * *************************************************************** * * The arguments to these commands specify a single bank/channel * number or a range of bank/channel numbers. See argchan() for * syntax */ /* * save [ chan ] [ file ] * * Save a block of memory channels to a file. */ case C_SAVE: if (argn - argi < 2) temp = argchan(rp, cp, NULL); else temp = argchan(rp, cp, argv[++argi]); if (temp < 0) return (temp); if ((fp_out = fopen(argv[++argi], "w")) == NULL) return (R_IO); pflags |= P_DSPCH; while (1) { rval = readchan(rp); if (rval < 0) break; printch(rp, s1); printf("%s\n", s1); if (cp->freq != 0) fprintf(fp_out, "%s\n", s1); if (argchan(rp, cp, &updown[temp]) == 3) break; } fclose(fp_out); pflags &= ~P_DSPCH; break; /* * restore [ chan ] [ file ] * * Restore a block of memory channels from a file. If the * argument is '*', restore each file line to the same memory * channel it came from. If not, restore the block specified, * from the first channel to the last. */ case C_RESTORE: if (argn < 3) return (R_ARG); if (*argv[++argi] == '*') temp = 4; else temp = argchan(rp, cp, argv[argi]); if (temp < 0) return (temp); if ((fp_in = fopen(argv[++argi], "r")) == NULL) return (R_IO); pflags |= P_DSPCH; while (1) { /* * Get next line from file. Ignore empty lines. */ if (fgets(s1, LINMAX, fp_in) == NULL) break; if (*defarg != '\0') { strcpy(s2, defarg); argn = getline(s2); argi = 0; if (argn == 0) continue; rval = command(rp, loadtab); if (rval < 0) break; } argn = getline(s1); argi = 0; if (argn == 0) continue; /* * If '*' argument, copy the data to the channel * specified on the file line. If not, copy the * data to the channel specified in the * argument. */ emptyvfo(cp); if (temp == 4) { rval = argchan(rp, cp, argv[argi++]); if (rval < 0) break; if (argn == 1) continue; rval = command(rp, loadtab); if (rval < 0) break; printch(rp, s1); printf("%s\n", s1); rval = writechan(rp); if (rval < 0) break; } else { if (argn == 1) continue; argi++; rval = command(rp, loadtab); if (rval < 0) break; printch(rp, s1); printf("%s\n", s1); rval = writechan(rp); if (rval < 0) break; if (argchan(rp, cp, &updown[temp]) == 3) break; } } fclose(fp_in); pflags &= ~(P_DISP | P_DSPCH); break; /* * read [ chan ] * * Read frequency, mode and other data from a memory channel. * While it seems silly, specifying a block of channels reads * them all and leaves the current channel pointer at the first * one beyond the range. */ case C_READ: if (argn - argi < 2) temp = argchan(rp, cp, NULL); else temp = argchan(rp, cp, argv[++argi]); if (temp < 0) { if (temp == R_NOP) capkey("?", loadtab); return (temp); } pflags |= P_DSPCH; while (1) { rval = readchan(rp); if (rval < 0) break; printch(rp, s1); printf("%s\n", s1); if (argchan(rp, cp, &updown[temp]) == 3) break; } pflags &= ~P_DSPCH; break; /* * write [ chan ] * * Write the current frequency, mode and other data to a memory * channel. Various radios interpret other data in various ways. * For the 7000, this includes both VFOs with their current * mode, filter setting, duplex and CTSS/DTCS configuration.
* While it seems silly, specifying a block of channels writes * them all and leaves the current channel pointer at the first * one beyond the range. */ case C_WRITE: if (argn < 2) temp = argchan(rp, cp, NULL); else temp = argchan(rp, cp, argv[++argi]); if (temp < 0) return (temp); while (1) { rval = writechan(rp); if (rval < 0) break; if (argchan(rp, cp, &updown[temp]) == 3) break; } break; /* * empty [ chan ] [ chan ] * * Empty memory channel or block of memory channels. */ case C_EMPTY: if (argn - argi < 2) temp = argchan(rp, cp, NULL); else temp = argchan(rp, cp, argv[++argi]); if (temp < 0) return (temp); while (1) { rval = emptychan(rp->ident, cp); if (rval < 0) break; if (argchan(rp, cp, &updown[temp]) == 3) break; } cp->freq = 0; break; /* * bank [ bank ] [ name ] [...] * * Read/write bank name. This works for the R8500 and maybe * R9000, but not for known transceivers, which don't have a * bank name. */ case C_BANK: if (argn - argi < 2) temp = cp->bank; else if (sscanf(argv[++argi], "%d", &temp) != 1) return (R_FMT); if (argn > 2) loadbank(rp->ident, temp, cp->name); rval = readbank(rp->ident, temp, s1); if (rval < 0) break; cp->bank = temp; printf("bank %d %s\n", temp, s1); break; /* *************************************************************** * * * The following commands can provide software compensation * * for VFO or BFO frequency errors. Some radios generate BFO * * frequencies using a VCXO for each mode. The BFO * * compensation command corrects for the intrinsic frequency * * error (Hz) in each mode. Other radios generate all LO and * * BFO frequencies from a single VFO synthesizer. The VFO * * compensation corrects for the intrinsic frequency error * * (PPM). As each radio is different, these commands should * * probably live in a batch file. * * * *************************************************************** */ /* * vfocomp [ <offset> ] * * Set the VFO frequency compensation (PPM). */ case C_VCOMP: if (argn - argi > 1) { if (sscanf(argv[++argi], "%lf", &freq) != 1) return (R_FMT); rp->freq_comp = freq; } printf("frequency %f VFO offset %.2f PPM\n", cp->freq, rp->freq_comp); break; /* * bfocomp <mode> [ <offset> ] * * Set the BFO frequency compensation (Hz). */ case C_BCOMP: if (argn - argi > 1) { if (sscanf(argv[++argi], "%lf", &freq) != 1) return (R_FMT); rp->bfo[cp->mode & 0x0f] = freq; } freq = rp->bfo[cp->mode & 0x0f]; printf("mode %s BFO offset %.0lf Hz\n", capname(cp->mode, rp->modetab), freq); break; /* *************************************************************** * * * Radio commands that read and write internal registers, * * including VFO options, scan options, and duplex/simplex * * options. * * * *************************************************************** */ /* * Modern transceivers have two VFOs, called A and B, * alternatively main and sub, and two frequency/mode displays. * Each VFO is associated with mode and memory channel. The * functions of the vfo commands differ in funky ways on the * various transceivers. * * On the 7000 a "vfo a" command shows VFO A on the main display * and VFO B on the sub display. A "vfo b" command shows VFO B * on the main display and VFO A on the sub display. A "vfo * btoa" command copies the contents of VFO B to VFO A. A "vfo * swap" command interchanges the contents of VFO A and VFO B. * The 7000 can't do any of the other vfo commands. * * On the 756 the main display is at the top in large bright * font, while the sub display is at the bottom in small dim * font. 
Each display is associated with a VFO and a memory * channel. The mode, duplex direction and tone configuration * can be set separately for each VFO. Both VFOs and related * data can be saved in a memory channel. Ordinarily, updates * from the panel controls and this program go to the main VFO. * A "split sub" command switches updates to the sub VFO, while * a "split main" command switches back to the main VFO. Note * that the updated VFO is highlighted in bright font. A "vfo * equal" command copies the contents of the main VFO to the sub * VFO, while a "vfo swap" command interchanges the contents of * the two VFOs. The "vfo watch" and "vfo nowatch" turn the * dual-watch function on and off. Note that in dual-watch the * sub VFO is highlighted in large font. * * vfo [ <command> ] (V_SVFO 0x07 command) * * Execute one of the subcommands on the help menu. */ case C_VFO: rval = setswitch(rp, vfo, V_SVFO); if (rval < 0) break; pflags |= P_DSPCH; break; /* * swap (V_SVFO 0x07 command) * * Swap the contents of VFO A and VFO B and make the swapped VFO * active to enter frequency and mode. This is the same function * as the "vfo swap" command, useful in keypad mode to monitor * the transmit frequency. */ case C_SWAP: rval = setcmda(rp->ident, cmdswap, rsp); if (rval < 0) break; pflags |= P_DSPCH; break; /* * split [ cmd ] (V_SPLIT 0x0f command) * * Use VFO A for receive and VFO B for transmit. * * Ordinarily, ICOM transceivers transmit and receive with VFO * on VFO A. A "split on" command transmits with VFO B, while a * "split off" restores the normal behavior. The 7000 can do * "split dup+", "split dup-" and "split simplex" as well. * * In addition to the split commands on the help menu, this * command can be used to set the transmit offset in kHz if the * argument is preceded by a + or - or to an arbitrary value in * MHz if an unsigned value. */ case C_SPLIT: if (argn - argi > 1) { if (sscanf(argv[++argi], "%lf", &freq) == 1) { if (freq > 1000) freq /= 1000; readvfo(rp); if (argv[argi][0] == '+' || argv[argi][0] == '-') { freq /= 1000.; freq += cp->freq; } cp->split = freq; if (cp->split == 0) { cmdsplit[1] = 0x00; rval = setcmda(rp->ident, cmdsplit, rsp); pflags |= P_DSPCH; break; } rval = setcmda(rp->ident, cmdswap, rsp); if (rval < 0) break; rval = loadfreq(rp, freq); if (rval < 0) break; rval = loadmode(rp->ident, cp->mode); if (rval < 0) break; setcmda(rp->ident, cmdvfoa, rsp); cmdsplit[1] = 0x01; setcmda(rp->ident, cmdswap, rsp); pflags |= P_DSPCH; break; } argi--; } rval = setswitch(rp, split, V_SPLIT); if (rval < 0) break; pflags |= P_DSPCH; break; /* * mvfo (V_VFOM 0x0a command) * * Read memory channel and transfer to VFO. This works for the * 756 and 7000. The radio does this in memory channel mode, * so this program does it in VFO mode. In principle, this * command should never be necessary, but is included for test * and exploration. */ case C_VFOM: if (argn - argi < 2) rval = argchan(rp, cp, NULL); else rval = argchan(rp, cp, argv[++argi]); if (rval < 0) break; rval = readchan(rp); if (rval < 0) break; rval = setcmda(rp->ident, cmdvfom, rsp); break; /* * duplex [ duplex ] * * Set transmit offset for FM duplex. This works with the 706G * and 7000, which have four duplex registers, one for each of * the HF, 50 MHz, 144 MHz and 420 MHz bands. In order to read * and write these registers, the frequency must be first set * within the correct band.
*/ case C_DUPLEX: if (argn - argi < 2) { rval = readoffset(rp->ident, &freq); if (rval < 0) break; printf("duplex %.1f\n", freq); break; } if (sscanf(argv[++argi], "%lf", &freq) != 1) return (R_FMT); rval = loadoffset(rp->ident, freq); break; /* * scan [ <command> ] (V_SCAN 0x0e command) * * Perform awesome scans, both memory and channel. Some * radios have simply awesome scanning modes; others are * mostly bare. Need some volunteer experimenters here. */ case C_SCAN: return (setswitch(rp, scan, V_SCAN)); /* *************************************************************** * * * Tuning step and rate commands. The step and rate commands * * should work for all radios, as they do not use the radio * * tuning-step functions. The dial command works with the * * radio tuning-step functions, so is model dependent. * * * *************************************************************** */ /* * rate [ <rate> ] * * Set tuning rate. The values of <rate> from 0 through 20 * select the rate values in a 1-2.5-5-10 sequence. Warning: if * the frequency resolution (minimum tuning step) is 10, 100, * 1000 Hz, etc., the next step up would be 25, 250, 2500, * etc., which is not a multiple of the minimum tuning step. In * such cases the step is rounded to 20, 200, 2000, etc. */ case C_RATE: if (argn - argi > 1) { if (sscanf(argv[++argi], "%d", &temp) != 1) return (R_FMT); if (temp > 20) temp = 20; else if (temp < rp->minstep) temp = rp->minstep; rp->rate = temp; rp->step = logtab[rp->rate]; step = modf(cp->freq * 1e6 / rp->step, &freq); freq = freq / 1e6 * rp->step; rval = loadfreq(rp, freq); if (rval < 0) break; } pflags |= P_DSPST; break; /* * rate up (keypad) * * Set tuning rate up one notch. */ case C_RUP: if (rp->rate < 20) rp->rate++; rp->step = logtab[rp->rate]; step = modf(cp->freq * 1e6 / rp->step, &freq); freq = freq / 1e6 * rp->step; rval = loadfreq(rp, freq); if (rval < 0) break; pflags |= P_DSPST; break; /* * rate down (keypad) * * Set tuning rate down one notch. */ case C_RDOWN: if (rp->rate > rp->minstep) rp->rate--; rp->step = logtab[rp->rate]; step = modf(cp->freq * 1e6 / rp->step, &freq); freq = freq / 1e6 * rp->step; rval = loadfreq(rp, freq); if (rval < 0) break; pflags |= P_DSPST; break; /* * Tuning step commands. The step command sets the tuning step * directly to an arbitrary value. The up and down commands * shift the frequency up or down by the value of the tuning * step. * * step [ <step> ] * * Set tuning step directly in Hz. This is useful when scanning * odd channel spacings, such as aviation and marine radio * channels. Note that the tuning rate is set to minimum here, * since otherwise the rounding process would violate the * principle of least astonishment. */ case C_STEP: if (argn - argi > 1) { if (sscanf(argv[++argi], "%lf", &dtemp) != 1) return (R_FMT); if (dtemp < logtab[rp->minstep]) dtemp = logtab[rp->minstep]; rp->step = dtemp; rp->rate = rp->minstep; } pflags |= P_DSPST; break; /* * up (keypad) * * Tune up one step. */ case C_UP: freq = cp->freq + rp->step / 1e6; if (freq >= rp->ustep) freq = rp->lstep; rval = loadfreq(rp, freq); if (rval < 0) break; cp->freq = freq; pflags |= P_DSPST; break; /* * down (keypad) * * Tune down one step. */ case C_DOWN: freq = cp->freq - rp->step / 1e6; if (freq < rp->lstep) freq = rp->ustep; rval = loadfreq(rp, freq); if (rval < 0) break; cp->freq = freq; pflags |= P_DSPST; break; /* * band [ <low> ] [ <high> ] * * Set band scan limits. An up or down via keypad beyond the * upper limit wraps to the lower limit and vice-versa.
*/ case C_BAND: if (argn - argi < 2) { printf("band %s\n", capdescr("band", rp->cap)); break; } if (argn - argi < 3) return (R_ARG); if (sscanf(argv[++argi], "%lf", &freq) != 1) return (R_FMT); if (sscanf(argv[++argi], "%lf", &step) != 1) return (R_FMT); if (freq > step) { dtemp = freq; freq = step; step = dtemp; } if (freq < rp->lband) freq = rp->lband; rp->lstep = freq; if (step > rp->uband) step = rp->uband; rp->ustep = step; break; /* *************************************************************** * * * Control commands. These are used to set switches, twirl * * controls and read meters. Various radios implement none or * * a subset of the defined functions. * * * *************************************************************** */ /* * ctl [ name ] [ value ] (V_WRCTL 0x14 command) * * The ctl subcommand reads or writes internal registers * associated with a front panel control. The 756 and 7000 can * read and write them. The 706G can read these registers but * cannot write them. The R8500 can write them but not read * them. Most radios implement only a subset of the defined * subcommands. */ case C_CTRL: if (argn - argi < 2) { for (i = 0; ctlc[i].name[0] != '\0'; i++) { temp = readmeter(rp, ctlc, ctlc[i].ident, rsp); if (temp < 0) continue; printf("%10s %s\n", ctlc[i].name, rsp); } break; } temp = capkey(argv[++argi], rp->ctrl); if (temp < 0) return (temp); cmdctl[0] = temp >> 8; cmdctl[1] = temp; if (argn < 3) { cmdctl[2] = FI; rval = readmeter(rp, ctlc, temp, s1); if (rval < 0) break; printf("%s\n", s1); } else { if (sscanf(argv[++argi], "%d", &sw) != 1) return (R_FMT); sw = (sw * 255) / 100; if (temp >> 16 == F) sw += 128; sprintf(s1, "%04d", sw); ptr = s1; cmdctl[2] = (*ptr++ & 0xf) * 16; cmdctl[2] += *ptr++ & 0xf; cmdctl[3] = (*ptr++ & 0xf) * 16; cmdctl[3] += *ptr++ & 0xf; rval = setcmda(rp->ident, cmdctl, rsp); } break; /* * meter [ name ] [ value ] (V_RMTR 0x15 command) * * The meter subcommands report current meter indications. Note * that the S meter is reported in S units and dB above S9. The * squelch condition is reported as open (signal) or closed * (silent). * * The 706G can read the signal and sql meters. */ case C_METER: if (argn - argi < 2) { for (i = 0; meter[i].name[0] != '\0'; i++) { temp = readmeter(rp, meter, meter[i].ident, s1); if (temp < 0) continue; printf("%10s %s\n", meter[i].name, s1); } break; } temp = capkey(argv[++argi], meter); if (temp < 0) return (temp); rval = readmeter(rp, meter, temp, s1); if (rval == R_OK) printf("%s\n", s1); break; /* * set [ name ] [ value ] (V_TOGL 0x16 command) * * The switch subcommands read or write internal switches. */ case C_SWTCH: if (argn - argi < 2) { for (i = 0; switches[i].name[0] != '\0'; i++) { temp = readmeter(rp, ctlc, switches[i].ident, s1); if (temp < 0) continue; printf("%10s %s\n", switches[i].name, s1); } break; } temp = capkey(argv[++argi], switches); if (temp < 0) return (temp); cmdctl[0] = temp >> 8; cmdctl[1] = temp; if (argn - argi < 2) { cmdctl[2] = FI; rval = readmeter(rp, switches, temp, s1); break; } else { temp >>= 16; if (temp == A) { temp = capkey(argv[++argi], agc); } else if (temp == B) { temp = capkey(argv[++argi], fmtb); } else if (temp == W) { temp = capkey(argv[++argi], fmtw); } else { sscanf(argv[++argi], "%d", &temp); sprintf(s1, "%02d", temp); ptr = s1; temp = (*ptr++ & 0xf) * 16; temp += *ptr++ & 0xf; } if (temp < 0) return (temp); cmdctl[2] = temp; cmdctl[3] = FI; rval = setcmda(rp->ident, cmdctl, rsp); } break; /* * dial [ <step> ] (V_DIAL 0x10 command) * * Set dial tuning step. 
This command works with all radios, * including the 775 and R8500; however, the allowable arguments * are different. Note that in the R8500 the allowable steps are * constrained to multiples of 0.5 kHz. */ case C_DIAL: if (argn - argi < 2) { temp = cp->aux.step; if (temp != 0x13) printf("dial %s kHz\n", capname(temp, rp->dialtab)); else printf("dial %.1f kHz\n", freqdouble(cp->aux.pstep, 2) / 10); break; } /* * Construct V_DIAL command for tuning step. */ ptr = s1; *ptr++ = V_DIAL; dtemp = 0; temp = capkey(argv[++argi], rp->dialtab); if (temp < 0) return (temp); if (temp == 0x13) { if (sscanf(argv[argi], "%lf", &dtemp) != 1) return (R_FMT); } *ptr++ = temp; *ptr = FI; rval = setcmda(rp->ident, s1, rsp); if (rval < 0) break; /* * Save step and programmed step for later. */ cp->aux.step = temp; doublefreq(dtemp * 10, s1, 2); memcpy(cp->aux.pstep, s1, 2); break; /* *************************************************************** * * * CTSS and DTCS commands. These commands turn on and off and * * program the repeater tone (tone), tone squelch (tsql) and * * digital tone squelch (dtcs) functions. * * * *************************************************************** * * The syntax for all three commands is the same: * * tone reports the status and frequency/code * tone off turns off the function * tone on turns on the function * tone <code> turns on the function and programs the * frequency/code * tone ? reports a list of valid CTSS/DTCS code * tsql ? values. The <code> must be identical to * an item in the list. * * dtcs ? reports a list of valid DTCS code values. The * <code> must be identical to an item on the list. * The polarity codes "-pp" are appended, where pp * are N (upright) or R (inverted). * * The three commands are mutually exclusive; setting one of them * on turns off the others. */ /* * tone [ args ] (V_TONE 0x1b command) * * Set the repeater tone CTSS frequency. This works for the 756 * and 7000. */ case C_TONE: return (setswitch3(rp, tone, 0x00, 0x42)); /* * tsql [ args ] (V_TONE 0x1b command) * * Set the tone squelch CTSS frequency. This works for the 756 * and 7000. */ case C_TSQL: return (setswitch3(rp, tone, 0x01, 0x43)); /* * dtcs [ args ] (V_TONE 0x1b command) * * Set the digital tone squelch DTCS code. This works only for * the 7000. */ case C_DTCS: return (setswitch3(rp, dtcs, 0x02, 0x4b)); /* *************************************************************** * * * Utility commands. Select the antenna, attenuator, preamp, * * agc and break-in options. * * * *************************************************************** */ /* * The duplex, preamp and attenuator commands can be set for * each band segment. There are four duplex offset registers, * one each for HF, 6 m, 2 m and 70 cm bands. The preamp and * attenuator settings are stored by band segment as follows: * * 706g 756 7000 * .3-1.6 .3-1.6 .03-1.6 * 1.6-2 1.6-2 1.6-2 160 m * 2-5 2-6 2-6 80 m * 5-8 6-8 6-8 40 m * 8-11 8-11 8-11 30 m * 11-20 11-20 20, 17 m * 11-15 20 m * 15-20 17 m * 20-22 20-22 20-22 15 m * 22-26 22-26 22-26 12 m * 26-40 26-45 26-30 10 m * 40-60 45-60 45-129 6 m * 129-144 * 60-148 144-148 2 m * 148-200 * 400-470 400-470 70 cm */ /* * ant [ 1 | 1R | 2 | 2R ] (V_SANT 0x12 command) * * Select antenna. Transceivers like the 756 have two antennas * (1, 2) for transmit/receive and a third (R) for receive only. * For instance, option 1R selects antenna 1 for transmit and R * for receive.
*/ case C_ANT: argv[argi] = capname(sw, cmd); return (setswitch(rp, ant, V_SANT)); /* * atten [ <command> ] (V_ATTEN 0x11 command) * * Set attenuator options. The options include all known * attenuator options in dB. Following are the known values for * a few modern radios. * * 706G 20 dB * 756, 7000 6, 12, 18 dB * R8500 10, 20, 30 dB */ case C_ATTEN: argv[argi] = capname(sw, cmd); return (setswitch(rp, atten, V_ATTEN)); /* * preamp [ off | 1 | 2 ] * * Set preamp options. Some radios have none, one or two preamp * settings. * * 706G, 7000 1 * 756 2 * R8500 none */ case C_PAMP: argv[argi] = capname(sw, cmd); return (setswitch2(rp, preamp, (V_TOGL << 8) | 0x02)); /* * agc [ slow | medium | fast ] * * Set AGC options. Some radios have none or a subset of these * options. * * 706G medium, fast * 756, 7000 slow, medium, fast * R8500 slow (agcslow), fast (agcfast) */ case C_AGC: argv[argi] = capname(sw, cmd); return (setswitch2(rp, agc, (V_TOGL << 8) | 0x12)); /* * break [ off | semi | full ] * * Set break options. This works on the 706G, 756 and 7000. */ case C_BREAK: argv[argi] = capname(sw, cmd); return (setswitch2(rp, fmtb, (V_TOGL << 8) | 0x47)); /* *************************************************************** * * * Power and voice commands. These commands control the power * * to the radio, turn the transmitter on and off and report * * the status by voice. * * * *************************************************************** */ /* * power [ off | on ] (V_POWER 0x18 command) * * Set power on/off. The radio will be powered off after the * sleep interval, but it will still listen for a power on * command. This works only on the R8500. */ case C_POWER: return (setswitch(rp, power, V_POWER)); /* * ptt (V_PTT 0x1c command) * * Display transmit condition; turn transmitter on and off. */ case C_PTT: return (setswitch2(rp, tx, V_TX << 8 | 0x00)); /* * rx (V_TX 0x1c command). Turn transmitter off. */ case C_RX: return (setcmda(rp->ident, cmdrx, rsp)); /* * tx (V_TX 0x1c command). Turn transmitter on. */ case C_TX: return (setcmda(rp->ident, cmdtx, rsp)); /* * say [ all | freq | mode ] (V_ANNC 0x13 command) * * Set announce control off/on. This requires the UT-102 Voice * Synthesizer Unit, which is standard in the 7000. */ case C_ANNC: return (setswitch(rp, say, V_ANNC)); /* *************************************************************** * * * Mongrels. These commands are used for testing and just * * playing around. * * * *************************************************************** */ /* * name <string> * * Set channel name. Enclose in quotes if <string> contains * spaces. */ case C_NAME: if (argn - argi < 2) printf("%s\n", cp->name); else strlcpy(cp->name, argv[++argi], sizeof(cp->name)); break; /* * key <id> <string> (V_SETW 0x1a command) * * Program memory keyer (756 and 7000). Each of four memory * keyer channels can hold 55 characters. */ case C_KEY: if (argn - argi < 2) { rval = R_ARG; break; } /* * Get memory keyer ID (1-4) */ ptr = s1; *ptr++ = V_SETW; *ptr++ = 0x02; if (sscanf(argv[++argi], "%d", &temp) != 1) { rval = R_FMT; break; } *ptr++ = temp; /* * If no argument string, read from radio. Remove * trailing spaces from the radio string. */ if (argn - argi < 2) { *ptr = FI; rval = setcmda(rp->ident, s1, rsp); if (rval < 0) return (rval); for (i = rval - 2; i > 2; i--) { if (rsp[i] == ' ') rsp[i] = '\0'; else break; } temp = rsp[2]; printf("%d (%d) %s\n", temp, i - 2, &rsp[3]); break; } /* * Concatenate remaining strings and send to radio.
		 */
		*ptr = '\0';
		temp = 55;
		while (argn - argi > 1 && temp > 0) {
			strncat(s1, argv[++argi], temp);
			strcat(s1, " ");
			temp -= strlen(argv[argi]) + 1;
		}
		s1[strlen(s1) - 1] = FI;
		rval = setcmda(rp->ident, s1, rsp);
		break;

	/*
	 * Miscellaneous control (S_CTRL) subcommands.
	 */
	case C_MISC:
		return (setswitch2(rp, misc, V_TOGL));

	/*
	 * test BCD
	 *
	 * Send CI-V test message
	 */
	case C_TEST:
		if (argn - argi < 2)
			break;

		ptr = s1;
		for (i = 1; i < argn; i++) {
			sscanf(argv[++argi], "%x", &temp);
			*ptr++ = temp;
		}
		*ptr = FI;
		rval = setcmda(rp->ident, s1, rsp);
		break;

	/*
	 * step (r8500)
	 */
	case C_XSTEP:
		if (sscanf(argv[++argi], "%x", &temp) != 1) {
			rval = R_FMT;
			break;
		}
		cp->aux.step = temp;
		break;

	/*
	 * pstep (r8500)
	 */
	case C_PSTEP:
		break;
	}
	return (rval);
}

/*
 * capname(ident, table) - returns capability name given key
 */
char *				/* capability name, "" (not found) */
capname(
	int ident,		/* capability key */
	struct cmdtable *table	/* capability table */
	)
{
	int i;

	for (i = 0; table[i].name[0] != '\0'; i++) {
		if (table[i].ident == ident)
			return (table[i].name);
	}
	return ("");
}

/*
 * capkey(name, table) - returns capability key given name
 */
int				/* capability key, -1 (not found) */
capkey(
	char *name,		/* capability name */
	struct cmdtable *table	/* capability table */
	)
{
	int i, temp;

	if (*name == '?') {
		for (i = 0; table[i].name[0] != '\0'; i++)
			printf("%10s %s\n", table[i].name,
			    table[i].descr);
		return (R_NOP);
	}
	for (i = 0; table[i].name[0] != '\0'; i++) {
		if (strcasecmp(name, table[i].name) == 0 ||
		    *table[i].name == '*')
			break;
	}
	if (table[i].ident == R_NOP)
		printf("*** %s\n", table[i].descr);
	return (table[i].ident);
}

/*
 * capdescr(name, table) - returns capability description given name
 */
char *				/* capability string, "" (not found) */
capdescr(
	char *name,		/* capability name */
	struct cmdtable *table	/* capability table */
	)
{
	int i;

	if (*name == '?') {
		for (i = 0; table[i].name[0] != '\0'; i++)
			printf("%10s %s\n", table[i].name,
			    table[i].descr);
		return ("");
	}
	for (i = 0; table[i].name[0] != '\0'; i++) {
		if (strcasecmp(name, table[i].name) == 0)
			break;
	}
	return (table[i].descr);
}

/*
 * setcap(name, table, string) - insert capability string
 */
void
setcap(
	char *name,		/* capability name */
	struct cmdtable *table,	/* capability table */
	char *string		/* capability string */
	)
{
	int i;

	for (i = 0; table[i].name[0] != '\0'; i++) {
		if (strcasecmp(name, table[i].name) == 0) {
			strcpy(table[i].descr, string);
			return;
		}
	}
	strcpy(table[i].name, name);
	strcpy(table[i].descr, string);
	table[i + 1].name[0] = '\0';
	table[i + 1].ident = R_NOP;
	table[i + 1].descr[0] = '\0';
}

/*
 * setswitch(radio, name, op) - V_VFO, V_SPLIT, V_SCAN, V_SANT, V_ATTEN,
 * V_POWER, V_TX, V_ANNC.
 *
 * Commands with a single character command and one argument, with the
 * single exception of the V_SANT, which can have one or two arguments
 * depending on the radio type. Only the V_SANT and V_ATTEN can be read
 * back from the radio unambiguously.
 */
static int
setswitch(
	struct icom *rp,	/* radio structure pointer */
	struct cmdtable *cmmd,	/* command table pointer */
	int op			/* command code */
	)
{
	struct chan *cp;
	struct vfo7000 *vp;
	u_char cmd[BMAX], rsp[BMAX];
	int temp, rval;

	cmd[0] = op;
	cmd[1] = FI;
	if (argn - argi < 2) {

		/*
		 * Read data from radio. Only the V_SANT and V_ATTEN
		 * return unambiguous data. The V_SANT returns one or
		 * two octets depending on the transceiver model. The
		 * R8500 can't return anything.
		 */
		switch (op) {

		case V_SANT:
			rval = setcmda(rp->ident, cmd, rsp);
			if (rval < 0)
				break;

			temp = rsp[1];
			if (rsp[2] != FI)
				temp |= rsp[2] << 8;
			printf("%s %s\n", argv[argi], capname(temp,
			    cmmd));
			break;

		case V_ATTEN:
			rval = setcmda(rp->ident, cmd, rsp);
			if (rval < 0)
				break;

			temp = rsp[1];
			printf("%s %s dB\n", argv[argi],
			    capname(temp, cmmd));
			break;

		case V_ANNC:
			cmd[1] = 0x00;
			cmd[2] = FI;
			rval = setcmda(rp->ident, cmd, rsp);
			pflags |= P_DISP;
			break;

		default:
			rval = R_ARG;
		}
	} else {

		/*
		 * Write data to the radio. First, decode argument.
		 */
		temp = capkey(argv[++argi], cmmd);
		if (temp < 0)
			return (temp);

		cmd[2] = FI;

		/*
		 * For the ant command, find out if one or two
		 * subcommand bytes follow.
		 */
		if (op == V_SANT) {
			rval = setcmda(rp->ident, cmd, rsp);
			if (rval < 0)
				return (rval);

			if (rsp[2] != FI) {
				cmd[2] = temp >> 8;
				cmd[3] = FI;
			}
		}
		cmd[1] = temp;
		rval = setcmda(rp->ident, cmd, rsp);
		if (rval < 0)
			return (rval);

		/*
		 * For the attenuator command, copy attenuator code for
		 * R8500.
		 */
		if (op == V_ATTEN) {
			cp = &rp->chan;
			cp->aux.atten = temp;

		/*
		 * For the split duplex subcommands, edit mode2.
		 */
		} else if (op == V_SPLIT) {
			cp = &rp->chan;
			vp = &cp->vfo;
			switch (temp) {

			case 0x10:	/* simplex */
				vp->mode2 = (vp->mode2 & 0x0f) | 0x00;
				break;

			case 0x11:	/* dup- */
				vp->mode2 = (vp->mode2 & 0x0f) | 0x10;
				break;

			case 0x12:	/* dup+ */
				vp->mode2 = (vp->mode2 & 0x0f) | 0x20;
				break;
			}
		}
	}
	return (rval);
}

/*
 * setswitch2(radio, name, op) - V_PAMP, V_AGC, V_BREAK, V_PTT and
 * V_MISC.
 *
 * Commands with a two-octet command and one argument.
 */
static int
setswitch2(
	struct icom *rp,	/* radio structure pointer */
	struct cmdtable *cmmd,	/* command table pointer */
	int op			/* command code */
	)
{
	u_char cmd[BMAX], rsp[BMAX];
	int temp, rval, i;

	rval = R_OK;
	cmd[0] = op >> 8;
	cmd[1] = op;
	cmd[2] = FI;
	if (argn - argi < 2) {
		rval = setcmda(rp->ident, cmd, rsp);
		if (rval < 0)
			return (rval);

		for (i = 0; cmmd[i].name[0] != '\0'; i++) {
			if ((cmmd[i].ident & 0xff) == rsp[2]) {
				printf("%s %s\n", argv[argi],
				    cmmd[i].name);
				break;
			}
		}
	} else {
		temp = capkey(argv[++argi], cmmd);
		if (temp < 0)
			return (temp);

		cmd[2] = temp;
		cmd[3] = FI;
		rval = setcmda(rp->ident, cmd, rsp);
	}
	return (rval);
}

/*
 * setswitch3(radio, name, op, sw) - V_TONE, etc.
 *
 * This routine is used with the tone, tsql and dtcs commands, which
 * turn on and off and program CTSS tones and DTCS codes.
 */
static int
setswitch3(
	struct icom *rp,	/* radio structure pointer */
	struct cmdtable *cmmd,	/* command table pointer */
	int op,			/* command code */
	int sw			/* switch code */
	)
{
	struct chan *cp;
	struct vfo7000 *vp;
	u_char rsp[BMAX];
	int temp, rval, i;
	u_char cmdctl[] = {V_TONE, 0, 0, 0, 0, FI};
	u_char cmdswt[] = {V_TOGL, 0, 0, FI};
	char *token;

	cmdctl[1] = op;
	cmdctl[2] = FI;
	cmdswt[1] = sw;
	cmdswt[2] = FI;
	if (argn - argi < 2) {

		/*
		 * Read switch code
		 */
		rval = setcmda(rp->ident, cmdswt, rsp);
		if (rval < 0)
			return (rval);

		temp = rsp[2];
		token = capname(temp, toneon);

		/*
		 * Read frequency/code
		 */
		rval = setcmda(rp->ident, cmdctl, rsp);
		if (rval < 0)
			return (rval);

		temp = rsp[3] << 8 | rsp[4];
		for (i = 0; cmmd[i].name[0] != '\0'; i++) {
			if ((cmmd[i].ident) == temp) {
				if (cmdctl[1] == 0x02) {
					temp = rsp[2];
					printf("%s %s (%s-%s)\n",
					    argv[argi], token,
					    cmmd[i].name,
					    capname(temp, polar));
				} else {
					printf("%s %s (%s Hz)\n",
					    argv[argi], token,
					    cmmd[i].name);
				}
			}
		}
	} else {

		/*
		 * Set frequency/code. Repeater tone and tone squelch
		 * frequencies are in nn.n Hz and tenths. Digital tone
		 * squelch codes are in nn-pp code polarity.
		 */
		token = strtok(argv[++argi], "-");
		temp = capkey(token, cmmd);
		if (temp < 0)
			return (temp);

		cp = &rp->chan;
		vp = &cp->vfo;
		if (temp > 0x01) {
			cmdctl[3] = temp >> 8;
			cmdctl[4] = temp;
			token = strtok(NULL, "-");
			if (token == NULL) {
				cmdctl[2] = 0;
			} else {
				temp = capkey(token, polar);
				if (temp < 0)
					return (temp);

				cmdctl[2] = temp;
			}
			rval = setcmda(rp->ident, cmdctl, rsp);
			if (rval < 0)
				return (rval);

			/*
			 * Update VFO tones
			 */
			switch (op) {

			case 0:
				memcpy(&vp->tone, &cmdctl[2], 3);
				break;

			case 1:
				memcpy(&vp->tsql, &cmdctl[2], 3);
				break;

			case 2:
				memcpy(&vp->dtcs, &cmdctl[2], 3);
				break;
			}
			temp = 0x01;
		}

		/*
		 * Set switch code
		 */
		vp->mode2 &= 0xf0;
		if (temp == 0x01)
			vp->mode2 += op + 1;
		cmdswt[2] = temp;
		rval = setcmda(rp->ident, cmdswt, rsp);
	}
	return (rval);
}

/*
 * readmeter(radio, optab, op, pstring) - V_METER, V_TOGL
 */
static int
readmeter(
	struct icom *rp,	/* radio structure */
	struct cmdtable *optab,	/* command table */
	int op,			/* operation code */
	char *s2		/* result string */
	)
{
	u_char cmd[] = {0, 0, 0, 0, FI};
	char rsp[BMAX], *ptr;
	int temp, i;

	/*
	 * Read register or switch
	 */
	cmd[0] = op >> 8;
	cmd[1] = op;
	cmd[2] = FI;
	temp = setcmda(rp->ident, cmd, rsp);
	if (temp < 0)
		return (temp);

	if (temp < 5)
		temp = ((rsp[2] >> 4) & 0xf) * 10 + (rsp[2] & 0xf);
	else
		temp = ((rsp[2] >> 4) & 0xf) * 1000 + (rsp[2] & 0xf) *
		    100 + ((rsp[3] >> 4) & 0xf) * 10 + (rsp[3] & 0xf);
	ptr = capname(op, optab);

	/*
	 * Format as required
	 */
	switch (op >> 16) {

	case A:			/* agc */
		if (temp == 1)
			strcpy(s2, "fast");
		else if (temp == 2)
			strcpy(s2, "mid");
		else if (temp == 3)
			strcpy(s2, "slow");
		break;

	case B:			/* breakin */
		if (temp == 0)
			strcpy(s2, "off");
		else if (temp == 1)
			strcpy(s2, "semi");
		else if (temp == 2)
			strcpy(s2, "full");
		break;

	case F:			/* signed control */
		sprintf(s2, "%d", ((temp - 128) * 100) / 256);
		break;

	case G:			/* unsigned control */
		sprintf(s2, "%d", (temp * 100) / 256);
		break;

	case P:			/* preamp */
		if (temp == 0)
			strcpy(s2, "off");
		else if (temp == 1)
			strcpy(s2, "1");
		else if (temp == 2)
			strcpy(s2, "2");
		break;

	case Q:			/* squelch */
		if (temp == 1)
			strcpy(s2, "open");
		else
			strcpy(s2, "closed");
		break;

	case S:			/* S meter */
		for (i = 0; temp > mtab[i].smeter; i++)
			;
		strcpy(s2, mtab[i].pip);
		break;

	case W:			/* miscellaneous switch */
		if (temp == 0)
			strcpy(s2, "off");
		else
			strcpy(s2, "on");
		break;
	}
	return (R_OK);
}

/*
 * banner - format and print banner
 */
void
banner(
	struct icom *rp		/* radio structure pointer */
	)
{
	printf("radio %s (%02x) %g-%g MHz chan %d bank %d baud %s\n",
	    capdescr(rp->name, identab), rp->ident, rp->lband,
	    rp->uband, rp->maxch - rp->minch + 1, rp->maxbk -
	    rp->minbk + 1, capname(rp->baud, baud));
	rptr = rp;
}

/*
 * printch - format and print channel data
 */
static void
printch(
	struct icom *rp,	/* radio structure pointer */
	char *s1		/* prettyprint string */
	)
{
	struct chan *cp;
	struct vfo7000 *vp;
	char s2[LINMAX];
	char *s3;
	int temp;
	double dtemp;

	/*
	 * Reveal frequency and mode
	 */
	cp = &rp->chan;
	vp = &cp->vfo;
	readvfo(rp);
	if (cp->freq == 0) {
		if (rp->flags & F_BANK)
			sprintf(s1, "%2d.%-2d empty", cp->bank,
			    cp->mchan);
		else
			sprintf(s1, "%2d empty", cp->mchan);
		return;
	}
	if (rp->flags & F_BANK)
		sprintf(s1, "%2d.%-2d %.*f MHz %s", cp->bank,
		    cp->mchan, sigtab[rp->rate], cp->freq,
		    capname(cp->mode, rp->modetab));
	else
		sprintf(s1, "%2d %.*f MHz %s", cp->mchan,
		    sigtab[rp->rate], cp->freq, capname(cp->mode,
		    rp->modetab));
	if (pflags & P_DSPST) {
		sprintf(s2, " rate %d step %.0f Hz", rp->rate,
		    rp->step);
		strcat(s1, s2);
	}
	if (!(pflags & P_DSPCH))
		return;

	if (flags & (F_7000 | F_756)) {

		/*
		 * Reveal split
		 */
		if (cp->split != 0) {
			sprintf(s2, " split %.*f MHz",
			    sigtab[rp->rate], cp->split);
			strcat(s1, s2);
		}

		/*
		 * Reveal duplex direction
		 */
		temp = vp->mode2 & 0xf0;
		switch (temp) {

		case 0x10:
			strcat(s1, " split dup-");
			break;

		case 0x20:
			strcat(s1, " split dup+");
			break;
		}

		/*
		 * Reveal tone squelch info
		 */
		temp = vp->mode2 & 0xf;
		switch (temp) {

		case 1:
			temp = (vp->tone[1] << 8) | vp->tone[2];
			sprintf(s2, " tone %s Hz", capname(temp,
			    tone));
			strcat(s1, s2);
			break;

		case 2:
			temp = (vp->tsql[1] << 8) | vp->tsql[2];
			sprintf(s2, " tsql %s Hz", capname(temp,
			    tone));
			strcat(s1, s2);
			break;

		case 3:
			temp = (vp->dtcs[1] << 8) | vp->dtcs[2];
			sprintf(s2, " dtcs %s-", capname(temp, dtcs));
			strcat(s1, s2);
			temp = vp->dtcs[0] & 0x3;
			sprintf(s2, "%s", capname(temp, polar));
			strcat(s1, s2);
		}
	} else if (flags & F_8500) {

		/*
		 * Reveal tuning step.
		 */
		if (cp->aux.step != 0) {
			temp = cp->aux.step;
			if (temp != 0x13)
				sprintf(s2, " dial %s kHz",
				    capname(temp, rp->dialtab));
			else
				sprintf(s2, " dial %.1f kHz",
				    freqdouble(cp->aux.pstep, 2) / 10);
			strcat(s1, s2);
		}

		/*
		 * Reveal attenuator setting.
		 */
		if (cp->aux.atten != 0) {
			sprintf(s2, " atten %x dB", cp->aux.atten);
			strcat(s1, s2);
		}
	}

	/*
	 * Reveal channel name, enclosed in quotes (").
	 */
	if (cp->name[0] != '\0') {
		sprintf(s2, " name \"%s\"", cp->name);
		strcat(s1, s2);
	}
}

/*
 * Print error comment
 */
static void
perr(
	int temp		/* error code */
	)
{
	switch (temp) {

	case R_CMD:
		printf("*** unknown command\n");
		break;

	case R_ARG:
		printf("*** unknown or missing command argument\n");
		break;

	case R_FMT:
		printf("*** invalid argument format\n");
		break;

	case R_RAD:
		printf("*** unknown radio\n");
		break;

	case R_NOR:
		printf("*** no radios found\n");
		break;

	case R_DEF:
		printf("*** radio not defined\n");
		break;

	case R_ERR:
		printf("*** radio can't do that\n");
		break;

	case R_IO:
		printf("*** file open error\n");
		break;
	}
}

/*
 * getline(str) - process input line and extract tokens
 *
 * Blank lines and comments beginning with '#' are ignored and the
 * string converted to lower case. The resulting tokens are saved in the
 * *argv[] array. The number of tokens found is returned to the caller.
 */
static int			/* number of tokens */
getline(
	char *str		/* pointer to input string */
	)
{
	char *ptr;
	char xbreak[] = " ,\t\n\0";
	char sbreak[] = "\"\n\0";
	int i, temp;

	/*
	 * Trim trailing \r and comments
	 */
	ptr = strchr(str, '\r');
	if (ptr != NULL)
		*ptr = '\0';
	ptr = strchr(str, '#');
	if (ptr != NULL)
		*ptr = '\0';

	/*
	 * Scan for tokens delimited by space, comma, tab, newline or
	 * null. Arbitrary strings begin with quote " and end with
	 * quote, newline or null. Quotes are not included in the token.
	 */
	ptr = str;
	for (i = 0; i < ARGMAX;) {
		temp = strspn(ptr, xbreak);
		ptr += temp;
		if (*ptr == '\0')
			break;

		if (*ptr == '"') {
			argv[i++] = ++ptr;
			temp = strcspn(ptr, sbreak);
		} else {
			argv[i++] = ptr;
			temp = strcspn(ptr, xbreak);
		}
		ptr += temp;
		if (*ptr == '\0')
			break;

		*ptr++ = '\0';
	}
	argn = i;
	return (i);
}

/*
 * argchan(radio, chan, sptr) - decode channel argument
 *
 *	NULL		current bank/channel
 *	$		all channels, current bank
 *	b.$		all channels, bank b
 *	$:$		all channels, all banks
 *	+		current bank/channel plus 1 with wrap
 *	-		current bank/channel minus 1 with wrap
 *	c		channel c, current bank
 *	b.c		channel c, bank b
 *	c1:c2		channel range c1-c2, current bank with wrap
 *	b1.c1:b2.c2	channel range b1.c1-b2.c2 with wrap
 *
 * returns 0 if single, 1 if multiple, 2 if reversed, 3 if end of range,
 * R_NOP if error
 */
static int			/* > 0 (ok), < 0 (error) */
argchan(
	struct icom *rp,	/* radio structure */
	struct chan *cp,	/* channel structure */
	char *sptr		/* ascii argument pointer */
	)
{
	int rval, bank, mchan, topbk, topch;

	/*
	 * null: current channel only
	 */
	if (cp->bank == rp->topbk && cp->mchan == rp->topch)
		rval = 3;
	else
		rval = 0;
	if (sptr == NULL)
		return (0);

	/*
	 * '?': format help
	 */
	if (*sptr == '?') {
		capkey(sptr, argch);
		return (R_NOP);
	}

	/*
	 * " ": end of range
	 */
	if (*sptr == ' ')
		return (3);

	/*
	 * "+": next higher channel, current bank with wrap
	 */
	if (*sptr == '+') {
		cp->mchan++;
		if (cp->mchan > rp->maxch) {
			cp->mchan = rp->minch;
			cp->bank++;
			if (cp->bank > rp->maxbk)
				cp->bank = rp->minbk;
		}
		return (rval);
	}

	/*
	 * "-": next lower channel, current bank with wrap
	 */
	if (*sptr == '-') {
		cp->mchan--;
		if (cp->mchan < rp->minch) {
			cp->mchan = rp->maxch;
			cp->bank--;
			if (cp->bank < rp->minbk)
				cp->bank = rp->maxbk;
		}
		return (rval);
	}

	/*
	 * "$": all channels, current bank
	 */
	if (strcmp(sptr, "$") == 0) {
		rp->topbk = cp->bank;
		cp->mchan = rp->minch;
		rp->topch = rp->maxch;
		return (1);
	}

	/*
	 * "$:$": all banks, all channels
	 */
	if (strcmp(sptr, "$:$") == 0) {
		cp->bank = rp->minbk;
		rp->topbk = rp->maxbk;
		cp->mchan = rp->minch;
		rp->topch = rp->maxch;
		return (1);
	}

	/*
	 * "b.$": bank b, all channels
	 */
	if (strstr(sptr, ".$") != NULL && sscanf(sptr, "%d", &bank) ==
	    1) {
		cp->bank = bank;
		rp->topbk = bank;
		cp->mchan = rp->minch;
		rp->topch = rp->maxch;
		return (1);
	}

	/*
	 * "b1.c1:b2.c2": channel range b1.c1-b2.c2
	 */
	if (sscanf(sptr, "%d.%d:%d.%d", &bank, &mchan, &topbk, &topch)
	    == 4) {
		cp->bank = bank;
		rp->topbk = topbk;
		cp->mchan = mchan;
		rp->topch = topch;
		if (cp->bank > rp->topbk || (cp->bank == rp->topbk &&
		    cp->mchan > rp->topch))
			return (2);
		else
			return (1);
	}

	/*
	 * "c1:c2": channel range c1-c2, current bank
	 */
	if (sscanf(sptr, "%d:%d", &mchan, &topch) == 2) {
		rp->topbk = cp->bank;
		cp->mchan = mchan;
		rp->topch = topch;
		if (cp->mchan > rp->topch)
			return (2);
		else
			return (1);
	}

	/*
	 * "b.c": channel c, bank b only
	 */
	if (sscanf(sptr, "%d.%d", &bank, &mchan) == 2) {
		cp->bank = rp->topbk = bank;
		cp->mchan = rp->topch = mchan;
		return (0);
	}

	/*
	 * "c": channel c, current bank
	 */
	if (sscanf(sptr, "%d", &mchan) == 1) {
		rp->topbk = cp->bank;
		cp->mchan = rp->topch = mchan;
		return (0);
	}
	printf("*** bad channel format %s\n", sptr);
	return (R_NOP);
}

/*
 * sw_keypad() - switch to keypad mode
 */
static int			/* 0 (ok), < 0 (system error) */
sw_keypad()
{
	fd = open("/dev/tty", O_RDONLY);
	if (fd < 0)
		return (R_IO);

	if (tcgetattr(fd, &terma) < 0)
		return (R_IO);

	tcgetattr(fd, &termb);
	termb.c_lflag &= ~(ICANON | ECHO);
	termb.c_cc[VMIN] = 1;
	termb.c_cc[VTIME] = 0;
	if (tcsetattr(fd, TCSADRAIN, &termb) < 0)
		return (R_IO);

	return (R_OK);
}

/*
 * sw_keybd() - switch to keyboard mode
 */
static int			/* 0 (ok), < 0 (system error) */
sw_keybd()
{
	if (tcsetattr(fd, TCSADRAIN, &terma) < 0)
		return (R_IO);

	return (R_OK);
}

/* end program */
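A note for readers tracing the dial and meter commands above: CI-V data
fields are packed BCD, two decimal digits per octet, which is what
readmeter() unpicks with its shift-and-mask arithmetic. Below is a
minimal, self-contained sketch of that decode; the helper name
bcd2int() and the most-significant-octet-first ordering mirror the
readmeter() code above and are illustrative, not part of the icom
program itself.

#include <stdio.h>

/*
 * Decode noct octets of packed BCD, most significant octet first.
 * Each octet carries two digits: tens in the high nibble, ones in
 * the low nibble, exactly as readmeter() assumes.
 */
static int
bcd2int(const unsigned char *buf, int noct)
{
	int val = 0;
	int i;

	for (i = 0; i < noct; i++)
		val = val * 100 + ((buf[i] >> 4) & 0xf) * 10 +
		    (buf[i] & 0xf);
	return (val);
}

int
main(void)
{
	unsigned char rsp[] = { 0x12, 0x34 };	/* digits 1 2 3 4 */

	printf("%d\n", bcd2int(rsp, 2));	/* prints 1234 */
	return (0);
}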
utf-8
1
unknown
unknown
mame-0.240+dfsg.1/src/devices/bus/pc_kbd/msnat.h
// license:BSD-3-Clause
// copyright-holders:Wilbert Pol
/***************************************************************************

    Microsoft Natural Keyboard

***************************************************************************/

#ifndef MAME_BUS_PC_KB_MSNAT_H
#define MAME_BUS_PC_KB_MSNAT_H

#pragma once

#include "cpu/mcs51/mcs51.h"
#include "pc_kbdc.h"


//**************************************************************************
//  TYPE DEFINITIONS
//**************************************************************************

class pc_kbd_microsoft_natural_device : public device_t,
	public device_pc_kbd_interface
{
public:
	// construction/destruction
	pc_kbd_microsoft_natural_device(const machine_config &mconfig, const char *tag, device_t *owner, uint32_t clock);

protected:
	// device-level overrides
	virtual void device_start() override;
	virtual void device_reset() override;

	// optional information overrides
	virtual void device_add_mconfig(machine_config &config) override;
	virtual ioport_constructor device_input_ports() const override;
	virtual const tiny_rom_entry *device_rom_region() const override;

	virtual DECLARE_WRITE_LINE_MEMBER(clock_write) override;
	virtual DECLARE_WRITE_LINE_MEMBER(data_write) override;

private:
	required_device<i8051_device> m_cpu;
	required_ioport_array<8> m_p2_r;
	required_ioport_array<8> m_p1_r;

	uint8_t m_p0;
	uint8_t m_p1;
	uint8_t m_p2;
	uint8_t m_p3;

	uint8_t p0_read();
	void p0_write(uint8_t data);
	void p1_write(uint8_t data);
	void p2_write(uint8_t data);
	uint8_t p3_read();
	void p3_write(uint8_t data);
};


// device type definition
DECLARE_DEVICE_TYPE(PC_KBD_MICROSOFT_NATURAL, pc_kbd_microsoft_natural_device)

#endif // MAME_BUS_PC_KB_MSNAT_H
utf-8
1
BSD-3-clause
Aaron Giles Alex Pasadyn Alex Wulms Antoine Mine Brad Martin Bryan McPhail Chris Kirmse Dag Lem Dreamer Nom Eric Smith Ernesto Corvi Fabio Priuli Frank Palazzolo F. Ulivi Grazvydas Ignotas hap Igor Jarek Burczynski Jason Eckhardt John Butler John Weidman Jonas Quinn Jonathan Gevaryahu Joseph Zbiciak Juergen Buchmueller J. Wallace Karl Stenerud Kris Bleakley Lancer MAME32/MAMEUI team Mariusz Wojcieszek Michael Soderstrom Miodrag Milanovic Mirko Buffoni Nach Nicola Salmoria Olivier Galibert Peter J.C.Clare Peter Trauner Raphael Nabet Ron Fries R. Belmont Sean Young smf Steve Baines Steve Ellenoff Sven Gothel Tatsuyuki Satoh The AGEMAME Development Team The MAME team The MESS team Tim Lindner Tony La Porta XelaSoft z80 gaiden Zsolt Vasvari
chromium-98.0.4758.102/third_party/skia/tests/SkSLCross.cpp
/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "tests/Test.h"

#include "src/gpu/GrFragmentProcessor.h"
#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
#include "src/gpu/v1/SurfaceDrawContext_v1.h"

static void run_test(skiatest::Reporter*, GrDirectContext*, skgpu::v1::SurfaceDrawContext*,
                     SkVector a, SkVector b, float expectedCrossProduct);

// This is a GPU test that ensures the SkSL 2d cross() intrinsic returns the correct sign
// (negative, positive, or zero).
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(SkSLCross, reporter, ctxInfo) {
    GrDirectContext* dContext = ctxInfo.directContext();
    auto sdc = skgpu::v1::SurfaceDrawContext::Make(dContext, GrColorType::kRGBA_8888, nullptr,
                                                   SkBackingFit::kExact, {1, 1}, SkSurfaceProps());
    if (!sdc) {
        ERRORF(reporter, "could not create render target context.");
        return;
    }
    run_test(reporter, dContext, sdc.get(), {3,4}, {5,6}, -2);  // Negative.
    run_test(reporter, dContext, sdc.get(), {3,4}, {-5,-6}, 2);  // Positive.
    run_test(reporter, dContext, sdc.get(), {0, 2.287f}, {0, -7.741f}, 0);  // Zero.
    run_test(reporter, dContext, sdc.get(), {62.17f, 0}, {-43.49f, 0}, 0);  // Zero.
}

namespace {

// Outputs:
//     Green if cross(a,b) > 0
//     Red if cross(a,b) < 0
//     Black if cross(a,b) == 0
class VisualizeCrossProductSignFP : public GrFragmentProcessor {
public:
    VisualizeCrossProductSignFP(SkVector a, SkVector b)
            : GrFragmentProcessor(kTestFP_ClassID, kPreservesOpaqueInput_OptimizationFlag)
            , fA(a), fB(b) {
    }
    const char* name() const override { return "VisualizeCrossProductSignFP"; }
    std::unique_ptr<GrFragmentProcessor> clone() const override {
        return std::unique_ptr<GrFragmentProcessor>(new VisualizeCrossProductSignFP(fA, fB));
    }

private:
    void onAddToKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override {}
    bool onIsEqual(const GrFragmentProcessor&) const override { return true; }

    std::unique_ptr<ProgramImpl> onMakeProgramImpl() const override {
        class Impl : public ProgramImpl {
        public:
            void emitCode(EmitArgs& args) override {
                auto& fp = args.fFp.cast<VisualizeCrossProductSignFP>();
                const char *a, *b;
                fAUniform = args.fUniformHandler->addUniform(&fp, kFragment_GrShaderFlag,
                                                             GrSLType::kFloat2_GrSLType, "a", &a);
                fBUniform = args.fUniformHandler->addUniform(&fp, kFragment_GrShaderFlag,
                                                             GrSLType::kFloat2_GrSLType, "b", &b);
                args.fFragBuilder->codeAppendf(R"(
                        float crossProduct = cross(%s, %s);
                        float2 visualization = clamp(float2(-sign(crossProduct),
                                                            sign(crossProduct)),
                                                     float2(0), float2(1));
                        return half2(visualization).xy01;)", a, b);
            }

        private:
            void onSetData(const GrGLSLProgramDataManager& pdman,
                           const GrFragmentProcessor& processor) override {
                const auto& fp = processor.cast<VisualizeCrossProductSignFP>();
                pdman.set2f(fAUniform, fp.fA.x(), fp.fA.y());
                pdman.set2f(fBUniform, fp.fB.x(), fp.fB.y());
            }

            GrGLSLUniformHandler::UniformHandle fAUniform;
            GrGLSLUniformHandler::UniformHandle fBUniform;
        };

        return std::make_unique<Impl>();
    }

    const SkVector fA, fB;
};

}  // namespace

static void run_test(skiatest::Reporter* reporter, GrDirectContext* directContext,
                     skgpu::v1::SurfaceDrawContext* sdc, SkVector a, SkVector b,
                     float expectedCrossProduct) {
    SkASSERT(sdc->width() == 1);
    SkASSERT(sdc->height() == 1);

    sdc->clear(SkPMColor4f::FromBytes_RGBA(0xbaaaaaad));

    GrPaint crossPaint;
    crossPaint.setColor4f(SK_PMColor4fWHITE);
    crossPaint.setPorterDuffXPFactory(SkBlendMode::kSrcOver);
    crossPaint.setColorFragmentProcessor(std::make_unique<VisualizeCrossProductSignFP>(a, b));
    sdc->drawRect(/*clip=*/nullptr, std::move(crossPaint), GrAA::kNo, SkMatrix::I(),
                  SkRect::MakeWH(1,1));

    GrColor result;
    GrPixmap resultPM(SkImageInfo::Make(1, 1, kRGBA_8888_SkColorType, kPremul_SkAlphaType),
                      &result, sizeof(GrColor));
    sdc->readPixels(directContext, resultPM, {0, 0});

    SkASSERT(expectedCrossProduct == a.cross(b));
    if (expectedCrossProduct > 0) {
        REPORTER_ASSERT(reporter, result == GrColorPackRGBA(0, 255, 0, 255));  // Green.
    } else if (expectedCrossProduct < 0) {
        REPORTER_ASSERT(reporter, result == GrColorPackRGBA(255, 0, 0, 255));  // Red.
    } else {
        REPORTER_ASSERT(reporter, result == GrColorPackRGBA(0, 0, 0, 255));  // Black.
    }
}
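As a plain-CPU cross-check of what the GPU test above expects: the 2d
cross() intrinsic is the scalar a.x*b.y - a.y*b.x, and only its sign
matters to the visualization (green for positive, red for negative,
black for zero). A minimal sketch, independent of Skia; vec2 and
cross2d are illustrative names, not Skia API.

#include <stdio.h>

struct vec2 { float x, y; };

/* Scalar 2-D cross product; its sign selects the expected color. */
static float cross2d(struct vec2 a, struct vec2 b)
{
	return a.x * b.y - a.y * b.x;
}

int main(void)
{
	struct vec2 a = {3.0f, 4.0f};
	struct vec2 b = {5.0f, 6.0f};
	struct vec2 c = {-5.0f, -6.0f};
	struct vec2 d = {0.0f, 2.287f};
	struct vec2 e = {0.0f, -7.741f};

	printf("%g\n", cross2d(a, b));	/* -2: the "Negative" (red) case */
	printf("%g\n", cross2d(a, c));	/*  2: the "Positive" (green) case */
	printf("%g\n", cross2d(d, e));	/*  0: a "Zero" (black) case */
	return 0;
}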
utf-8
1
BSD-3-clause
The Chromium Authors. All rights reserved.
python3-typed-ast-1.4.3/ast3/Python/graminit.c
/* Generated by Parser/pgen */ #include "../Include/pgenheaders.h" #include "../Include/grammar.h" extern grammar _Ta3Parser_Grammar; static arc arcs_0_0[3] = { {2, 1}, {3, 1}, {4, 2}, }; static arc arcs_0_1[1] = { {0, 1}, }; static arc arcs_0_2[1] = { {2, 1}, }; static state states_0[3] = { {3, arcs_0_0}, {1, arcs_0_1}, {1, arcs_0_2}, }; static arc arcs_1_0[3] = { {2, 0}, {6, 0}, {7, 1}, }; static arc arcs_1_1[1] = { {0, 1}, }; static state states_1[2] = { {3, arcs_1_0}, {1, arcs_1_1}, }; static arc arcs_2_0[1] = { {9, 1}, }; static arc arcs_2_1[2] = { {2, 1}, {7, 2}, }; static arc arcs_2_2[1] = { {0, 2}, }; static state states_2[3] = { {1, arcs_2_0}, {2, arcs_2_1}, {1, arcs_2_2}, }; static arc arcs_3_0[1] = { {11, 1}, }; static arc arcs_3_1[1] = { {12, 2}, }; static arc arcs_3_2[2] = { {13, 3}, {2, 4}, }; static arc arcs_3_3[2] = { {14, 5}, {15, 6}, }; static arc arcs_3_4[1] = { {0, 4}, }; static arc arcs_3_5[1] = { {15, 6}, }; static arc arcs_3_6[1] = { {2, 4}, }; static state states_3[7] = { {1, arcs_3_0}, {1, arcs_3_1}, {2, arcs_3_2}, {2, arcs_3_3}, {1, arcs_3_4}, {1, arcs_3_5}, {1, arcs_3_6}, }; static arc arcs_4_0[1] = { {10, 1}, }; static arc arcs_4_1[2] = { {10, 1}, {0, 1}, }; static state states_4[2] = { {1, arcs_4_0}, {2, arcs_4_1}, }; static arc arcs_5_0[1] = { {16, 1}, }; static arc arcs_5_1[3] = { {18, 2}, {19, 2}, {20, 2}, }; static arc arcs_5_2[1] = { {0, 2}, }; static state states_5[3] = { {1, arcs_5_0}, {3, arcs_5_1}, {1, arcs_5_2}, }; static arc arcs_6_0[1] = { {21, 1}, }; static arc arcs_6_1[1] = { {19, 2}, }; static arc arcs_6_2[1] = { {0, 2}, }; static state states_6[3] = { {1, arcs_6_0}, {1, arcs_6_1}, {1, arcs_6_2}, }; static arc arcs_7_0[1] = { {22, 1}, }; static arc arcs_7_1[1] = { {23, 2}, }; static arc arcs_7_2[1] = { {24, 3}, }; static arc arcs_7_3[2] = { {25, 4}, {27, 5}, }; static arc arcs_7_4[1] = { {26, 6}, }; static arc arcs_7_5[2] = { {28, 7}, {29, 8}, }; static arc arcs_7_6[1] = { {27, 5}, }; static arc arcs_7_7[1] = { {29, 8}, }; static arc arcs_7_8[1] = { {0, 8}, }; static state states_7[9] = { {1, arcs_7_0}, {1, arcs_7_1}, {1, arcs_7_2}, {2, arcs_7_3}, {1, arcs_7_4}, {2, arcs_7_5}, {1, arcs_7_6}, {1, arcs_7_7}, {1, arcs_7_8}, }; static arc arcs_8_0[1] = { {13, 1}, }; static arc arcs_8_1[2] = { {30, 2}, {15, 3}, }; static arc arcs_8_2[1] = { {15, 3}, }; static arc arcs_8_3[1] = { {0, 3}, }; static state states_8[4] = { {1, arcs_8_0}, {2, arcs_8_1}, {1, arcs_8_2}, {1, arcs_8_3}, }; static arc arcs_9_0[3] = { {31, 1}, {34, 2}, {35, 3}, }; static arc arcs_9_1[4] = { {32, 4}, {33, 5}, {28, 6}, {0, 1}, }; static arc arcs_9_2[4] = { {31, 7}, {33, 8}, {28, 6}, {0, 2}, }; static arc arcs_9_3[1] = { {31, 9}, }; static arc arcs_9_4[1] = { {26, 10}, }; static arc arcs_9_5[5] = { {28, 11}, {31, 12}, {34, 13}, {35, 3}, {0, 5}, }; static arc arcs_9_6[1] = { {0, 6}, }; static arc arcs_9_7[3] = { {33, 8}, {28, 6}, {0, 7}, }; static arc arcs_9_8[4] = { {28, 14}, {31, 15}, {35, 3}, {0, 8}, }; static arc arcs_9_9[3] = { {33, 16}, {28, 6}, {0, 9}, }; static arc arcs_9_10[3] = { {33, 5}, {28, 6}, {0, 10}, }; static arc arcs_9_11[4] = { {31, 12}, {34, 13}, {35, 3}, {0, 11}, }; static arc arcs_9_12[4] = { {33, 5}, {32, 4}, {28, 6}, {0, 12}, }; static arc arcs_9_13[4] = { {31, 17}, {33, 18}, {28, 6}, {0, 13}, }; static arc arcs_9_14[3] = { {31, 15}, {35, 3}, {0, 14}, }; static arc arcs_9_15[4] = { {33, 8}, {32, 19}, {28, 6}, {0, 15}, }; static arc arcs_9_16[2] = { {28, 6}, {0, 16}, }; static arc arcs_9_17[3] = { {33, 18}, {28, 6}, {0, 17}, }; static arc arcs_9_18[4] = { {28, 
20}, {31, 21}, {35, 3}, {0, 18}, }; static arc arcs_9_19[1] = { {26, 7}, }; static arc arcs_9_20[3] = { {31, 21}, {35, 3}, {0, 20}, }; static arc arcs_9_21[4] = { {33, 18}, {32, 22}, {28, 6}, {0, 21}, }; static arc arcs_9_22[1] = { {26, 17}, }; static state states_9[23] = { {3, arcs_9_0}, {4, arcs_9_1}, {4, arcs_9_2}, {1, arcs_9_3}, {1, arcs_9_4}, {5, arcs_9_5}, {1, arcs_9_6}, {3, arcs_9_7}, {4, arcs_9_8}, {3, arcs_9_9}, {3, arcs_9_10}, {4, arcs_9_11}, {4, arcs_9_12}, {4, arcs_9_13}, {3, arcs_9_14}, {4, arcs_9_15}, {2, arcs_9_16}, {3, arcs_9_17}, {4, arcs_9_18}, {1, arcs_9_19}, {3, arcs_9_20}, {4, arcs_9_21}, {1, arcs_9_22}, }; static arc arcs_10_0[1] = { {23, 1}, }; static arc arcs_10_1[2] = { {27, 2}, {0, 1}, }; static arc arcs_10_2[1] = { {26, 3}, }; static arc arcs_10_3[1] = { {0, 3}, }; static state states_10[4] = { {1, arcs_10_0}, {2, arcs_10_1}, {1, arcs_10_2}, {1, arcs_10_3}, }; static arc arcs_11_0[3] = { {37, 1}, {34, 2}, {35, 3}, }; static arc arcs_11_1[3] = { {32, 4}, {33, 5}, {0, 1}, }; static arc arcs_11_2[3] = { {37, 6}, {33, 7}, {0, 2}, }; static arc arcs_11_3[1] = { {37, 8}, }; static arc arcs_11_4[1] = { {26, 9}, }; static arc arcs_11_5[4] = { {37, 10}, {34, 11}, {35, 3}, {0, 5}, }; static arc arcs_11_6[2] = { {33, 7}, {0, 6}, }; static arc arcs_11_7[3] = { {37, 12}, {35, 3}, {0, 7}, }; static arc arcs_11_8[2] = { {33, 13}, {0, 8}, }; static arc arcs_11_9[2] = { {33, 5}, {0, 9}, }; static arc arcs_11_10[3] = { {33, 5}, {32, 4}, {0, 10}, }; static arc arcs_11_11[3] = { {37, 14}, {33, 15}, {0, 11}, }; static arc arcs_11_12[3] = { {33, 7}, {32, 16}, {0, 12}, }; static arc arcs_11_13[1] = { {0, 13}, }; static arc arcs_11_14[2] = { {33, 15}, {0, 14}, }; static arc arcs_11_15[3] = { {37, 17}, {35, 3}, {0, 15}, }; static arc arcs_11_16[1] = { {26, 6}, }; static arc arcs_11_17[3] = { {33, 15}, {32, 18}, {0, 17}, }; static arc arcs_11_18[1] = { {26, 14}, }; static state states_11[19] = { {3, arcs_11_0}, {3, arcs_11_1}, {3, arcs_11_2}, {1, arcs_11_3}, {1, arcs_11_4}, {4, arcs_11_5}, {2, arcs_11_6}, {3, arcs_11_7}, {2, arcs_11_8}, {2, arcs_11_9}, {3, arcs_11_10}, {3, arcs_11_11}, {3, arcs_11_12}, {1, arcs_11_13}, {2, arcs_11_14}, {3, arcs_11_15}, {1, arcs_11_16}, {3, arcs_11_17}, {1, arcs_11_18}, }; static arc arcs_12_0[1] = { {23, 1}, }; static arc arcs_12_1[1] = { {0, 1}, }; static state states_12[2] = { {1, arcs_12_0}, {1, arcs_12_1}, }; static arc arcs_13_0[2] = { {3, 1}, {4, 1}, }; static arc arcs_13_1[1] = { {0, 1}, }; static state states_13[2] = { {2, arcs_13_0}, {1, arcs_13_1}, }; static arc arcs_14_0[1] = { {38, 1}, }; static arc arcs_14_1[2] = { {39, 2}, {2, 3}, }; static arc arcs_14_2[2] = { {38, 1}, {2, 3}, }; static arc arcs_14_3[1] = { {0, 3}, }; static state states_14[4] = { {1, arcs_14_0}, {2, arcs_14_1}, {2, arcs_14_2}, {1, arcs_14_3}, }; static arc arcs_15_0[8] = { {40, 1}, {41, 1}, {42, 1}, {43, 1}, {44, 1}, {45, 1}, {46, 1}, {47, 1}, }; static arc arcs_15_1[1] = { {0, 1}, }; static state states_15[2] = { {8, arcs_15_0}, {1, arcs_15_1}, }; static arc arcs_16_0[1] = { {48, 1}, }; static arc arcs_16_1[5] = { {49, 2}, {50, 3}, {32, 4}, {28, 2}, {0, 1}, }; static arc arcs_16_2[1] = { {0, 2}, }; static arc arcs_16_3[2] = { {51, 2}, {9, 2}, }; static arc arcs_16_4[2] = { {51, 5}, {48, 5}, }; static arc arcs_16_5[3] = { {32, 4}, {28, 2}, {0, 5}, }; static state states_16[6] = { {1, arcs_16_0}, {5, arcs_16_1}, {1, arcs_16_2}, {2, arcs_16_3}, {2, arcs_16_4}, {3, arcs_16_5}, }; static arc arcs_17_0[1] = { {27, 1}, }; static arc arcs_17_1[1] = { {26, 2}, }; static arc 
arcs_17_2[2] = { {32, 3}, {0, 2}, }; static arc arcs_17_3[1] = { {26, 4}, }; static arc arcs_17_4[1] = { {0, 4}, }; static state states_17[5] = { {1, arcs_17_0}, {1, arcs_17_1}, {2, arcs_17_2}, {1, arcs_17_3}, {1, arcs_17_4}, }; static arc arcs_18_0[2] = { {26, 1}, {52, 1}, }; static arc arcs_18_1[2] = { {33, 2}, {0, 1}, }; static arc arcs_18_2[3] = { {26, 1}, {52, 1}, {0, 2}, }; static state states_18[3] = { {2, arcs_18_0}, {2, arcs_18_1}, {3, arcs_18_2}, }; static arc arcs_19_0[13] = { {53, 1}, {54, 1}, {55, 1}, {56, 1}, {57, 1}, {58, 1}, {59, 1}, {60, 1}, {61, 1}, {62, 1}, {63, 1}, {64, 1}, {65, 1}, }; static arc arcs_19_1[1] = { {0, 1}, }; static state states_19[2] = { {13, arcs_19_0}, {1, arcs_19_1}, }; static arc arcs_20_0[1] = { {66, 1}, }; static arc arcs_20_1[1] = { {67, 2}, }; static arc arcs_20_2[1] = { {0, 2}, }; static state states_20[3] = { {1, arcs_20_0}, {1, arcs_20_1}, {1, arcs_20_2}, }; static arc arcs_21_0[1] = { {68, 1}, }; static arc arcs_21_1[1] = { {0, 1}, }; static state states_21[2] = { {1, arcs_21_0}, {1, arcs_21_1}, }; static arc arcs_22_0[5] = { {69, 1}, {70, 1}, {71, 1}, {72, 1}, {73, 1}, }; static arc arcs_22_1[1] = { {0, 1}, }; static state states_22[2] = { {5, arcs_22_0}, {1, arcs_22_1}, }; static arc arcs_23_0[1] = { {74, 1}, }; static arc arcs_23_1[1] = { {0, 1}, }; static state states_23[2] = { {1, arcs_23_0}, {1, arcs_23_1}, }; static arc arcs_24_0[1] = { {75, 1}, }; static arc arcs_24_1[1] = { {0, 1}, }; static state states_24[2] = { {1, arcs_24_0}, {1, arcs_24_1}, }; static arc arcs_25_0[1] = { {76, 1}, }; static arc arcs_25_1[2] = { {9, 2}, {0, 1}, }; static arc arcs_25_2[1] = { {0, 2}, }; static state states_25[3] = { {1, arcs_25_0}, {2, arcs_25_1}, {1, arcs_25_2}, }; static arc arcs_26_0[1] = { {51, 1}, }; static arc arcs_26_1[1] = { {0, 1}, }; static state states_26[2] = { {1, arcs_26_0}, {1, arcs_26_1}, }; static arc arcs_27_0[1] = { {77, 1}, }; static arc arcs_27_1[2] = { {26, 2}, {0, 1}, }; static arc arcs_27_2[2] = { {78, 3}, {0, 2}, }; static arc arcs_27_3[1] = { {26, 4}, }; static arc arcs_27_4[1] = { {0, 4}, }; static state states_27[5] = { {1, arcs_27_0}, {2, arcs_27_1}, {2, arcs_27_2}, {1, arcs_27_3}, {1, arcs_27_4}, }; static arc arcs_28_0[2] = { {79, 1}, {80, 1}, }; static arc arcs_28_1[1] = { {0, 1}, }; static state states_28[2] = { {2, arcs_28_0}, {1, arcs_28_1}, }; static arc arcs_29_0[1] = { {81, 1}, }; static arc arcs_29_1[1] = { {82, 2}, }; static arc arcs_29_2[1] = { {0, 2}, }; static state states_29[3] = { {1, arcs_29_0}, {1, arcs_29_1}, {1, arcs_29_2}, }; static arc arcs_30_0[1] = { {78, 1}, }; static arc arcs_30_1[3] = { {83, 2}, {84, 2}, {12, 3}, }; static arc arcs_30_2[4] = { {83, 2}, {84, 2}, {12, 3}, {81, 4}, }; static arc arcs_30_3[1] = { {81, 4}, }; static arc arcs_30_4[3] = { {34, 5}, {13, 6}, {85, 5}, }; static arc arcs_30_5[1] = { {0, 5}, }; static arc arcs_30_6[1] = { {85, 7}, }; static arc arcs_30_7[1] = { {15, 5}, }; static state states_30[8] = { {1, arcs_30_0}, {3, arcs_30_1}, {4, arcs_30_2}, {1, arcs_30_3}, {3, arcs_30_4}, {1, arcs_30_5}, {1, arcs_30_6}, {1, arcs_30_7}, }; static arc arcs_31_0[1] = { {23, 1}, }; static arc arcs_31_1[2] = { {87, 2}, {0, 1}, }; static arc arcs_31_2[1] = { {23, 3}, }; static arc arcs_31_3[1] = { {0, 3}, }; static state states_31[4] = { {1, arcs_31_0}, {2, arcs_31_1}, {1, arcs_31_2}, {1, arcs_31_3}, }; static arc arcs_32_0[1] = { {12, 1}, }; static arc arcs_32_1[2] = { {87, 2}, {0, 1}, }; static arc arcs_32_2[1] = { {23, 3}, }; static arc arcs_32_3[1] = { {0, 3}, }; static state 
states_32[4] = { {1, arcs_32_0}, {2, arcs_32_1}, {1, arcs_32_2}, {1, arcs_32_3}, }; static arc arcs_33_0[1] = { {86, 1}, }; static arc arcs_33_1[2] = { {33, 2}, {0, 1}, }; static arc arcs_33_2[2] = { {86, 1}, {0, 2}, }; static state states_33[3] = { {1, arcs_33_0}, {2, arcs_33_1}, {2, arcs_33_2}, }; static arc arcs_34_0[1] = { {88, 1}, }; static arc arcs_34_1[2] = { {33, 0}, {0, 1}, }; static state states_34[2] = { {1, arcs_34_0}, {2, arcs_34_1}, }; static arc arcs_35_0[1] = { {23, 1}, }; static arc arcs_35_1[2] = { {83, 0}, {0, 1}, }; static state states_35[2] = { {1, arcs_35_0}, {2, arcs_35_1}, }; static arc arcs_36_0[1] = { {89, 1}, }; static arc arcs_36_1[1] = { {23, 2}, }; static arc arcs_36_2[2] = { {33, 1}, {0, 2}, }; static state states_36[3] = { {1, arcs_36_0}, {1, arcs_36_1}, {2, arcs_36_2}, }; static arc arcs_37_0[1] = { {90, 1}, }; static arc arcs_37_1[1] = { {23, 2}, }; static arc arcs_37_2[2] = { {33, 1}, {0, 2}, }; static state states_37[3] = { {1, arcs_37_0}, {1, arcs_37_1}, {2, arcs_37_2}, }; static arc arcs_38_0[1] = { {91, 1}, }; static arc arcs_38_1[1] = { {26, 2}, }; static arc arcs_38_2[2] = { {33, 3}, {0, 2}, }; static arc arcs_38_3[1] = { {26, 4}, }; static arc arcs_38_4[1] = { {0, 4}, }; static state states_38[5] = { {1, arcs_38_0}, {1, arcs_38_1}, {2, arcs_38_2}, {1, arcs_38_3}, {1, arcs_38_4}, }; static arc arcs_39_0[9] = { {92, 1}, {93, 1}, {94, 1}, {95, 1}, {96, 1}, {19, 1}, {18, 1}, {17, 1}, {97, 1}, }; static arc arcs_39_1[1] = { {0, 1}, }; static state states_39[2] = { {9, arcs_39_0}, {1, arcs_39_1}, }; static arc arcs_40_0[1] = { {21, 1}, }; static arc arcs_40_1[3] = { {19, 2}, {96, 2}, {94, 2}, }; static arc arcs_40_2[1] = { {0, 2}, }; static state states_40[3] = { {1, arcs_40_0}, {3, arcs_40_1}, {1, arcs_40_2}, }; static arc arcs_41_0[1] = { {98, 1}, }; static arc arcs_41_1[1] = { {26, 2}, }; static arc arcs_41_2[1] = { {27, 3}, }; static arc arcs_41_3[1] = { {29, 4}, }; static arc arcs_41_4[3] = { {99, 1}, {100, 5}, {0, 4}, }; static arc arcs_41_5[1] = { {27, 6}, }; static arc arcs_41_6[1] = { {29, 7}, }; static arc arcs_41_7[1] = { {0, 7}, }; static state states_41[8] = { {1, arcs_41_0}, {1, arcs_41_1}, {1, arcs_41_2}, {1, arcs_41_3}, {3, arcs_41_4}, {1, arcs_41_5}, {1, arcs_41_6}, {1, arcs_41_7}, }; static arc arcs_42_0[1] = { {101, 1}, }; static arc arcs_42_1[1] = { {26, 2}, }; static arc arcs_42_2[1] = { {27, 3}, }; static arc arcs_42_3[1] = { {29, 4}, }; static arc arcs_42_4[2] = { {100, 5}, {0, 4}, }; static arc arcs_42_5[1] = { {27, 6}, }; static arc arcs_42_6[1] = { {29, 7}, }; static arc arcs_42_7[1] = { {0, 7}, }; static state states_42[8] = { {1, arcs_42_0}, {1, arcs_42_1}, {1, arcs_42_2}, {1, arcs_42_3}, {2, arcs_42_4}, {1, arcs_42_5}, {1, arcs_42_6}, {1, arcs_42_7}, }; static arc arcs_43_0[1] = { {102, 1}, }; static arc arcs_43_1[1] = { {67, 2}, }; static arc arcs_43_2[1] = { {103, 3}, }; static arc arcs_43_3[1] = { {9, 4}, }; static arc arcs_43_4[1] = { {27, 5}, }; static arc arcs_43_5[2] = { {28, 6}, {29, 7}, }; static arc arcs_43_6[1] = { {29, 7}, }; static arc arcs_43_7[2] = { {100, 8}, {0, 7}, }; static arc arcs_43_8[1] = { {27, 9}, }; static arc arcs_43_9[1] = { {29, 10}, }; static arc arcs_43_10[1] = { {0, 10}, }; static state states_43[11] = { {1, arcs_43_0}, {1, arcs_43_1}, {1, arcs_43_2}, {1, arcs_43_3}, {1, arcs_43_4}, {2, arcs_43_5}, {1, arcs_43_6}, {2, arcs_43_7}, {1, arcs_43_8}, {1, arcs_43_9}, {1, arcs_43_10}, }; static arc arcs_44_0[1] = { {104, 1}, }; static arc arcs_44_1[1] = { {27, 2}, }; static arc arcs_44_2[1] = { {29, 
3}, }; static arc arcs_44_3[2] = { {105, 4}, {106, 5}, }; static arc arcs_44_4[1] = { {27, 6}, }; static arc arcs_44_5[1] = { {27, 7}, }; static arc arcs_44_6[1] = { {29, 8}, }; static arc arcs_44_7[1] = { {29, 9}, }; static arc arcs_44_8[4] = { {105, 4}, {100, 10}, {106, 5}, {0, 8}, }; static arc arcs_44_9[1] = { {0, 9}, }; static arc arcs_44_10[1] = { {27, 11}, }; static arc arcs_44_11[1] = { {29, 12}, }; static arc arcs_44_12[2] = { {106, 5}, {0, 12}, }; static state states_44[13] = { {1, arcs_44_0}, {1, arcs_44_1}, {1, arcs_44_2}, {2, arcs_44_3}, {1, arcs_44_4}, {1, arcs_44_5}, {1, arcs_44_6}, {1, arcs_44_7}, {4, arcs_44_8}, {1, arcs_44_9}, {1, arcs_44_10}, {1, arcs_44_11}, {2, arcs_44_12}, }; static arc arcs_45_0[1] = { {107, 1}, }; static arc arcs_45_1[1] = { {108, 2}, }; static arc arcs_45_2[2] = { {33, 1}, {27, 3}, }; static arc arcs_45_3[2] = { {28, 4}, {29, 5}, }; static arc arcs_45_4[1] = { {29, 5}, }; static arc arcs_45_5[1] = { {0, 5}, }; static state states_45[6] = { {1, arcs_45_0}, {1, arcs_45_1}, {2, arcs_45_2}, {2, arcs_45_3}, {1, arcs_45_4}, {1, arcs_45_5}, }; static arc arcs_46_0[1] = { {26, 1}, }; static arc arcs_46_1[2] = { {87, 2}, {0, 1}, }; static arc arcs_46_2[1] = { {109, 3}, }; static arc arcs_46_3[1] = { {0, 3}, }; static state states_46[4] = { {1, arcs_46_0}, {2, arcs_46_1}, {1, arcs_46_2}, {1, arcs_46_3}, }; static arc arcs_47_0[1] = { {110, 1}, }; static arc arcs_47_1[2] = { {26, 2}, {0, 1}, }; static arc arcs_47_2[2] = { {87, 3}, {0, 2}, }; static arc arcs_47_3[1] = { {23, 4}, }; static arc arcs_47_4[1] = { {0, 4}, }; static state states_47[5] = { {1, arcs_47_0}, {2, arcs_47_1}, {2, arcs_47_2}, {1, arcs_47_3}, {1, arcs_47_4}, }; static arc arcs_48_0[2] = { {3, 1}, {2, 2}, }; static arc arcs_48_1[1] = { {0, 1}, }; static arc arcs_48_2[2] = { {28, 3}, {111, 4}, }; static arc arcs_48_3[1] = { {2, 5}, }; static arc arcs_48_4[1] = { {6, 6}, }; static arc arcs_48_5[1] = { {111, 4}, }; static arc arcs_48_6[2] = { {6, 6}, {112, 1}, }; static state states_48[7] = { {2, arcs_48_0}, {1, arcs_48_1}, {2, arcs_48_2}, {1, arcs_48_3}, {1, arcs_48_4}, {1, arcs_48_5}, {2, arcs_48_6}, }; static arc arcs_49_0[2] = { {113, 1}, {114, 2}, }; static arc arcs_49_1[2] = { {98, 3}, {0, 1}, }; static arc arcs_49_2[1] = { {0, 2}, }; static arc arcs_49_3[1] = { {113, 4}, }; static arc arcs_49_4[1] = { {100, 5}, }; static arc arcs_49_5[1] = { {26, 2}, }; static state states_49[6] = { {2, arcs_49_0}, {2, arcs_49_1}, {1, arcs_49_2}, {1, arcs_49_3}, {1, arcs_49_4}, {1, arcs_49_5}, }; static arc arcs_50_0[2] = { {113, 1}, {116, 1}, }; static arc arcs_50_1[1] = { {0, 1}, }; static state states_50[2] = { {2, arcs_50_0}, {1, arcs_50_1}, }; static arc arcs_51_0[1] = { {117, 1}, }; static arc arcs_51_1[2] = { {36, 2}, {27, 3}, }; static arc arcs_51_2[1] = { {27, 3}, }; static arc arcs_51_3[1] = { {26, 4}, }; static arc arcs_51_4[1] = { {0, 4}, }; static state states_51[5] = { {1, arcs_51_0}, {2, arcs_51_1}, {1, arcs_51_2}, {1, arcs_51_3}, {1, arcs_51_4}, }; static arc arcs_52_0[1] = { {117, 1}, }; static arc arcs_52_1[2] = { {36, 2}, {27, 3}, }; static arc arcs_52_2[1] = { {27, 3}, }; static arc arcs_52_3[1] = { {115, 4}, }; static arc arcs_52_4[1] = { {0, 4}, }; static state states_52[5] = { {1, arcs_52_0}, {2, arcs_52_1}, {1, arcs_52_2}, {1, arcs_52_3}, {1, arcs_52_4}, }; static arc arcs_53_0[1] = { {118, 1}, }; static arc arcs_53_1[2] = { {119, 0}, {0, 1}, }; static state states_53[2] = { {1, arcs_53_0}, {2, arcs_53_1}, }; static arc arcs_54_0[1] = { {120, 1}, }; static arc arcs_54_1[2] = { 
{121, 0}, {0, 1}, }; static state states_54[2] = { {1, arcs_54_0}, {2, arcs_54_1}, }; static arc arcs_55_0[2] = { {122, 1}, {123, 2}, }; static arc arcs_55_1[1] = { {120, 2}, }; static arc arcs_55_2[1] = { {0, 2}, }; static state states_55[3] = { {2, arcs_55_0}, {1, arcs_55_1}, {1, arcs_55_2}, }; static arc arcs_56_0[1] = { {109, 1}, }; static arc arcs_56_1[2] = { {124, 0}, {0, 1}, }; static state states_56[2] = { {1, arcs_56_0}, {2, arcs_56_1}, }; static arc arcs_57_0[10] = { {125, 1}, {126, 1}, {127, 1}, {128, 1}, {129, 1}, {130, 1}, {131, 1}, {103, 1}, {122, 2}, {132, 3}, }; static arc arcs_57_1[1] = { {0, 1}, }; static arc arcs_57_2[1] = { {103, 1}, }; static arc arcs_57_3[2] = { {122, 1}, {0, 3}, }; static state states_57[4] = { {10, arcs_57_0}, {1, arcs_57_1}, {1, arcs_57_2}, {2, arcs_57_3}, }; static arc arcs_58_0[1] = { {34, 1}, }; static arc arcs_58_1[1] = { {109, 2}, }; static arc arcs_58_2[1] = { {0, 2}, }; static state states_58[3] = { {1, arcs_58_0}, {1, arcs_58_1}, {1, arcs_58_2}, }; static arc arcs_59_0[1] = { {133, 1}, }; static arc arcs_59_1[2] = { {134, 0}, {0, 1}, }; static state states_59[2] = { {1, arcs_59_0}, {2, arcs_59_1}, }; static arc arcs_60_0[1] = { {135, 1}, }; static arc arcs_60_1[2] = { {136, 0}, {0, 1}, }; static state states_60[2] = { {1, arcs_60_0}, {2, arcs_60_1}, }; static arc arcs_61_0[1] = { {137, 1}, }; static arc arcs_61_1[2] = { {138, 0}, {0, 1}, }; static state states_61[2] = { {1, arcs_61_0}, {2, arcs_61_1}, }; static arc arcs_62_0[1] = { {139, 1}, }; static arc arcs_62_1[3] = { {140, 0}, {141, 0}, {0, 1}, }; static state states_62[2] = { {1, arcs_62_0}, {3, arcs_62_1}, }; static arc arcs_63_0[1] = { {142, 1}, }; static arc arcs_63_1[3] = { {143, 0}, {144, 0}, {0, 1}, }; static state states_63[2] = { {1, arcs_63_0}, {3, arcs_63_1}, }; static arc arcs_64_0[1] = { {145, 1}, }; static arc arcs_64_1[6] = { {34, 0}, {11, 0}, {146, 0}, {147, 0}, {148, 0}, {0, 1}, }; static state states_64[2] = { {1, arcs_64_0}, {6, arcs_64_1}, }; static arc arcs_65_0[4] = { {143, 1}, {144, 1}, {149, 1}, {150, 2}, }; static arc arcs_65_1[1] = { {145, 2}, }; static arc arcs_65_2[1] = { {0, 2}, }; static state states_65[3] = { {4, arcs_65_0}, {1, arcs_65_1}, {1, arcs_65_2}, }; static arc arcs_66_0[1] = { {151, 1}, }; static arc arcs_66_1[2] = { {35, 2}, {0, 1}, }; static arc arcs_66_2[1] = { {145, 3}, }; static arc arcs_66_3[1] = { {0, 3}, }; static state states_66[4] = { {1, arcs_66_0}, {2, arcs_66_1}, {1, arcs_66_2}, {1, arcs_66_3}, }; static arc arcs_67_0[2] = { {152, 1}, {153, 2}, }; static arc arcs_67_1[1] = { {153, 2}, }; static arc arcs_67_2[2] = { {154, 2}, {0, 2}, }; static state states_67[3] = { {2, arcs_67_0}, {1, arcs_67_1}, {2, arcs_67_2}, }; static arc arcs_68_0[10] = { {13, 1}, {156, 2}, {158, 3}, {23, 4}, {161, 4}, {162, 5}, {84, 4}, {163, 4}, {164, 4}, {165, 4}, }; static arc arcs_68_1[3] = { {51, 6}, {155, 6}, {15, 4}, }; static arc arcs_68_2[2] = { {155, 7}, {157, 4}, }; static arc arcs_68_3[2] = { {159, 8}, {160, 4}, }; static arc arcs_68_4[1] = { {0, 4}, }; static arc arcs_68_5[2] = { {162, 5}, {0, 5}, }; static arc arcs_68_6[1] = { {15, 4}, }; static arc arcs_68_7[1] = { {157, 4}, }; static arc arcs_68_8[1] = { {160, 4}, }; static state states_68[9] = { {10, arcs_68_0}, {3, arcs_68_1}, {2, arcs_68_2}, {2, arcs_68_3}, {1, arcs_68_4}, {2, arcs_68_5}, {1, arcs_68_6}, {1, arcs_68_7}, {1, arcs_68_8}, }; static arc arcs_69_0[2] = { {26, 1}, {52, 1}, }; static arc arcs_69_1[3] = { {166, 2}, {33, 3}, {0, 1}, }; static arc arcs_69_2[1] = { {0, 2}, }; static 
arc arcs_69_3[3] = { {26, 4}, {52, 4}, {0, 3}, }; static arc arcs_69_4[2] = { {33, 3}, {0, 4}, }; static state states_69[5] = { {2, arcs_69_0}, {3, arcs_69_1}, {1, arcs_69_2}, {3, arcs_69_3}, {2, arcs_69_4}, }; static arc arcs_70_0[3] = { {13, 1}, {156, 2}, {83, 3}, }; static arc arcs_70_1[2] = { {14, 4}, {15, 5}, }; static arc arcs_70_2[1] = { {167, 6}, }; static arc arcs_70_3[1] = { {23, 5}, }; static arc arcs_70_4[1] = { {15, 5}, }; static arc arcs_70_5[1] = { {0, 5}, }; static arc arcs_70_6[1] = { {157, 5}, }; static state states_70[7] = { {3, arcs_70_0}, {2, arcs_70_1}, {1, arcs_70_2}, {1, arcs_70_3}, {1, arcs_70_4}, {1, arcs_70_5}, {1, arcs_70_6}, }; static arc arcs_71_0[1] = { {168, 1}, }; static arc arcs_71_1[2] = { {33, 2}, {0, 1}, }; static arc arcs_71_2[2] = { {168, 1}, {0, 2}, }; static state states_71[3] = { {1, arcs_71_0}, {2, arcs_71_1}, {2, arcs_71_2}, }; static arc arcs_72_0[2] = { {26, 1}, {27, 2}, }; static arc arcs_72_1[2] = { {27, 2}, {0, 1}, }; static arc arcs_72_2[3] = { {26, 3}, {169, 4}, {0, 2}, }; static arc arcs_72_3[2] = { {169, 4}, {0, 3}, }; static arc arcs_72_4[1] = { {0, 4}, }; static state states_72[5] = { {2, arcs_72_0}, {2, arcs_72_1}, {3, arcs_72_2}, {2, arcs_72_3}, {1, arcs_72_4}, }; static arc arcs_73_0[1] = { {27, 1}, }; static arc arcs_73_1[2] = { {26, 2}, {0, 1}, }; static arc arcs_73_2[1] = { {0, 2}, }; static state states_73[3] = { {1, arcs_73_0}, {2, arcs_73_1}, {1, arcs_73_2}, }; static arc arcs_74_0[2] = { {109, 1}, {52, 1}, }; static arc arcs_74_1[2] = { {33, 2}, {0, 1}, }; static arc arcs_74_2[3] = { {109, 1}, {52, 1}, {0, 2}, }; static state states_74[3] = { {2, arcs_74_0}, {2, arcs_74_1}, {3, arcs_74_2}, }; static arc arcs_75_0[1] = { {26, 1}, }; static arc arcs_75_1[2] = { {33, 2}, {0, 1}, }; static arc arcs_75_2[2] = { {26, 1}, {0, 2}, }; static state states_75[3] = { {1, arcs_75_0}, {2, arcs_75_1}, {2, arcs_75_2}, }; static arc arcs_76_0[3] = { {26, 1}, {35, 2}, {52, 3}, }; static arc arcs_76_1[4] = { {27, 4}, {166, 5}, {33, 6}, {0, 1}, }; static arc arcs_76_2[1] = { {109, 7}, }; static arc arcs_76_3[3] = { {166, 5}, {33, 6}, {0, 3}, }; static arc arcs_76_4[1] = { {26, 7}, }; static arc arcs_76_5[1] = { {0, 5}, }; static arc arcs_76_6[3] = { {26, 8}, {52, 8}, {0, 6}, }; static arc arcs_76_7[3] = { {166, 5}, {33, 9}, {0, 7}, }; static arc arcs_76_8[2] = { {33, 6}, {0, 8}, }; static arc arcs_76_9[3] = { {26, 10}, {35, 11}, {0, 9}, }; static arc arcs_76_10[1] = { {27, 12}, }; static arc arcs_76_11[1] = { {109, 13}, }; static arc arcs_76_12[1] = { {26, 13}, }; static arc arcs_76_13[2] = { {33, 9}, {0, 13}, }; static state states_76[14] = { {3, arcs_76_0}, {4, arcs_76_1}, {1, arcs_76_2}, {3, arcs_76_3}, {1, arcs_76_4}, {1, arcs_76_5}, {3, arcs_76_6}, {3, arcs_76_7}, {2, arcs_76_8}, {3, arcs_76_9}, {1, arcs_76_10}, {1, arcs_76_11}, {1, arcs_76_12}, {2, arcs_76_13}, }; static arc arcs_77_0[1] = { {170, 1}, }; static arc arcs_77_1[1] = { {23, 2}, }; static arc arcs_77_2[2] = { {13, 3}, {27, 4}, }; static arc arcs_77_3[2] = { {14, 5}, {15, 6}, }; static arc arcs_77_4[1] = { {29, 7}, }; static arc arcs_77_5[1] = { {15, 6}, }; static arc arcs_77_6[1] = { {27, 4}, }; static arc arcs_77_7[1] = { {0, 7}, }; static state states_77[8] = { {1, arcs_77_0}, {1, arcs_77_1}, {2, arcs_77_2}, {2, arcs_77_3}, {1, arcs_77_4}, {1, arcs_77_5}, {1, arcs_77_6}, {1, arcs_77_7}, }; static arc arcs_78_0[1] = { {171, 1}, }; static arc arcs_78_1[2] = { {33, 2}, {0, 1}, }; static arc arcs_78_2[2] = { {171, 1}, {0, 2}, }; static state states_78[3] = { {1, arcs_78_0}, {2, 
arcs_78_1}, {2, arcs_78_2}, }; static arc arcs_79_0[3] = { {26, 1}, {35, 2}, {34, 2}, }; static arc arcs_79_1[3] = { {166, 3}, {32, 2}, {0, 1}, }; static arc arcs_79_2[1] = { {26, 3}, }; static arc arcs_79_3[1] = { {0, 3}, }; static state states_79[4] = { {3, arcs_79_0}, {3, arcs_79_1}, {1, arcs_79_2}, {1, arcs_79_3}, }; static arc arcs_80_0[2] = { {166, 1}, {173, 1}, }; static arc arcs_80_1[1] = { {0, 1}, }; static state states_80[2] = { {2, arcs_80_0}, {1, arcs_80_1}, }; static arc arcs_81_0[1] = { {102, 1}, }; static arc arcs_81_1[1] = { {67, 2}, }; static arc arcs_81_2[1] = { {103, 3}, }; static arc arcs_81_3[1] = { {113, 4}, }; static arc arcs_81_4[2] = { {172, 5}, {0, 4}, }; static arc arcs_81_5[1] = { {0, 5}, }; static state states_81[6] = { {1, arcs_81_0}, {1, arcs_81_1}, {1, arcs_81_2}, {1, arcs_81_3}, {2, arcs_81_4}, {1, arcs_81_5}, }; static arc arcs_82_0[2] = { {21, 1}, {174, 2}, }; static arc arcs_82_1[1] = { {174, 2}, }; static arc arcs_82_2[1] = { {0, 2}, }; static state states_82[3] = { {2, arcs_82_0}, {1, arcs_82_1}, {1, arcs_82_2}, }; static arc arcs_83_0[1] = { {98, 1}, }; static arc arcs_83_1[1] = { {115, 2}, }; static arc arcs_83_2[2] = { {172, 3}, {0, 2}, }; static arc arcs_83_3[1] = { {0, 3}, }; static state states_83[4] = { {1, arcs_83_0}, {1, arcs_83_1}, {2, arcs_83_2}, {1, arcs_83_3}, }; static arc arcs_84_0[1] = { {23, 1}, }; static arc arcs_84_1[1] = { {0, 1}, }; static state states_84[2] = { {1, arcs_84_0}, {1, arcs_84_1}, }; static arc arcs_85_0[1] = { {176, 1}, }; static arc arcs_85_1[2] = { {177, 2}, {0, 1}, }; static arc arcs_85_2[1] = { {0, 2}, }; static state states_85[3] = { {1, arcs_85_0}, {2, arcs_85_1}, {1, arcs_85_2}, }; static arc arcs_86_0[2] = { {78, 1}, {9, 2}, }; static arc arcs_86_1[1] = { {26, 2}, }; static arc arcs_86_2[1] = { {0, 2}, }; static state states_86[3] = { {2, arcs_86_0}, {1, arcs_86_1}, {1, arcs_86_2}, }; static arc arcs_87_0[1] = { {179, 1}, }; static arc arcs_87_1[2] = { {2, 1}, {7, 2}, }; static arc arcs_87_2[1] = { {0, 2}, }; static state states_87[3] = { {1, arcs_87_0}, {2, arcs_87_1}, {1, arcs_87_2}, }; static arc arcs_88_0[1] = { {13, 1}, }; static arc arcs_88_1[2] = { {180, 2}, {15, 3}, }; static arc arcs_88_2[1] = { {15, 3}, }; static arc arcs_88_3[1] = { {25, 4}, }; static arc arcs_88_4[1] = { {26, 5}, }; static arc arcs_88_5[1] = { {0, 5}, }; static state states_88[6] = { {1, arcs_88_0}, {2, arcs_88_1}, {1, arcs_88_2}, {1, arcs_88_3}, {1, arcs_88_4}, {1, arcs_88_5}, }; static arc arcs_89_0[3] = { {26, 1}, {34, 2}, {35, 3}, }; static arc arcs_89_1[2] = { {33, 4}, {0, 1}, }; static arc arcs_89_2[3] = { {26, 5}, {33, 6}, {0, 2}, }; static arc arcs_89_3[1] = { {26, 7}, }; static arc arcs_89_4[4] = { {26, 1}, {34, 8}, {35, 3}, {0, 4}, }; static arc arcs_89_5[2] = { {33, 6}, {0, 5}, }; static arc arcs_89_6[2] = { {26, 5}, {35, 3}, }; static arc arcs_89_7[1] = { {0, 7}, }; static arc arcs_89_8[3] = { {26, 9}, {33, 10}, {0, 8}, }; static arc arcs_89_9[2] = { {33, 10}, {0, 9}, }; static arc arcs_89_10[2] = { {26, 9}, {35, 3}, }; static state states_89[11] = { {3, arcs_89_0}, {2, arcs_89_1}, {3, arcs_89_2}, {1, arcs_89_3}, {4, arcs_89_4}, {2, arcs_89_5}, {2, arcs_89_6}, {1, arcs_89_7}, {3, arcs_89_8}, {2, arcs_89_9}, {2, arcs_89_10}, }; static dfa dfas[90] = { {256, "single_input", 0, 3, states_0, "\004\050\340\000\004\000\000\000\024\174\022\016\144\011\040\004\000\200\041\121\076\004\001"}, {257, "file_input", 0, 2, states_1, "\204\050\340\000\004\000\000\000\024\174\022\016\144\011\040\004\000\200\041\121\076\004\001"}, {258, 
"eval_input", 0, 3, states_2, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {259, "decorator", 0, 7, states_3, "\000\010\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {260, "decorators", 0, 2, states_4, "\000\010\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {261, "decorated", 0, 3, states_5, "\000\010\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {262, "async_funcdef", 0, 3, states_6, "\000\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {263, "funcdef", 0, 9, states_7, "\000\000\100\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {264, "parameters", 0, 4, states_8, "\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {265, "typedargslist", 0, 23, states_9, "\000\000\200\000\014\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {266, "tfpdef", 0, 4, states_10, "\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {267, "varargslist", 0, 19, states_11, "\000\000\200\000\014\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {268, "vfpdef", 0, 2, states_12, "\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {269, "stmt", 0, 2, states_13, "\000\050\340\000\004\000\000\000\024\174\022\016\144\011\040\004\000\200\041\121\076\004\001"}, {270, "simple_stmt", 0, 4, states_14, "\000\040\200\000\004\000\000\000\024\174\022\016\000\000\040\004\000\200\041\121\076\000\001"}, {271, "small_stmt", 0, 2, states_15, "\000\040\200\000\004\000\000\000\024\174\022\016\000\000\040\004\000\200\041\121\076\000\001"}, {272, "expr_stmt", 0, 6, states_16, "\000\040\200\000\004\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {273, "annassign", 0, 5, states_17, "\000\000\000\010\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {274, "testlist_star_expr", 0, 3, states_18, "\000\040\200\000\004\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {275, "augassign", 0, 2, states_19, "\000\000\000\000\000\000\340\377\003\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {276, "del_stmt", 0, 3, states_20, "\000\000\000\000\000\000\000\000\004\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {277, "pass_stmt", 0, 2, states_21, "\000\000\000\000\000\000\000\000\020\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {278, "flow_stmt", 0, 2, states_22, "\000\000\000\000\000\000\000\000\000\074\000\000\000\000\000\000\000\000\000\000\000\000\001"}, {279, "break_stmt", 0, 2, states_23, "\000\000\000\000\000\000\000\000\000\004\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {280, "continue_stmt", 0, 2, states_24, "\000\000\000\000\000\000\000\000\000\010\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {281, "return_stmt", 0, 3, states_25, "\000\000\000\000\000\000\000\000\000\020\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {282, "yield_stmt", 0, 2, states_26, "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\001"}, {283, "raise_stmt", 0, 5, states_27, "\000\000\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {284, "import_stmt", 0, 2, states_28, 
"\000\000\000\000\000\000\000\000\000\100\002\000\000\000\000\000\000\000\000\000\000\000\000"}, {285, "import_name", 0, 3, states_29, "\000\000\000\000\000\000\000\000\000\000\002\000\000\000\000\000\000\000\000\000\000\000\000"}, {286, "import_from", 0, 8, states_30, "\000\000\000\000\000\000\000\000\000\100\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {287, "import_as_name", 0, 4, states_31, "\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {288, "dotted_as_name", 0, 4, states_32, "\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {289, "import_as_names", 0, 3, states_33, "\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {290, "dotted_as_names", 0, 2, states_34, "\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {291, "dotted_name", 0, 2, states_35, "\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {292, "global_stmt", 0, 3, states_36, "\000\000\000\000\000\000\000\000\000\000\000\002\000\000\000\000\000\000\000\000\000\000\000"}, {293, "nonlocal_stmt", 0, 3, states_37, "\000\000\000\000\000\000\000\000\000\000\000\004\000\000\000\000\000\000\000\000\000\000\000"}, {294, "assert_stmt", 0, 5, states_38, "\000\000\000\000\000\000\000\000\000\000\000\010\000\000\000\000\000\000\000\000\000\000\000"}, {295, "compound_stmt", 0, 2, states_39, "\000\010\140\000\000\000\000\000\000\000\000\000\144\011\000\000\000\000\000\000\000\004\000"}, {296, "async_stmt", 0, 3, states_40, "\000\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {297, "if_stmt", 0, 8, states_41, "\000\000\000\000\000\000\000\000\000\000\000\000\004\000\000\000\000\000\000\000\000\000\000"}, {298, "while_stmt", 0, 8, states_42, "\000\000\000\000\000\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000\000\000"}, {299, "for_stmt", 0, 11, states_43, "\000\000\000\000\000\000\000\000\000\000\000\000\100\000\000\000\000\000\000\000\000\000\000"}, {300, "try_stmt", 0, 13, states_44, "\000\000\000\000\000\000\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000\000\000"}, {301, "with_stmt", 0, 6, states_45, "\000\000\000\000\000\000\000\000\000\000\000\000\000\010\000\000\000\000\000\000\000\000\000"}, {302, "with_item", 0, 4, states_46, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {303, "except_clause", 0, 5, states_47, "\000\000\000\000\000\000\000\000\000\000\000\000\000\100\000\000\000\000\000\000\000\000\000"}, {304, "suite", 0, 7, states_48, "\004\040\200\000\004\000\000\000\024\174\022\016\000\000\040\004\000\200\041\121\076\000\001"}, {305, "test", 0, 6, states_49, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {306, "test_nocond", 0, 2, states_50, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {307, "lambdef", 0, 5, states_51, "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000"}, {308, "lambdef_nocond", 0, 5, states_52, "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000"}, {309, "or_test", 0, 2, states_53, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\004\000\200\041\121\076\000\000"}, {310, "and_test", 0, 2, states_54, 
"\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\004\000\200\041\121\076\000\000"}, {311, "not_test", 0, 3, states_55, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\004\000\200\041\121\076\000\000"}, {312, "comparison", 0, 2, states_56, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\200\041\121\076\000\000"}, {313, "comp_op", 0, 4, states_57, "\000\000\000\000\000\000\000\000\000\000\000\000\200\000\000\344\037\000\000\000\000\000\000"}, {314, "star_expr", 0, 3, states_58, "\000\000\000\000\004\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {315, "expr", 0, 2, states_59, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\200\041\121\076\000\000"}, {316, "xor_expr", 0, 2, states_60, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\200\041\121\076\000\000"}, {317, "and_expr", 0, 2, states_61, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\200\041\121\076\000\000"}, {318, "shift_expr", 0, 2, states_62, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\200\041\121\076\000\000"}, {319, "arith_expr", 0, 2, states_63, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\200\041\121\076\000\000"}, {320, "term", 0, 2, states_64, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\200\041\121\076\000\000"}, {321, "factor", 0, 3, states_65, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\200\041\121\076\000\000"}, {322, "power", 0, 4, states_66, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\000\000\121\076\000\000"}, {323, "atom_expr", 0, 3, states_67, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\000\000\121\076\000\000"}, {324, "atom", 0, 9, states_68, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\000\000\000\000\000\120\076\000\000"}, {325, "testlist_comp", 0, 5, states_69, "\000\040\200\000\004\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {326, "trailer", 0, 7, states_70, "\000\040\000\000\000\000\000\000\000\000\010\000\000\000\000\000\000\000\000\020\000\000\000"}, {327, "subscriptlist", 0, 3, states_71, "\000\040\200\010\000\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {328, "subscript", 0, 5, states_72, "\000\040\200\010\000\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {329, "sliceop", 0, 3, states_73, "\000\000\000\010\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {330, "exprlist", 0, 3, states_74, "\000\040\200\000\004\000\000\000\000\000\020\000\000\000\000\000\000\200\041\121\076\000\000"}, {331, "testlist", 0, 3, states_75, "\000\040\200\000\000\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {332, "dictorsetmaker", 0, 14, states_76, "\000\040\200\000\014\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {333, "classdef", 0, 8, states_77, "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\004\000"}, {334, "arglist", 0, 3, states_78, "\000\040\200\000\014\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {335, "argument", 0, 4, states_79, "\000\040\200\000\014\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {336, "comp_iter", 0, 2, states_80, "\000\000\040\000\000\000\000\000\000\000\000\000\104\000\000\000\000\000\000\000\000\000\000"}, {337, 
"sync_comp_for", 0, 6, states_81, "\000\000\000\000\000\000\000\000\000\000\000\000\100\000\000\000\000\000\000\000\000\000\000"}, {338, "comp_for", 0, 3, states_82, "\000\000\040\000\000\000\000\000\000\000\000\000\100\000\000\000\000\000\000\000\000\000\000"}, {339, "comp_if", 0, 4, states_83, "\000\000\000\000\000\000\000\000\000\000\000\000\004\000\000\000\000\000\000\000\000\000\000"}, {340, "encoding_decl", 0, 2, states_84, "\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {341, "yield_expr", 0, 3, states_85, "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\001"}, {342, "yield_arg", 0, 3, states_86, "\000\040\200\000\000\000\000\000\000\100\020\000\000\000\040\004\000\200\041\121\076\000\000"}, {343, "func_type_input", 0, 3, states_87, "\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {344, "func_type", 0, 6, states_88, "\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"}, {345, "typelist", 0, 11, states_89, "\000\040\200\000\014\000\000\000\000\000\020\000\000\000\040\004\000\200\041\121\076\000\000"}, }; static label labels[181] = { {0, "EMPTY"}, {256, 0}, {4, 0}, {270, 0}, {295, 0}, {257, 0}, {269, 0}, {0, 0}, {258, 0}, {331, 0}, {259, 0}, {49, 0}, {291, 0}, {7, 0}, {334, 0}, {8, 0}, {260, 0}, {261, 0}, {333, 0}, {263, 0}, {262, 0}, {55, 0}, {1, "def"}, {1, 0}, {264, 0}, {51, 0}, {305, 0}, {11, 0}, {57, 0}, {304, 0}, {265, 0}, {266, 0}, {22, 0}, {12, 0}, {16, 0}, {35, 0}, {267, 0}, {268, 0}, {271, 0}, {13, 0}, {272, 0}, {276, 0}, {277, 0}, {278, 0}, {284, 0}, {292, 0}, {293, 0}, {294, 0}, {274, 0}, {273, 0}, {275, 0}, {341, 0}, {314, 0}, {36, 0}, {37, 0}, {38, 0}, {50, 0}, {39, 0}, {40, 0}, {41, 0}, {42, 0}, {43, 0}, {44, 0}, {45, 0}, {46, 0}, {48, 0}, {1, "del"}, {330, 0}, {1, "pass"}, {279, 0}, {280, 0}, {281, 0}, {283, 0}, {282, 0}, {1, "break"}, {1, "continue"}, {1, "return"}, {1, "raise"}, {1, "from"}, {285, 0}, {286, 0}, {1, "import"}, {290, 0}, {23, 0}, {52, 0}, {289, 0}, {287, 0}, {1, "as"}, {288, 0}, {1, "global"}, {1, "nonlocal"}, {1, "assert"}, {297, 0}, {298, 0}, {299, 0}, {300, 0}, {301, 0}, {296, 0}, {1, "if"}, {1, "elif"}, {1, "else"}, {1, "while"}, {1, "for"}, {1, "in"}, {1, "try"}, {303, 0}, {1, "finally"}, {1, "with"}, {302, 0}, {315, 0}, {1, "except"}, {5, 0}, {6, 0}, {309, 0}, {307, 0}, {306, 0}, {308, 0}, {1, "lambda"}, {310, 0}, {1, "or"}, {311, 0}, {1, "and"}, {1, "not"}, {312, 0}, {313, 0}, {20, 0}, {21, 0}, {27, 0}, {30, 0}, {29, 0}, {28, 0}, {28, 0}, {1, "is"}, {316, 0}, {18, 0}, {317, 0}, {32, 0}, {318, 0}, {19, 0}, {319, 0}, {33, 0}, {34, 0}, {320, 0}, {14, 0}, {15, 0}, {321, 0}, {17, 0}, {24, 0}, {47, 0}, {31, 0}, {322, 0}, {323, 0}, {54, 0}, {324, 0}, {326, 0}, {325, 0}, {9, 0}, {10, 0}, {25, 0}, {332, 0}, {26, 0}, {2, 0}, {3, 0}, {1, "None"}, {1, "True"}, {1, "False"}, {338, 0}, {327, 0}, {328, 0}, {329, 0}, {1, "class"}, {335, 0}, {336, 0}, {339, 0}, {337, 0}, {340, 0}, {1, "yield"}, {342, 0}, {343, 0}, {344, 0}, {345, 0}, }; grammar _Ta3Parser_Grammar = { 90, dfas, {181, labels}, 256 };
utf-8
1
Apache-2.0
© 2016 David Fisher <ddfisher@dropbox.com>
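The table that ends above is machine-generated parser data, evidently from typed_ast's Python 3 grammar (`_Ta3Parser_Grammar`): the closing initializer packs 90 DFAs, a table of 181 labels, and the start symbol 256. Each label pairs a token or symbol number with an optional keyword string, e.g. {1, "def"} is the NAME token restricted to the keyword "def", while {1, 0} is any NAME. A minimal sketch of how such a label table is searched, in the spirit of CPython's classify() in Parser/parser.c — the struct and function names here are illustrative, not part of typed_ast:

#include <cstring>

struct label_t { int lb_type; const char *lb_str; };   // mirrors pgen's label

/* Hypothetical lookup: classify a token against the label table. */
static int find_label(const label_t *labels, int n, int type, const char *str)
{
    /* pass 1: exact keyword label, e.g. {1, "del"} */
    if (str != nullptr)
        for (int i = 0; i < n; i++)
            if (labels[i].lb_type == type && labels[i].lb_str != nullptr &&
                std::strcmp(labels[i].lb_str, str) == 0)
                return i;
    /* pass 2: fall back to the bare token type, e.g. {1, 0} for NAME */
    for (int i = 0; i < n; i++)
        if (labels[i].lb_type == type && labels[i].lb_str == nullptr)
            return i;
    return -1;   /* token cannot be classified under this grammar */
}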
libstatgen-1.0.15/.pc/spelling.patch/general/PedigreeDescription.cpp
/* * Copyright (C) 2010 Regents of the University of Michigan * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "PedigreeDescription.h" #include "MapFunction.h" #include "MathVector.h" #include "Constant.h" #include "FortranFormat.h" #include "Error.h" #include <stdlib.h> #include <ctype.h> #include <string.h> #include <math.h> PedigreeDescription::PedigreeDescription() { columnCount = 0; mendelFormat = false; } PedigreeDescription::~PedigreeDescription() { }; PedigreeDescription & PedigreeDescription::operator = (PedigreeDescription & rhs) { columnCount = rhs.columnCount; columns = rhs.columns; columnHash = rhs.columnHash; return *this; }; void PedigreeDescription::Load(IFILE & input, bool warnIfLinkage) { // Check if we are dealing with a linkage format data file String buffer; StringArray tokens; mendelFormat = false; ReadLineHelper(input, buffer, tokens); ifrewind(input); if (tokens.Length() == 4 && isdigit(tokens[0][0])) { if (warnIfLinkage) printf("Data file looks like a LINKAGE format file...\n\n"); LoadLinkageDataFile(input); return; } if (buffer.Length() > 18 && (buffer.SubStr(8,8).SlowCompare("AUTOSOME") == 0 || buffer.SubStr(8,8).SlowCompare("X-LINKED") == 0) && (isdigit(buffer[16]) || isdigit(buffer[17])) && (isdigit(buffer[18]) || isdigit(buffer[19]) || (buffer.Length() > 19 && isdigit(buffer[20])))) { printf("Data file looks like a MENDEL format file...\n" " Activating EXPERIMENTAL support for this format\n\n"); LoadMendelDataFile(input); return; } // Reset things ifrewind(input); int done = 0; int line = 0; columns.Clear(); columnHash.Clear(); columnCount = 0; while (!ifeof(input) && !done) { int i; buffer.ReadLine(input); line++; tokens.Clear(); tokens.AddTokens(buffer, WHITESPACE); if (tokens.Length() < 1) continue; if (tokens.Length() == 1) error("Problem reading data file:\n" "Item #%d (of type %s) has no name.", columnCount+1, (const char *) tokens[0]); switch (toupper(tokens[0][0])) { case 'A' : columnHash.Push(GetAffectionID(tokens[1])); columns.Push(pcAffection); columnCount++; break; case 'M' : columnHash.Push(GetMarkerID(tokens[1])); columns.Push(pcMarker); columnCount++; break; case 'T' : columnHash.Push(GetTraitID(tokens[1])); columns.Push(pcTrait); columnCount++; break; case 'C' : columnHash.Push(GetCovariateID(tokens[1])); columns.Push(pcCovariate); columnCount++; break; case '$' : columnHash.Push(GetStringID(tokens[1])); columns.Push(pcString); columnCount++; break; case 'S' : i = (int) tokens[0].SubStr(1); i = i > 0 ? 
i : 1; while (i--) { columns.Push(pcSkip); columnHash.Push(0); columnCount++; } break; case 'Z' : columnHash.Push(0); columns.Push(pcZygosity); columnCount++; break; case 'V' : GetMarkerID(tokens[1]); break; case 'E' : done = 1; break; case 'U' : if (toupper(tokens[0][1]) == 'T' && toupper(tokens[0][2]) == 'C') { int c = GetCovariateID(tokens[1]); int t = GetTraitID(tokens[1]); if (c >= 32767 || t >= 32767) error("Internal error processing data file\n"); columnHash.Push(t * 32768 + c); columns.Push(pcUndocumentedTraitCovariate); columnCount++; break; } default : error("Problem in data file (line %d):\n%s\n", line, (const char *) buffer); } } columns.Push(pcEnd); columnHash.Push(0); }; void PedigreeDescription::Load(const char * iFilename, bool warnIfLinkage) { IFILE f = ifopen(iFilename, "rb"); if (f == NULL) error( "The datafile %s cannot be opened\n\n" "Common causes for this problem are:\n" " * You might not have used the correct options to specify input file names,\n" " please check the program documentation for information on how to do this\n\n" " * The file doesn't exist or the filename might have been misspelt\n\n" " * The file exists but it is being used by another program which you will need\n" " to close before continuing\n\n" " * The file is larger than 2GB and you haven't compiled this application with\n" " large file support.\n\n", iFilename); Load(f, warnIfLinkage); ifclose(f); filename = iFilename; }; void PedigreeDescription::LoadMap(const char * iFilename) { IFILE f = ifopen(iFilename, "rb"); if (f == NULL) error( "The mapfile %s cannot be opened\n\n" "Please check that the file exists and is not being used by another program\n" "To find out how to set input filenames, check the documentation\n", iFilename); LoadMap(f); ifclose(f); }; void PedigreeDescription::LoadMap(IFILE & input) { columns.Clear(); columnHash.Clear(); columnCount = 0; int lastposition = 0; String buffer; StringArray tokens; buffer.ReadLine(input); tokens.AddTokens(buffer, WHITESPACE); while (tokens.Length() == 0 && !ifeof(input)) { buffer.ReadLine(input); tokens.AddTokens(buffer, WHITESPACE); } if (tokens.Length() != 3) error("Error reading map file header, which has %d columns.\n" "Three columns were expected, corresponding to\n" "MARKER_ID, MARKER_NAME and BASE_PAIR_POSITION\n" "The offending header is transcribed below:\n\n" "%s", tokens.Length(), (const char *) buffer); else printf("Map file column labels\n" " -- COLUMN 1, Expecting MARKER_ID, Read %s\n" " -- COLUMN 2, Expecting MARKER_NAME, Read %s\n" " -- COLUMN 3, Expection BASE_PAIR_POSITION, Read %s\n\n", (const char *)(tokens[0]), (const char *)(tokens[1]), (const char *)(tokens[2])); int line = 1; while (!ifeof(input)) { int serial; long position; buffer.ReadLine(input); line++; tokens.Clear(); tokens.AddTokens(buffer, WHITESPACE); if (tokens.Length() < 1) continue; if (tokens.Length() != 3) error("Each line in the map file should have 3 tokens, corresponding\n" "to MARKER_ID, MARKER_NAME and BASE_PAIR_POSITION respectively\n" "However, there are %d tokens in line %d, transcribed below:\n\n" "%s", tokens.Length(), line, (const char *) buffer); serial = (int) tokens[0]; if (serial != columnCount + 1) error("Reading Marker Index from Map File...\n" "Markers should be indexed consecutively starting at 1\n" "Marker %d does not fit this pattern\n", columnCount + 1); position = (int) tokens[2]; if (position < lastposition) error("Reading Marker Position from Map File...\n" "Marker position should be in base-pairs\n" "and markers should be in map 
order\n"); // TODO -- store marker locations somewhere! lastposition = position; columnHash.Push(GetMarkerID(tokens[1])); columns.Push(pcMarker); columnCount++; GetMarkerInfo(tokens[1])->position = position * 1e-8; } columns.Push(pcEnd); columnHash.Push(0); }; int PedigreeDescription::CountTextColumns() { int count = 0; for (int i = 0; i < columnCount; i++, count++) if (columns[i] == pcMarker) count++; return count; } void PedigreeDescription::LoadLinkageDataFile(const char * iFilename) { IFILE f = ifopen(iFilename, "rb"); if (f == NULL) error( "The linkage format datafile %s cannot be opened\n\n" "Please check that the file exists and is not being used by another program\n" "To find out how to set input filenames, check the documentation\n", iFilename); LoadLinkageDataFile(f); ifclose(f); filename = iFilename; }; void PedigreeDescription::LoadLinkageDataFile(IFILE & input) { columns.Clear(); columnHash.Clear(); columnCount = 0; String buffer, label; StringArray tokens; ReadLineHelper(input, buffer, tokens); if (tokens.Length() != 4 || tokens[2].AsInteger() != (int) chromosomeX || tokens[0].AsInteger() < 0) error("Cannot handle first line of data file\n\n" "Expecting four (4) numeric values, which correspond to:\n" " num-loci -- number of loci in the pedigree\n" " this value must be positive\n" " risk-locus -- locus for which risks should be calculated\n" " this value will be ignored\n" " sex-link -- are the loci sex linked [0 - No, 1 - Yes]\n" " %s\n" " program -- which LINKAGE program do you want to use?\n" " this value will also be ignored\n\n" "The actual input read:\n%s\n", chromosomeX ? "expecting X-linked data, so this value must be ONE (1)" : "expecting autosomal data, so this must be ZERO (0)", (const char *) buffer); int numloci = tokens[0]; ReadLineHelper(input, buffer, tokens); if (tokens.Length() != 4 || tokens[0].AsInteger() != 0 || tokens[3].AsInteger() != 0) error("Cannot handle second line of data file\n\n" "Expecting four (4) numeric values, which correspond to:\n" " mutation-model -- must be zero, corresponding to no mutation\n" " male-mutation-rate -- ignored\n" " female-mutation-rate -- ignored\n" " linkage-disequilibrium -- must be zero, may be used in the future to\n" " read haplotype frequencies\n\n" "The actual input read:\n%s\n", (const char *) buffer); StringArray markerOrder; int unknown = 0; ReadLineHelper(input, buffer, markerOrder); if (markerOrder.Length() > numloci) error("The third line of the data file lists marker order\n\n" "Although %d loci are defined [in the first line],\n" "this line includes %d values:\n%s\n", numloci, markerOrder.Length(), (const char *) buffer); IntArray locus; bool need_blank_line = false; while (!ifeof(input) && numloci--) { if (ReadLineHelper(input, buffer, tokens) == 0) error("Linkage data file ends unexpectedly"); if (tokens.Length() < 2) error("Incomplete locus information in data file\n" "Information for each locus should include 2 or more fiels\n" "The expected fields are:\n" " field_type -- indicator of locus type (trait, marker,...)\n" " alleles -- number of alleles\n" " name -- locus name, preceded by hash (#) sign\n\n" "The actual input read:\n%s\n", (const char *) buffer); int locus_type = (int) tokens[0]; int alleles = (int) tokens[1]; String locus_name("LOCUS"); locus_name += ++unknown; if (tokens.Length() > 2 && tokens[2][0] == '#') { if (tokens[2][1] != 0) locus_name = tokens[2].SubStr(1); else if (tokens.Length() > 3) locus_name = tokens[3]; } if ((locus_type == 4 && alleles == 0) || (locus_type == 4 && alleles 
== 1)) { columnHash.Push(GetCovariateID(locus_name)); columns.Push(pcCovariate); columnCount++; continue; } if (locus_type == 0 && alleles == 0) { columnHash.Push(GetTraitID(locus_name)); columns.Push(pcTrait); columnCount++; continue; } if (ReadLineHelper(input, buffer, tokens) != alleles) error("Expecting %d allele frequencies, but input has %d columns:\n" "%s\n", alleles, tokens.Length(), (const char *) buffer); Vector frequencies(alleles + 1); frequencies[0] = 0.0; for (int i = 1; i <= alleles; i++) frequencies[i] = (double) tokens[i - 1]; double sum = frequencies.Sum(); if (sum <= 0.0) error("Allele frequencies at %s sum to %f, which doesn't make sense\n", (const char *) locus_name, sum); if (fabs(sum - 1.0) > 1.2e-5) { printf("Allele frequencies at %s sum to %f, adjusted to 1.0\n", (const char *) locus_name, sum); need_blank_line = true; } if (sum != 1.0) frequencies *= 1.0 / sum; switch (locus_type) { case 1 : { // Affection columnHash.Push(GetAffectionID(locus_name)); columns.Push(pcAffection); columnCount++; // Read number of liability classes if (ReadLineHelper(input, buffer, tokens) == 0) error("Linkage data file ends unexpectedly\n"); // Skip liability class data int classes = tokens[0]; if (classes > 1) { columnHash.Push(0); columns.Push(pcSkip); columnCount++; } // Separate liability class rows for males and females for X-linked data if (chromosomeX) classes *= 2; while (classes--) if (ReadLineHelper(input, buffer, tokens) == 0) error("Linkage data file ends unexpectedly\n"); // Ignore map location for quantitative variables locus.Push(-1); } break; case 3 : { columnHash.Push(GetMarkerID(locus_name)); columns.Push(pcMarker); columnCount++; // Store allele frequencies MarkerInfo * info = GetMarkerInfo(locus_name); info->freq = frequencies; // Initialize allele labels info->alleleLabels.Clear(); for (int i = 0; i < frequencies.Length(); i++) info->alleleLabels.Push(label = i); info->IndexAlleles(); // Store marker id, so that we can track map location locus.Push(GetMarkerID(locus_name)); } break; case 0 : { // Read number of quantitative variables if (ReadLineHelper(input, buffer, tokens) == 0) error("Linkage data file ends unexpectedly\n"); // Add each quantitative variable to pedigree // Discard information on means for (int vars = tokens[0], i = 0; i < vars; i++) { if (ReadLineHelper(input, buffer, tokens) == 0) error("Linkage data file ends unexpectedly\n"); String trait_name(locus_name); if (i) { trait_name += "."; trait_name += i + 1; } columnHash.Push(GetTraitID(trait_name)); columns.Push(pcTrait); columnCount++; } // Skip var-covar matrix if (ReadLineHelper(input, buffer, tokens) == 0) error("Linkage data file ends unexpectedly\n"); // Skip heterozygote scaling factor for var-covar matrix if (ReadLineHelper(input, buffer, tokens) == 0) error("Linkage data file ends unexpectedly\n"); // Ignore map location for quantitative variables locus.Push(-1); } break; case 2 : error("The data file includes binary factors\n" "Regretably, loci of this type are not supported\n\n"); break; default : error("Unsupported locus type [%d] in data file", locus_type); break; } } if (need_blank_line) printf("\n"); columns.Push(pcEnd); columnHash.Push(0); ReadLineHelper(input, buffer, tokens); int sexDifference = tokens.Length() ? 
tokens[0].AsInteger() : -1; if (tokens.Length() != 2 || (sexDifference != 0 && sexDifference != 2) || tokens[1].AsInteger() != 0) error("Error retrieving recombination information\n\n" "Expecting two (2) numeric values, which correspond to:\n" " sex-difference -- must be zero (no difference) or two (sex specific recombination)\n" " map-function -- must be zero, that is, no interference\n" "The actual input read:\n%s\n", (const char *) buffer); Vector distances[2]; bool distance_in_centimorgans = false; for (int r = 0; r <= sexDifference; r += 2) { ReadLineHelper(input, buffer, tokens); if (tokens.Length() != markerOrder.Length() - 1) error("Error retrieving recombination information\n\n" "Expecting %d recombination fractions (current map includes %d loci)\n" "Instead the following line was input:\n%s\n", markerOrder.Length() - 1, markerOrder.Length(), (const char *) buffer); distances[r >> 1].Dimension(tokens.Length()); for (int i = 0; i < tokens.Length(); i++) distances[r >> 1][i] = (double) tokens[i]; if (distances[r >> 1].Min() < 0.0) error("Linkage datafile specifies negative recombination fractions"); bool centimorgans = distances[r >> 1].Max() > 0.5; if (centimorgans && !distance_in_centimorgans) printf(" Some recombination fractions in datafile are greater than 0.5,\n" " so recombination fractions will be interpreted as cM distances\n\n"); distance_in_centimorgans |= centimorgans; } double position = 0.0, positionMale = 0.0; for (int i = 0, moving = false; i < markerOrder.Length(); i++) { int m = markerOrder[i].AsInteger() - 1; if (m < 0 || m >= locus.Length()) error("The marker order in the linkage datafile is invalid\n"); m = locus[m]; if (m != -1) { MarkerInfo * info = GetMarkerInfo(m); info->chromosome = chromosomeX ? 9999 : 0; if (sexDifference == 2) info->position = (position + positionMale) * 0.5, info->positionFemale = position, info->positionMale = positionMale; else info->position = info->positionMale = info->positionFemale = position; moving = true; } if (i < markerOrder.Length() - 1 && moving) position += distance_in_centimorgans ? 0.01 * distances[0][i] : RecombinationToDistance(distances[0][i]); if (sexDifference == 2 && i < markerOrder.Length() - 1 && moving) positionMale += distance_in_centimorgans ? 
0.01 * distances[1][i] : RecombinationToDistance(distances[1][i]); } } int PedigreeDescription::ReadLineHelper(IFILE & input, String & buffer, StringArray & tokens) { do { // Read Line buffer.ReadLine(input); buffer.Trim(); // Strip comments marked with >> int pos = buffer.FastFind(">>"); if (pos == -1) pos = buffer.FastFind("<<"); if (pos == -1) pos = buffer.Length() + 1; if (buffer[0] == '#') pos = 0; // Find space/tab delimited tokens tokens.Clear(); tokens.AddTokens(buffer.Left(pos - 1), WHITESPACE); } while (tokens.Length() == 0 && !ifeof(input)); return tokens.Length(); } void PedigreeDescription::LoadMendelDataFile(const char * iFilename) { IFILE f = ifopen(iFilename, "rb"); if (f == NULL) error( "The MENDEL format datafile %s cannot be opened\n\n" "Please check that the file exists and is not being used by another program\n" "To find out how to set input filenames, check the documentation\n", iFilename); LoadMendelDataFile(f); ifclose(f); }; void PedigreeDescription::LoadMendelDataFile(IFILE & file) { // Processes mendel format file mendelFormat = true; // Codominant markers are mapped to markers // Non-codominant markers are mapped into multiple "affection status" // (Y/N) variables columns.Clear(); columnHash.Clear(); columnCount = 0; FortranFormat parser; // Variables for storing parsed input String locusName; String locusType; String alleleLabel; String alleleFreq; String phenotype; String genotype; int phenoCount; int alleleCount; while (!ifeof(file)) { // Cycle through headers for each locus parser.SetInputFile(file); parser.SetFormat("(2A8,I2,I3)"); // After retrieving locus name, check that we haven't tried to // read past the end-of-file parser.GetNextField(locusName); parser.GetNextField(locusType); alleleCount = parser.GetNextInteger(); phenoCount = parser.GetNextInteger(); if (locusName.IsEmpty() && locusType.IsEmpty() && alleleCount == 0 && phenoCount == 0 && ifeof(file)) break; // Only recognize autosomal and x-linked loci if (locusType.Compare("AUTOSOME") != 0 && locusType.Compare("X-LINKED")) error("Unrecognized locus type '%s' in Mendel data file\n\n" "Recognized locus types are \"AUTOSOME\" and \"X-LINKED\".", (const char *) locusType); if (locusType.Compare("AUTOSOME") == 0 && chromosomeX) error("The data file indicates that locus %s is AUTOSOMAL, but\n" "X-LINKED loci were expected as input\n", (const char *) locusName); if (locusType.Compare("X-LINKED") == 0 && !chromosomeX) error("The data file indicates that locus %s is X-LINKED, but\n" "AUTOSOMAL loci were expected as input\n", (const char *) locusName); if (locusName.IsEmpty()) error("Blank locus name encountered in data file\n"); if (phenoCount == 0) { // Co-dominant marker columns.Push(pcMarker); columnHash.Push(GetMarkerID(locusName)); columnCount++; // Update marker info with allele labels and frequencies MarkerInfo * info = GetMarkerInfo(locusName); info->alleleLabels.Clear(); info->alleleLabels.Push(""); info->freq.Clear(); parser.SetFormat("(2A8)"); // Mendel allows allele names to be specified with frequencies // left blank for (int i = 0; i < alleleCount; i++) { parser.GetNextField(alleleLabel); parser.GetNextField(alleleFreq); if (alleleLabel.IsEmpty()) error("Locus %s is missing allele label for allele #%d\n", (const char *) locusName, i+1); info->alleleLabels.Push(alleleLabel); if (!alleleFreq.IsEmpty()) { if (info->freq.Length() == 0) info->freq.Push(0.0); info->freq.Push(alleleFreq.AsDouble()); } } info->IndexAlleles(); if (info->alleleLabels.Length() != info->freq.Length() && 
info->freq.Length() != 0) error("Locus %s is missing allele frequency information for %d alleles\n", (const char *) locusName, info->alleleLabels.Length() - info->freq.Length()); } else { // Non-codominant marker, which we decompose into multiple traits... parser.SetFormat("(2A8)"); // First skip allele frequency information for (int i = 0; i < alleleCount; i++) { parser.GetNextField(alleleLabel); parser.GetNextField(alleleFreq); } // Then read in each phenotype for (int i = 0; i < alleleCount; i++) { parser.SetFormat("(A8,I3)"); parser.GetNextField(phenotype); int genoCount = parser.GetNextInteger(); parser.SetFormat("(A17)"); for (int j = 0; j < genoCount; j++) parser.GetNextField(genotype); columns.Push(pcAffection); columnHash.Push(GetAffectionID(locusName + "->" + phenotype)); columnCount++; } } } columns.Push(pcEnd); columnHash.Push(0); } int PedigreeDescription::CountColumns(int type) { int count = 0; for (int i = 0; i < columns.Length(); i++) if (columns[i] == type) count++; return count; } const char * PedigreeDescription::ColumnSummary(String & string) { string.Clear(); UpdateSummary(string, pcMarker, " markers [x2 cols]"); UpdateSummary(string, pcTrait, " traits"); UpdateSummary(string, pcAffection, " discrete traits"); UpdateSummary(string, pcCovariate, " covariates"); UpdateSummary(string, pcString, " strings"); UpdateSummary(string, pcZygosity, " zygosity"); UpdateSummary(string, pcSkip, " skipped"); return string; } void PedigreeDescription::UpdateSummary(String & string, int type, const char * label) { int count = CountColumns(type); if (count) { if (string.Length()) string += ", "; string += count; string += label; } } void PedigreeDescription::AddMarkerColumn(const char * markerName) { if (columns.Last() == pcEnd) { columns.Pop(); columnHash.Pop(); } columnHash.Push(GetMarkerID(markerName)); columns.Push(pcMarker); columnCount++; } void PedigreeDescription::AddCovariateColumn(const char * covariateName) { if (columns.Last() == pcEnd) { columns.Pop(); columnHash.Pop(); } columnHash.Push(GetCovariateID(covariateName)); columns.Push(pcCovariate); columnCount++; } void PedigreeDescription::AddTraitColumn(const char * traitName) { if (columns.Last() == pcEnd) { columns.Pop(); columnHash.Pop(); } columnHash.Push(GetCovariateID(traitName)); columns.Push(pcTrait); columnCount++; } void PedigreeDescription::AddAffectionColumn(const char * affectionName) { if (columns.Last() == pcEnd) { columns.Pop(); columnHash.Pop(); } columnHash.Push(GetAffectionID(affectionName)); columns.Push(pcAffection); columnCount++; } void PedigreeDescription::AddStringColumn(const char * stringName) { if (columns.Last() == pcEnd) { columns.Pop(); columnHash.Pop(); } columnHash.Push(GetStringID(stringName)); columns.Push(pcString); columnCount++; } void PedigreeDescription::AddZygosityColumn() { if (columns.Last() == pcEnd) { columns.Pop(); columnHash.Pop(); } columnHash.Push(0); columns.Push(pcZygosity); columnCount++; } void PedigreeDescription::AddSkippedColumn() { if (columns.Last() == pcEnd) { columns.Pop(); columnHash.Pop(); } columnHash.Push(0); columns.Push(pcSkip); columnCount++; }
utf-8
1
GPL-3+
2010-2018 Mary Kate Wing
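The Load() logic in the file above dispatches on a one-letter code at the start of each data-file line (M marker, T trait, C covariate, A affection, $ string, Sn skip n columns, Z zygosity, E end), and every item line must carry a second, name token or the "has no name" error fires. A minimal usage sketch, assuming a libstatgen build; the file name, file contents, and expected output are invented for illustration:

#include <cstdio>
#include "PedigreeDescription.h"

/* example.dat (hypothetical):
 *   T  bmi         <- quantitative trait column
 *   C  age         <- covariate column
 *   M  rs123       <- marker (two genotype columns per individual)
 *   S2 ignored     <- skip the next two columns
 *   E  end-of-data
 */
void describeColumns()
{
    PedigreeDescription description;
    description.Load("example.dat", false);   // dispatches on the codes above

    String summary;
    printf("%s\n", description.ColumnSummary(summary));
    // e.g. "1 markers [x2 cols], 1 traits, 1 covariates, 2 skipped"
}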
openhpi-3.8.0/plugins/simulator/t/sim_sanity_003.c
/* -*- linux-c -*-
 *
 * (C) Copyright IBM Corp. 2005
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. This
 * file and program are licensed under a BSD style license. See
 * the Copying file included with the OpenHPI distribution for
 * full licensing terms.
 *
 * Authors:
 *     Sean Dague <http://dague.net/sean>
 */

#include <stdlib.h>
#include <SaHpi.h>
#include <oh_utils.h>
#include <oh_error.h>

/**
 * Run a series of sanity tests on the simulator
 * Return 0 on success, otherwise return -1
 **/
int main(int argc, char **argv)
{
        SaHpiSessionIdT sid = 0;
        SaHpiRptEntryT res;
        SaHpiEntryIdT rptid = SAHPI_FIRST_ENTRY;
        SaHpiEntryIdT rdrid;
        SaHpiRdrT rdr;
        SaErrorT rc = SA_OK;
        int rptctr = 0;
        int rdrctr;

        rc = saHpiSessionOpen(SAHPI_UNSPECIFIED_DOMAIN_ID, &sid, NULL);
        if(rc != SA_OK) {
                err("Failed to open session");
                return -1;
        }

        rc = saHpiDiscover(sid);
        if(rc != SA_OK) {
                err("Failed to run discover");
                return -1;
        }

        /* loop over all resources, ensure that ResourceTag and
         * ManufacturerId have been set */
        while(saHpiRptEntryGet(sid, rptid, &rptid, &res) == SA_OK) {
                /* verify we have a valid rptentry */
                if(!res.ResourceTag.DataLength) {
                        err("Resource Tag has zero length");
                        return -1;
                }
                if(!res.ResourceInfo.ManufacturerId) {
                        err("Resource has no Manufacturer Id");
                        return -1;
                }
                /* just check for the first rdr */
                rdrid = SAHPI_FIRST_ENTRY;
                rdrctr = 0;
                while (saHpiRdrGet(sid, res.ResourceId, rdrid, &rdrid, &rdr) == SA_OK) {
                        if (rdr.RecordId == 0) {
                                err("Invalid rdr entry found");
                                return -1;
                        }
                        rdrctr++;
                }
                // note that the hot swap resource has no rdrs
                if (rdrctr == 0 &&
                    res.ResourceEntity.Entry[0].EntityType != SAHPI_ENT_DISK_DRIVE_BAY) {
                        err("No rdr entries found");
                        return -1;
                }
                err("%d rdrs found for resource %d", rdrctr, res.ResourceId);
                rptctr++;
        }
        if (rptctr == 0) {
                err("No rpt entries found");
                return -1;
        }

        return 0;
}
utf-8
1
unknown
unknown
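Worth isolating from the test above: HPI table traversal is driven by the entry id itself. saHpiRptEntryGet (and likewise saHpiRdrGet) take the current id and hand back the id of the next entry through the same out-parameter, so iteration starts at SAHPI_FIRST_ENTRY and ends when the call stops returning SA_OK. A small sketch of that idiom, assuming an open session id obtained from saHpiSessionOpen() as in the test (count_resources is a name invented here):

static int count_resources(SaHpiSessionIdT sid)
{
        SaHpiEntryIdT id = SAHPI_FIRST_ENTRY;
        SaHpiRptEntryT entry;
        int n = 0;

        while (saHpiRptEntryGet(sid, id, &id, &entry) == SA_OK)
                n++;    /* entry is valid here; id already names the next record */

        return n;
}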
gimp-gap-2.6.0+dfsg/gap/gap_story_file.h
/* gap_story_file.h * * This module handles GAP storyboard file * parsing of storyboard level1 files (load information into a list) * and (re)writing storyboard files from the list (back to storyboard file) * */ /* The GIMP -- an image manipulation program * Copyright (C) 1995 Spencer Kimball and Peter Mattis * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* revision history: * version 2.3.0; 2006/04/14 new features: overlap, flip, mask definitions * version 1.3.25b; 2004/01/23 hof: created */ #ifndef _GAP_STORY_FILE_H #define _GAP_STORY_FILE_H #include "libgimp/gimp.h" #include "gap_lib.h" #include "gap_story_syntax.h" #include "gap_story_render_types.h" /* transition attribute types * (values are used as index for look-up tables) */ #define GAP_STB_ATT_TYPES_ARRAY_MAX 5 #define GAP_STB_ATT_TYPE_OPACITY 0 #define GAP_STB_ATT_TYPE_MOVE_X 1 #define GAP_STB_ATT_TYPE_MOVE_Y 2 #define GAP_STB_ATT_TYPE_ZOOM_X 3 #define GAP_STB_ATT_TYPE_ZOOM_Y 4 #define GAP_STB_MASK_SECTION_NAME "Masks" #define GAP_STB_MAX_FRAMENR 99999999 /* GapStoryRecordType enum values are a superset of GapLibAinfoType * from the sourcefile gap_lib.h */ typedef enum { GAP_STBREC_VID_SILENCE ,GAP_STBREC_VID_COLOR ,GAP_STBREC_VID_IMAGE ,GAP_STBREC_VID_ANIMIMAGE ,GAP_STBREC_VID_FRAMES ,GAP_STBREC_VID_MOVIE ,GAP_STBREC_VID_COMMENT ,GAP_STBREC_VID_UNKNOWN ,GAP_STBREC_AUD_SILENCE ,GAP_STBREC_AUD_SOUND ,GAP_STBREC_AUD_MOVIE ,GAP_STBREC_ATT_TRANSITION ,GAP_STBREC_VID_SECTION ,GAP_STBREC_VID_BLACKSECTION } GapStoryRecordType; typedef enum { GAP_STB_TARGET_URILIST, GAP_STB_TARGET_UTF8_STRING, GAP_STB_TARGET_STRING, GAP_STB_TARGET_TEXT, GAP_STB_TARGET_COMPOUND_TEXT, GAP_STB_TARGET_STORYBOARD_ELEM } GapStoryDndTargets; typedef enum { GAP_STB_PM_NORMAL ,GAP_STB_PM_PINGPONG } GapStoryVideoPlaymode; typedef enum { GAP_STB_MASTER_TYPE_UNDEFINED ,GAP_STB_MASTER_TYPE_STORYBOARD ,GAP_STB_MASTER_TYPE_CLIPLIST } GapStoryMasterType; /* The GapStoryElem is a commonly used structure for * all types of video clips, audio clips, video attributes * and mask definitions. * mask definitions are handled as video clips using the reserved track number * GAP_STB_MASK_TRACK_NUMBER. */ typedef struct GapStoryElem { gint32 story_id; gint32 story_orig_id; gboolean selected; GapStoryRecordType record_type; GapStoryVideoPlaymode playmode; gint32 track; char *orig_filename; /* full filename used for IMAGE and MOVIE Files * and SECTIONS (for section_name) */ char *orig_src_line; /* without \n, used to store header, comment and unknown lines */ /* basename + ext are used for FRAME range elements only */ gchar *basename; /* path+filename (without number part and without extension) */ gchar *ext; /* extension ".xcf" ".jpg" ...
including the dot */ gint32 seltrack; /* selected videotrack in a videofile (for GAP_FRN_MOVIE) */ gint32 exact_seek; /* 0 fast seek, 1 exact seek (for GAP_FRN_MOVIE) */ gdouble delace; /* 0.0 no deinterlace, 1.0-1.99 odd 2.0-2.99 even rows (for GAP_FRN_MOVIE) */ gint32 flip_request; /* 0 none, 1 flip horizontal, 2 flip vertical, 3 flip both */ char *mask_name; /* optional reference to a layer mask * if track == GAP_STB_MASK_TRACK_NUMBER this attribute * is the mandatory definition of the mask_name. */ gdouble mask_stepsize; GapStoryMaskAnchormode mask_anchor; gboolean mask_disable; gchar *preferred_decoder; gchar *filtermacro_file; gint32 fmac_total_steps; gint32 from_frame; gint32 to_frame; gint32 nloop; /* 1 play one time */ gint32 nframes; /* if playmode == normal * then frames = nloop * (ABS(from_frame - to_frame) + 1); * else frames = (nloop * 2 * ABS(from_frame - to_frame)) + 1; */ gdouble step_density; /* 1.0 for normal stepsize * 2.0 use every 2nd frame (double speed at same framerate) * 0.5 use each frame twice (half speed at same framerate) */ gint32 file_line_nr; /* line Number in the storyboard file */ /* members for level2 VID Record types */ gdouble vid_wait_untiltime_sec; gdouble color_red; gdouble color_green; gdouble color_blue; gdouble color_alpha; /* members for attribute Record types */ gboolean att_keep_proportions; gboolean att_fit_width; gboolean att_fit_height; /* members for transition attribute Record type */ gboolean att_arr_enable[GAP_STB_ATT_TYPES_ARRAY_MAX]; gdouble att_arr_value_from[GAP_STB_ATT_TYPES_ARRAY_MAX]; gdouble att_arr_value_to[GAP_STB_ATT_TYPES_ARRAY_MAX]; gint32 att_arr_value_dur[GAP_STB_ATT_TYPES_ARRAY_MAX]; /* number of frames to change from -> to value */ gint32 att_overlap; /* number of overlapping frames (value > 0 will generate a shadow track) */ /* new members for Audio Record types */ char *aud_filename; gint32 aud_seltrack; /* selected audiotrack in a videofile (for GAP_AUT_MOVIE) */ gdouble aud_wait_untiltime_sec; gdouble aud_play_from_sec; gdouble aud_play_to_sec; gdouble aud_volume_start; gdouble aud_volume; gdouble aud_volume_end; gdouble aud_fade_in_sec; gdouble aud_fade_out_sec; gdouble aud_min_play_sec; /* for optimized audio extraction from videofiles */ gdouble aud_max_play_sec; gdouble aud_framerate; /* framerate that is used to convert audio unit frame <-> secs */ struct GapStoryElem *comment; struct GapStoryElem *next; } GapStoryElem; typedef struct GapStorySection { GapStoryElem *stb_elem; gchar *section_name; /* null refers to the main section */ gint32 current_vtrack; gint32 section_id; /* unique ID, NOT persistent */ gint32 version; /* number of changes while editing, NOT persistent */ void *next; } GapStorySection; typedef struct GapStoryEditSettings { gchar *section_name; /* null refers to the main section */ gint32 track; gint32 page; } GapStoryEditSettings; typedef struct GapStoryFrameNumberMappingElem { gint32 mapped_frame_number; gint32 orig_frame_number; struct GapStoryFrameNumberMappingElem *next; } GapStoryFrameNumberMappingElem; typedef struct GapStoryFrameNumberMap { gint32 total_frames_selected; GapStoryFrameNumberMappingElem *map_list; } GapStoryFrameNumberMap; typedef struct GapStoryBoard { GapStorySection *active_section; /* reference pointer to active section (don't free this) */ GapStorySection *mask_section; /* reference pointer to mask section (don't free this) */ GapStorySection *stb_section; /* root of section list */ gchar *storyboardfile; GapStoryMasterType master_type; gint32 master_width; gint32
master_height; gdouble master_framerate; gboolean master_vtrack1_is_toplayer; /* default = true; */ gdouble master_aspect_ratio; gint32 master_aspect_width; gint32 master_aspect_height; gint32 master_samplerate; gdouble master_volume; gint32 layout_cols; gint32 layout_rows; gint32 layout_thumbsize; gchar *preferred_decoder; /* for error handling while parsing */ gchar *errtext; gchar *errline; gint32 errline_nr; gchar *warntext; gchar *warnline; gint32 warnline_nr; gint32 curr_nr; gchar *currline; /* don't g_free this one! */ gint32 count_unprintable_chars; /* for composite video playback */ gint32 stb_parttype; gint32 stb_unique_id; /* selection mapping is not relevant for rendering * but is used at playback of composite video * where frame ranges of the selected clips * are represented by a mapping * and the player only picks frame numbers via mapping */ GapStoryFrameNumberMap *mapping; gboolean unsaved_changes; GapStoryEditSettings *edit_settings; gchar *master_insert_area_format; } GapStoryBoard; typedef struct GapStoryLocateRet { GapStoryElem *stb_elem; gint32 ret_framenr; gboolean locate_ok; } GapStoryLocateRet; typedef struct GapStoryCalcAttr { gint32 width; gint32 height; gint32 x_offs; gint32 y_offs; gdouble opacity; } GapStoryCalcAttr; typedef struct GapStoryVideoFileRef { gchar *videofile; /* full filename */ gchar *userdata; gchar *preferred_decoder; gint32 seltrack; gint32 max_ref_framenr; void *next; } GapStoryVideoFileRef; void gap_story_debug_print_list(GapStoryBoard *stb); void gap_story_debug_print_elem(GapStoryElem *stb_elem); GapStoryBoard * gap_story_new_story_board(const char *filename); GapStoryBoard * gap_story_parse(const gchar *filename); void gap_story_elem_calculate_nframes(GapStoryElem *stb_elem); GapStoryLocateRet * gap_story_locate_framenr(GapStoryBoard *stb , gint32 in_framenr , gint32 in_track); GapStoryLocateRet * gap_story_locate_expanded_framenr(GapStorySection *section , gint32 in_framenr , gint32 in_track); void gap_story_lists_merge(GapStoryBoard *stb_dst , GapStoryBoard *stb_src , gint32 story_id , gboolean insert_after , gint32 dst_vtrack); gint32 gap_story_find_last_selected_in_track(GapStorySection *section, gint32 track_nr); GapStoryElem * gap_story_elem_find_by_story_id(GapStoryBoard *stb, gint32 story_id); GapStoryElem * gap_story_elem_find_by_story_orig_id(GapStoryBoard *stb, gint32 story_orig_id); gboolean gap_story_save(GapStoryBoard *stb, const char *filename); GapStoryElem * gap_story_new_elem(GapStoryRecordType record_type); long gap_story_upd_elem_from_filename(GapStoryElem *stb_elem, const char *filename); gboolean gap_story_filename_is_videofile_by_ext(const char *filename); gboolean gap_story_filename_is_videofile(const char *filename); void gap_story_elem_free(GapStoryElem **stb_elem); void gap_story_free_stb_section(GapStorySection *stb_section); void gap_story_free_selection_mapping(GapStoryFrameNumberMap *mapping); void gap_story_free_storyboard(GapStoryBoard **stb_ptr); GapStoryElem * gap_story_new_mask_elem(GapStoryRecordType record_type); GapStorySection * gap_story_new_section(); GapStorySection * gap_story_find_first_referable_subsection(GapStoryBoard *stb_dst); GapStoryElem * gap_story_elem_find_in_section_by_story_id(GapStorySection *section, gint32 story_id); GapStorySection * gap_story_find_section_by_story_id(GapStoryBoard *stb, gint32 story_id); GapStorySection * gap_story_find_section_by_stb_elem(GapStoryBoard *stb, GapStoryElem *stb_elem); GapStorySection * gap_story_find_section_by_name(GapStoryBoard *stb, const char
*section_name); GapStorySection * gap_story_find_main_section(GapStoryBoard *stb); GapStorySection * gap_story_create_or_find_section_by_name(GapStoryBoard *stb, const char *section_name); gboolean gap_story_remove_section(GapStoryBoard *stb, GapStorySection *del_section); gchar * gap_story_generate_new_unique_section_name(GapStoryBoard *stb); void gap_story_list_append_elem(GapStoryBoard *stb, GapStoryElem *stb_elem); void gap_story_list_append_elem_at_section(GapStoryBoard *stb , GapStoryElem *stb_elem , GapStorySection *active_section); gint32 gap_story_count_total_frames_in_section(GapStorySection *section); gint32 gap_story_get_framenr_by_story_id(GapStorySection *section, gint32 story_id, gint32 in_track); gint32 gap_story_get_expanded_framenr_by_story_id(GapStorySection *section, gint32 story_id, gint32 in_track); char * gap_story_get_filename_from_elem(GapStoryElem *stb_elem); char * gap_story_get_filename_from_elem_nr(GapStoryElem *stb_elem, gint32 in_framenr); GapStoryElem * gap_story_fetch_nth_active_elem(GapStoryBoard *stb , gint32 seq_nr , gint32 in_track ); GapAnimInfo * gap_story_fake_ainfo_from_stb(GapStoryBoard *stb_ptr, gint32 in_track); GapStoryElem * gap_story_elem_duplicate(GapStoryElem *stb_elem); void gap_story_elem_copy(GapStoryElem *stb_elem_dst, GapStoryElem *stb_elem_src); GapStoryElem * gap_story_find_mask_definition_by_name(GapStoryBoard *stb_ptr, const char *mask_name); GapStoryElem * gap_story_find_mask_reference_by_name(GapStoryBoard *stb_ptr, const char *mask_name); void gap_story_enable_hidden_maskdefinitions(GapStoryBoard *stb_ptr); GapStoryBoard * gap_story_duplicate_full(GapStoryBoard *stb_ptr); GapStoryBoard * gap_story_duplicate_active_and_mask_section(GapStoryBoard *stb_ptr); GapStoryBoard * gap_story_duplicate_vtrack(GapStoryBoard *stb_ptr, gint32 in_vtrack); GapStoryBoard * gap_story_duplicate_sel_only(GapStoryBoard *stb_ptr, gint32 in_vtrack); GapStoryBoard * gap_story_duplicate_one_elem_and_masks(GapStoryBoard *stb_ptr , GapStorySection *active_section, gint32 story_id); GapStoryBoard * gap_story_duplicate_one_elem(GapStoryBoard *stb_ptr , GapStorySection *active_section, gint32 story_id); GapStoryBoard * gap_story_board_duplicate_distinct_sorted(GapStoryBoard *stb_dup, GapStoryBoard *stb_ptr); void gap_story_copy_sub_sections(GapStoryBoard *stb_src, GapStoryBoard *stb_dst); void gap_story_set_properties_like_sample_storyboard (GapStoryBoard *stb , GapStoryBoard *stb_sample); void gap_story_remove_sel_elems(GapStoryBoard *stb); gint32 gap_story_count_active_elements(GapStoryBoard *stb_ptr, gint32 in_track); void gap_story_get_master_pixelsize(GapStoryBoard *stb_ptr ,gint32 *width ,gint32 *height); gdouble gap_story_get_master_size_respecting_aspect(GapStoryBoard *stb_ptr ,gint32 *width ,gint32 *height); gdouble gap_story_adjust_size_respecting_aspect(GapStoryBoard *stb_ptr ,gint32 *width ,gint32 *height); void gap_story_selection_all_set(GapStoryBoard *stb, gboolean sel_state); void gap_story_selection_by_story_id(GapStoryBoard *stb, gboolean sel_state, gint32 story_id); void gap_story_selection_from_ref_list_orig_ids(GapStoryBoard *stb, gboolean sel_state, GapStoryBoard *stb_ref); const char * gap_story_get_preferred_decoder(GapStoryBoard *stb, GapStoryElem *stb_elem); void gap_story_set_aud_movie_min_max(GapStoryBoard *stb); gboolean gap_story_elem_is_audio(GapStoryElem *stb_elem); gboolean gap_story_elem_is_video(GapStoryElem *stb_elem); gboolean gap_story_elem_is_video_relevant(GapStoryElem *stb_elem); gboolean 
gap_story_elem_is_same_resource(GapStoryElem *stb_elem, GapStoryElem *stb_elem_ref); GapStoryElem * gap_story_elem_find_by_same_resource(GapStoryBoard *stb_ptr, GapStoryElem *stb_elem_ref); void gap_story_del_audio_track(GapStoryBoard *stb, gint aud_track); gboolean gap_story_gen_otone_audio(GapStoryBoard *stb ,gint vid_track ,gint aud_track ,gint aud_seltrack ,gboolean replace_existing_aud_track ,gdouble *first_non_matching_framerate ); gdouble gap_story_get_default_attribute(gint att_typ_idx); void gap_story_file_calculate_render_attributes(GapStoryCalcAttr *result_attr , gint32 view_vid_width , gint32 view_vid_height , gint32 vid_width , gint32 vid_height , gint32 frame_width , gint32 frame_height , gboolean keep_proportions , gboolean fit_width , gboolean fit_height , gdouble opacity , gdouble scale_x , gdouble scale_y , gdouble move_x , gdouble move_y ); gboolean gap_story_update_mask_name_references(GapStoryBoard *stb_ptr , const char *mask_name_new , const char *mask_name_old ); char * gap_story_generate_unique_maskname(GapStoryBoard *stb_ptr); GapStoryElem * gap_story_find_maskdef_equal_to_ref_elem(GapStoryBoard *stb_ptr, GapStoryElem *stb_ref_elem); gint32 gap_story_get_current_vtrack (GapStoryBoard *stb, GapStorySection *section); void gap_story_set_current_vtrack (GapStoryBoard *stb, GapStorySection *section , gint32 current_vtrack); gint32 gap_story_get_mapped_master_frame_number(GapStoryFrameNumberMap *mapping , gint32 frame_number); GapStoryFrameNumberMap * gap_story_create_new_mapping_from_selection(GapStorySection *active_section , gint32 vtrack); GapStoryFrameNumberMap * gap_story_create_new_mapping_by_story_id(GapStorySection *active_section , gint32 vtrack, gint32 story_id); void gap_story_debug_print_mapping(GapStoryFrameNumberMap *mapping); void gap_story_free_GapStoryVideoFileRef(GapStoryVideoFileRef *vref_list); GapStoryVideoFileRef * p_new_GapStoryVideoFileRef(const char *videofile , gint32 seltrack , const char *preferred_decoder , gint32 max_ref_framenr); GapStoryVideoFileRef * gap_story_get_video_file_ref_list(GapStoryBoard *stb); char * gap_story_build_basename(const char *filename); #endif
utf-8
1
unknown
unknown
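The nframes comment on GapStoryElem in the header above encodes the clip-length rule: normal mode plays the selected range once per loop, while ping-pong traverses it forward and back without repeating the turning-point frames. Spelled out as a function (calc_nframes is a name invented for this sketch; the types, the ABS macro, and the playmode constants come from the header's glib/gimp environment):

static gint32
calc_nframes(GapStoryVideoPlaymode playmode, gint32 nloop,
             gint32 from_frame, gint32 to_frame)
{
  gint32 range = ABS(from_frame - to_frame);

  if (playmode == GAP_STB_PM_NORMAL)
    return nloop * (range + 1);      /* e.g. 2 loops of frames 1..10 -> 20 */

  /* ping-pong: 1..10..1 shares the turning-point frames, so one loop of
   * frames 1..10 yields 19 frames, plus the single final frame overall */
  return (nloop * 2 * range) + 1;
}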
openmpi-4.1.2/ompi/mca/io/romio321/romio/test/hindexed.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */ /* * * (C) 2008 by Argonne National Laboratory. * See COPYRIGHT in top-level directory. */ /* Wei-keng Liao (wkliao@ece.northwestern.edu) September 8, 2008 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <mpi.h> #define YLEN 5 #define XLEN 10 #define SUB_XLEN 3 /* rjl: I was just too lazy to compute this at run-time */ char compare_buf[XLEN*4][YLEN*4] = { {'0','1','2',0,0,'3','4','5',0,0,'D','E','F',0,0,'G','H','I'}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {'6','7','8',0,0,'9',':',';',0,0,'J','K','L',0,0,'M','N','O'}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {'X','Y','Z',0,0,'[','\\',']',0,0,'l','m','n',0,0,'o','p','q'}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {'^','_','`',0,0,'a','b','c',0,0,'r','s','t',0,0,'u','v','w'}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {'0','1','2',0,0,'3','4','5',0,0,'D','E','F',0,0,'G','H','I'}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {'6','7','8',0,0,'9',':',';',0,0,'J','K','L',0,0,'M','N','O'}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {'X','Y','Z',0,0,'[','\\',']',0,0,'l','m','n',0,0,'o','p','q'}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {'^','_','`',0,0,'a','b','c',0,0,'r','s','t',0,0,'u','v','w'}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0} }; /* set this if you want a dump of the global array #define VERBOSE 1 */ /*----< main() >------------------------------------------------------------*/ int main(int argc, char **argv) { int i, j, err, rank, np, num_io; char *buf, *filename; int rank_dim[2], array_of_sizes[2]; int array_of_subsizes[2]; int count, *blocklengths, global_array_size; MPI_Count ftype_size; MPI_Aint *displacements; MPI_File fh; MPI_Datatype ftype; MPI_Status status; MPI_Offset offset=0; int nr_errors=0; #ifdef VERBOSE int k; #endif MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &np); if (np != 4) { if (!rank) printf("Please run with 4 processes. Exiting ...\n\n"); MPI_Finalize(); return 1; } filename = argv[1]; num_io = 2; /*-----------------------------------------------------------------------*/ /* process rank in each dimension */ rank_dim[0] = rank / 2; rank_dim[1] = rank % 2; /* global 2D array size */ array_of_sizes[0] = YLEN * 2; array_of_sizes[1] = XLEN * 2; global_array_size = array_of_sizes[0] * array_of_sizes[1]; array_of_subsizes[0] = YLEN / 2; array_of_subsizes[1] = XLEN * SUB_XLEN / 5; offset = rank_dim[0] * YLEN * array_of_sizes[1] + rank_dim[1] * XLEN; /* define data type for file view */ count = array_of_subsizes[0] * 2; /* 2 is the no. 
blocks along X */ blocklengths = (int*) malloc(count*sizeof(int)); displacements = (MPI_Aint*)malloc(count*sizeof(MPI_Aint)); for (i=0; i<count; i++) blocklengths[i] = array_of_subsizes[1] / 2; for (i=0; i<array_of_subsizes[0]; i++) for (j=0; j<2; j++) displacements[i*2+j] = offset + i*2*array_of_sizes[1] + j * XLEN/2; MPI_Type_create_hindexed(count, blocklengths, displacements, MPI_CHAR, &ftype); MPI_Type_commit(&ftype); MPI_Type_size_x(ftype, &ftype_size); /* subarray's layout in the global array P0's 's layout P1's layout [ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9] | [ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9] [ 0] 0 1 2 3 4 5 | D E F G H I [ 1] | [ 2] 6 7 8 9 : ; | J K L M N O [ 3] | [ 4] | [ 5] | [ 6] | [ 7] | [ 8] | [ 9] | P2's 's layout P3's layout [ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9] | [ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9] [ 0] | [ 1] | [ 2] | [ 3] | [ 4] | [ 5] X Y Z [ \ ] | l m n o p q [ 6] | [ 7] ^ _ ` a b c | r s t u v w [ 8] | [ 9] | */ /* initialize the write buffer */ buf = (char*) malloc(array_of_subsizes[0]*array_of_subsizes[1]); for (i=0; i<array_of_subsizes[0]*array_of_subsizes[1]; i++) buf[i] = '0' + rank*20 + i%79; /* zero file contents ---------------------------------------------------*/ if (rank == 0) { char *wr_buf = (char*) calloc(num_io*global_array_size,1); MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh); MPI_File_write(fh, wr_buf, num_io*global_array_size, MPI_CHAR, &status); MPI_File_close(&fh); free(wr_buf); } /* open the file --------------------------------------------------------*/ err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh); if (err != MPI_SUCCESS) { printf("Error: MPI_File_open() filename %s\n",filename); MPI_Abort(MPI_COMM_WORLD, -1); exit(1); } /* MPI collective write */ for (i=0; i<num_io; i++) { offset = i * global_array_size; /* set the file view */ MPI_File_set_view(fh, offset, MPI_BYTE, ftype, "native", MPI_INFO_NULL); MPI_File_write_all(fh, buf, ftype_size, MPI_CHAR, &status); } MPI_File_close(&fh); /* read and print file contents -----------------------------------------*/ if (rank == 0) { char *ptr; char *rd_buf = (char*) calloc(num_io*global_array_size,1); MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh); MPI_File_read(fh, rd_buf, num_io*global_array_size, MPI_CHAR, &status); MPI_File_close(&fh); #ifdef VERBOSE printf("-------------------------------------------------------\n"); printf(" ["); for (i=0; i<2; i++) { for (j=0; j<XLEN; j++) printf(" %d",j); printf(" "); } printf("]\n\n"); ptr = rd_buf; for (k=0; k<num_io; k++) { for (i=0; i<2*YLEN; i++) { printf("[%2d]",k*2*YLEN+i); for (j=0; j<2*XLEN; j++) { if (j>0 && j%XLEN==0) printf(" "); if (*ptr != 0) printf(" %c",*ptr); else printf(" "); ptr++; } printf("\n"); } printf("\n"); } #endif ptr = rd_buf; for(i=0; i<2*YLEN*num_io; i++) { for(j=0; j<2*XLEN; j++) { if( *ptr != compare_buf[i][j]) { fprintf(stderr, "expected %d got %d at [%d][%d]\n", *ptr, compare_buf[i][j], i, j); nr_errors++; } ptr++; } } free(rd_buf); if (nr_errors == 0) fprintf(stdout, " No Errors\n"); else fprintf(stderr, "Found %d errors\n", nr_errors); } free(blocklengths); free(displacements); free(buf); MPI_Type_free(&ftype); MPI_Finalize(); return 0; } /* command-line outputs are: (the global array is written twice) % mpiexec -n 4 wkl_subarray ------------------------------------------------------- [ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 ] [ 0] 0 1 2 3 4 5 D E F G H I [ 1] [ 2] 6 7 8 9 
: ; J K L M N O [ 3] [ 4] [ 5] X Y Z [ \ ] l m n o p q [ 6] [ 7] ^ _ ` a b c r s t u v w [ 8] [ 9] [10] 0 1 2 3 4 5 D E F G H I [11] [12] 6 7 8 9 : ; J K L M N O [13] [14] [15] X Y Z [ \ ] l m n o p q [16] [17] ^ _ ` a b c r s t u v w [18] [19] */
utf-8
1
unknown
unknown
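The test above feeds MPI_Type_create_hindexed one entry per contiguous run: a count of runs, each with a block length and an absolute displacement into the file. Displacements for hindexed types are in bytes, which coincides with element offsets here because the old type is MPI_CHAR. A stripped-down version of the same construction for a toy layout (all sizes and the function name invented for illustration):

#include <mpi.h>

/* Two local rows, two 2-char blocks per row, inside an 8-char-wide
 * global row: the displacements walk the global array in bytes. */
static MPI_Datatype make_toy_ftype(void)
{
    int      blocklens[4];
    MPI_Aint disps[4];
    int      row_stride = 8;                     /* global X length */

    for (int i = 0; i < 2; i++)                  /* local rows      */
        for (int j = 0; j < 2; j++) {            /* blocks per row  */
            blocklens[i * 2 + j] = 2;
            disps[i * 2 + j]     = i * row_stride + j * 4;
        }

    MPI_Datatype ftype;
    MPI_Type_create_hindexed(4, blocklens, disps, MPI_CHAR, &ftype);
    MPI_Type_commit(&ftype);
    return ftype;                   /* usable as filetype in MPI_File_set_view */
}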
mpich-4.0/modules/libfabric/src/tree.c
/* * Copyright (c) 2015 Cray Inc. All rights reserved. * Copyright (c) 2018 Intel Corp, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * Copied from http://oopweb.com/Algorithms/Documents/Sman/VolumeFrames.html?/Algorithms/Documents/Sman/Volume/RedBlackTrees_files/s_rbt.htm * * Disclosure from the author's main page: * (http://oopweb.com/Algorithms/Documents/Sman/VolumeFrames.html?/Algorithms/Documents/Sman/Volume/RedBlackTrees_files/s_rbt.htm) * * Source code when part of a software project may be used freely * without reference to the author. * */ // reentrant red-black tree #include <assert.h> #include <ofi_tree.h> #include <ofi_osd.h> #include <rdma/fi_errno.h> static struct ofi_rbnode *ofi_rbnode_alloc(struct ofi_rbmap *map) { struct ofi_rbnode *node; if (!map->free_list) return malloc(sizeof(*node)); node = map->free_list; map->free_list = node->right; return node; } static void ofi_rbnode_free(struct ofi_rbmap *map, struct ofi_rbnode *node) { node->right = map->free_list ? 
map->free_list : NULL; map->free_list = node; } void ofi_rbmap_init(struct ofi_rbmap *map, int (*compare)(struct ofi_rbmap *map, void *key, void *data)) { map->compare = compare; map->root = &map->sentinel; map->sentinel.left = &map->sentinel; map->sentinel.right = &map->sentinel; map->sentinel.parent = NULL; map->sentinel.color = BLACK; map->sentinel.data = NULL; } struct ofi_rbmap * ofi_rbmap_create(int (*compare)(struct ofi_rbmap *map, void *key, void *data)) { struct ofi_rbmap *map; map = calloc(1, sizeof *map); if (map) ofi_rbmap_init(map, compare); return map; } static void ofi_delete_tree(struct ofi_rbmap *map, struct ofi_rbnode *node) { if (node == &map->sentinel) return; ofi_delete_tree(map, node->left); ofi_delete_tree(map, node->right); free(node); } void ofi_rbmap_cleanup(struct ofi_rbmap *map) { struct ofi_rbnode *node; ofi_delete_tree(map, map->root); while (map->free_list) { node = map->free_list; map->free_list = node->right; free(node); } } void ofi_rbmap_destroy(struct ofi_rbmap *map) { ofi_rbmap_cleanup(map); free(map); } int ofi_rbmap_empty(struct ofi_rbmap *map) { return map->root == &map->sentinel; } static void ofi_rotate_left(struct ofi_rbmap *map, struct ofi_rbnode *node) { struct ofi_rbnode *y = node->right; node->right = y->left; if (y->left != &map->sentinel) y->left->parent = node; if (y != &map->sentinel) y->parent = node->parent; if (node->parent) { if (node== node->parent->left) node->parent->left = y; else node->parent->right = y; } else { map->root = y; } y->left = node; if (node != &map->sentinel) node->parent = y; } static void ofi_rotate_right(struct ofi_rbmap *map, struct ofi_rbnode *node) { struct ofi_rbnode *y = node->left; node->left = y->right; if (y->right != &map->sentinel) y->right->parent = node; if (y != &map->sentinel) y->parent = node->parent; if (node->parent) { if (node == node->parent->right) node->parent->right = y; else node->parent->left = y; } else { map->root = y; } y->right = node; if (node != &map->sentinel) node->parent = y; } static void ofi_insert_rebalance(struct ofi_rbmap *map, struct ofi_rbnode *x) { struct ofi_rbnode *y; while (x != map->root && x->parent->color == RED) { if (x->parent == x->parent->parent->left) { y = x->parent->parent->right; if (y->color == RED) { x->parent->color = BLACK; y->color = BLACK; x->parent->parent->color = RED; x = x->parent->parent; } else { if (x == x->parent->right) { x = x->parent; ofi_rotate_left(map, x); } x->parent->color = BLACK; x->parent->parent->color = RED; ofi_rotate_right(map, x->parent->parent); } } else { y = x->parent->parent->left; if (y->color == RED) { x->parent->color = BLACK; y->color = BLACK; x->parent->parent->color = RED; x = x->parent->parent; } else { if (x == x->parent->left) { x = x->parent; ofi_rotate_right(map, x); } x->parent->color = BLACK; x->parent->parent->color = RED; ofi_rotate_left(map, x->parent->parent); } } } map->root->color = BLACK; } int ofi_rbmap_insert(struct ofi_rbmap *map, void *key, void *data, struct ofi_rbnode **ret_node) { struct ofi_rbnode *current, *parent, *node; int ret; current = map->root; parent = NULL; while (current != &map->sentinel) { ret = map->compare(map, key, current->data); if (ret == 0) { if (ret_node) *ret_node = current; return -FI_EALREADY; } parent = current; current = (ret < 0) ? 
current->left : current->right; } node = ofi_rbnode_alloc(map); if (!node) return -FI_ENOMEM; node->parent = parent; node->left = &map->sentinel; node->right = &map->sentinel; node->color = RED; node->data = data; if (parent) { if (map->compare(map, key, parent->data) < 0) parent->left = node; else parent->right = node; } else { map->root = node; } ofi_insert_rebalance(map, node); if (ret_node) *ret_node = node; return 0; } static void ofi_delete_rebalance(struct ofi_rbmap *map, struct ofi_rbnode *node) { struct ofi_rbnode *w; while (node != map->root && node->color == BLACK) { if (node == node->parent->left) { w = node->parent->right; if (w->color == RED) { w->color = BLACK; node->parent->color = RED; ofi_rotate_left(map, node->parent); w = node->parent->right; } if (w->left->color == BLACK && w->right->color == BLACK) { w->color = RED; node = node->parent; } else { if (w->right->color == BLACK) { w->left->color = BLACK; w->color = RED; ofi_rotate_right(map, w); w = node->parent->right; } w->color = node->parent->color; node->parent->color = BLACK; w->right->color = BLACK; ofi_rotate_left(map, node->parent); node = map->root; } } else { w = node->parent->left; if (w->color == RED) { w->color = BLACK; node->parent->color = RED; ofi_rotate_right(map, node->parent); w = node->parent->left; } if (w->right->color == BLACK && w->left->color == BLACK) { w->color = RED; node = node->parent; } else { if (w->left->color == BLACK) { w->right->color = BLACK; w->color = RED; ofi_rotate_left(map, w); w = node->parent->left; } w->color = node->parent->color; node->parent->color = BLACK; w->left->color = BLACK; ofi_rotate_right(map, node->parent); node = map->root; } } } node->color = BLACK; } static void ofi_rbmap_replace_node_ptr(struct ofi_rbmap *map, struct ofi_rbnode *old_node, struct ofi_rbnode *new_node) { if (new_node == old_node) return; *new_node = *old_node; if (!old_node->parent) map->root = new_node; else if (old_node == old_node->parent->left) old_node->parent->left = new_node; else old_node->parent->right = new_node; if (old_node->left != &map->sentinel) old_node->left->parent = new_node; if (old_node->right != &map->sentinel) old_node->right->parent = new_node; } void ofi_rbmap_delete(struct ofi_rbmap *map, struct ofi_rbnode *node) { struct ofi_rbnode *x, *y; if (node->left == &map->sentinel) { y = node; x = y->right; } else if (node->right == &map->sentinel) { y = node; x = y->left; } else { y = node->right; while (y->left != &map->sentinel) y = y->left; x = y->right; } x->parent = y->parent; if (y->parent) { if (y == y->parent->left) y->parent->left = x; else y->parent->right = x; } else { map->root = x; } if (y != node) node->data = y->data; if (y->color == BLACK) ofi_delete_rebalance(map, x); /* swap y in for node, so we can free node */ ofi_rbmap_replace_node_ptr(map, node, y); ofi_rbnode_free(map, node); } struct ofi_rbnode *ofi_rbmap_get_root(struct ofi_rbmap *map) { if (ofi_rbmap_empty(map)) return NULL; return map->root; } struct ofi_rbnode *ofi_rbmap_find(struct ofi_rbmap *map, void *key) { struct ofi_rbnode *node; int ret; node = map->root; while (node != &map->sentinel) { ret = map->compare(map, key, node->data); if (ret == 0) return node; node = (ret < 0) ? 
node->left : node->right; } return NULL; } int ofi_rbmap_find_delete(struct ofi_rbmap *map, void *key) { struct ofi_rbnode *node; node = ofi_rbmap_find(map, key); if (!node) return -FI_ENODATA; ofi_rbmap_delete(map, node); return 0; } struct ofi_rbnode *ofi_rbmap_search(struct ofi_rbmap *map, void *key, int (*compare)(struct ofi_rbmap *map, void *key, void *data)) { struct ofi_rbnode *node; int ret; node = map->root; while (node != &map->sentinel) { ret = compare(map, key, node->data); if (ret == 0) return node; node = (ret < 0) ? node->left : node->right; } return NULL; }
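/*
 * Illustrative usage sketch -- not part of the original file. It exercises
 * only the API defined above (ofi_rbmap_create/insert/find/delete/destroy);
 * the names my_item, my_compare and rbmap_example are hypothetical. Note
 * that the tree stores only `data`, so the key must be recoverable from the
 * stored data for later compares to work.
 */
struct my_item {
	int key;
	const char *value;
};

static int my_compare(struct ofi_rbmap *map, void *key, void *data)
{
	int k = *(int *) key;
	struct my_item *item = data;

	return (k < item->key) ? -1 : (k > item->key) ? 1 : 0;
}

static void rbmap_example(void)
{
	struct ofi_rbmap *map;
	struct ofi_rbnode *node;
	struct my_item a = { 1, "one" }, b = { 2, "two" };
	int lookup = 2;

	map = ofi_rbmap_create(my_compare);
	if (!map)
		return;

	/* Key and data may reference the same object. */
	if (ofi_rbmap_insert(map, &a.key, &a, NULL))
		goto out;
	if (ofi_rbmap_insert(map, &b.key, &b, NULL))
		goto out;

	node = ofi_rbmap_find(map, &lookup);	/* returns the node holding b */
	if (node)
		ofi_rbmap_delete(map, node);	/* node is recycled to free_list */
out:
	ofi_rbmap_destroy(map);
}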
utf-8
1
other
2002 University of Chicago 1998--2020, Argonne National Laboratory
libreoffice-7.3.1~rc1/svx/source/sidebar/lists/ListsPropertyPanel.cxx
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* * This file is part of the LibreOffice project. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * This file incorporates work covered by the following license notice: * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright * ownership. The ASF licenses this file to you under the Apache * License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.apache.org/licenses/LICENSE-2.0 . */ #include "ListsPropertyPanel.hxx" #include <com/sun/star/lang/IllegalArgumentException.hpp> using namespace css; using namespace css::uno; namespace svx::sidebar { std::unique_ptr<PanelLayout> ListsPropertyPanel::Create(weld::Widget* pParent, const css::uno::Reference<css::frame::XFrame>& rxFrame) { if (pParent == nullptr) throw lang::IllegalArgumentException("no parent Window given to ListsPropertyPanel::Create", nullptr, 0); if (!rxFrame.is()) throw lang::IllegalArgumentException("no XFrame given to ListsPropertyPanel::Create", nullptr, 1); return std::make_unique<ListsPropertyPanel>(pParent, rxFrame); } ListsPropertyPanel::ListsPropertyPanel(weld::Widget* pParent, const css::uno::Reference<css::frame::XFrame>& rxFrame) : PanelLayout(pParent, "ListsPropertyPanel", "svx/ui/sidebarlists.ui") , mxTBxNumBullet(m_xBuilder->weld_toolbar("numberbullet")) , mxNumBulletDispatcher(new ToolbarUnoDispatcher(*mxTBxNumBullet, *m_xBuilder, rxFrame)) , mxTBxOutline(m_xBuilder->weld_toolbar("outline")) , mxOutlineDispatcher(new ToolbarUnoDispatcher(*mxTBxOutline, *m_xBuilder, rxFrame)) { } ListsPropertyPanel::~ListsPropertyPanel() { mxOutlineDispatcher.reset(); mxTBxOutline.reset(); mxNumBulletDispatcher.reset(); mxTBxNumBullet.reset(); } } // end of namespace svx::sidebar /* vim:set shiftwidth=4 softtabstop=4 expandtab: */
utf-8
1
MPL-2.0
Copyright 2000, 2010 Oracle and/or its affiliates. Copyright (c) 2000, 2010 LibreOffice contributors and/or their affiliates.
soapdenovo2-242+dfsg/standardPregraph/inc/def.h
/* * inc/def.h * * Copyright (c) 2008-2016 Ruibang Luo <aquaskyline.com>. * * This file is part of SOAPdenovo. * * SOAPdenovo is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * SOAPdenovo is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with SOAPdenovo. If not, see <http://www.gnu.org/licenses/>. * */ /* This file provides the core datatype definitions. */ #ifndef _DEF #define _DEF #include "def2.h" #include "types.h" #include "stack.h" #include "darray.h" #include "sam.h" //support the samfile_t struct #define EDGE_BIT_SIZE 6 #define word_len 12 #define taskMask 0xf //the last 4 bits #define MaxEdgeCov 16000 #define base2int(base) (char)(((base)&0x06)>>1) //base ACTG => int 0123 #define int2base(seq) "ACTG"[seq] //int 0123 => base ACTG #define int2compbase(seq) "TGAC"[seq] //int 0123 => base TGAC complement of ACTG #define int_comp(seq) (char)(seq^0x02) //(char)((0x4E>>((seq)<<1))&0x03) extern int b_ban; #ifdef MER127 typedef struct kmer { unsigned long long high1, low1, high2, low2; } Kmer; #else typedef struct kmer { unsigned long long high, low; } Kmer; #endif typedef struct preedge { Kmer from_node; Kmer to_node; char *seq; int length; unsigned short cvg: 14; unsigned bal_edge: 2; //indicates whether its bal_edge is the previous edge, the next edge or itself } preEDGE; typedef struct readinterval //record two paths of bubble { int readid; unsigned int edgeid; int start; struct readinterval *bal_rv; struct readinterval *nextOnEdge; // the downstream in the path struct readinterval *prevOnEdge; // the upstream in the path struct readinterval *nextInRead; struct readinterval *prevInRead; } READINTERVAL; struct arc; typedef struct edge { unsigned int from_vt; //from kmer id unsigned int to_vt; //to kmer id int length; //edge length unsigned short cvg: 14; //coverage unsigned short bal_edge: 2; // 2:smaller 0:larger 1:rev-com equal to itself unsigned short multi: 14; unsigned short deleted : 1; unsigned short flag : 1; char *seq; //edge content READINTERVAL *rv; struct arc *arcs; long long *markers; //reads id } EDGE; typedef struct edge_sub { unsigned int from_vt; //from kmer id unsigned int to_vt; //to kmer id int length; //edge length char *seq; //edge content } EDGE_SUB; typedef struct edge_pt { EDGE *edge; struct edge_pt *next; } EDGE_PT; typedef struct vertex { Kmer kmer; } VERTEX; /* typedef struct connection { unsigned int contigID; int gapLen; short maxGap; unsigned char minGap; unsigned char bySmall:1; unsigned char weakPoint:1; unsigned char weightNotInherit; unsigned char weight; unsigned char maxSingleWeight; unsigned char mask : 1; unsigned char used : 1; unsigned char weak : 1; unsigned char deleted : 1; unsigned char prevInScaf : 1; unsigned char inherit : 1; unsigned char checking : 1; unsigned char singleInScaf : 1; struct connection *nextInScaf; struct connection *next; struct connection *nextInLookupTable; }CONNECT; */ typedef struct connection { unsigned int contigID; int gapLen; unsigned short maxGap; unsigned char minGap; unsigned char bySmall: 1; unsigned char weakPoint: 1; unsigned char smallIns: 1; unsigned char newIns: 1; unsigned char
weightNotInherit; unsigned char weight; unsigned char maxSingleWeight; unsigned char mask : 1; unsigned char used : 1; unsigned char weak : 1; unsigned char deleted : 1; unsigned char prevInScaf : 1; unsigned char inherit : 1; unsigned char checking : 1; unsigned char singleInScaf : 1; struct connection *nextInScaf; struct connection *next; struct connection *nextInLookupTable; } CONNECT; typedef struct prearc { unsigned int to_ed; // the destination edge of prearc unsigned int multiplicity; struct prearc *next; } preARC; /* typedef struct contig { unsigned int from_vt; unsigned int to_vt; unsigned int length; int to_right; unsigned short indexInScaf; unsigned char cvg; unsigned char bal_edge:2; // 0, 1 or 2 unsigned char mask : 1; unsigned char flag : 1; unsigned char multi: 1; unsigned char inSubGraph: 1; char *seq; CONNECT *downwardConnect; preARC *arcs; STACK *closeReads; }CONTIG; */ typedef struct contig { unsigned int from_vt; // the first kmer of the contig unsigned int to_vt; // the last kmer of the contig unsigned int length; unsigned short indexInScaf; // the index in the scaffold unsigned char cvg; unsigned char bal_edge: 2; // 0, 1 or 2 unsigned char mask : 1; unsigned char flag : 1; unsigned char multi: 1; unsigned char inSubGraph: 1; unsigned char bubbleInScaff: 1; char *seq; CONNECT *downwardConnect; // record the links to other contigs preARC *arcs; STACK *closeReads; } CONTIG; typedef struct read_nearby { int len; int dis; // dis to nearby contig or scaffold's start position long long seqStarter; //sequence start position in dynamic array } READNEARBY; typedef struct annotation { unsigned long long readID; unsigned int contigID; int pos; } ANNOTATION; typedef struct parameter { unsigned char threadID; void **hash_table; unsigned char *mainSignal; unsigned char *selfSignal; } PARAMETER; typedef struct lightannot { int contigID; int pos; } LIGHTANNOT; typedef struct edgepatch { Kmer from_kmer, to_kmer; unsigned int length; char bal_edge; } EDGEPATCH; typedef struct lightctg { unsigned int index; int length; char *seq; } LIGHTCTG; typedef struct arc { unsigned int to_ed; unsigned int multiplicity; struct arc *prev; struct arc *next; struct arc *bal_arc; struct arc *nextInLookupTable; } ARC; typedef struct arcexist { Kmer kmer; struct arcexist *left; struct arcexist *right; } ARCEXIST; typedef struct lib_info { int min_ins; int max_ins; int avg_ins; int rd_len_cutoff; //read length cutoff int reverse; int asm_flag; int map_len; int pair_num_cut; int rank; //indicate which file is next to be read int curr_type; int curr_index; //file handlers to opened files FILE *fp1; FILE *fp2; boolean f1_start; boolean f2_start; //whether last read is read1 in pair int paired; // 0 -- single; 1 -- read1; 2 -- read2; //type1 char **a1_fname; char **a2_fname; int num_a1_file; int num_a2_file; //type2 char **q1_fname; char **q2_fname; int num_q1_file; int num_q2_file; //type3 char **p_fname; int num_p_file; //fasta only //type4 &5 char **s_a_fname; int num_s_a_file; char **s_q_fname; int num_s_q_file; samfile_t *fp3; //the file handle to read bam file char **b_fname; //the name of the bam file int num_b_file; //the number of the bam file } LIB_INFO; typedef struct ctg4heap { unsigned int ctgID; int dis; unsigned char ds_shut4dheap: 1; // ignore downstream connections unsigned char us_shut4dheap: 1; // ignore upstream connections unsigned char ds_shut4uheap: 1; // ignore downstream connections unsigned char us_shut4uheap: 1; // ignore upstream connections } CTGinHEAP; typedef struct ctg4scaf { 
unsigned int ctgID; int start; int end; //position in scaff unsigned int cutHead : 8; unsigned int cutTail : 7; unsigned int scaftig_start : 1; //is it a scaftig starter unsigned int mask : 1; // is it masked for further operations unsigned int gapSeqLen: 15; int gapSeqOffset; } CTGinSCAF; typedef struct pe_info { int insertS; long long PE_bound; int rank; int pair_num_cut; } PE_INFO; #endif
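/*
 * Illustrative check of the base-encoding macros above -- not part of the
 * original header. base2int() relies on the ASCII codes A=0x41, C=0x43,
 * T=0x54, G=0x47 having distinct values in bits 1-2, so ((base)&0x06)>>1
 * maps A,C,T,G to 0,1,2,3 (for lowercase letters too), and int_comp()
 * (seq^0x02) swaps A<->T and C<->G, i.e. complements a base. The function
 * name check_base_macros is hypothetical.
 */
#include <assert.h>

static void check_base_macros(void)
{
	assert(base2int('A') == 0 && base2int('C') == 1);
	assert(base2int('T') == 2 && base2int('G') == 3);
	assert(base2int('a') == base2int('A'));		/* case-insensitive */
	assert(int2base(base2int('C')) == 'C');
	assert(int_comp(base2int('A')) == base2int('T'));
	assert(int_comp(base2int('C')) == base2int('G'));
}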
utf-8
1
GPL-3+
2008-2017 BGI-Shenzhen
oss4-4.2-build2017/attic/drv/oss_allegro/id.h
/****************************************************************************** * * * (C) 1998-1998 ESS Technology, Inc. * * * * This source code, its compiled object code, and its associated data sets * * are copyright (C) 1998-1998 ESS Technology, Inc. This source code and its * * associated data sets are trade secrets of ESS Technology, Inc. * * * ******************************************************************************/ /*--------------------------------------------------------------------------- * Copyright (C) 1998-1998, ESS Technology, Inc. *--------------------------------------------------------------------------- * FILENAME: id.h *--------------------------------------------------------------------------- * DESCRIPTION: Header file containing Allegro device and revision IDs *--------------------------------------------------------------------------- * AUTHOR: Henry Tang *--------------------------------------------------------------------------- * HISTORY: * 04/22/98 HT Created. *--------------------------------------------------------------------------- */ // // Device IDs // #define DEVICE_ID_1968 0x1968 #define DEVICE_ID_1978 0x1978 // // Revision IDs // #define REVISION_ID_1968 0x00 #define REVISION_ID_1978_REV_A 0x00 #define REVISION_ID_1978_REV_B 0x10 //--------------------------------------------------------------------------- // End of File: id.h //--------------------------------------------------------------------------- /****************************************************************************** * * * (C) 1998-1998 ESS Technology, Inc. * * * ******************************************************************************/
utf-8
1
unknown
unknown
ardour-6.9.0+ds0/libs/ardour/ardour/ladspa_plugin.h
/* * Copyright (C) 2006-2014 David Robillard <d@drobilla.net> * Copyright (C) 2007-2017 Paul Davis <paul@linuxaudiosystems.com> * Copyright (C) 2007 Sampo Savolainen <v2@iki.fi> * Copyright (C) 2009-2012 Carl Hetherington <carl@carlh.net> * Copyright (C) 2014-2019 Robin Gareus <robin@gareus.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef __ardour_ladspa_plugin_h__ #define __ardour_ladspa_plugin_h__ #include <set> #include <vector> #include <string> #include <glibmm/module.h> #include "pbd/stateful.h" #include "ardour/ladspa.h" #include "ardour/plugin.h" namespace ARDOUR { class AudioEngine; class Session; class LIBARDOUR_API LadspaPlugin : public ARDOUR::Plugin { public: LadspaPlugin (std::string module_path, ARDOUR::AudioEngine&, ARDOUR::Session&, uint32_t index, samplecnt_t sample_rate); LadspaPlugin (const LadspaPlugin &); ~LadspaPlugin (); /* Plugin interface */ std::string unique_id() const; const char* label() const { return _descriptor->Label; } const char* name() const { return _descriptor->Name; } const char* maker() const { return _descriptor->Maker; } uint32_t parameter_count() const { return _descriptor->PortCount; } float default_value (uint32_t port) { return _default_value (port); } void set_parameter (uint32_t port, float val, sampleoffset_t); float get_parameter (uint32_t port) const; int get_parameter_descriptor (uint32_t which, ParameterDescriptor&) const; uint32_t nth_parameter (uint32_t port, bool& ok) const; std::set<Evoral::Parameter> automatable() const; void activate () { if (!_was_activated && _descriptor->activate) _descriptor->activate (_handle); _was_activated = true; } void deactivate () { if (_was_activated && _descriptor->deactivate) _descriptor->deactivate (_handle); _was_activated = false; } void cleanup () { activate(); deactivate(); if (_descriptor->cleanup) _descriptor->cleanup (_handle); } int set_block_size (pframes_t /*nframes*/) { return 0; } int connect_and_run (BufferSet& bufs, samplepos_t start, samplepos_t end, double speed, ChanMapping const& in, ChanMapping const& out, pframes_t nframes, samplecnt_t offset); std::string describe_parameter (Evoral::Parameter); std::string state_node_name() const { return "ladspa"; } bool parameter_is_audio(uint32_t) const; bool parameter_is_control(uint32_t) const; bool parameter_is_input(uint32_t) const; bool parameter_is_output(uint32_t) const; bool parameter_is_toggled(uint32_t) const; boost::shared_ptr<ScalePoints> get_scale_points(uint32_t port_index) const; int set_state (const XMLNode&, int version); bool load_preset (PresetRecord); bool has_editor() const { return false; } /* LADSPA extras */ LADSPA_Properties properties() const { return _descriptor->Properties; } uint32_t index() const { return _index; } const char * copyright() const { return _descriptor->Copyright; } LADSPA_PortDescriptor port_descriptor(uint32_t i) const; const 
LADSPA_PortRangeHint* port_range_hints() const { return _descriptor->PortRangeHints; } const char * const * port_names() const { return _descriptor->PortNames; } void set_gain (float gain) { _descriptor->set_run_adding_gain (_handle, gain); } void run_adding (uint32_t nsamples) { _descriptor->run_adding (_handle, nsamples); } void connect_port (uint32_t port, float *ptr) { _descriptor->connect_port (_handle, port, ptr); } private: float _default_value (uint32_t port) const; std::string _module_path; Glib::Module* _module; const LADSPA_Descriptor* _descriptor; LADSPA_Handle _handle; samplecnt_t _sample_rate; LADSPA_Data* _control_data; LADSPA_Data* _shadow_data; LADSPA_Data* _latency_control_port; uint32_t _index; bool _was_activated; samplecnt_t plugin_latency() const; void find_presets (); void init (std::string module_path, uint32_t index, samplecnt_t rate); void run_in_place (pframes_t nsamples); void latency_compute_run (); int set_state_2X (const XMLNode&, int version); std::string do_save_preset (std::string name); void do_remove_preset (std::string name); std::string preset_envvar () const; std::string preset_source (std::string) const; bool write_preset_file (std::string); void add_state (XMLNode *) const; }; class LIBARDOUR_API LadspaPluginInfo : public PluginInfo { public: LadspaPluginInfo (); ~LadspaPluginInfo () { }; bool is_instrument () const { return false; } /* ladspa's are never instruments */ #ifdef MIXBUS /* for mixbus, relegate ladspa's to the Utils folder. */ bool is_effect () const { return false; } bool is_utility () const { return true; } #endif PluginPtr load (Session& session); std::vector<Plugin::PresetRecord> get_presets (bool user_only) const; }; typedef boost::shared_ptr<LadspaPluginInfo> LadspaPluginInfoPtr; } // namespace ARDOUR #endif /* __ardour_ladspa_plugin_h__ */
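// Illustrative sketch -- not part of the original header. It shows the bare
// LADSPA C API sequence that LadspaPlugin wraps (instantiate, connect_port,
// activate, run, deactivate, cleanup). The plugin path, descriptor index and
// port layout below are hypothetical placeholders; a real host must inspect
// PortCount/PortDescriptors before connecting anything.
#include <dlfcn.h>
#include <ladspa.h>

void ladspa_lifecycle_sketch ()
{
	void* lib = dlopen ("/usr/lib/ladspa/example.so", RTLD_NOW);
	if (!lib) { return; }

	typedef const LADSPA_Descriptor* (*DescriptorFn)(unsigned long);
	DescriptorFn fn = (DescriptorFn) dlsym (lib, "ladspa_descriptor");
	const LADSPA_Descriptor* d = fn ? fn (0) : 0; // 0 plays the role of _index
	if (!d) { dlclose (lib); return; }

	LADSPA_Handle h = d->instantiate (d, 48000); // cf. _sample_rate
	if (!h) { dlclose (lib); return; }

	LADSPA_Data in[64] = {0}, out[64] = {0};
	d->connect_port (h, 0, in);  // assumes port 0 = audio input
	d->connect_port (h, 1, out); // assumes port 1 = audio output

	if (d->activate) { d->activate (h); } // optional hooks, hence the guards
	d->run (h, 64);                       // process one 64-sample block
	if (d->deactivate) { d->deactivate (h); }
	if (d->cleanup) { d->cleanup (h); }
	dlclose (lib);
}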
utf-8
1
GPL-2+
1998-2021 Paul Davis 2004-2021 Robin Gareus <robin@gareus.org> 2009-2016 David Robillard <d@drobilla.net> 2010-2012 Carl Hetherington <carl@carlh.net> 2007-2017 Tim Mayberry <mojofunk@gmail.com> 2006-2017 Nick Mainsbridge <mainsbridge@gmail.com> 2012-2019 Ben Loftis <ben@harrisonconsoles.com> 2005-2009 Taybin Rutkin <taybin@taybin.com> 2008-2017 John Emmas <john@creativepost.co.uk> 2006-2020 Ardour Developers and others
therion-6.0.4/thdb2dpt.cxx
/** * @file thdb2dpt.cxx */ /* Copyright (C) 2000 Stacho Mudrak * * $Date: $ * $RCSfile: $ * $Revision: $ * * -------------------------------------------------------------------- * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * -------------------------------------------------------------------- */ #include "thdb2dpt.h" #include "thexpmap.h" thdb2dpt::thdb2dpt() { this->x = 0.0; this->y = 0.0; this->xt = 0.0; this->yt = 0.0; this->zt = 0.0; this->at = 0.0; this->dbgx0 = 0.0; this->dbgy0 = 0.0; this->dbgx1 = 0.0; this->dbgy1 = 0.0; this->pscrap = NULL; this->join_item = NULL; } void thdb2dpt::export_mp(class thexpmapmpxs * out, int dbglevel) { switch (dbglevel) { case 1: fprintf(out->file,"(%.2f,%.2f)", thxmmxst(out, this->dbgx1, this->dbgy1)); break; case 0: fprintf(out->file,"(%.2f,%.2f)", thxmmxst(out, this->dbgx0, this->dbgy0)); break; default: fprintf(out->file,"(%.2f,%.2f)", thxmmxst(out, this->xt, this->yt)); } }
utf-8
1
GPL-2+
2000-2019 Martin Budaj <m.budaj@gmail.com> 2000-2019 Stacho Mudrák <s.m@group-s.sk>
gnubik-2.4.3/src/guile-hooks.h
/* Copyright (C) 2004, 2010 Dale Mellor This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef GUILE_HOOKS_H #define GUILE_HOOKS_H #include <gtk/gtk.h> /* The function that seeks out all scripts and makes them known to the UI manager. The scripts can call back into the C world to register themselves. The function must be called exactly once. */ void startup_guile_scripts (GtkUIManager * uim); #endif /* defined GUILE_HOOKS_H. */
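/* Illustrative call site -- not part of the original header; assumes an
   already-initialized GTK stack, and example_startup is a hypothetical
   name. startup_guile_scripts is wired in once, right after the
   GtkUIManager is created. */
#include <gtk/gtk.h>

static void
example_startup (void)
{
  GtkUIManager *uim = gtk_ui_manager_new ();
  startup_guile_scripts (uim); /* must happen exactly once */
}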
utf-8
1
GPL-3+
1998-2008 John Darrington <john@darrington.wattle.id.au> 2004 Dale Mellor <dale_mellor@users.sourceforge.net>
qt6-webengine-6.2.2+dfsg/src/3rdparty/chromium/third_party/tflite/src/tensorflow/core/framework/model.h
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_FRAMEWORK_MODEL_H_ #define TENSORFLOW_CORE_FRAMEWORK_MODEL_H_ #include <list> #include <memory> #include <string> // TODO(b/114492873): Move this include into core/platform. #include <thread> // NOLINT #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "tensorflow/core/framework/metrics.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/lib/gtl/map_util.h" #include "tensorflow/core/lib/histogram/histogram.h" #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/platform/cpu_info.h" #include "tensorflow/core/platform/env.h" namespace tensorflow { namespace data { namespace model { // A constant that can be used to enable auto-tuning. constexpr int64 kAutotune = -1; constexpr char kParallelism[] = "parallelism"; constexpr char kBufferSize[] = "buffer_size"; // A key used to identify input time gradient. constexpr char kInputTimeKey[] = "input_time"; enum class AutotuneAlgorithm { HILL_CLIMB = 0, GRADIENT_DESCENT = 1, }; enum class TraversalOrder { BFS = 0, REVERSE_BFS = 1, }; // Represents thread-safe state that can be shared between an input pipeline and // the performance model. struct SharedState { public: SharedState(int64 value, std::shared_ptr<mutex> mu, std::shared_ptr<condition_variable> cond_var) : value(value), mu(std::move(mu)), cond_var(std::move(cond_var)), tunable(value == kAutotune) {} double value; const std::shared_ptr<mutex> mu; const std::shared_ptr<condition_variable> cond_var; const bool tunable; }; // Represents a parameter. struct Parameter { Parameter(const string& name, std::shared_ptr<SharedState> state, double min, double max) : name(name), value(state->value), min(min), max(max), state(std::move(state)) {} // Human-readable name of the parameter. const string name; // Identifies the model value of the parameter. This can be different from // the actual value (e.g. during optimization search). double value; // Identifies the minimum value of the parameter. const double min; // Identifies the maximum value of the parameter. const double max; // Shared state of the parameter. std::shared_ptr<SharedState> state; }; std::shared_ptr<Parameter> MakeParameter(const string& name, std::shared_ptr<SharedState> state, double min, double max); // Abstract representation of a TensorFlow input pipeline node. It collects // information about inputs to this node, processing time spent executing the // node logic, number of elements produced by the node, various other // information (e.g. batch size or execution parallelism). // // Developers of tf.data transformations are not expected to interact with // this class directly. 
Boiler plate code for creating the abstract // representation of the input pipeline and collecting common information has // been added to the implementation of `DatasetBase` and `DatasetBaseIterator` // respectively. // // In addition, `DatasetBaseIterator` provides wrappers that can be used for // transformation-specific information collection. The `SetMetadata` wrapper // can be used to pass arbitrary metadata to the modeling framework, while the // `StartWork` and `StopWork` wrappers should be used to correctly account for // processing time of multi-threaded transformation that yield the CPU; such // transformations should invoke `StartWork()` when a transformation thread // starts executing (e.g. when created or woken up) and `StopWork()` when a // transformation thread stops executing (e.g. when returning or waiting). class Node { public: // Arguments for `Node` constructor. struct Args { int64 id; string name; std::shared_ptr<Node> output; }; using Factory = std::function<std::shared_ptr<Node>(Args)>; using NodeVector = std::vector<std::shared_ptr<Node>>; using NodePairList = std::list<std::pair<std::shared_ptr<Node>, std::shared_ptr<Node>>>; explicit Node(Args args) : id_(args.id), name_(std::move(args.name)), autotune_(true), buffered_bytes_(0), buffered_elements_(0), bytes_consumed_(0), bytes_produced_(0), num_elements_(0), processing_time_(0), record_metrics_(true), metrics_(name_), output_(args.output.get()) {} virtual ~Node() { // Clear the sub-nodes instead of relying on implicit shared pointer // destructor to avoid potential stack overflow when the tree is deep. std::deque<std::shared_ptr<Node>> queue; { mutex_lock l(mu_); while (inputs_.size() > 0) { queue.push_back(inputs_.front()); inputs_.pop_front(); } } while (!queue.empty()) { auto node = queue.back(); queue.pop_back(); { mutex_lock l(node->mu_); while (node->inputs_.size() > 0) { queue.push_back(node->inputs_.front()); node->inputs_.pop_front(); } } } FlushMetrics(); } // Adds an input. void add_input(std::shared_ptr<Node> node) TF_LOCKS_EXCLUDED(mu_) { mutex_lock l(mu_); inputs_.push_back(node); } // Increments the aggregate processing time by the given delta. void add_processing_time(int64 delta) TF_LOCKS_EXCLUDED(mu_) { processing_time_ += delta; } // Returns an indication whether autotuning is enabled for this node. bool autotune() const TF_LOCKS_EXCLUDED(mu_) { return autotune_; } // Returns the number of bytes stored in this node's buffer. int64 buffered_bytes() const TF_LOCKS_EXCLUDED(mu_) { return buffered_bytes_; } // Returns the number of elements stored in this node's buffer. int64 buffered_elements() const TF_LOCKS_EXCLUDED(mu_) { return buffered_elements_; } // Returns the number of bytes consumed by the node. int64 bytes_consumed() const TF_LOCKS_EXCLUDED(mu_) { return bytes_consumed_; } // Returns the number of bytes produced by the node. int64 bytes_produced() const TF_LOCKS_EXCLUDED(mu_) { return bytes_produced_; } // Indicates whether the node has tunable parameters. bool has_tunable_parameters() const TF_LOCKS_EXCLUDED(mu_) { tf_shared_lock l(mu_); for (const auto& pair : parameters_) { if (pair.second->state->tunable) return true; } return false; } // Returns the unique node ID. int64 id() const TF_LOCKS_EXCLUDED(mu_) { return id_; } // Returns the node inputs. std::list<std::shared_ptr<Node>> inputs() const TF_LOCKS_EXCLUDED(mu_) { tf_shared_lock l(mu_); return inputs_; } // Returns a longer node name that is guaranteed to be unique. 
string long_name() const { return strings::StrCat(name_, "(id:", id_, ")"); } // Returns the node name. const string& name() const { return name_; } // Returns the number of elements produced by the node. int64 num_elements() const TF_LOCKS_EXCLUDED(mu_) { return num_elements_; } // Returns the node output. Node* output() const { return output_; } // Returns the aggregate processing time. int64 processing_time() const TF_LOCKS_EXCLUDED(mu_) { return processing_time_; } // Records that the node consumed the given number of bytes. void record_bytes_consumed(int64 num_bytes) { bytes_consumed_ += num_bytes; } // Records that the node produced the given number of bytes. void record_bytes_produced(int64 num_bytes) { bytes_produced_ += num_bytes; } // Records the change in this node's buffer. void record_buffer_event(int64 bytes_delta, int64 elements_delta) { buffered_bytes_ += bytes_delta; buffered_elements_ += elements_delta; } // Records that the node produced an element. void record_element() TF_LOCKS_EXCLUDED(mu_) { num_elements_++; } // Records that a node thread has started executing. void record_start(int64 time_nanos) TF_LOCKS_EXCLUDED(mu_) { DCHECK_EQ(work_start_, 0); work_start_ = time_nanos; } // Records that a node thread has stopped executing. void record_stop(int64 time_nanos) TF_LOCKS_EXCLUDED(mu_) { // TODO(jsimsa): Use DCHECK_NE(work_start_, 0) here. if (work_start_ != 0) { processing_time_ += time_nanos - work_start_; work_start_ = 0; } else { VLOG(1) << "Encountered a stop event without a matching start event."; } } // Removes an input. void remove_input(std::shared_ptr<Node> input) TF_LOCKS_EXCLUDED(mu_) { mutex_lock l(mu_); inputs_.remove(input); } // Sets the value that determines whether autotuning is enabled for this node. void set_autotune(bool autotune) TF_LOCKS_EXCLUDED(mu_) { autotune_.store(autotune); } // Given the average time between output events (`output_time`), the average // time between input events (`input_time`) and the buffer size, the method // computes the expected time an input event will have to wait. // // The wait time is approximated as the product of the probability the buffer // will be empty and the time it takes to produce an element into the buffer. // // The formula used for computing the probability is derived by modeling the // problem as an M/M/1/K queue // (https://en.wikipedia.org/wiki/Birth%E2%80%93death_process#M/M/1/K_queue). // // Collects derivatives of `ComputeWaitTime` w.r.t `output_time`, `input_time' // and `buffer_size` if the corresponding pointers are not `nullptr`. static double ComputeWaitTime(const double& output_time, const double& input_time, const double& buffer_size, double* output_time_derivative, double* input_time_derivative, double* buffer_size_derivative); // Collects tunable parameters in the subtree rooted in this node. void CollectTunableParameters( absl::flat_hash_map<string, std::shared_ptr<Parameter>>* parameters) const TF_LOCKS_EXCLUDED(mu_); // Returns a human-readable representation of this node. string DebugString() const TF_LOCKS_EXCLUDED(mu_); // Flushes the metrics recorded by this node. void FlushMetrics() TF_LOCKS_EXCLUDED(mu_); // Returns the per-element output time for this node and if `gradients` is not // `nullptr`, collects the output time gradient w.r.t. tunable parameters of // the subtree rooted in this node. 
double OutputTime(absl::flat_hash_map<string, double>* input_times, absl::flat_hash_map<string, double>* gradients) const TF_LOCKS_EXCLUDED(mu_); // Returns a copy of this node, making a deep copy of its inputs and a // shallow copy of its tunable parameters. // // The purpose for this method is to allow the model optimization logic to // operate over immutable state while allowing concurrent model updates. std::shared_ptr<Node> Snapshot(std::shared_ptr<Node> output) const TF_LOCKS_EXCLUDED(mu_); // Returns the per-element processing time spent in this node. double SelfProcessingTime() const TF_LOCKS_EXCLUDED(mu_); // Returns the total number of bytes buffered in all nodes in the subtree for // which autotuning is enabled. double TotalBufferedBytes() const TF_LOCKS_EXCLUDED(mu_); // Collects the total buffer limit of all nodes in the subtree for which // autotuning is enabled. This number represents the amount of memory that // would be used by the subtree nodes if all of their buffers were full. double TotalMaximumBufferedBytes() const TF_LOCKS_EXCLUDED(mu_); // Returns the per-element CPU time spent in the subtree rooted in this node. // If `processing_times` is not `nullptr`, collects the per-element CPU time // spent in each node of the subtree. double TotalProcessingTime( absl::flat_hash_map<string, double>* processing_times) TF_LOCKS_EXCLUDED(mu_); protected: // Used for (incrementally) recording metrics. The class is thread-safe. class Metrics { public: explicit Metrics(const string& name) : bytes_consumed_counter_(metrics::GetTFDataBytesConsumedCounter(name)), bytes_produced_counter_(metrics::GetTFDataBytesProducedCounter(name)), num_elements_counter_(metrics::GetTFDataElementsCounter(name)), recorded_bytes_consumed_(0), recorded_bytes_produced_(0), recorded_num_elements_(0) {} // Expects the total number of bytes consumed and records the delta since // last invocation. void record_bytes_consumed(int64 total_bytes) { int64 delta = total_bytes - recorded_bytes_consumed_.exchange(total_bytes); bytes_consumed_counter_->IncrementBy(delta); } // Expects the total number of bytes produced and records the delta since // last invocation. void record_bytes_produced(int64 total_bytes) { int64 delta = total_bytes - recorded_bytes_produced_.exchange(total_bytes); bytes_produced_counter_->IncrementBy(delta); } // Expects the total number of elements produced and records the delta since // last invocation. void record_num_elements(int64 total_elements) { int64 delta = total_elements - recorded_num_elements_.exchange(total_elements); num_elements_counter_->IncrementBy(delta); } private: monitoring::CounterCell* const bytes_consumed_counter_; monitoring::CounterCell* const bytes_produced_counter_; monitoring::CounterCell* const num_elements_counter_; std::atomic<int64> recorded_bytes_consumed_; std::atomic<int64> recorded_bytes_produced_; std::atomic<int64> recorded_num_elements_; }; // Returns the number of inputs. int64 num_inputs() const TF_SHARED_LOCKS_REQUIRED(mu_) { int64 num_inputs = 0; for (auto& input : inputs_) { // Inputs for which autotuning is disabled are excluded. if (input->autotune()) { ++num_inputs; } } return num_inputs; } // Creates a clone of this node. virtual std::shared_ptr<Node> Clone(std::shared_ptr<Node> output) const TF_SHARED_LOCKS_REQUIRED(mu_) = 0; // Returns the average size of an element buffered in this node. 
double AverageBufferedElementSize() const TF_SHARED_LOCKS_REQUIRED(mu_); // Returns the sum of per-element output time for the tunable inputs of this // node. double OutputTimeForInputs( const absl::flat_hash_map<string, double>& output_times) const TF_SHARED_LOCKS_REQUIRED(mu_); // Returns the sum of output time gradient w.r.t. input time for the tunable // inputs of this node. double OutputTimeGradientsForInputs( const absl::flat_hash_map<string, double>& output_time_gradients) const TF_SHARED_LOCKS_REQUIRED(mu_); // Computes the input time for this node and stores it in `input_times`. virtual void InputTimeLocked(absl::flat_hash_map<string, double>* input_times) const TF_SHARED_LOCKS_REQUIRED(mu_) = 0; // Computes the per-element output time for this node and stores it in // `output_times`. If `gradients` is not `nullptr`, computes the output time // gradient w.r.t. tunable parameters of the subtree rooted in this node and // stores it in `gradients`, also computes the output time gradient w.r.t. // input time and stores it in `output_time_gradients`. virtual void OutputTimeLocked( const absl::flat_hash_map<string, double>& input_times, absl::flat_hash_map<string, double>* gradients, absl::flat_hash_map<string, double>* output_times, absl::flat_hash_map<string, double>* output_time_gradients) const TF_SHARED_LOCKS_REQUIRED(mu_) = 0; // Returns the sum of per-element processing time for the inputs of this node // by adding values for input nodes in `total_processing_times`. Processing // time for a given input is a weighted combination of a statistic based on // history of input processing time and the actual time. This is done to // improve accuracy of processing time estimation for newly created inputs. // // Uniform distribution of per-element processing times across different // inputs is assumed. double TotalProcessingTimeForInputs( const absl::flat_hash_map<string, double>& total_processing_times) TF_SHARED_LOCKS_REQUIRED(mu_); // Returns the per-element processing time spent in this node. double SelfProcessingTimeLocked() const TF_SHARED_LOCKS_REQUIRED(mu_); // Computes the per-element CPU time spent in the subtree rooted in this node // and stores it in `total_processing_times`. If `processing_times` is not // `nullptr`, collects the per-element CPU time spent in each node of the // subtree. virtual void TotalProcessingTimeLocked( absl::flat_hash_map<string, double>* processing_times, absl::flat_hash_map<string, double>* total_processing_times) TF_SHARED_LOCKS_REQUIRED(mu_) = 0; // Returns a vector of nodes of the subtree rooted in this node. The nodes are // either in breadth-first search or reverse breadth-first search order // depending on the `order` argument. The root node itself is not collected. NodeVector CollectNodes(TraversalOrder order) const TF_SHARED_LOCKS_REQUIRED(mu_); // Collect tunable parameters for the node. void CollectTunableParametersHelper( absl::flat_hash_map<string, std::shared_ptr<Parameter>>* parameters) const TF_SHARED_LOCKS_REQUIRED(mu_); // Build up debug string for the node and store in the debug strings map. void DebugStringHelper(absl::flat_hash_map<string, string>* debug_strings) const TF_SHARED_LOCKS_REQUIRED(mu_); // Copy the node and add the (input, copy) pairs to the NodePairList. std::shared_ptr<Node> SnapshotHelper(std::shared_ptr<Node> clone_base, NodePairList* node_pairs) const; // Compute total buffered bytes for the node and store in the total bytes map. 
void TotalBufferedBytesHelper( absl::flat_hash_map<string, double>* total_bytes) const TF_SHARED_LOCKS_REQUIRED(mu_); // Compute total maximum buffered bytes for the node and store in the total // bytes map. void TotalMaximumBufferedBytesHelper( absl::flat_hash_map<string, double>* total_bytes) const TF_SHARED_LOCKS_REQUIRED(mu_); // Stores the time passed to the last call to `Node::record_start()` on the // current thread. // // NOTE: This thread-local variable is shared between all instances of `Node` // on which the same thread calls `record_start()` or `record_stop()`. It // relies on the invariant that at most one `Node` can be "active" on a // particular thread at any time. Therefore if `n->record_start()` is called // on thread `t`, then `n->record_stop()` must be called before another call // to `Node::record_start()` (for any node). static thread_local int64 work_start_; // Will be initialized to zero. mutable mutex mu_; const int64 id_; const string name_; // Indicates whether the subtree rooted in this node should be included in // autotuning. In particular, if this is `false`, then the subtree is excluded // from computation of output time and processing time. std::atomic<bool> autotune_; std::atomic<int64> buffered_bytes_; std::atomic<int64> buffered_elements_; std::atomic<int64> bytes_consumed_; std::atomic<int64> bytes_produced_; std::atomic<int64> num_elements_; std::atomic<int64> processing_time_; std::atomic<bool> record_metrics_; Metrics metrics_; absl::flat_hash_map<string, std::shared_ptr<Parameter>> parameters_ TF_GUARDED_BY(mu_); // Statistic of inputs processing time history. double input_processing_time_sum_ = 0.0L; int64 input_processing_time_count_ = 0; // Inputs of this node. These can represent an iterator created from the input // dataset but also other input iterators (e.g. created by the user-defined // functions of `flat_map` or `interleave`). std::list<std::shared_ptr<Node>> inputs_ TF_GUARDED_BY(mu_); // The reference to the output node is not owned so that deletion of a // node results in recursive deletion of the subtree rooted in the node. Node* const output_; }; // InterleaveMany is used to model datasets whose inputs are used to create // datasets whose elements are then interleaved. std::shared_ptr<Node> MakeInterleaveManyNode(Node::Args args); // AsyncInterleaveMany nodes are the asynchronous version of InterleaveMany // nodes. std::shared_ptr<Node> MakeAsyncInterleaveManyNode( Node::Args args, std::vector<std::shared_ptr<Parameter>> parameters); // KnownMany nodes model datasets that synchronously consume known number of // input element per output element. std::shared_ptr<Node> MakeKnownRatioNode(Node::Args args, double ratio); // AsyncKnownRatio nodes are the asynchronous version of KnownRate nodes. std::shared_ptr<Node> MakeAsyncKnownRatioNode( Node::Args args, double ratio, std::vector<std::shared_ptr<Parameter>> parameters); // Source nodes represent data sources. std::shared_ptr<Node> MakeSourceNode(Node::Args args); // UnknownMany nodes represent datasets that synchronously consume an // unknown number of input elements per output. // // Unlike KnownRatio nodes which expect the ratio between inputs and outputs is // specified as a parameter, UnknownRatio estimates the ratio empirically. std::shared_ptr<Node> MakeUnknownRatioNode(Node::Args args); // Unknown nodes represent datasets for which we do not have a model. It acts // as pass-through between inputs and output. 
std::shared_ptr<Node> MakeUnknownNode(Node::Args args); // Abstract representation of a TensorFlow input pipeline that can be used // for collecting runtime information and optimizing performance. It collects // runtime information about execution of the input pipeline that is used to // create a performance model, which is in turn used to identify optimal values // of tunable parameters. // // Developers of tf.data transformations are not expected to interact with this // class directly. Boiler plate code for creating the abstract representation of // the input pipeline and collecting runtime information has been added to the // implementation of `DatasetBase` and `DatasetBaseIterator` respectively. class Model { public: // Creates a new model. Model() : collect_resource_usage_(false) {} // Indicates whether to collect resource usage. bool collect_resource_usage() const { return collect_resource_usage_; } // Adds a node with the given name and given parent. void AddNode(Node::Factory factory, const string& name, std::shared_ptr<Node> parent, std::shared_ptr<Node>* out_node) TF_LOCKS_EXCLUDED(mu_); // Flushes metrics record by the model. void FlushMetrics() TF_LOCKS_EXCLUDED(mu_); // Uses the given algorithm to perform the autotuning optimization. void Optimize(AutotuneAlgorithm algorithm, int64 cpu_budget, int64 ram_budget) TF_LOCKS_EXCLUDED(mu_); // Removes the given node. void RemoveNode(std::shared_ptr<Node> node) TF_LOCKS_EXCLUDED(mu_); private: // Collects tunable parameters in the tree rooted in the given node, returning // a mapping from a (unique) node name to a tunable parameter. absl::flat_hash_map<string, std::shared_ptr<Parameter>> CollectTunableParameters(std::shared_ptr<Node> node); // Collects "essential" parallelism parameters of transformations in the tree // rooted in the given node. Which parameters are essential is determined by // comparison the processing time spent in the corresponding transformation // relative to other transformations. The collected parameters are returned // as a mapping from a (unique) node name to a parallelism parameter. absl::flat_hash_map<string, std::shared_ptr<Parameter>> CollectEssentialParallelism(std::shared_ptr<Node> node); // This optimization algorithm starts by setting all tunable parallelism // parameters to the minimum value. It then repeatedly identifies the // parameter whose increase in parallelism decreases the output time the most. // This process is repeated until all parameters reach their maximum values or // the projected output time is less than or equal to the processing time // needed to produce an element divided by CPU budget. void OptimizeHillClimb(int64 cpu_budget, int64 ram_budget); // This optimization algorithm starts by setting all tunable parallelism // parameters to the minimum value. It then improves current parameters by // making a step in the direction opposite to the gradient of `OutputTime` and // projecting resulting values on the feasible intervals. Improvement step is // repeated until either the output time improvement is smaller than threshold // value or the output time is less than the processing time needed to produce // an element divided by CPU budget. void OptimizeGradientDescent(int64 cpu_budget, int64 ram_budget); // Collects the output time and if `gradients` is not `nullptr`, the output // time gradient w.r.t. tunable parameters of the subtree rooted in the given // node. 
double OutputTime(std::shared_ptr<Node> node, absl::flat_hash_map<string, double>* gradients); // Collects the processing time for the given node. double TotalProcessingTime(std::shared_ptr<Node> node); // Collects the total number of bytes buffered in all nodes in the subtree // rooted in the given node for which autotuning is enabled. double TotalBufferedBytes(std::shared_ptr<Node> node); // Collects the total buffer limit of all nodes in the subtree rooted in the // given node for which autotuning is enabled. This number represents the // amount of memory that would be used by the subtree nodes if all of their // buffers were full. double TotalMaximumBufferedBytes(std::shared_ptr<Node> node); // Used for coordination between different input pipeline threads. Exclusive // access is required only when adding or removing nodes. Concurrent access to // existing nodes is protected by a node mutex. mutex mu_; int64 id_counter_ TF_GUARDED_BY(mu_) = 1; std::shared_ptr<Node> output_ TF_GUARDED_BY(mu_); // Indicates whether the modeling framework should collect resource usage // (e.g. CPU, memory). The logic for collecting this information assumes that // the collection is not repeatedly disabled and enabled. As a consequence, // the implementation starts collecting resource usage when it encounters a // tunable parameter (because the information is used for for tuning the value // of the parameter) and never stops. std::atomic<bool> collect_resource_usage_; }; } // namespace model } // namespace data } // namespace tensorflow #endif // TENSORFLOW_CORE_FRAMEWORK_MODEL_H_
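// Illustrative sketch -- not part of the original header. It spells out the
// M/M/1/K empty-buffer probability that the `ComputeWaitTime` comment above
// refers to: the expected wait is approximated as P(buffer empty) times the
// time to produce one element. Treating `rho` as the ratio of production to
// consumption speed (here: input_time / output_time) is an assumption of
// this sketch; the production implementation and its derivative handling
// live in model.cc and may differ in detail.
#include <cmath>

double MM1KEmptyProbability(double rho, double buffer_size) {
  // Standard M/M/1/K result: p0 = (1 - rho) / (1 - rho^(K+1)),
  // degenerating to 1 / (K + 1) when rho == 1.
  if (rho == 1.0) return 1.0 / (buffer_size + 1.0);
  return (1.0 - rho) / (1.0 - std::pow(rho, buffer_size + 1.0));
}

double ApproximateWaitTime(double output_time, double input_time,
                           double buffer_size) {
  double rho = input_time / output_time;
  return MM1KEmptyProbability(rho, buffer_size) * output_time;
}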
utf-8
1
LGPL-3 or GPL-2
2006-2021 The Chromium Authors 2016-2021 The Qt Company Ltd.
db5.3-5.3.28+dfsg1/src/btree/bt_search.c
/*- * See the file LICENSE for redistribution information. * * Copyright (c) 1996, 2013 Oracle and/or its affiliates. All rights reserved. */ /* * Copyright (c) 1990, 1993, 1994, 1995, 1996 * Keith Bostic. All rights reserved. */ /* * Copyright (c) 1990, 1993, 1994, 1995 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Mike Olson. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id$ */ #include "db_config.h" #include "db_int.h" #include "dbinc/db_page.h" #include "dbinc/btree.h" #include "dbinc/lock.h" #include "dbinc/mp.h" /* * __bam_get_root -- * Fetch the root of a tree and see if we want to keep * it in the stack. * * PUBLIC: int __bam_get_root __P((DBC *, db_pgno_t, int, u_int32_t, int *)); */ int __bam_get_root(dbc, root_pgno, slevel, flags, stack) DBC *dbc; db_pgno_t root_pgno; int slevel; u_int32_t flags; int *stack; { BTREE_CURSOR *cp; DB *dbp; DB_LOCK lock; DB_MPOOLFILE *mpf; PAGE *h; db_lockmode_t lock_mode; u_int32_t get_mode; int ret, t_ret; COMPQUIET(h, NULL); LOCK_INIT(lock); dbp = dbc->dbp; mpf = dbp->mpf; cp = (BTREE_CURSOR *)dbc->internal; /* * If write-locking pages, we need to know whether or not to acquire a * write lock on a page before getting it. This depends on how deep it * is in tree, which we don't know until we acquire the root page. So, * if we need to lock the root page we may have to upgrade it later, * because we won't get the correct lock initially. * * Retrieve the root page. */ try_again: *stack = LF_ISSET(SR_STACK) && (dbc->dbtype == DB_RECNO || F_ISSET(cp, C_RECNUM)); lock_mode = DB_LOCK_READ; if (*stack || LF_ISSET(SR_DEL) || (LF_ISSET(SR_NEXT) && LF_ISSET(SR_WRITE))) lock_mode = DB_LOCK_WRITE; /* * Get the root. If the root happens to be a leaf page then * we are supposed to get a read lock on it before latching * it. So if we have not locked it do a try get first. * If we can't get the root shared, then get a lock on it and * then wait for the latch. 
*/ retry: if (lock_mode == DB_LOCK_WRITE) get_mode = DB_MPOOL_DIRTY; else if (LOCK_ISSET(lock) || !STD_LOCKING(dbc) || F_ISSET(dbc, DBC_DOWNREV) || dbc->dbtype == DB_RECNO || F_ISSET(cp, C_RECNUM)) get_mode = 0; else get_mode = DB_MPOOL_TRY; BAM_GET_ROOT(dbc, root_pgno, h, get_mode, lock_mode, lock, ret); if (ret == DB_LOCK_NOTGRANTED && get_mode == DB_MPOOL_TRY) { DB_ASSERT(dbp->env, !LOCK_ISSET(lock)); if ((ret = __db_lget(dbc, 0, root_pgno == PGNO_INVALID ? BAM_ROOT_PGNO(dbc) : root_pgno, lock_mode, 0, &lock)) != 0) return (ret); goto retry; } if (ret != 0) { /* Did not read it, so we can release the lock */ (void)__LPUT(dbc, lock); return (ret); } DB_ASSERT(dbp->env, TYPE(h) == P_IBTREE || TYPE(h) == P_IRECNO || TYPE(h) == P_LBTREE || TYPE(h) == P_LRECNO || TYPE(h) == P_LDUP); /* * Decide if we need to dirty and/or lock this page. * We must not hold the latch while we get the lock. */ if (!*stack && ((LF_ISSET(SR_PARENT) && (u_int8_t)(slevel + 1) >= LEVEL(h)) || LEVEL(h) == LEAFLEVEL || (LF_ISSET(SR_START) && slevel == LEVEL(h)))) { *stack = 1; /* If we already have the write lock, we are done. */ if (dbc->dbtype == DB_RECNO || F_ISSET(cp, C_RECNUM)) { if (lock_mode == DB_LOCK_WRITE) goto done; if ((ret = __LPUT(dbc, lock)) != 0) return (ret); } /* * Now that we know what level the root is at, do we need a * write lock? If not or we got the lock before latching * we are done. */ if (LEVEL(h) != LEAFLEVEL || LF_ISSET(SR_WRITE)) { lock_mode = DB_LOCK_WRITE; /* Drop the read lock if we got it above. */ if ((ret = __LPUT(dbc, lock)) != 0) return (ret); } else if (LOCK_ISSET(lock)) goto done; if (!STD_LOCKING(dbc)) { if (lock_mode != DB_LOCK_WRITE) goto done; if ((ret = __memp_dirty(mpf, &h, dbc->thread_info, dbc->txn, dbc->priority, 0)) != 0) { if (h != NULL) (void)__memp_fput(mpf, dbc->thread_info, h, dbc->priority); return (ret); } } else { /* Try to lock the page without waiting first. */ if ((ret = __db_lget(dbc, 0, root_pgno, lock_mode, DB_LOCK_NOWAIT, &lock)) == 0) { if (lock_mode == DB_LOCK_WRITE && (ret = __memp_dirty(mpf, &h, dbc->thread_info, dbc->txn, dbc->priority, 0)) != 0) { if (h != NULL) (void)__memp_fput(mpf, dbc->thread_info, h, dbc->priority); return (ret); } goto done; } t_ret = __memp_fput(mpf, dbc->thread_info, h, dbc->priority); h = NULL; if (ret == DB_LOCK_DEADLOCK || ret == DB_LOCK_NOTGRANTED) ret = 0; if (ret == 0) ret = t_ret; if (ret != 0) return (ret); get_mode = 0; if (lock_mode == DB_LOCK_WRITE) get_mode = DB_MPOOL_DIRTY; if ((ret = __db_lget(dbc, 0, root_pgno, lock_mode, 0, &lock)) != 0) return (ret); if ((ret = __memp_fget(mpf, &root_pgno, dbc->thread_info, dbc->txn, (atomic_read(&mpf->mfp->multiversion) == 0 && lock_mode == DB_LOCK_WRITE) ? DB_MPOOL_DIRTY : 0, &h)) != 0) { /* Did not read it, release the lock */ (void)__LPUT(dbc, lock); return (ret); } } /* * While getting dirty or locked we need to drop the mutex * so someone else could get in and split the root. */ if (!((LF_ISSET(SR_PARENT) && (u_int8_t)(slevel + 1) >= LEVEL(h)) || LEVEL(h) == LEAFLEVEL || (LF_ISSET(SR_START) && slevel == LEVEL(h)))) { /* Someone else split the root, start over. 
*/ ret = __memp_fput(mpf, dbc->thread_info, h, dbc->priority); h = NULL; if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) ret = t_ret; if (ret != 0) return (ret); goto try_again; } else if (atomic_read(&mpf->mfp->multiversion) != 0 && lock_mode == DB_LOCK_WRITE && (ret = __memp_dirty(mpf, &h, dbc->thread_info, dbc->txn, dbc->priority, 0)) != 0) { (void)__memp_fput(mpf, dbc->thread_info, h, dbc->priority); (void)__LPUT(dbc, lock); } } done: BT_STK_ENTER(dbp->env, cp, h, 0, lock, lock_mode, ret); return (ret); } /* * __bam_search -- * Search a btree for a key. * * PUBLIC: int __bam_search __P((DBC *, db_pgno_t, * PUBLIC: const DBT *, u_int32_t, int, db_recno_t *, int *)); */ int __bam_search(dbc, root_pgno, key, flags, slevel, recnop, exactp) DBC *dbc; db_pgno_t root_pgno; const DBT *key; u_int32_t flags; int slevel, *exactp; db_recno_t *recnop; { BTREE *t; BTREE_CURSOR *cp; DB *dbp; DB_LOCK lock, saved_lock; DB_MPOOLFILE *mpf; ENV *env; PAGE *h, *parent_h; db_indx_t base, i, indx, *inp, lim; db_lockmode_t lock_mode; db_pgno_t pg, saved_pg, start_pgno; db_recno_t recno; int adjust, cmp, deloffset, ret, set_stack, stack, t_ret; int getlock, was_next; int (*func) __P((DB *, const DBT *, const DBT *)); u_int32_t get_mode, wait; u_int8_t level, saved_level; if (F_ISSET(dbc, DBC_OPD)) LOCK_CHECK_OFF(dbc->thread_info); dbp = dbc->dbp; env = dbp->env; mpf = dbp->mpf; cp = (BTREE_CURSOR *)dbc->internal; h = NULL; parent_h = NULL; t = dbp->bt_internal; recno = 0; t_ret = 0; BT_STK_CLR(cp); LOCK_INIT(saved_lock); LOCK_INIT(lock); was_next = LF_ISSET(SR_NEXT); wait = DB_LOCK_NOWAIT; /* * There are several ways we search a btree tree. The flags argument * specifies if we're acquiring read or write latches, if we position * to the first or last item in a set of duplicates, if we return * deleted items, and if we are latching pairs of pages. In addition, * if we're modifying record numbers, we have to latch the entire tree * regardless. See btree.h for more details. */ start_pgno = saved_pg = root_pgno; saved_level = MAXBTREELEVEL; retry: if ((ret = __bam_get_root(dbc, start_pgno, slevel, flags, &stack)) != 0) goto err; lock_mode = cp->csp->lock_mode; get_mode = lock_mode == DB_LOCK_WRITE ? DB_MPOOL_DIRTY : 0; h = cp->csp->page; root_pgno = pg = PGNO(h); lock = cp->csp->lock; set_stack = stack; /* * Determine if we need to lock interior nodes. * If we have record numbers we always lock. Otherwise we only * need to do this if we are write locking and we are returning * a stack of nodes. SR_NEXT will eventually get a stack and * release the locks above that level. */ if (F_ISSET(dbc, DBC_DOWNREV)) { getlock = 1; wait = 0; } else getlock = F_ISSET(cp, C_RECNUM) || (lock_mode == DB_LOCK_WRITE && (stack || LF_ISSET(SR_NEXT | SR_DEL))); /* * If we are asked a level that is above the root, * just return the root. This can happen if the tree * collapses while we are trying to lock the root. */ if (!LF_ISSET(SR_START) && LEVEL(h) < slevel) goto done; BT_STK_CLR(cp); /* Choose a comparison function. */ func = F_ISSET(dbc, DBC_OPD) ? (dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare) : t->bt_compare; for (;;) { if (TYPE(h) == P_LBTREE) adjust = P_INDX; else { /* * It is possible to catch an internal page as a change * is being backed out. Its leaf pages will be locked * but we must be sure we get to one. If the page * is not populated enough lock it. 
*/ if (TYPE(h) != P_LDUP && NUM_ENT(h) == 0) { getlock = 1; level = LEVEL(h) + 1; if ((ret = __memp_fput(mpf, dbc->thread_info, h, dbc->priority)) != 0) goto err; goto lock_next; } adjust = O_INDX; } inp = P_INP(dbp, h); if (LF_ISSET(SR_MIN | SR_MAX)) { if (LF_ISSET(SR_MIN) || NUM_ENT(h) == 0) indx = 0; else if (TYPE(h) == P_LBTREE) indx = NUM_ENT(h) - 2; else indx = NUM_ENT(h) - 1; if (LEVEL(h) == LEAFLEVEL || (!LF_ISSET(SR_START) && LEVEL(h) == slevel)) { if (LF_ISSET(SR_NEXT)) goto get_next; goto found; } goto next; } /* * Do a binary search on the current page. If we're searching * a Btree leaf page, we have to walk the indices in groups of * two. If we're searching an internal page or a off-page dup * page, they're an index per page item. If we find an exact * match on a leaf page, we're done. */ DB_BINARY_SEARCH_FOR(base, lim, NUM_ENT(h), adjust) { DB_BINARY_SEARCH_INCR(indx, base, lim, adjust); if ((ret = __bam_cmp(dbc, key, h, indx, func, &cmp)) != 0) goto err; if (cmp == 0) { if (LEVEL(h) == LEAFLEVEL || (!LF_ISSET(SR_START) && LEVEL(h) == slevel)) { if (LF_ISSET(SR_NEXT)) goto get_next; goto found; } goto next; } if (cmp > 0) DB_BINARY_SEARCH_SHIFT_BASE(indx, base, lim, adjust); } /* * No match found. Base is the smallest index greater than * key and may be zero or a last + O_INDX index. * * If it's a leaf page or the stopping point, * return base as the "found" value. * Delete only deletes exact matches. */ if (LEVEL(h) == LEAFLEVEL || (!LF_ISSET(SR_START) && LEVEL(h) == slevel)) { *exactp = 0; if (LF_ISSET(SR_EXACT)) { ret = DB_NOTFOUND; goto err; } if (LF_ISSET(SR_STK_ONLY)) { BT_STK_NUM(env, cp, h, base, ret); if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) ret = t_ret; if ((t_ret = __memp_fput(mpf, dbc->thread_info, h, dbc->priority)) != 0 && ret == 0) ret = t_ret; h = NULL; if (ret != 0) goto err; goto done; } if (LF_ISSET(SR_NEXT)) { get_next: /* * The caller could have asked for a NEXT * at the root if the tree recently collapsed. */ if (PGNO(h) == root_pgno) { ret = DB_NOTFOUND; goto err; } indx = cp->sp->indx + 1; if (indx == NUM_ENT(cp->sp->page)) { ret = DB_NOTFOUND; cp->csp++; goto err; } /* * If we want both the key page and the next * page, push the key page on the stack * otherwise save the root of the subtree * and drop the rest of the subtree. * Search down again starting at the * next child of the root of this subtree. */ LF_SET(SR_MIN); LF_CLR(SR_NEXT); set_stack = stack = 1; if (LF_ISSET(SR_BOTH)) { cp->csp++; BT_STK_PUSH(env, cp, h, indx, lock, lock_mode, ret); if (ret != 0) goto err; LOCK_INIT(lock); h = cp->sp->page; pg = GET_BINTERNAL(dbp, h, indx)->pgno; level = LEVEL(h); h = NULL; goto lock_next; } else { if ((ret = __LPUT(dbc, lock)) != 0) goto err; if ((ret = __memp_fput(mpf, dbc->thread_info, h, dbc->priority)) != 0) goto err; h = cp->sp->page; cp->sp->page = NULL; lock = cp->sp->lock; LOCK_INIT(cp->sp->lock); if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0) goto err; goto next; } } /* * !!! * Possibly returning a deleted record -- DB_SET_RANGE, * DB_KEYFIRST and DB_KEYLAST don't require an exact * match, and we don't want to walk multiple pages here * to find an undeleted record. This is handled by the * calling routine. */ if (LF_ISSET(SR_DEL) && cp->csp == cp->sp) cp->csp++; BT_STK_ENTER(env, cp, h, base, lock, lock_mode, ret); if (ret != 0) goto err; goto done; } /* * If it's not a leaf page, record the internal page (which is * a parent page for the key). 
Decrement the base by 1 if it's * non-zero so that if a split later occurs, the inserted page * will be to the right of the saved page. */ indx = base > 0 ? base - O_INDX : base; /* * If we're trying to calculate the record number, sum up * all the record numbers on this page up to the indx point. */ next: if (recnop != NULL) for (i = 0; i < indx; ++i) recno += GET_BINTERNAL(dbp, h, i)->nrecs; pg = GET_BINTERNAL(dbp, h, indx)->pgno; level = LEVEL(h); /* See if we are at the level to start stacking. */ if (LF_ISSET(SR_START) && slevel == level) set_stack = stack = 1; if (LF_ISSET(SR_STK_ONLY)) { if (slevel == LEVEL(h)) { BT_STK_NUM(env, cp, h, indx, ret); if ((t_ret = __memp_fput(mpf, dbc->thread_info, h, dbc->priority)) != 0 && ret == 0) ret = t_ret; h = NULL; if (ret != 0) goto err; goto done; } BT_STK_NUMPUSH(env, cp, h, indx, ret); (void)__memp_fput(mpf, dbc->thread_info, h, dbc->priority); h = NULL; } else if (stack) { /* Return if this is the lowest page wanted. */ if (LF_ISSET(SR_PARENT) && slevel == level) { BT_STK_ENTER(env, cp, h, indx, lock, lock_mode, ret); if (ret != 0) goto err; goto done; } if (LF_ISSET(SR_DEL) && NUM_ENT(h) > 1) { /* * There was a page with a singleton pointer * to a non-empty subtree. */ cp->csp--; if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0) goto err; set_stack = stack = 0; goto do_del; } BT_STK_PUSH(env, cp, h, indx, lock, lock_mode, ret); if (ret != 0) goto err; LOCK_INIT(lock); get_mode = DB_MPOOL_DIRTY; lock_mode = DB_LOCK_WRITE; getlock = 1; goto lock_next; } else { /* * Decide if we want to return a reference to the next * page in the return stack. If so, latch it and don't * unlatch it. We will want to stack things on the * next iteration. The stack variable cannot be * set until we leave this clause. If we are locking * then we must lock this level before getting the page. */ if ((LF_ISSET(SR_PARENT) && (u_int8_t)(slevel + 1) >= (level - 1)) || (level - 1) == LEAFLEVEL) set_stack = 1; /* * Check for a normal search. If so, we need to * latch couple the parent/chid buffers. */ if (!LF_ISSET(SR_DEL | SR_NEXT)) { parent_h = h; goto lock_next; } /* * Returning a subtree. See if we have hit the start * point if so save the parent and set stack. * Otherwise free the parent and temporarily * save this one. * For SR_DEL we need to find a page with 1 entry. * For SR_NEXT we want find the minimal subtree * that contains the key and the next page. * We save pages as long as we are at the right * edge of the subtree. When we leave the right * edge, then drop the subtree. */ if ((LF_ISSET(SR_DEL) && NUM_ENT(h) == 1)) { /* * We are pushing the things on the stack, * set the stack variable now to indicate this * has happened. */ stack = set_stack = 1; LF_SET(SR_WRITE); /* Push the parent. */ cp->csp++; /* Push this node. */ BT_STK_PUSH(env, cp, h, indx, lock, DB_LOCK_NG, ret); if (ret != 0) goto err; LOCK_INIT(lock); } else { /* * See if we want to save the tree so far. * If we are looking for the next key, * then we must save this node if we are * at the end of the page. If not then * discard anything we have saved so far. * For delete only keep one node until * we find a singleton. */ do_del: if (cp->csp->page != NULL) { if (LF_ISSET(SR_NEXT) && indx == NUM_ENT(h) - 1) cp->csp++; else if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0) goto err; } /* Save this node. 
*/ BT_STK_ENTER(env, cp, h, indx, lock, lock_mode, ret); if (ret != 0) goto err; LOCK_INIT(lock); } lock_next: h = NULL; if (set_stack && LF_ISSET(SR_WRITE)) { lock_mode = DB_LOCK_WRITE; get_mode = DB_MPOOL_DIRTY; getlock = 1; } /* * If we are retrying and we are back at the same * page then we already have it locked. If we are * at a different page we want to lock couple and * release that lock. */ if (level - 1 == saved_level) { if ((ret = __LPUT(dbc, lock)) != 0) goto err; lock = saved_lock; LOCK_INIT(saved_lock); saved_level = MAXBTREELEVEL; if (pg == saved_pg) goto skip_lock; } if ((getlock || level - 1 == LEAFLEVEL) && (ret = __db_lget(dbc, LCK_COUPLE_ALWAYS, pg, lock_mode, wait, &lock)) != 0) { /* * If we are doing DEL or NEXT then we * have an extra level saved in the stack, * push it so it will get freed. */ if (LF_ISSET(SR_DEL | SR_NEXT) && !stack) cp->csp++; PERFMON6(env, race, bam_search, dbp->fname, dbp->dname, ret, h, parent_h, flags); /* * If we fail, discard the lock we held. * This is ok because we will either search * again or exit without actually looking * at the data. */ if ((t_ret = __LPUT(dbc, lock)) != 0) ret = t_ret; /* * If we blocked at a different level release * the previous saved lock. */ if ((t_ret = __LPUT(dbc, saved_lock)) != 0 && ret == 0) ret = t_ret; if (wait == 0 || (ret != DB_LOCK_NOTGRANTED && ret != DB_LOCK_DEADLOCK)) goto err; /* Release the parent if we are holding it. */ if (parent_h != NULL && (ret = __memp_fput(mpf, dbc->thread_info, parent_h, dbc->priority)) != 0) goto err; parent_h = NULL; BT_STK_POP(cp); if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0) goto err; if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &saved_lock)) != 0) goto err; /* * A very strange case: if this page was * freed while we wait then we cannot hold * the lock on it while we reget the root * latch because allocation is one place * we lock while holding a latch. * We want to hold the lock but must ensure * that the page is not free or cannot become * free. If we are at the LEAF level we can * hold on to the lock if the page is still * of the right type. Otherwise we need to * be sure this page cannot move to an off page * duplicate tree (which are not locked) and * masquerade as the page we want. */ /* * If the page is not at leaf level * then see if OPD trees are around. * If the page could appear as an * interior offpage duplicate node * at the right level the it will * not be locked and subsequently be * freed. If there are multiple * databases in the file then they * could have OPDs. */ if (level - 1 > LEAFLEVEL && (F_ISSET(dbp, DB_AM_SUBDB) || (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_DUPSORT)))) goto drop_lock; /* * Take a look at the page. If it got * freed it could be very gone. */ if ((ret = __memp_fget(mpf, &pg, dbc->thread_info, dbc->txn, 0, &h)) != 0 && ret != DB_PAGE_NOTFOUND) goto err; /* * Check for right level and page type. */ if (ret != 0 || LEVEL(h) != level - 1 || (LEVEL(h) == LEAFLEVEL ? TYPE(h) != (dbc->dbtype == DB_BTREE ? P_LBTREE : P_LRECNO) : TYPE(h) != (dbc->dbtype == DB_BTREE ? P_IBTREE : P_IRECNO))) { drop_lock: ret = __LPUT(dbc, saved_lock); if (ret != 0) goto err; pg = root_pgno; saved_level = MAXBTREELEVEL; } if (h != NULL && (ret = __memp_fput(mpf, dbc->thread_info, h, dbc->priority)) != 0) goto err; h = NULL; if (was_next) { LF_CLR(SR_MIN); LF_SET(SR_NEXT); } /* * We have the lock but we dropped the * latch so we need to search again. If * we get back to the same page then all * is good, otherwise we need to try to * lock the new page. 
*/ saved_pg = pg; saved_level = level - 1; goto retry; } skip_lock: stack = set_stack; } /* Get the child page. */ if ((ret = __memp_fget(mpf, &pg, dbc->thread_info, dbc->txn, get_mode, &h)) != 0) goto err; /* Release the parent. */ if (parent_h != NULL && (ret = __memp_fput(mpf, dbc->thread_info, parent_h, dbc->priority)) != 0) goto err; parent_h = NULL; } /* NOTREACHED */ found: *exactp = 1; /* * If we got here, we know that we have a Btree leaf or off-page * duplicates page. If it's a Btree leaf page, we have to handle * on-page duplicates. * * If there are duplicates, go to the first/last one. This is * safe because we know that we're not going to leave the page, * all duplicate sets that are not on overflow pages exist on a * single leaf page. */ if (TYPE(h) == P_LBTREE && NUM_ENT(h) > P_INDX) { if (LF_ISSET(SR_DUPLAST)) while (indx < (db_indx_t)(NUM_ENT(h) - P_INDX) && inp[indx] == inp[indx + P_INDX]) indx += P_INDX; else if (LF_ISSET(SR_DUPFIRST)) while (indx > 0 && inp[indx] == inp[indx - P_INDX]) indx -= P_INDX; } /* * Now check if we are allowed to return deleted items; if not, then * find the next (or previous) non-deleted duplicate entry. (We do * not move from the original found key on the basis of the SR_DELNO * flag.) */ DB_ASSERT(env, recnop == NULL || LF_ISSET(SR_DELNO)); if (LF_ISSET(SR_DELNO)) { deloffset = TYPE(h) == P_LBTREE ? O_INDX : 0; if (LF_ISSET(SR_DUPLAST)) while (B_DISSET(GET_BKEYDATA(dbp, h, indx + deloffset)->type) && indx > 0 && inp[indx] == inp[indx - adjust]) indx -= adjust; else while (B_DISSET(GET_BKEYDATA(dbp, h, indx + deloffset)->type) && indx < (db_indx_t)(NUM_ENT(h) - adjust) && inp[indx] == inp[indx + adjust]) indx += adjust; /* * If we weren't able to find a non-deleted duplicate, return * DB_NOTFOUND. */ if (B_DISSET(GET_BKEYDATA(dbp, h, indx + deloffset)->type)) { ret = DB_NOTFOUND; goto err; } /* * Increment the record counter to point to the found element. * Ignore any deleted key/data pairs. There doesn't need to * be any correction for duplicates, as Btree doesn't support * duplicates and record numbers in the same tree. */ if (recnop != NULL) { DB_ASSERT(env, TYPE(h) == P_LBTREE); for (i = 0; i < indx; i += P_INDX) if (!B_DISSET( GET_BKEYDATA(dbp, h, i + O_INDX)->type)) ++recno; /* Correct the number for a 0-base. */ *recnop = recno + 1; } } if (LF_ISSET(SR_STK_ONLY)) { BT_STK_NUM(env, cp, h, indx, ret); if ((t_ret = __memp_fput(mpf, dbc->thread_info, h, dbc->priority)) != 0 && ret == 0) ret = t_ret; h = NULL; } else { if (LF_ISSET(SR_DEL) && cp->csp == cp->sp) cp->csp++; BT_STK_ENTER(env, cp, h, indx, lock, lock_mode, ret); } if (ret != 0) goto err; cp->csp->lock = lock; DB_ASSERT(env, parent_h == NULL); done: if (F_ISSET(dbc, DBC_OPD)) LOCK_CHECK_ON(dbc->thread_info); if ((ret = __LPUT(dbc, saved_lock)) != 0) return (ret); return (0); err: if (ret == 0) ret = t_ret; if (h != NULL && (t_ret = __memp_fput(mpf, dbc->thread_info, h, dbc->priority)) != 0 && ret == 0) ret = t_ret; if (parent_h != NULL && (t_ret = __memp_fput(mpf, dbc->thread_info, parent_h, dbc->priority)) != 0 && ret == 0) ret = t_ret; /* Keep any not-found page locked for serializability. */ if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0) ret = t_ret; (void)__LPUT(dbc, saved_lock); BT_STK_POP(cp); (void)__bam_stkrel(dbc, 0); if (F_ISSET(dbc, DBC_OPD)) LOCK_CHECK_ON(dbc->thread_info); return (ret); } /* * __bam_stkrel -- * Release all pages currently held in the stack. 
 *
 * PUBLIC: int __bam_stkrel __P((DBC *, u_int32_t));
 */
int
__bam_stkrel(dbc, flags)
	DBC *dbc;
	u_int32_t flags;
{
	BTREE_CURSOR *cp;
	DB *dbp;
	DB_MPOOLFILE *mpf;
	EPG *epg;
	int ret, t_ret;

	DB_ASSERT(NULL, dbc != NULL);

	dbp = dbc->dbp;
	mpf = dbp->mpf;
	cp = (BTREE_CURSOR *)dbc->internal;

	/*
	 * Release inner pages first.
	 *
	 * The caller must be sure that setting STK_NOLOCK will not affect
	 * either serializability or recoverability.
	 */
	for (ret = 0, epg = cp->sp; epg <= cp->csp; ++epg) {
		if (epg->page != NULL) {
			if (LF_ISSET(STK_CLRDBC) && cp->page == epg->page) {
				cp->page = NULL;
				LOCK_INIT(cp->lock);
			}
			if ((t_ret = __memp_fput(mpf, dbc->thread_info,
			    epg->page, dbc->priority)) != 0 && ret == 0)
				ret = t_ret;
			epg->page = NULL;
		}
		/*
		 * We set this if we need to release our pins,
		 * but are not logically ready to have the pages
		 * visible.
		 */
		if (LF_ISSET(STK_PGONLY))
			continue;
		if (LF_ISSET(STK_NOLOCK) && (epg->lock.mode == DB_LOCK_READ ||
		    atomic_read(&mpf->mfp->multiversion) == 0)) {
			if ((t_ret = __LPUT(dbc, epg->lock)) != 0 && ret == 0)
				ret = t_ret;
		} else if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0)
			ret = t_ret;
	}

	/* Clear the stack, all pages have been released. */
	if (!LF_ISSET(STK_PGONLY))
		BT_STK_CLR(cp);

	return (ret);
}

/*
 * __bam_stkgrow --
 *	Grow the stack.
 *
 * PUBLIC: int __bam_stkgrow __P((ENV *, BTREE_CURSOR *));
 */
int
__bam_stkgrow(env, cp)
	ENV *env;
	BTREE_CURSOR *cp;
{
	EPG *p;
	size_t entries;
	int ret;

	entries = cp->esp - cp->sp;

	if ((ret = __os_calloc(env, entries * 2, sizeof(EPG), &p)) != 0)
		return (ret);
	memcpy(p, cp->sp, entries * sizeof(EPG));
	if (cp->sp != cp->stack)
		__os_free(env, cp->sp);
	cp->sp = p;
	cp->csp = p + entries;
	cp->esp = p + entries * 2;
	return (0);
}
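/*
 * Editor's note: the doubling in __bam_stkgrow above is the standard
 * amortized-constant-time array growth idiom.  Below is a standalone
 * sketch of the same idea; the names (Entry, Stack, stack_grow) are
 * hypothetical and are not part of Berkeley DB.
 */
#include <cstdlib>
#include <cstring>

struct Entry { void *page; int lock; };
struct Stack {
	Entry *sp;	/* base of the current allocation */
	Entry *csp;	/* current top-of-stack entry */
	Entry *esp;	/* one past the end of the allocation */
};

static int
stack_grow(Stack *cp)
{
	std::size_t entries = cp->esp - cp->sp;
	/* Allocate twice the old capacity, copy the old entries over, then
	 * retarget the three pointers; the caller keeps pushing at csp,
	 * which now has 'entries' fresh slots above it. */
	Entry *p = static_cast<Entry *>(std::calloc(entries * 2, sizeof(Entry)));
	if (p == NULL)
		return (-1);
	std::memcpy(p, cp->sp, entries * sizeof(Entry));
	std::free(cp->sp);
	cp->sp = p;
	cp->csp = p + entries;
	cp->esp = p + entries * 2;
	return (0);
}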
utf-8
1
unknown
unknown
fceux-2.5.0+dfsg1/src/boards/232.cpp
/* FCE Ultra - NES/Famicom Emulator
 *
 * Copyright notice for this file:
 *  Copyright (C) 2012 CaH4e3
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "mapinc.h"

static uint8 bank, preg;

static SFORMAT StateRegs[] = {
	{ &bank, 1, "BANK" },
	{ &preg, 1, "PREG" },
	{ 0 }
};

static void Sync(void) {
//	uint32 bbank = (bank & 0x18) >> 1;
	uint32 bbank = ((bank & 0x10) >> 2) | (bank & 8);
	// some dumps have bbanks swapped, if swap commands,
	// then all roms can be played, but with some swapped
	// games in menu. if not, some dumps are unplayable
	// make hard dump for both cart types to check
	setprg16(0x8000, bbank | (preg & 3));
	setprg16(0xC000, bbank | 3);
	setchr8(0);
}

static DECLFW(M232WriteBank) {
	bank = V;
	Sync();
}

static DECLFW(M232WritePreg) {
	preg = V;
	Sync();
}

static void M232Power(void) {
	bank = preg = 0;
	Sync();
	SetWriteHandler(0x8000, 0xBFFF, M232WriteBank);
	SetWriteHandler(0xC000, 0xFFFF, M232WritePreg);
	SetReadHandler(0x8000, 0xFFFF, CartBR);
}

static void StateRestore(int version) {
	Sync();
}

void Mapper232_Init(CartInfo *info) {
	info->Power = M232Power;
	AddExState(&StateRegs, ~0, 0, 0);
	GameStateRestore = StateRestore;
}
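/* Editor's note: a minimal standalone harness (hypothetical, not FCEUX
 * code) that tabulates the outer-bank computation from Sync() above for
 * the four possible settings of bits 3 and 4 of 'bank'. */
#include <cstdio>

int main() {
	for (unsigned bank = 0x00; bank <= 0x18; bank += 8) {
		unsigned bbank = ((bank & 0x10) >> 2) | (bank & 8);
		// $8000 maps to bbank | (preg & 3); $C000 is fixed at bbank | 3.
		std::printf("bank=0x%02x -> 16K PRG block %u..%u, $C000 bank %u\n",
		    bank, bbank, bbank | 3, bbank | 3);
	}
	return 0;
}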
utf-8
1
GPL-2+
The FCEUX Team
xfconf-4.16.0/tests/get-properties/t-get-string.c
/*
 *  xfconf
 *
 *  Copyright (c) 2007 Brian Tarricone <bjt23@cornell.edu>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License ONLY.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Library General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
 */

#include "tests-common.h"

#ifdef HAVE_STRING_H
#include <string.h>
#endif

int
main(int argc, char **argv)
{
    XfconfChannel *channel;

    if(!xfconf_tests_start())
        return 1;

    channel = xfconf_channel_new(TEST_CHANNEL_NAME);

    TEST_OPERATION(!strcmp(xfconf_channel_get_string(channel,
                                                     test_string_property, ""),
                           test_string));

    g_object_unref(G_OBJECT(channel));

    xfconf_tests_end();

    return 0;
}
utf-8
1
unknown
unknown
r-cran-openmx-2.20.3+dfsg/src/ComputeNM.cpp
/* * Copyright 2007-2021 by the individuals mentioned in the source code history * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "omxDefines.h" #include "omxState.h" #include "omxFitFunction.h" #include "omxExportBackendState.h" #include "omxNLopt.h" #include "Compute.h" #include "glue.h" #include "ComputeGD.h" #include "ComputeNM.h" #include <Eigen/Core> #include <Eigen/Cholesky> #include <Eigen/Dense> #include <Rmath.h> #include <R_ext/Utils.h> #include "nlopt.h" #include "nlopt-internal.h" #include "EnableWarnings.h" static const char engineName[] = "NldrMd"; class omxCompute *newComputeNelderMead() { return new omxComputeNM(); } omxComputeNM::omxComputeNM() { } void omxComputeNM::initFromFrontend(omxState *globalState, SEXP rObj){ super::initFromFrontend(globalState, rObj); //TODO: use const defined in Rmath.h: const double myPI = 3.141592653589793238462643383280; SEXP slotValue; fitMatrix = omxNewMatrixFromSlot(rObj, globalState, "fitfunction"); omxCompleteFitFunction(fitMatrix); ScopedProtect p1(slotValue, R_do_slot(rObj, Rf_install("verbose"))); verbose = Rf_asInteger(slotValue); if(OMX_DEBUG){ mxLog("omxComputeNM member 'verbose' is %d", verbose); } ScopedProtect p2(slotValue, R_do_slot(rObj, Rf_install("nudgeZeroStarts"))); nudge = Rf_asLogical(slotValue); if(verbose){ mxLog("omxComputeNM member 'nudgeZeroStarts' is %d", nudge); } ScopedProtect p3(slotValue, R_do_slot(rObj, Rf_install("defaultMaxIter"))); defaultMaxIter = Rf_asLogical(slotValue); ScopedProtect p4(slotValue, R_do_slot(rObj, Rf_install("maxIter"))); if(defaultMaxIter){maxIter = Global->majorIterations * 10;} else{maxIter = Rf_asInteger(slotValue);} if(verbose){ mxLog("omxComputeNM member 'maxIter' is %d", maxIter); } ScopedProtect p5(slotValue, R_do_slot(rObj, Rf_install("alpha"))); alpha = Rf_asReal(slotValue); if(alpha<=0){mxThrow("reflection coefficient 'alpha' must be positive");} if(verbose){ mxLog("omxComputeNM member 'alpha' is %f", alpha); } ScopedProtect p6(slotValue, R_do_slot(rObj, Rf_install("betao"))); betao = Rf_asReal(slotValue); if(verbose){ mxLog("omxComputeNM member 'betao' is %f", betao); } ScopedProtect p7(slotValue, R_do_slot(rObj, Rf_install("betai"))); betai = Rf_asReal(slotValue); if(verbose){ mxLog("omxComputeNM member 'betai' is %f", betai); } if(betao<=0 || betao>=1 || betai<=0 || betai>=1){ mxThrow("contraction coefficients 'betao' and 'betai' must both be within unit interval (0,1)"); } ScopedProtect p8(slotValue, R_do_slot(rObj, Rf_install("gamma"))); gamma = Rf_asReal(slotValue); if(gamma>0 && gamma<=alpha){ mxThrow("if positive, expansion coefficient 'gamma' must be greater than reflection coefficient 'alpha'"); } if(verbose){ mxLog("omxComputeNM member 'gamma' is %f", gamma); } ScopedProtect p9(slotValue, R_do_slot(rObj, Rf_install("sigma"))); sigma = Rf_asReal(slotValue); if(sigma>=1){mxThrow("shrink coefficient 'sigma' must be less than 1.0");} if(verbose){ mxLog("omxComputeNM member 'sigma' is %f", sigma); } ScopedProtect p10(slotValue, R_do_slot(rObj, Rf_install("bignum"))); bignum = 
Rf_asReal(slotValue); if(verbose){ mxLog("omxComputeNM member 'bignum' is %f", bignum); } ScopedProtect p11(slotValue, R_do_slot(rObj, Rf_install("iniSimplexType"))); if(strEQ(CHAR(Rf_asChar(slotValue)),"regular")){iniSimplexType = 1;} else if(strEQ(CHAR(Rf_asChar(slotValue)),"right")){iniSimplexType = 2;} else if(strEQ(CHAR(Rf_asChar(slotValue)),"smartRight")){iniSimplexType = 3;} else if(strEQ(CHAR(Rf_asChar(slotValue)),"random")){iniSimplexType = 4;} else{mxThrow("unrecognized character string provided for Nelder-Mead 'iniSimplexType'");} if(verbose){ mxLog("omxComputeNM member 'iniSimplexType' is %d", iniSimplexType); } ScopedProtect p12(slotValue, R_do_slot(rObj, Rf_install("iniSimplexEdge"))); iniSimplexEdge = Rf_asReal(slotValue); if(verbose){ mxLog("omxComputeNM member 'iniSimplexEdge' is %f", iniSimplexEdge); } ScopedProtect p13(slotValue, R_do_slot(rObj, Rf_install("iniSimplexMat"))); if (Rf_length(slotValue)) { SEXP matrixDims; ScopedProtect pipm(matrixDims, Rf_getAttrib(slotValue, R_DimSymbol)); int *dimList = INTEGER(matrixDims); int rows = dimList[0]; int cols = dimList[1]; iniSimplexMat = Eigen::Map< Eigen::MatrixXd >(REAL(slotValue), rows, cols); } ScopedProtect p26(slotValue, R_do_slot(rObj, Rf_install(".iniSimplexColnames"))); int cnameslen = Rf_length(slotValue); if(cnameslen){ iniSimplexColnames.resize(cnameslen); int i; for(i=0; i<cnameslen; i++){ SEXP elem; { ScopedProtect p27(elem, STRING_ELT(slotValue, i)); iniSimplexColnames[i] = CHAR(elem); } } } ScopedProtect p14(slotValue, R_do_slot(rObj, Rf_install("greedyMinimize"))); greedyMinimize = Rf_asLogical(slotValue); if(verbose){ mxLog("omxComputeNM member 'greedyMinimize' is %d", greedyMinimize); } ScopedProtect p15(slotValue, R_do_slot(rObj, Rf_install("altContraction"))); altContraction = Rf_asLogical(slotValue); if(verbose){ mxLog("omxComputeNM member 'altContraction' is %d", altContraction); } ScopedProtect p16(slotValue, R_do_slot(rObj, Rf_install("degenLimit"))); degenLimit = Rf_asReal(slotValue); if(degenLimit<0 || degenLimit>myPI){mxThrow("'degenLimit' must ge within interval [0,pi]");} if(verbose){ mxLog("omxComputeNM member 'degenLimit' is %f", degenLimit); } ScopedProtect p17(slotValue, R_do_slot(rObj, Rf_install("stagnCtrl"))); if(Rf_length(slotValue)!=2){mxThrow("'stagnCtrl' must be an integer vector of length 2");} stagnCtrl[0] = INTEGER(slotValue)[0]; stagnCtrl[1] = INTEGER(slotValue)[1]; if(verbose){ mxPrintMat("omxComputeNM member 'stagnCtrl':", stagnCtrl); } ScopedProtect p18(slotValue, R_do_slot(rObj, Rf_install("validationRestart"))); validationRestart = Rf_asLogical(slotValue); if(verbose){ mxLog("omxComputeNM member 'validationRestart' is %d", validationRestart); } ScopedProtect p19(slotValue, R_do_slot(rObj, Rf_install("xTolProx"))); xTolProx = Rf_asReal(slotValue); if(verbose){ mxLog("omxComputeNM member 'xTolProx' is %f", xTolProx); } ScopedProtect p20(slotValue, R_do_slot(rObj, Rf_install("fTolProx"))); fTolProx = Rf_asReal(slotValue); if(verbose){ mxLog("omxComputeNM member 'fTolProx' is %f", fTolProx); } //Prevent user blunder w/r/t convergence criteria: if(xTolProx<=0 && fTolProx<=0){ fTolProx = 1e-14; Rf_warning("both 'xTolProx' and 'fTolProx' are non-positive; 'fTolProx' will be assigned a value of 1e-14"); } ScopedProtect p30(slotValue, R_do_slot(rObj, Rf_install("doPseudoHessian"))); doPseudoHessian = Rf_asLogical(slotValue); if(verbose){ mxLog("omxComputeNM member 'doPseudoHessian' is %d", doPseudoHessian); } ScopedProtect p24(slotValue, R_do_slot(rObj, 
Rf_install("ineqConstraintMthd"))); if(strEQ(CHAR(Rf_asChar(slotValue)),"soft")){ineqConstraintMthd = 0;} else if(strEQ(CHAR(Rf_asChar(slotValue)),"eqMthd")){ineqConstraintMthd = 1;} else{mxThrow("unrecognized character string provided for Nelder-Mead 'ineqConstraintMthd'");} if(verbose){ mxLog("omxComputeNM member 'ineqConstraintMthd' is %d", ineqConstraintMthd); } ScopedProtect p25(slotValue, R_do_slot(rObj, Rf_install("eqConstraintMthd"))); if(strEQ(CHAR(Rf_asChar(slotValue)),"soft")){eqConstraintMthd = 1;} else if(strEQ(CHAR(Rf_asChar(slotValue)),"backtrack")){eqConstraintMthd = 2;} else if(strEQ(CHAR(Rf_asChar(slotValue)),"GDsearch")){eqConstraintMthd = 3;} else if(strEQ(CHAR(Rf_asChar(slotValue)),"l1p")){eqConstraintMthd = 4;} else{mxThrow("unrecognized character string provided for Nelder-Mead 'eqConstraintMthd'");} if(verbose){ mxLog("omxComputeNM member 'eqConstraintMthd' is %d", eqConstraintMthd); } ScopedProtect p28(slotValue, R_do_slot(rObj, Rf_install("backtrackCtrl1"))); backtrackCtrl1 = Rf_asReal(slotValue); if(verbose){ mxLog("omxComputeNM member 'backtrackCtrl1' is %f", backtrackCtrl1); } ScopedProtect p29(slotValue, R_do_slot(rObj, Rf_install("backtrackCtrl2"))); backtrackCtrl2 = Rf_asInteger(slotValue); if(verbose){ mxLog("omxComputeNM member 'backtrackCtrl2' is %d", backtrackCtrl2); } ScopedProtect p31(slotValue, R_do_slot(rObj, Rf_install("centerIniSimplex"))); centerIniSimplex = Rf_asLogical(slotValue); if(verbose){ mxLog("omxComputeNM member 'centerIniSimplex' is %d", centerIniSimplex); } feasTol = Global->feasibilityTolerance; } void omxComputeNM::computeImpl(FitContext *fc) { omxAlgebraPreeval(fitMatrix, fc); fc->ensureParamWithinBox(nudge); fc->createChildren(fitMatrix, true); NelderMeadOptimizerContext nmoc(fc, this); if (nmoc.numFree <= 0) { complainNoFreeParam(); return; } nmoc.verbose = verbose; nmoc.maxIter = maxIter; nmoc.iniSimplexType = iniSimplexType; nmoc.iniSimplexEdge = iniSimplexEdge; nmoc.centerIniSimplex = centerIniSimplex; nmoc.fit2beat = R_PosInf; nmoc.bignum = bignum; nmoc.iniSimplexMat = iniSimplexMat; nmoc.ineqConstraintMthd = ineqConstraintMthd; nmoc.eqConstraintMthd = eqConstraintMthd; nmoc.countConstraintsAndSetupBounds(); if(nmoc.eqConstraintMthd==4 && (nmoc.EqC.getCount() || (nmoc.ineqConstraintMthd && nmoc.IneqC.getCount()))){ if(verbose){mxLog("starting l1-penalty algorithm");} fc->iterations = 0; //<--Not sure about this nmoc.maxIter = maxIter/10; nmoc.addPenalty = true; int k; for(k=0; k<=10; k++){ if(verbose){mxLog("l1p iteration %d",k);} if(k>0){ if(nmoc.iniSimplexMat.rows() || nmoc.iniSimplexMat.cols()){nmoc.iniSimplexMat.resize(0,0);} if(nmoc.statuscode==10){break;} if( !nmoc.estInfeas && nmoc.statuscode==0 ){ if(verbose){mxLog("l1p solution found");} break; } if(nmoc.estInfeas){ nmoc.rho *= 10.0; if(verbose){mxLog("penalty factor rho = %f",nmoc.rho);} nmoc.iniSimplexEdge = iniSimplexEdge; } else{ //It's making progress w/r/t the constraints, so re-initialize the simplex with a small edge: nmoc.iniSimplexEdge = sqrt((nmoc.vertices[nmoc.n] - nmoc.vertices[0]).dot(nmoc.vertices[nmoc.n] - nmoc.vertices[0])); //It's a good idea to reduce the penalty coefficient if the algorithm is making progress. 
//That helps prevent it from stopping at a non-optimal point: nmoc.rho /= 5.0; if(verbose){mxLog("penalty factor rho = %f",nmoc.rho);} } if(fc->iterations >= maxIter){ nmoc.statuscode = 4; if(verbose){mxLog("l1p algorithm ended with status 4");} break; } } nmoc.invokeNelderMead(); fc->iterations += nmoc.itersElapsed; if(verbose){mxLog("total Nelder-Mead iterations elapsed: %d",fc->iterations);} } } else{ nmoc.invokeNelderMead(); fc->iterations = nmoc.itersElapsed; } if(validationRestart && nmoc.statuscode==0){ NelderMeadOptimizerContext nmoc2(fc, this); nmoc2.verbose = verbose; nmoc2.maxIter = 2 * nmoc.n; nmoc2.iniSimplexType = 1; nmoc2.iniSimplexEdge = sqrt((nmoc.vertices[nmoc.n] - nmoc.vertices[0]).dot(nmoc.vertices[nmoc.n] - nmoc.vertices[0])); nmoc2.centerIniSimplex = true; nmoc2.fit2beat = nmoc.bestfit; nmoc2.bignum = nmoc.bignum; nmoc2.est = nmoc.est; nmoc2.rho = nmoc.rho; nmoc2.addPenalty = nmoc.addPenalty; nmoc2.eqConstraintMthd = nmoc.eqConstraintMthd; nmoc2.ineqConstraintMthd = nmoc.ineqConstraintMthd; nmoc2.countConstraintsAndSetupBounds(); nmoc2.invokeNelderMead(); if(nmoc2.statuscode==10){ fc->resetIterationError(); } if(nmoc2.bestfit < nmoc.bestfit && (nmoc2.statuscode==0 || nmoc2.statuscode==4)){ nmoc.bestfit = nmoc2.bestfit; nmoc.est = nmoc2.est; nmoc.estInfeas = nmoc2.estInfeas; if(nmoc2.statuscode==0){ nmoc.fvals = nmoc2.fvals; nmoc.vertices = nmoc2.vertices; nmoc.vertexInfeas = nmoc2.vertexInfeas; nmoc.subcentroid = nmoc2.subcentroid; nmoc.eucentroidPrev = nmoc2.eucentroidPrev; nmoc.equality = nmoc2.equality; nmoc.inequality = nmoc2.inequality; } else if(Global->timedOut){ //i.e., if time ran out during the validation restart nmoc.statuscode = 4; } } //Not sure about this: fc->iterations += nmoc2.itersElapsed; } if(doPseudoHessian && (nmoc.statuscode==0 || nmoc.statuscode==4) && !nmoc.vertexInfeas.sum() && !nmoc.EqC.getCount() && !nmoc.addPenalty){ nmoc.calculatePseudoHessian(); } if(nmoc.estInfeas && nmoc.statuscode!=10){nmoc.statuscode = 3;} switch(nmoc.statuscode){ case -1: mxThrow("unknown Nelder-Mead optimizer error"); break; case 0: fc->setInform(INFORM_CONVERGED_OPTIMUM); break; case 3: fc->setInform(INFORM_NONLINEAR_CONSTRAINTS_INFEASIBLE); break; case 4: fc->setInform(INFORM_ITERATION_LIMIT); break; case 10: fc->setInform(INFORM_STARTING_VALUES_INFEASIBLE); break; } size_t i=0; Eigen::VectorXd xdiffs(nmoc.n); Eigen::MatrixXd fdiffs(nmoc.n,1); Eigen::MatrixXd Q(nmoc.n, nmoc.n); verticesOut.resize(nmoc.vertices.size(), nmoc.vertices[0].size()); for(i=0; i < nmoc.vertices.size(); i++){ verticesOut.row(i) = nmoc.vertices[i]; } fvalsOut = nmoc.fvals; vertexInfeasOut = nmoc.vertexInfeas; for(i=0; i < size_t(nmoc.n); i++){ fdiffs(i,0) = fabs(fvalsOut[i+1] - fvalsOut[0]); } fproxOut = fdiffs.array().maxCoeff(); for(i=0; i < size_t(nmoc.n); i++){ if(!nmoc.EqC.getCount()){ Q.col(i) = verticesOut.row(i+1) - verticesOut.row(0); xdiffs[i] = (Q.col(i)).array().abs().maxCoeff(); } else{ xdiffs[i] = (verticesOut.row(i+1) - verticesOut.row(0)).array().abs().maxCoeff(); } } xproxOut = xdiffs.array().maxCoeff(); if(!nmoc.vertexInfeas.sum() && !nmoc.EqC.getCount() && !nmoc.addPenalty){ Eigen::FullPivLU< Eigen::MatrixXd > luq(Q); if(luq.isInvertible()){ Eigen::MatrixXd Qinv(nmoc.n, nmoc.n); Qinv = luq.inverse(); //This is the "simplex gradient" of Kelley (1999): simplexGradient = Qinv.transpose() * fdiffs; if(verbose){mxPrintMat("simplex gradient: ",simplexGradient);} } } nmoc.finalize(); fc->wanted |= FF_COMPUTE_BESTFIT; return; } void omxComputeNM::reportResults(FitContext *fc, 
MxRList *slots, MxRList *out){ omxPopulateFitFunction(fitMatrix, out); MxRList output; SEXP pn, cv, vrt, fv, vinf, fpm, xpm, phess, sg, bf; size_t i=0; if( fc->varGroup->vars.size() ){ Rf_protect(pn = Rf_allocVector( STRSXP, fc->varGroup->vars.size() )); for(i=0; i < fc->varGroup->vars.size(); i++){ SET_STRING_ELT( pn, i, Rf_mkChar(fc->varGroup->vars[i]->name) ); } output.add("paramNames", pn); } fc->state->reportConstraints(output); if( fc->constraintFunVals.size() ){ Rf_protect(cv = Rf_allocVector( REALSXP, fc->constraintFunVals.size() )); memcpy( REAL(cv), fc->constraintFunVals.data(), sizeof(double) * fc->constraintFunVals.size() ); output.add("constraintFunctionValues", cv); } if( verticesOut.rows() && verticesOut.cols() ){ Rf_protect(vrt = Rf_allocMatrix( REALSXP, verticesOut.rows(), verticesOut.cols() )); memcpy( REAL(vrt), verticesOut.data(), sizeof(double) * verticesOut.rows() * verticesOut.cols() ); output.add("finalSimplexMat", vrt); } if( fvalsOut.size() ){ Rf_protect(fv = Rf_allocVector( REALSXP, fvalsOut.size() )); memcpy( REAL(fv), fvalsOut.data(), sizeof(double) * fvalsOut.size() ); output.add("finalFitValues", fv); } if( vertexInfeasOut.size() ){ Rf_protect(vinf = Rf_allocVector( INTSXP, vertexInfeasOut.size() )); memcpy( INTEGER(vinf), vertexInfeasOut.data(), sizeof(int) * vertexInfeasOut.size() ); output.add("finalVertexInfeas", vinf); } if( pseudohess.rows() && pseudohess.cols() ){ Rf_protect(phess = Rf_allocMatrix( REALSXP, pseudohess.rows(), pseudohess.cols() )); memcpy( REAL(phess), pseudohess.data(), sizeof(double) * pseudohess.rows() * pseudohess.cols() ); output.add("pseudoHessian", phess); } if( simplexGradient.rows() && simplexGradient.cols() ){ Rf_protect(sg = Rf_allocVector( REALSXP, simplexGradient.rows() )); memcpy( REAL(sg), simplexGradient.data(), sizeof(double) * simplexGradient.rows() ); output.add("simplexGradient", sg); } Rf_protect(fpm = Rf_allocVector(REALSXP, 1)); //it would also work to do 'REAL(fpm)[0] = fproxOut;': memcpy( REAL(fpm), &fproxOut, sizeof(double) ); output.add("rangeProximityMeasure", fpm); Rf_protect(xpm = Rf_allocVector(REALSXP, 1)); memcpy( REAL(xpm), &xproxOut, sizeof(double) ); output.add("domainProximityMeasure", xpm); Rf_protect(bf = Rf_allocVector(REALSXP, 1)); memcpy( REAL(bf), &bestfitOut, sizeof(double) ); output.add("penalizedFit", bf); slots->add("output", output.asR()); } //------------------------------------------------------- NelderMeadOptimizerContext::NelderMeadOptimizerContext(FitContext* u_fc, omxComputeNM* u_nmo) : fc(u_fc), NMobj(u_nmo), numFree(u_fc->getNumFree()), IneqC(u_fc, "ineq", [](const omxConstraint &con){ return con.opCode != omxConstraint::EQUALITY; }), EqC(u_fc, "eq", [](const omxConstraint &con){ return con.opCode == omxConstraint::EQUALITY; }), subsidiarygoc(u_fc, 0L, u_nmo) { est.resize(numFree); copyParamsFromFitContext(est.data()); statuscode = -1; addPenalty = false; rho = 1; } void NelderMeadOptimizerContext::copyBounds() { fc->copyBoxConstraintToOptimizer(solLB, solUB); } void NelderMeadOptimizerContext::countConstraintsAndSetupBounds() { solLB.resize(numFree); solUB.resize(numFree); copyBounds(); int numEqC = EqC.getCount(); int numIneqC = IneqC.getCount(); if(verbose){ mxLog("counted %d equality constraints",numEqC); mxLog("counted %d inequality constraints",numIneqC); } //If there aren't any of one of the two constraint types, then the //method for handling them shouldn't matter. 
But, switching the //method to the simplest setting helps simplify programming logic: if(!numEqC && !ineqConstraintMthd){eqConstraintMthd = 1;} if(!numIneqC){ineqConstraintMthd = 0;} equality.resize(numEqC); inequality.resize(numIneqC); if(numEqC + numIneqC || eqConstraintMthd==3){ subsidiarygoc.setEngineName("SLSQP"); subsidiarygoc.ControlTolerance = 2 * Global->optimalityTolerance; subsidiarygoc.maxMajorIterations = Global->majorIterations; subsidiarygoc.setupSimpleBounds(); //mxThrow("so far, so good"); } } void NelderMeadOptimizerContext::copyParamsFromFitContext(double *ocpars) { Eigen::Map<Eigen::VectorXd> vec(ocpars, numFree); fc->copyEstToOptimizer(vec); } //---------------------------------------------------------------------- void NelderMeadOptimizerContext::enforceBounds(Eigen::VectorXd &x){ int i=0; for(i=0; i < x.size(); i++){ if(x[i] < solLB[i]){x[i] = solLB[i];} if(x[i] > solUB[i]){x[i] = solUB[i];} } } bool NelderMeadOptimizerContext::checkBounds(Eigen::VectorXd &x){ bool retval=true; int i=0; for(i=0; i < x.size(); i++){ if(x[i] < solLB[i] && x[i] > solUB[i]){ retval=false; break; } } return(retval); } void NelderMeadOptimizerContext::evalIneqC() { if (!IneqC.getCount()) return; IneqC.eval(fc, inequality.data()); if (NMobj->verbose >= 3) { mxPrintMat("inequality", inequality); } } void NelderMeadOptimizerContext::evalEqC() { if(!EqC.getCount()) return; EqC.eval(fc, equality.data()); if (NMobj->verbose >= 3) { mxPrintMat("equality", equality); } } double NelderMeadOptimizerContext::evalFit(Eigen::VectorXd &x) { copyParamsFromOptimizer(x,fc); ComputeFit(engineName, NMobj->fitMatrix, FF_COMPUTE_FIT, fc); if( fc->outsideFeasibleSet() ){ return(bignum); } else{ double fv = fc->getUnscaledFit(); if(fv > bignum){bignum = 10 * fv;} if(eqConstraintMthd==4 && addPenalty){ int i; for(i=0; i < equality.size(); i++){ fv += rho * fabs(equality[i]); } if(ineqConstraintMthd){ for(i=0; i < inequality.size(); i++){ fv += rho * fabs(inequality[i]); } } } return(fv); } } void NelderMeadOptimizerContext::checkNewPointInfeas(Eigen::VectorXd &x, Eigen::Vector2i &ifcr) { int i=0; double feasTol = NMobj->feasTol; ifcr.setZero(2); int numEqC = EqC.getCount(); int numIneqC = IneqC.getCount(); if(!numIneqC && !numEqC){return;} copyParamsFromOptimizer(x,fc); evalIneqC(); evalEqC(); if(numIneqC){ for(i=0; i < inequality.size(); i++){ if(inequality[i] > feasTol){ ifcr[0] = 1; break; } } } if(numEqC){ for(i=0; i < equality.size(); i++){ if(fabs(equality[i]) > feasTol){ ifcr[1] = 1; break; } } } } void NelderMeadOptimizerContext::evalFirstPoint(Eigen::VectorXd &x, double &fv, int &infeas) { Eigen::Vector2i ifcr; enforceBounds(x); checkNewPointInfeas(x, ifcr); if(!ifcr.sum()){ infeas = 0L; fv = evalFit(x); if(fv==bignum){infeas=1L;} return; } else if(ifcr[1] || (ifcr[0] && ineqConstraintMthd)){ switch(eqConstraintMthd){ case 1: infeas = 1L; fv = bignum; return; case 2: //Can't backtrack to someplace else if it's the very first point. 
infeas = 1L; fv = bignum; break; case 3: gdfsIter = 0; tentativpt = x; if (NMobj->verbose >= 3) { mxPrintMat("tentative point", tentativpt); } omxInvokeSLSQPfromNelderMead(this, x); if (NMobj->verbose >= 3) { mxPrintMat("replacement point", x); } checkNewPointInfeas(x, ifcr); if(!ifcr.sum()){ infeas = 0L; fv = evalFit(x); if(fv==bignum){infeas=1L;} return; } else{ fv = bignum; infeas = 1L; return; } //mxThrow("'GDsearch' Not Yet Implemented"); case 4: fv = evalFit(x); infeas = 1L; return; } } else if(ifcr[0]){ fv = bignum; infeas = 1L; return; } } //oldpt is used for backtracking: void NelderMeadOptimizerContext::evalNewPoint(Eigen::VectorXd &newpt, Eigen::VectorXd oldpt, double &fv, int &newInfeas, int oldInfeas) { Eigen::Vector2i ifcr; enforceBounds(newpt); checkNewPointInfeas(newpt, ifcr); if(!ifcr.sum()){ newInfeas = 0L; fv = (evalFit(newpt)); if(fv==bignum){newInfeas=1L;} return; } else if(ifcr[1] || (ifcr[0] && ineqConstraintMthd)){ switch(eqConstraintMthd){ case 1: newInfeas = 1L; fv = bignum; return; case 2: //If old point is not feasible, there's no sense in backtracking toward it: if(oldInfeas){ newInfeas = 1L; fv = bignum; return; } else{ int i; for(i=1; i <= NMobj->backtrackCtrl2; i++){ ifcr.setZero(); newpt = oldpt + NMobj->backtrackCtrl1*(newpt - oldpt); enforceBounds(newpt); checkNewPointInfeas(newpt, ifcr); if(!ifcr.sum()){ newInfeas = 0L; fv = evalFit(newpt); if(fv==bignum){continue;} return; } } fv = bignum; newInfeas = 1L; return; } case 3: gdfsIter = 0; tentativpt = newpt; if (NMobj->verbose >= 3) { mxPrintMat("tentative point", tentativpt); } omxInvokeSLSQPfromNelderMead(this, newpt); if (NMobj->verbose >= 3) { mxPrintMat("replacement point", newpt); } checkNewPointInfeas(newpt, ifcr); if(!ifcr.sum()){ newInfeas = 0L; fv = evalFit(newpt); if(fv==bignum){newInfeas=1L;} return; } else{ fv = bignum; newInfeas = 1L; return; } //mxThrow("'GDsearch' Not Yet Implemented"); case 4: fv = evalFit(newpt); newInfeas = 1L; return; } } else if(ifcr[0]){ fv = bignum; newInfeas = 1L; return; } } void NelderMeadOptimizerContext::jiggleCoord(Eigen::VectorXd &xin, Eigen::VectorXd &xout, double scal){ double a,b; int i; BorrowRNGState grs; for(i=0; i < xin.size(); i++){ b = Rf_runif(1.0-scal,1.0+scal); a = Rf_runif(0.0-scal,0.0+scal); xout[i] = b*xin[i] + a; } } //TODO: make the different parts of the printing subject to different verbose levels void NelderMeadOptimizerContext::printProblemState() { int i=0; Eigen::MatrixXd tmpvrt(n+1,numFree); for(i=0; i<n+1; i++){tmpvrt.row(i) = vertices[i];} mxPrintMat("working simplex:",tmpvrt); mxPrintMat("fitfunction values:",fvals); mxPrintMat("infeasibility states:",vertexInfeas); } void NelderMeadOptimizerContext::printNewPoint(Eigen::VectorXd &x, double fv, int isbad) { mxPrintMat("coordinates:",x); mxLog("fitfunction value: %f",fv); mxLog("infeasible?: %d",isbad); } //Want to pass startpt as value, not reference: void NelderMeadOptimizerContext::initializeSimplex(Eigen::VectorXd startpt, double edgeLength, bool isRestart) { if(verbose){mxLog("(re-)initializing simplex");} int i=0; Eigen::VectorXd xin, xout, newpt, oldpt; if(iniSimplexMat.rows() && iniSimplexMat.cols() && !isRestart){ Eigen::MatrixXd SiniSupp, iniSimplexMat2; Eigen::VectorXi paramMap(numFree); if(iniSimplexMat.cols() != numFree){ mxThrow("'iniSimplexMat' has %d columns, but %d columns expected",iniSimplexMat.cols(), numFree); } if( int(NMobj->iniSimplexColnames.size()) != numFree){ mxThrow("'iniSimplexMat' has %d column names, but %d column names expected", 
int(NMobj->iniSimplexColnames.size()), numFree); } if(iniSimplexMat.rows()>n+1){ Rf_warning("'iniSimplexMat' has %d rows, but %d rows expected; extraneous rows will be ignored",iniSimplexMat.rows(), n+1); iniSimplexMat.conservativeResize(n+1,numFree); } iniSimplexMat2.resize(iniSimplexMat.rows(), numFree); int gx=0; /*If there are no problems, then every time vx gets incremented, it should become equal to the current value of gx*/ for (int vx=0; vx < int(fc->varGroup->vars.size()); ++vx) { for (int nx=0; nx < int(NMobj->iniSimplexColnames.size()); ++nx) { if (strEQ(NMobj->iniSimplexColnames[nx], fc->varGroup->vars[vx]->name)) { paramMap[gx] = vx; ++gx; break; } } } if ( gx != int(NMobj->iniSimplexColnames.size()) ){ mxThrow("error in mapping column names of 'iniSimplexMat' to free-parameter labels"); } for(i=0; i < iniSimplexMat.cols(); i++){ iniSimplexMat2.col(paramMap[i]) = iniSimplexMat.col(i); } if(iniSimplexMat.rows()<n+1){ Rf_warning("'iniSimplexMat' has %d rows, but %d rows expected; omitted rows will be generated randomly",iniSimplexMat.rows(),n+1); SiniSupp.resize(n + 1 - iniSimplexMat.rows(), numFree); xin=iniSimplexMat2.row(0); for(i=0; i<SiniSupp.rows(); i++){ xout=SiniSupp.row(i); jiggleCoord(xin, xout, edgeLength/4.0); SiniSupp.row(i) = xout; } } for(i=0; i < iniSimplexMat.rows(); i++){ vertices[i] = iniSimplexMat2.row(i); } if(SiniSupp.rows()){ for(i=0; i<SiniSupp.rows(); i++){ vertices[i+iniSimplexMat.rows()] = SiniSupp.row(i); } } } else{ double k = (double) n; double shhp = edgeLength*(1.0/k/sqrt(2.0))*(-1.0 + k + sqrt(1.0+k)); double shhq = edgeLength*(1.0/k/sqrt(2.0))*(sqrt(1.0+k)-1.0); Eigen::VectorXd xu, xd; double fu=0, fd=0; int badu=0, badd=0; switch(iniSimplexType){ case 1: vertices[0] = startpt; if(n==numFree){ for(i=1; i<n+1; i++){ vertices[i].setConstant(numFree,shhq); vertices[i][i-1] = shhp; vertices[i] += startpt; } } else{ for(i=1; i<n+1; i++){ vertices[i].setConstant(numFree,shhq); vertices[i] += startpt; } int j=1; for(i=0; i<numFree; i++){ vertices[j%(n+1)][i] += (shhp - shhq); j++; if(j==n+1){j = 1;} } } break; case 2: vertices[0] = startpt; if(n==numFree){ for(i=1; i<n+1; i++){ vertices[i] = startpt; vertices[i][i-1] += edgeLength; } } else{ for(i=1; i<n+1; i++){ vertices[i] = startpt; } int j=1; for(i=0; i<numFree; i++){ vertices[j%(n+1)][i] += edgeLength; j++; if(j==n+1){j = 1;} } } break; case 3: //TODO: this could be even smarter if it also figured out different edge lengths //to account for different scaling of the free parameters: if(n==numFree){ vertices[0] = startpt; evalFirstPoint(vertices[0], fvals[0], vertexInfeas[0]); for(i=0; i<n; i++){ xu = vertices[0]; xu[i] += edgeLength; xd = vertices[0]; xd[i] -= edgeLength; evalNewPoint(xu, vertices[0], fu, badu, vertexInfeas[0]); evalNewPoint(xd, vertices[0], fd, badd, vertexInfeas[0]); vertices[i+1] = fu<fd ? xu : xd; fvals[i+1] = fu<fd ? fu : fd; vertexInfeas[i+1] = fu<fd ? badu : badd; } if(verbose){printProblemState();} return; } else{ vertices[0] = startpt; evalFirstPoint(vertices[0], fvals[0], vertexInfeas[0]); for(i=1; i<n+1; i++){ vertices[i] = startpt; } int j=1; for(i=0; i<numFree; i++){ xu = vertices[j%(n+1)]; xu[i] += edgeLength; xd = vertices[j%(n+1)]; xd[i] -= edgeLength; evalNewPoint(xu, vertices[0], fu, badu, vertexInfeas[0]); evalNewPoint(xd, vertices[0], fd, badd, vertexInfeas[0]); vertices[j%(n+1)] = fu<fd ? xu : xd; fvals[j%(n+1)] = fu<fd ? fu : fd; vertexInfeas[j%(n+1)] = fu<fd ? 
badu : badd; j++; if(j==n+1){j = 1;} } if(verbose){printProblemState();} return; } case 4: vertices[0] = startpt; for(i=1; i<n+1; i++){ vertices[i].setZero(numFree); jiggleCoord(vertices[0],vertices[i],edgeLength/4.0); } break; } if(centerIniSimplex && !isRestart){ eucentroidCurr.setZero(numFree); for(i=0; i<n+1; i++){ eucentroidCurr += vertices[i] / (n+1.0); } for(i=0; i<n+1; i++){ vertices[i] += startpt - eucentroidCurr; } } } //Now evaluate each vertex: evalFirstPoint(vertices[0], fvals[0], vertexInfeas[0]); for(i=1; i<n+1; i++){ evalNewPoint(vertices[i], vertices[0], fvals[i], vertexInfeas[i], vertexInfeas[0]); } } void NelderMeadOptimizerContext::fullSort() { int i=0; Eigen::VectorXi ind(n+1); for(i=0; i<=n; i++){ ind[i] = i; } Eigen::VectorXi tmpVertexInfeas = vertexInfeas; std::vector<Eigen::VectorXd> tmpVertices = vertices; //If we don't care about tie-breaking rules: if( (fvals.tail(n).array() < fvals[0]).any() ){ unchangedx0count = 0; rsort_with_index(fvals.data(), ind.data(), n+1); for(i=0; i<n+1; i++){ vertices[i] = tmpVertices[ind[i]]; vertexInfeas[i] = tmpVertexInfeas[ind[i]]; } } else{ unchangedx0count++; Eigen::VectorXi ind_tail = ind.tail(n); Eigen::VectorXd fvals_tail = fvals.tail(n); rsort_with_index(fvals_tail.data(), ind_tail.data(), n); for(i=1; i<n+1; i++){ fvals[i] = fvals_tail[i-1]; vertices[i] = tmpVertices[ind_tail[i-1]]; vertexInfeas[i] = tmpVertexInfeas[ind_tail[i-1]]; } } //Calculate centroids: subcentroid.setZero(numFree); eucentroidCurr.setZero(numFree); for(i=0; i<n+1; i++){ eucentroidCurr += vertices[i] / (n+1); if(i<n){subcentroid += vertices[i] / n;} } Eigen::Vector2i scfcr; scfcr.setZero(); checkNewPointInfeas(subcentroid, scfcr); badsc = (scfcr.sum()) ? 1 : 0; needFullSort = false; if(verbose){ mxLog("full sort complete..."); printProblemState(); } return; } void NelderMeadOptimizerContext::fastSort() { int i=0, j; Eigen::VectorXi tmpVertexInfeas = vertexInfeas; std::vector<Eigen::VectorXd> tmpVertices = vertices; Eigen::VectorXd tmpFvals = fvals; if(tmpFvals[n]<tmpFvals[0]){ unchangedx0count = 0; fvals[0] = tmpFvals[n]; vertices[0] = tmpVertices[n]; vertexInfeas[0] = tmpVertexInfeas[n]; for(i=1; i<=n; i++){ fvals[i] = tmpFvals[i-1]; vertices[i] = tmpVertices[i-1]; vertexInfeas[i] = tmpVertexInfeas[i-1]; } } else{ unchangedx0count++; for(i=n-1; i>=0; i--){ if(tmpFvals[i] > tmpFvals[n]){ fvals[i+1] = tmpFvals[i]; vertices[i+1] = tmpVertices[i]; vertexInfeas[i+1] = tmpVertexInfeas[i]; } else{ fvals[i+1] = tmpFvals[n]; vertices[i+1] = tmpVertices[n]; vertexInfeas[i+1] = tmpVertexInfeas[n]; break; } } for(j=i; j>=0; j--){ fvals[j] = tmpFvals[j]; vertices[j] = tmpVertices[j]; vertexInfeas[j] = tmpVertexInfeas[j]; } } //TODO: this could be made faster, since we do fastSort() when only one vertex of the simplex has changed: subcentroid.setZero(numFree); eucentroidCurr.setZero(numFree); for(i=0; i<n+1; i++){ eucentroidCurr += vertices[i] / (n+1); if(i<n){subcentroid += vertices[i] / n;} } Eigen::Vector2i scfcr; scfcr.setZero(); checkNewPointInfeas(subcentroid, scfcr); badsc = (scfcr.sum()) ? 
1 : 0; if(verbose){ mxLog("fast sort complete..."); printProblemState(); } return; } void NelderMeadOptimizerContext::simplexTransformation() { failedContraction = false; oldWorstVertex = vertices[n]; //Reflection transformation: xr = subcentroid + NMobj->alpha*(subcentroid - vertices[n]); evalNewPoint(xr, subcentroid, fr, badr, badsc); if(verbose){ mxLog("reflection point..."); printNewPoint(xr, fr, badr); } if(fr<fvals[n-1]){ //<--If fit at reflection point is better than second worst fit //If fit at reflection point is worse than best fit, or expansions are turned off, accept reflection point: if(fr>=fvals[0] || NMobj->gamma<0){ fvals[n] = fr; vertices[n] = xr; vertexInfeas[n] = badr; needFullSort=false; if(verbose){mxLog("reflection point accepted");} return; } else{ //<--If fit at reflection point is better than best fit and expansions are turned on //Expansion transformation: xe = subcentroid + NMobj->gamma*(xr - subcentroid); evalNewPoint(xe, xr, fe, bade, badr); if(verbose){ mxLog("expansion point..."); printNewPoint(xe, fe, bade); } if(NMobj->greedyMinimize){ //<--If using greedy minimization //Accept the better of the reflection and expansion points: fvals[n] = (fr<fe) ? fr : fe; vertices[n] = (fr<fe) ? xr : xe; vertexInfeas[n] = (fr<fe) ? badr : bade; needFullSort=false; if(verbose){ if(fr<fe){mxLog("reflection point accepted");} else{mxLog("expansion point accepted");} } return; } else{ //<--If using greedy expansion //Accept expansion point unless reflection point is strictly better: fvals[n] = (fe<fvals[0]) ? fe : fr; vertices[n] = (fe<fvals[0]) ? xe : xr; vertexInfeas[n] = (fe<fvals[0]) ? bade : badr; needFullSort=false; if(verbose){ if(fe<fvals[0]){mxLog("expansion point accepted");} else{mxLog("reflection point accepted");} } return; } } } else{ if(fr<fvals[n]){ //<--If fit at reflection point is at least better than the worst fit //Outside-contraction transformation: if(!NMobj->altContraction){ xoc = subcentroid + NMobj->betao*(xr - subcentroid); evalNewPoint(xoc, subcentroid, foc, badoc, badsc); } else{ xoc = vertices[0] + NMobj->betao*(xr - vertices[0]); evalNewPoint(xoc, vertices[0], foc, badoc, vertexInfeas[0]); } if(verbose){ mxLog("outside contraction point..."); printNewPoint(xoc, foc, badoc); } if(foc<=fr){ //<--If fit at xoc is no worse than fit at reflection point //Accept xoc: fvals[n] = foc; vertices[n] = xoc; vertexInfeas[n] = badoc; needFullSort=false; if(verbose){mxLog("outside contraction point accepted");} return; } else if(NMobj->sigma<=0){ //<--If fit at xoc is worse than fit at reflection point, and shrinks are turned off //This case is considered a failed contraction: failedContraction = true; if(verbose){mxLog("outside contraction failed and shrinks are switched off...");} return; } } else{ //<--If fit at reflection point is no better than worst fit //Inside-contraction transformation: if(!NMobj->altContraction){ xic = subcentroid + NMobj->betai*(vertices[n] - subcentroid); evalNewPoint(xic, subcentroid, fic, badic, badsc); } else{ xic = vertices[0] + NMobj->betai*(vertices[n] - vertices[0]); evalNewPoint(xic, vertices[0], fic, badic, vertexInfeas[0]); } if(verbose){ mxLog("inside contraction point..."); printNewPoint(xic, fic, badic); } if(fic<fvals[n]){ //<--If fit at xic is better than worst fit //Accept xic: fvals[n] = fic; vertices[n] = xic; vertexInfeas[n] = badic; needFullSort=false; if(verbose){mxLog("inside contraction point accepted");} return; } else if(NMobj->sigma<=0){ failedContraction = true; if(verbose){mxLog("inside contraction 
failed and shrinks are switched off...");} return; } } //Shrink transformation: if(NMobj->sigma>0){ int i=0; std::vector<Eigen::VectorXd> tmpVertices = vertices; Eigen::VectorXi tmpVertexInfeas = vertexInfeas; for(i=1; i<n+1; i++){ vertices[i] = vertices[0] + NMobj->sigma*(vertices[i] - vertices[0]); evalNewPoint(vertices[i], tmpVertices[i], fvals[i], vertexInfeas[i], tmpVertexInfeas[i]); } needFullSort=true; if(verbose){mxLog("shrink transformation complete");} return; } } } bool NelderMeadOptimizerContext::checkConvergence(){ int i=0; Eigen::VectorXd xdiffs(n); Eigen::VectorXd fdiffs(n); double fprox, xprox; //Range-convergence test: if(NMobj->fTolProx > 0){ for(i=0; i<n; i++){ fdiffs[i] = fabs(fvals[i+1] - fvals[0]); } fprox = fdiffs.array().maxCoeff(); if(verbose){mxLog("range proximity measure: %f",fprox);} if(fprox < NMobj->fTolProx && fvals[0] < fit2beat){ statuscode = 0; return(true); } } //Domain-convergence test: if(NMobj->fTolProx > 0){ for(i=0; i<n; i++){ xdiffs[i] = (vertices[i+1] - vertices[0]).array().abs().maxCoeff(); } xprox = xdiffs.array().maxCoeff(); if(verbose){mxLog("domain proximity measure: %f",xprox);} if(xprox < NMobj->xTolProx && fvals[0] < fit2beat){ statuscode = 0; return(true); } } if(itersElapsed >= maxIter || isErrorRaised()){ statuscode = 4; return(true); } return(false); } bool NelderMeadOptimizerContext::checkProgress(){ //TODO: use const defined in Rmath.h: const double myPI = 3.141592653589793238462643383280; Eigen::VectorXd d1, d2; double t; int i, j, k; if(failedContraction && NMobj->sigma<=0){ return(true); } if(NMobj->stagnCtrl[0]>0 && NMobj->stagnCtrl[1]>0 && unchangedx0count>=NMobj->stagnCtrl[0] && NMobj->stagnCtrl[1]<restartsUsed){ return(true); } if(NMobj->degenLimit>0){ for(i=0; i<n+1; i++){ for(j=0; j<n; j++){ if(j==i){continue;} for(k=j+1; k<n+1; k++){ d1 = vertices[i] - vertices[j]; d2 = vertices[i] - vertices[k]; t = acos( d1.dot(d2) / sqrt(d1.dot(d1)) / sqrt(d2.dot(d2)) ); if(t < NMobj->degenLimit || myPI - t < NMobj->degenLimit){ return(true); } } } } } return(false); } void NelderMeadOptimizerContext::invokeNelderMead(){ n = numFree - EqC.getCount(); vertices.resize(n+1); fvals.resize(n+1); vertexInfeas.resize(n+1); subcentroid.resize(numFree); eucentroidCurr.resize(numFree); initializeSimplex(est, iniSimplexEdge, false); if( (vertexInfeas.sum()==n+1 && eqConstraintMthd != 4) || (fvals.array()==bignum).all()){ fc->recordIterationError("initial simplex is not feasible; specify it differently, try different start values, or use mxTryHard()"); statuscode = 10; return; } fullSort(); needFullSort=false; bool needRestart = false; bool stopflag=false; itersElapsed = 0; restartsUsed = 0; //Loop is: sort, check convergence, check progress, transform; do{ if(verbose){ mxLog("Nelder-Mead iteration %d / %d",itersElapsed,maxIter); } fc->resetOrdinalRelativeError(); if(itersElapsed){ //Order the vertices by fit value: if(needFullSort){fullSort();} else{fastSort();} stopflag = checkConvergence(); if(stopflag){ break; } needRestart = checkProgress(); if(needRestart){ initializeSimplex(vertices[0], sqrt((vertices[0]-vertices[1]).dot(vertices[0]-vertices[1])), true); needRestart = false; restartsUsed++; needFullSort = true; itersElapsed++; continue; } } simplexTransformation(); eucentroidPrev = eucentroidCurr; itersElapsed++; Global->reportProgress("MxComputeNelderMead", fc); } while (!stopflag); est = vertices[0]; bestfit = fvals[0]; estInfeas = vertexInfeas[0]; double centFit; int centInfeas; evalNewPoint(subcentroid, vertices[0], centFit, centInfeas, 
vertexInfeas[0]); if(centFit < bestfit && !centInfeas){ est = subcentroid; bestfit = centFit; estInfeas = 0; } evalNewPoint(eucentroidCurr, vertices[0], centFit, centInfeas, vertexInfeas[0]); if(centFit < bestfit && !centInfeas){ est = eucentroidCurr; bestfit = centFit; estInfeas = 0; } //if(estInfeas){statuscode = 3;} if(verbose){mxPrintMat("solution?",est);} } void NelderMeadOptimizerContext::calculatePseudoHessian() { int numpts = (n+1)*(n+2)/2; bool canDoAnalyt=true; int i, j, k, pminInfeas; double a0, pminfit; NMobj->pseudohess.resize(n, n); NMobj->phpts.resize(numpts, n); NMobj->phFvals.resize(numpts, 1); NMobj->phInfeas.resize(numpts); Eigen::VectorXd currpt(n); Eigen::VectorXd currpt2(n); Eigen::VectorXi jvec(numpts); Eigen::VectorXi kvec(numpts); Eigen::VectorXd a(n), pmin(n); Eigen::MatrixXd B(n,n), Q(n, n); NMobj->pseudohess.setZero(n, n); NMobj->phpts.setZero(numpts, n); NMobj->phFvals.setZero(numpts, 1); NMobj->phInfeas.setZero(numpts); for(i=0; i<n; i++){ Q.col(i) = vertices[i+1] - vertices[0]; } Eigen::FullPivLU< Eigen::MatrixXd > luq(Q); for(i=0; i<n+1; i++){ NMobj->phpts.row(i) = vertices[i]; NMobj->phFvals(i,0) = fvals[i]; NMobj->phInfeas[i] = 0; //<--Assuming that this function is not called if any vertices are infeasible. kvec[i] = -1; jvec[i] = -1; } i=n+1; for(j=0; j<n; j++){ for(k=j+1; k<n+1; k++){ jvec[i] = j; kvec[i] = k; currpt = (vertices[j] + vertices[k])/2; currpt2 = currpt; evalNewPoint(currpt, vertices[j], NMobj->phFvals(i,0), NMobj->phInfeas[i], 0); if(NMobj->phInfeas[i]){ //TODO: export a message about the pseudohessian for the user NMobj->pseudohess.resize(0,0); NMobj->phpts.resize(0,0); NMobj->phFvals(0,0); NMobj->phInfeas.resize(0); return; } else if(NMobj->phFvals(i,0) < bestfit){ est = currpt; bestfit = NMobj->phFvals(i,0); estInfeas = 0; } //We can't use Nelder & Mead's analytic solution if the midpoints of the edges aren't actually such: if( (currpt.array() != currpt2.array()).any() ){ canDoAnalyt = false; } NMobj->phpts.row(i) = currpt; i++; } } if(canDoAnalyt && luq.isInvertible()){ if(verbose){mxLog("analytically calculating pseudoHessian");} a0 = fvals[0]; for(i=0; i<n; i++){ a[i] = 2*NMobj->phFvals(i+(n+1),0) - (fvals[i+1] + 3*a0)/2; B(i,i) = 2*( fvals[i+1] + a0 - 2*NMobj->phFvals(i+(n+1),0) ); } for(i=n+n+1; i<numpts; i++){ if(jvec[i] == kvec[i]){continue;} B(jvec[i]-1,kvec[i]-1) = 2*( NMobj->phFvals(i,0) + a0 - NMobj->phFvals(jvec[i]+(n+1)-1, 0) - NMobj->phFvals(kvec[i]+(n+1)-1, 0) ); B(kvec[i]-1,jvec[i]-1) = B(jvec[i]-1,kvec[i]-1); } Eigen::FullPivLU< Eigen::MatrixXd > lub(B); if(lub.isInvertible()){ pmin = vertices[0] - (Q * lub.inverse() * a); evalNewPoint(pmin, vertices[0], pminfit, pminInfeas, vertexInfeas[0]); if(pminfit<bestfit && !pminInfeas){ est = pmin; bestfit = pminfit; estInfeas = 0; } } Eigen::MatrixXd Qinv = luq.inverse(); //NMobj->pseudohess = luq.inverse().transpose() * B * luq.inverse(); NMobj->pseudohess = Qinv.transpose() * B * Qinv; } else{ if(verbose){mxLog("numerically calculating pseudoHessian");} Eigen::MatrixXd X(numpts, numpts), polynomb(numpts,1), Binv; for(i=0; i<numpts; i++){ X(i,0) = 1; } for(i=0; i<n; i++){ X.col(i+1) = NMobj->phpts.col(i); } i=n+1; for(j=0; j<n; j++){ for(k=j; k<n; k++){ X.col(i) = (NMobj->phpts.col(j).array() * NMobj->phpts.col(k).array()); i++; } } polynomb.setZero(numpts,1); Eigen::ColPivHouseholderQR< Eigen::MatrixXd > qrx(X); if(qrx.info() != Eigen::Success){ NMobj->pseudohess.resize(0,0); NMobj->phpts.resize(0,0); NMobj->phFvals(0,0); NMobj->phInfeas.resize(0); return; } polynomb = 
qrx.solve(NMobj->phFvals); if(verbose){mxPrintMat("polynomial coefficients:",polynomb);} i=n+1; for(j=0; j<n; j++){ for(k=j; k<n; k++){ NMobj->pseudohess(j,k) = polynomb(i,0); if(j != k){NMobj->pseudohess(k,j) = polynomb(i,0);} i++; } } Eigen::FullPivLU< Eigen::MatrixXd > lub(NMobj->pseudohess); if(lub.isInvertible()){ Binv = lub.inverse(); for(i=0; i<n; i++){ a[i] = polynomb(i+1,0); } pmin = vertices[0] - (Binv * a); evalNewPoint(pmin, vertices[0], pminfit, pminInfeas, vertexInfeas[0]); if(pminfit<bestfit && !pminInfeas){ est = pmin; bestfit = pminfit; estInfeas = pminInfeas; } } NMobj->Xout = X; } if(verbose){ mxPrintMat("pseudoHessian is ", NMobj->pseudohess); } return; } void NelderMeadOptimizerContext::finalize() { //The omxComputeNM object stows the possibly penalized fit value; the FitContext here recomputes the unpenalized fit value, at the //best parameter values: NMobj->bestfitOut = bestfit; copyParamsFromOptimizer(est,fc); ComputeFit(engineName, NMobj->fitMatrix, FF_COMPUTE_FIT, fc); /*Doing this here ensures (1) that the fit has just been freshly evaluated at the solution, (2) that this check is done as part of the MxComputeNelderMead step (necessary for bootstrapping), and (3) that Nelder-Mead reports status code 3 for solutions that violate MxConstraints, and status code 10 for all other kinds of infeasible solutions:*/ if(!fc->insideFeasibleSet() && (statuscode==0 || statuscode==4)){fc->setInform(INFORM_STARTING_VALUES_INFEASIBLE);} ConstraintVec cv(fc, "constraint", [](const omxConstraint &con){ return true; }); fc->constraintFunVals.resize(cv.getCount()); cv.eval(fc, fc->constraintFunVals.data()); } double nmgdfso(unsigned n, const double *x, double *grad, void *f_data) { NelderMeadOptimizerContext *nmoc = (NelderMeadOptimizerContext *) f_data; nlopt_opt opt = (nlopt_opt) nmoc->extraData; unsigned i; double ssq=0, currdiff=0; if(grad){ if(nmoc->gdfsIter >= nmoc->subsidiarygoc.maxMajorIterations){ nlopt_force_stop(opt); } (nmoc->gdfsIter)++; } for(i=0; i < n; i++){ currdiff = x[i] - nmoc->tentativpt[i]; if(grad){grad[i] = 2*currdiff;} currdiff *= currdiff; ssq += currdiff; } return(ssq); }
utf-8
1
Apache-2.0
2010-2019 Steven M. Boker, Michael C. Neale, Hermine H. Maes, Michael J. Wilde, Michael Spiegel, Timothy R. Brick, Ryne Estabrook, Timothy C. Bates, Paras Mehta, Timo von Oertzen, Ross J. Gore, Michael D. Hunter, Daniel C. Hackett, Julian Karch, Andreas M. Brandmaier, Joshua N. Pritikin, Mahsa Zahery, Robert M. Kirkpatrick, Yang Wang, Charles Driver, Massachusetts Institute of Technology, S. G. Johnson, Association for Computing Machinery, Dieter Kraft, Stefan Wilhelm, Sarah Medland, Carl F. Falk, Matt Keller, Manjunath B G, The Regents of the University of California, Lester Ingber, Wong Shao Voon, Juan Palacios, Jiang Yang, Gavin Band, Yann Collet, Facebook, Inc., Yuta Mori, Shaun Purcell, Christopher Chang
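A minimal, self-contained sketch of the two proximity measures used by checkConvergence() in the record above, assuming Eigen is available; simplexConverged, fTol, and xTol are hypothetical names standing in for the record's fTolProx/xTolProx machinery:

#include <Eigen/Dense>
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Range test: largest |f(v_i) - f(v_0)| over the non-best vertices.
// Domain test: largest coordinate-wise distance of any vertex from the best one.
bool simplexConverged(const std::vector<Eigen::VectorXd>& vertices,
                      const Eigen::VectorXd& fvals,
                      double fTol, double xTol) {
  const int n = (int)vertices.size() - 1;  // n+1 vertices in R^n
  double fprox = 0.0, xprox = 0.0;
  for (int i = 1; i <= n; ++i) {
    fprox = std::max(fprox, std::fabs(fvals[i] - fvals[0]));
    xprox = std::max(xprox, (vertices[i] - vertices[0]).array().abs().maxCoeff());
  }
  return fprox < fTol && xprox < xTol;
}

int main() {
  std::vector<Eigen::VectorXd> v(3, Eigen::VectorXd::Zero(2));
  v[1] << 1e-9, 0.0;
  v[2] << 0.0, 1e-9;  // a nearly collapsed simplex in R^2
  Eigen::VectorXd f(3);
  f << 1.0, 1.0 + 1e-10, 1.0 + 2e-10;
  std::printf("converged: %d\n", simplexConverged(v, f, 1e-8, 1e-8));
  return 0;
}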
shapetools-1.4pl6/src/vc/retrv/doretrv.c
/* Copyright (C) 1993,1994 by the author(s). This software is published in the hope that it will be useful, but WITHOUT ANY WARRANTY for any part of this software to work correctly or as described in the manuals. See the ShapeTools Public License for details. Permission is granted to use, copy, modify, or distribute any part of this software but only under the conditions described in the ShapeTools Public License. A copy of this license is supposed to have been given to you along with ShapeTools in a file named LICENSE. Among other things, this copyright notice and the Public License must be preserved on all copies. */ /* * ShapeTools Version Control System * * doretrv.c - "retrv" command * * by Axel.Mahler@cs.tu-berlin.de * and Andreas.Lampen@cs.tu-berlin.de * * $Header: doretrv.c[6.2] Sun Jan 23 21:55:33 1994 axel@cs.tu-berlin.de frozen $ */ #include "atfs.h" #include "atfstk.h" #include "sttk.h" extern int copyFlag; extern char *destPath; extern int fixFlag; extern int forceFlag; extern char *intent; extern int lockFlag; extern int stdinFlag; extern int expandFlag; extern int vcatFlag; extern Af_key *newLock; extern int af_errno; /* permission status for retrieveAFile */ #define DOIT 01 #define DENIED 02 #define RECREATE 04 #define UNCHANGED 010 /*========================= * vcat *=========================*/ EXPORT void showAFile (path, aso) char *path; Af_key *aso; { FILE *versionFile; int nbytes; char *contentsBuf; /* an empty argument indicates that "vcat" was called * without arguments or with "-" as argument -> read from stdin */ if (!aso) { # define IOBUFLEN 2048 char iobuf[IOBUFLEN]; int cr, cw; stLog ("stdin", ST_LOG_MSGERR); while ((cr = fread (iobuf, sizeof (char), IOBUFLEN, stdin))) { cw = fwrite (iobuf, sizeof (char), cr, stdout); fflush (stdout); if (cw != cr) stLog ("Oops! Write error on stdout", ST_LOG_ERROR); } clearerr (stdin); return; } /* ToDo: show file */ if (!(versionFile = af_open (aso, "r"))) { stLog (af_errmsg ("af_open"), ST_LOG_ERROR); stAbortThis (TRUE); } if (path[0]) sprintf (stMessage, "%s/%s", path, af_retattr (aso, AF_ATTBOUND)); else sprintf (stMessage, "%s", af_retattr (aso, AF_ATTBOUND)); stLog (stMessage, ST_LOG_MSGERR); if ((contentsBuf = malloc ((unsigned) af_retnumattr (aso, AF_ATTSIZE)+1)) == NULL) { stLog ("Out of memory.", ST_LOG_ERROR); stAbortThis (TRUE); } nbytes = fread (contentsBuf, sizeof (char), af_retnumattr (aso, AF_ATTSIZE), versionFile); contentsBuf[nbytes] = '\0'; af_close (versionFile); if (expandFlag) atExpandAttrs (aso, contentsBuf, nbytes, stdout, 0, AT_EXPAND_FILE); else fwrite(contentsBuf, sizeof (char), nbytes, stdout); fflush (stdout); free (contentsBuf); } /*========================= * retrv *=========================*/ EXPORT void retrieveAFile (path, aso) char *path; Af_key *aso; { char *busyLocation, destName[PATH_MAX], tmpName[PATH_MAX], *contentsBuf; char *reserveDate, *attrPtr; unsigned int permit = 0; int nbytes, i; FILE *newFile, *versionFile, *tmpFile; Af_key busyAso, lastAso; Af_attrs asoAttrBuf; Af_user *locker; struct stat iBuf; struct utimbuf oldDate; busyLocation = destPath ? destPath : (path[0] ? path : "."); if (destPath || path[0]) sprintf (destName, "%s/%s", busyLocation, af_retattr (aso, AF_ATTUNIXNAME)); else strcpy (destName, af_retattr (aso, AF_ATTUNIXNAME)); af_allattrs (aso, &asoAttrBuf); if (!lockFlag) { /* This creates a plain UNIX file from the specified ASO * The created copy is - in general - an object without * history.
If, however, the copy happens to go into the * history-directory (the one containing the archive) it will * 'automatically' be considered the busy-version. * If - in this case - a copy replaces a formerly locked busy-version, * the lock will be released. */ if ((newFile = fopen (destName, "r")) == NULL) { /* take this as test for presence */ if (access (busyLocation, W_OK) == 0) { /* may we create ? */ permit |= DOIT; /* No scruples if no busyvers current */ } else { sprintf (stMessage, "write permission for directory %s denied.", busyLocation); stLog (stMessage, ST_LOG_ERROR); permit |= DENIED; } } else { /* file exists */ /* check if it is already restored */ if (!copyFlag && (fstat (fileno(newFile), &iBuf) != -1)) { if ((iBuf.st_size == af_retnumattr (aso, AF_ATTSIZE)) && (iBuf.st_mtime == af_rettimeattr (aso, AF_ATTMTIME)) && (iBuf.st_mode == (af_retnumattr (aso, AF_ATTMODE) & ~0222))) { if (stQuietFlag) permit |= UNCHANGED; else if (forceFlag) /* do nothing */; else { sprintf (stMessage, "%s and %s are identical, retrieve anyway ?", af_retattr (aso, AF_ATTBOUND), destName); if (stAskConfirm (stMessage, "no")) permit |= UNCHANGED; } } } fclose (newFile); if (permit & UNCHANGED) { sprintf (stMessage, "%s not retrieved", af_retattr (aso, AF_ATTBOUND)); stLog (stMessage, ST_LOG_MSGERR); return; } if (access (destName, W_OK) < 0) { if (access (busyLocation, W_OK) == 0) { if (stQuietFlag) permit |= forceFlag ? (RECREATE | DOIT) : DENIED; else if (forceFlag) permit |= RECREATE | DOIT; else { sprintf (stMessage, "%s write-protected, re-create it ?", destName); if (stAskConfirm (stMessage, "no")) permit |= DENIED; else permit |= (RECREATE | DOIT); } } else { sprintf (stMessage, "no write permission for %s", destName); stLog (stMessage, ST_LOG_ERROR); permit |= DENIED; } } else { /* write access on destfile */ if (strcmp (busyLocation, ".")) { if (stQuietFlag) permit |= forceFlag ? DOIT : 0; else if (forceFlag) permit |= DOIT; else { sprintf (stMessage, "%s exists and is writable. Overwrite it ?", destName); if (!stAskConfirm (stMessage, "no")) permit |= DOIT; } } else { /* current dir! - test for lock */ /* this test looks only for a lock in the last generation. Locks on other generations will not be recognized. However, this is not serious as these will go into the else part where "exists and is writable..." is asked. */ if (af_getkey (path, asoAttrBuf.af_name, asoAttrBuf.af_type, AF_LASTVERS, AF_LASTVERS, &lastAso) == -1) { /* No version -- this is impossible here */ sprintf (stMessage, "%s", af_errmsg (af_retattr (aso, AF_ATTUNIXNAME))); stLog (stMessage, ST_LOG_ERROR); stAbortThis (TRUE); } else { if (atUserUid (af_retuserattr (&lastAso, AF_ATTLOCKER)) == geteuid ()) { if (stQuietFlag) permit |= forceFlag ? DOIT : 0; else if (forceFlag) permit |= DOIT; else { sprintf (stMessage, "Give up lock on %s and overwrite it ?", destName); if (!stAskConfirm (stMessage, "no")) { permit |= DOIT; atUnlock (&lastAso); } else { permit |= DENIED; } } } else { if (stQuietFlag) permit |= forceFlag ? DOIT : 0; else if (forceFlag) permit |= DOIT; else { sprintf (stMessage, "%s exists and is writable. 
Overwrite it ?", destName); if (!stAskConfirm (stMessage, "no")) permit |= DOIT; } } af_dropkey (&lastAso); } } } } if (permit & DOIT) { if ((versionFile = af_open (aso, "r")) == NULL) { stLog (af_errmsg ("af_open"), ST_LOG_ERROR); stAbortThis (TRUE); } strcpy (tmpName, stTmpFile (busyLocation)); if ((tmpFile = fopen (tmpName, "w")) == NULL) { sprintf (stMessage, "cannot create temporary file %s for writing.", tmpName); stLog (stMessage, ST_LOG_ERROR); stAbortThis (TRUE); } if (path[0]) sprintf (stMessage, "%s/%s -> %s", path, af_retattr (aso, AF_ATTBOUND), destName); else sprintf (stMessage, "%s -> %s", af_retattr (aso, AF_ATTBOUND), destName); stLog (stMessage, ST_LOG_MSGERR); if ((contentsBuf = malloc ((unsigned) asoAttrBuf.af_size+1)) == NULL) { stLog ("Out of memory", ST_LOG_ERROR); stAbortThis(TRUE); } nbytes = fread (contentsBuf, sizeof (char), asoAttrBuf.af_size, versionFile); contentsBuf[nbytes] = '\0'; af_close (versionFile); if (expandFlag) atExpandAttrs (aso, contentsBuf, nbytes, tmpFile, 0, AT_EXPAND_FILE); else fwrite(contentsBuf, sizeof (char), nbytes, tmpFile); free (contentsBuf); fclose (tmpFile); oldDate.actime = asoAttrBuf.af_atime; oldDate.modtime = asoAttrBuf.af_mtime; utime (tmpName, &oldDate); unlink (destName); if (link (tmpName, destName) < 0) { sprintf (stMessage, "cannot link %s to %s.", tmpName, destName); stLog (stMessage, ST_LOG_ERROR); stAbortThis (TRUE); } chmod (destName, asoAttrBuf.af_mode & ~0222); stUnRegisterFile (tmpName); unlink (tmpName); } else { sprintf (stMessage, "%s not retrieved", af_retattr (aso, AF_ATTBOUND)); stLog (stMessage, ST_LOG_MSGERR); stThisTransaction.tr_rc += 1; } return; } /* else lockFlag is set */ /* * Before a version is retrieved, set-busy, and locked, the * following preconditions must be fulfilled: * - the retrieve must go to the directory containing the * archive directory. -> current directory * - the retrieved version must not be locked by anybody but * the calling user. * - the current directory must grant write access to the * calling user. * - if some busy-version would be overwritten by the retrieve, * the user is asked if she wants that */ /* * The following checks are based on the permission information * stored in the archive files. It is unclear how * to properly handle vanilla filesystem related inquiries. */ if (af_getkey (path, asoAttrBuf.af_name, asoAttrBuf.af_type, fixFlag ? asoAttrBuf.af_gen : AF_LASTVERS, AF_LASTVERS, &lastAso) == -1) { sprintf (stMessage, "%s", af_errmsg (af_retattr (aso, AF_ATTBOUND))); stLog (stMessage, ST_LOG_ERROR); stAbortThis (TRUE); } /* there is a version */ if ((atUserUid (locker = af_retuserattr (&lastAso, AF_ATTLOCKER)) == geteuid ()) || !atUserValid (locker)) { if (access (destName, W_OK) == 0) { if (stQuietFlag) permit |= forceFlag ? DOIT : DENIED; else if (forceFlag) permit |= DOIT; else { sprintf (stMessage, "Writable %s exists, overwrite it ?", destName); permit |= (stAskConfirm (stMessage, "no")) ? DENIED : DOIT; } } else if (access (busyLocation, W_OK) == 0) { if (access (destName, F_OK) == 0) { if (stQuietFlag) permit |= forceFlag ? DOIT : DENIED; else if (forceFlag) permit |= DOIT; else { sprintf (stMessage, "Write access on %s denied. Overwrite it anyway ?", destName); permit |= (stAskConfirm (stMessage, "no")) ? 
DENIED : DOIT; } } else permit |= DOIT; } else { /* no write access on current dir */ sprintf (stMessage, "Can't create in %s", busyLocation); stLog (stMessage, ST_LOG_ERROR); stAbortThis (TRUE); } if (!atUserValid (locker)) { if (!af_lock (&lastAso, af_afuser (geteuid()))) { sprintf (stMessage, "%s", af_errmsg (af_retattr (aso, AF_ATTBOUND))); stLog (stMessage, ST_LOG_ERROR); stAbortThis (TRUE); } newLock = &lastAso; } if (af_commit () < 0) { stLog ("Cannot commit lock!", ST_LOG_ERROR); } af_transaction (); } else { /* busy version locked by someone else */ permit |= DENIED; sprintf (stMessage, "%s already locked by %s.", destName, atUserName (locker)); stLog (stMessage, ST_LOG_MSGERR); sprintf (stMessage, "%s not restored", af_retattr (aso, AF_ATTBOUND)); stLog (stMessage, ST_LOG_MSGERR); stAbortThis (FALSE); } /* now all the checks are done. set retrieved version busy and * create it in busyLocation. */ if ((permit & DOIT) && (!(permit & DENIED))) { /* * Try to get a description of intended changes. */ if (intent || !(stQuietFlag || forceFlag) || stdinFlag) { atSetComment (&lastAso, AT_COMMENT_INTENT, intent, AT_REUSE | AT_CONFIRM | (stdinFlag ? AT_FROMSTDIN : 0)); } /* * Create the file, containing the retrieved version. * "busyAso" is set up to be the AtFS reference to the file. */ /* setbusy sets just the attributes. data must be moved manually */ if ((versionFile = af_open (aso, "r")) == NULL) { stLog (af_errmsg ("af_open"), ST_LOG_ERROR); stAbortThis (TRUE); } strcpy (tmpName, stTmpFile (busyLocation)); if ((tmpFile = fopen (tmpName, "w")) == NULL) { sprintf (stMessage, "cannot create temporary file %s for writing.", tmpName); stLog (stMessage, ST_LOG_ERROR); stAbortThis (TRUE); } if (path[0]) sprintf (stMessage, "%s/%s -> %s", path, af_retattr (aso, AF_ATTBOUND), destName); else sprintf (stMessage, "%s -> %s", af_retattr (aso, AF_ATTBOUND), destName); stLog (stMessage, ST_LOG_MSGERR); if ((contentsBuf = malloc ((unsigned) asoAttrBuf.af_size+1)) == NULL) { stLog ("Out of memory", ST_LOG_ERROR); stAbortThis(TRUE); } nbytes = fread (contentsBuf, sizeof (char), asoAttrBuf.af_size, versionFile); contentsBuf[nbytes] = '\0'; af_close (versionFile); if (fwrite(contentsBuf, sizeof (char), nbytes, tmpFile) != nbytes) { stLog ("couldn't write busy file.", ST_LOG_ERROR); stAbortThis (TRUE); } free (contentsBuf); fclose (tmpFile); oldDate.actime = asoAttrBuf.af_atime; oldDate.modtime = asoAttrBuf.af_mtime; utime (tmpName, &oldDate); unlink (destName); if (link (tmpName, destName) < 0) { sprintf (stMessage, "cannot link %s to %s.", tmpName, destName); stLog (stMessage, ST_LOG_ERROR); stAbortThis (TRUE); } chmod (destName, asoAttrBuf.af_mode); stThisTransaction.tr_done = TRUE; stUnRegisterFile (tmpName); unlink (tmpName); if (af_getkey (path, asoAttrBuf.af_name, asoAttrBuf.af_type, AF_BUSYVERS, AF_BUSYVERS, &busyAso) == -1) { stLog (af_errmsg ("af_getkey"), ST_LOG_ERROR); stAbortThis (TRUE); } /* * "busyAso" points to the key of a newly created file, which * has been retrieved from the version archive in case no * file was present. */ /* * Register the busyversion appropriately: set busy if * needed, attach (most of) the user-defined attributes * of the original version, attach intent-description. 
*/ if (!fixFlag) { Af_key previous_busy; if (af_setbusy (&busyAso, aso, &previous_busy) == -1) { stLog (af_errmsg ("af_getkey"), ST_LOG_ERROR); stAbortThis (TRUE); } af_dropkey (&previous_busy); } i = 0; while ((attrPtr = asoAttrBuf.af_udattrs[i++])) { if (af_setattr (&busyAso, AF_REPLACE, attrPtr) == -1) af_setattr (&busyAso, AF_ADD, attrPtr); } af_setattr (&busyAso, AF_REMOVE, AT_ATTALIAS); if ((reserveDate = malloc ((unsigned) (strlen ("rtime") + 32)))) { sprintf (reserveDate, "%s=%s", "rtime", af_retattr (&busyAso, AF_ATTCTIME)); if ((af_setattr (&busyAso, AF_REPLACE, reserveDate) == -1) && (af_setattr (&busyAso, AF_ADD, reserveDate) == -1)) { sprintf (stMessage, "Can't set reservation date for %s.", af_retattr (aso, AF_ATTUNIXNAME)); stLog (stMessage, ST_LOG_WARNING); } free (reserveDate); } else { sprintf (stMessage, "Can't set reservation date for %s (no memory).", af_retattr (aso, AF_ATTUNIXNAME)); stLog (stMessage, ST_LOG_WARNING); } af_dropkey (&busyAso); } else { /* denied or not doit */ sprintf (stMessage, "%s not restored", stThisTransaction.tr_fname); stLog (stMessage, ST_LOG_MSGERR); stThisTransaction.tr_rc += 1; } af_dropkey (&lastAso); af_freeattrbuf (&asoAttrBuf); }
utf-8
1
unknown
unknown
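The retrieveAFile() routine above accumulates its decision in the DOIT/DENIED/RECREATE/UNCHANGED bit flags before acting on them; a compact sketch of that accumulate-then-test pattern, with hypothetical boolean predicates standing in for the access() and stAskConfirm() calls:

#include <cstdio>

// Same octal flag values as the record above.
enum { DOIT = 01, DENIED = 02, RECREATE = 04, UNCHANGED = 010 };

// Hypothetical stand-ins for the file-system and confirmation checks.
unsigned decidePermit(bool destExists, bool destWritable, bool dirWritable,
                      bool force, bool confirmOverwrite) {
  unsigned permit = 0;
  if (!destExists) {
    permit |= dirWritable ? DOIT : DENIED;  // may we create?
  } else if (!destWritable) {
    // re-creating a write-protected file needs directory access plus consent
    permit |= (dirWritable && (force || confirmOverwrite)) ? (RECREATE | DOIT)
                                                           : DENIED;
  } else {
    permit |= (force || confirmOverwrite) ? DOIT : 0;
  }
  return permit;
}

int main() {
  unsigned p = decidePermit(true, false, true, false, true);
  // act only when DOIT is set and DENIED is not, as in the record
  std::printf("retrieve: %s\n", ((p & DOIT) && !(p & DENIED)) ? "yes" : "no");
  return 0;
}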
linux-5.16.7/tools/perf/arch/powerpc/util/unwind-libunwind.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2016 Chandan Kumar, IBM Corporation. */ #include <errno.h> #include <libunwind.h> #include <asm/perf_regs.h> #include "../../util/unwind.h" #include "../../util/debug.h" int libunwind__arch_reg_id(int regnum) { switch (regnum) { case UNW_PPC64_R0: return PERF_REG_POWERPC_R0; case UNW_PPC64_R1: return PERF_REG_POWERPC_R1; case UNW_PPC64_R2: return PERF_REG_POWERPC_R2; case UNW_PPC64_R3: return PERF_REG_POWERPC_R3; case UNW_PPC64_R4: return PERF_REG_POWERPC_R4; case UNW_PPC64_R5: return PERF_REG_POWERPC_R5; case UNW_PPC64_R6: return PERF_REG_POWERPC_R6; case UNW_PPC64_R7: return PERF_REG_POWERPC_R7; case UNW_PPC64_R8: return PERF_REG_POWERPC_R8; case UNW_PPC64_R9: return PERF_REG_POWERPC_R9; case UNW_PPC64_R10: return PERF_REG_POWERPC_R10; case UNW_PPC64_R11: return PERF_REG_POWERPC_R11; case UNW_PPC64_R12: return PERF_REG_POWERPC_R12; case UNW_PPC64_R13: return PERF_REG_POWERPC_R13; case UNW_PPC64_R14: return PERF_REG_POWERPC_R14; case UNW_PPC64_R15: return PERF_REG_POWERPC_R15; case UNW_PPC64_R16: return PERF_REG_POWERPC_R16; case UNW_PPC64_R17: return PERF_REG_POWERPC_R17; case UNW_PPC64_R18: return PERF_REG_POWERPC_R18; case UNW_PPC64_R19: return PERF_REG_POWERPC_R19; case UNW_PPC64_R20: return PERF_REG_POWERPC_R20; case UNW_PPC64_R21: return PERF_REG_POWERPC_R21; case UNW_PPC64_R22: return PERF_REG_POWERPC_R22; case UNW_PPC64_R23: return PERF_REG_POWERPC_R23; case UNW_PPC64_R24: return PERF_REG_POWERPC_R24; case UNW_PPC64_R25: return PERF_REG_POWERPC_R25; case UNW_PPC64_R26: return PERF_REG_POWERPC_R26; case UNW_PPC64_R27: return PERF_REG_POWERPC_R27; case UNW_PPC64_R28: return PERF_REG_POWERPC_R28; case UNW_PPC64_R29: return PERF_REG_POWERPC_R29; case UNW_PPC64_R30: return PERF_REG_POWERPC_R30; case UNW_PPC64_R31: return PERF_REG_POWERPC_R31; case UNW_PPC64_LR: return PERF_REG_POWERPC_LINK; case UNW_PPC64_CTR: return PERF_REG_POWERPC_CTR; case UNW_PPC64_XER: return PERF_REG_POWERPC_XER; case UNW_PPC64_NIP: return PERF_REG_POWERPC_NIP; default: pr_err("unwind: invalid reg id %d\n", regnum); return -EINVAL; } return -EINVAL; }
utf-8
1
GPL-2
1991-2012 Linus Torvalds and many others
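Since both register enumerations number the 32 general-purpose registers contiguously, the long switch above could in principle collapse to a bounds-checked offset for the GPR block; a sketch with hypothetical constants (the real UNW_PPC64_* and PERF_REG_POWERPC_* values come from libunwind and kernel headers, so this illustrates the idea rather than the kernel's code):

#include <cstdio>

// Hypothetical stand-ins: in the real headers both enums number R0..R31
// contiguously, which is what makes the offset mapping possible.
enum { UNW_R0 = 0, UNW_R31 = 31 };
enum { PERF_R0 = 0 };

int archRegId(int regnum) {
  if (regnum >= UNW_R0 && regnum <= UNW_R31)
    return PERF_R0 + (regnum - UNW_R0);  // contiguous GPR block
  return -1;  // special registers (LR, CTR, XER, NIP) still need explicit cases
}

int main() {
  std::printf("reg 17 maps to %d\n", archRegId(17));
  return 0;
}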
rtorrent-0.9.8/test/rpc/object_storage_test.h
#include <cppunit/extensions/HelperMacros.h> #include "rpc/object_storage.h" class ObjectStorageTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ObjectStorageTest); CPPUNIT_TEST(test_basics); CPPUNIT_TEST(test_conversions); CPPUNIT_TEST(test_validate_keys); CPPUNIT_TEST(test_access); CPPUNIT_TEST_SUITE_END(); public: void setUp() { } void tearDown() {} void test_basics(); void test_conversions(); void test_validate_keys(); void test_access(); private: rpc::object_storage m_storage; };
utf-8
1
GPL-2+ with OpenSSL exception
© 2005-2011 Jari Sundell <jaris@ifi.uio.no>
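The fixture above only declares its suite; in a stock CppUnit setup it becomes runnable through a registration macro plus a small driver, sketched here (the registration line normally lives in the fixture's .cc file, not the header):

#include <cppunit/extensions/TestFactoryRegistry.h>
#include <cppunit/ui/text/TestRunner.h>

// In object_storage_test.cc: register the suite with the global factory.
// CPPUNIT_TEST_SUITE_REGISTRATION(ObjectStorageTest);

int main() {
  CppUnit::TextUi::TestRunner runner;
  // Pull in every suite registered via CPPUNIT_TEST_SUITE_REGISTRATION.
  runner.addTest(CppUnit::TestFactoryRegistry::getRegistry().makeTest());
  return runner.run() ? 0 : 1;  // run() returns true when all tests pass
}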
krita-5.0.2+dfsg/libs/ui/operations/kis_filter_selection_operation.h
/* * SPDX-FileCopyrightText: 2012 Dmitry Kazakov <dimula73@gmail.com> * SPDX-FileCopyrightText: 2013 Sven Langkamp <sven.langkamp@gmail.com> * * SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef KIS_FILTER_SELECTION_OPERATION_H #define KIS_FILTER_SELECTION_OPERATION_H #include <kritaui_export.h> #include "kis_operation.h" class KisSelectionFilter; class KisViewManager; struct KRITAUI_EXPORT KisFilterSelectionOperation : public KisOperation { KisFilterSelectionOperation(const QString& id) : KisOperation(id) {} void runFilter(KisSelectionFilter* filter, KisViewManager *view, const KisOperationConfiguration &config); }; #endif // KIS_FILTER_SELECTION_OPERATION_H
utf-8
1
GPL-and-LGPL
1982-1989, Donald H. House <x@unknown.com> 1989, Robert Allen <x@unknown.com> 1992, Karl Berry <karl@cs.umb.edu> 1992, Kathryn Hargreaves <letters@cs.umb.edu> 1997-2000, Jens Lautenbacher <jtl@gimp.org> 1997, Eiichi Takamori <taka@ma1.seikyou.ne.jp> 1997, Federico Mena Quintero <federico@nuclecu.unam.mx> 1997, Martin Jones <mjones@kde.org> 1998-1999, Reginald Stadlbauer <reggie@kde.org> 1998-2000, Torben Weis <weis@kde.org> 1998-2001, Marti Maria 1998, 2003, Stefan Taferner <taferner@kde.org> 1998-2007, David Faure <faure@kde.org> 1998, Thomas Tanghus <tanghus@earthling.net> 1999, Cristian Tibirna <ctibirna@kde.org> 1999-2000, Matthias Elter <elter@kde.org> 1999-2000, Michael Koch <koch@kde.org> 1999-2000, Simon Hausmann <hausmann@kde.org> 1999, Carsten Pfeiffer <pfeiffer@kde.org> 1999, Daniel M. Duley <mosfet@kde.org> 2000-2001, John Califf <jcaliff@compuzone.net> 2000-2002, Werner Trobin <trobin@kde.org> 2000, Kurt Granroth <granroth@kde.org> 2000, Nicolas Hadacek <haadcek@kde.org> 2000, S.R.Haque <shaheedhaque@hotmail.com>. 2000, Sven Neumann <sven@gimp.org> 2001-2002, Benoît Vautrin <benoit.vautrin@free.fr> 2001-2004, 2006-2007, Montel Laurent <montel@kde.org> 2001-2008, Rob Buis <buis@kde.org> 2001, 2014, Stuart Dickson <stuartmd@kogmbh.com> 2001, Holger Freyther <freyther@kde.org> 2002-2003, Patrick Julien <freak@codepimps.org> 2002-2004, Nicolas Goutte <goutte@kde.org> 2002, Ellis Whitehead <ellis@kde.org> 2002, Igor Jansen <rm@kde.org> 2002, Joseph Wenninger <jowenn@kde.org> 2002, Lars Siebold <khandha5@gmx.net> 2002, Lennart Kudling <kudling@kde.org> 2002, Tomislav Lukman <tomislav.lukman@ck.t-com.hr> 2003-2019, Boudewijn Rempt <boud@valdyas.org> 2003, Andras Mantia <amantia@kde.org> 2003, Lukas Tinkl <lukas@kde.org> 2003, Thierry Lorthiois <lorthioist@wanadoo.fr> 2004-2005, Max Howell <max.howell@methylblue.com> 2004-2006, 2008, Bart Coppens <kde@bartcoppens.be> 2004-2006, 2010, Ariya Hidayat <ariya@kde.org> 2004-2006, Bulia Byak <buliabyak@users.sf.net> 2004-2006, Josh Andler <scislac@users.sf.net> 2004-2006, Michael Thaler <michael.thaler@physik.tu-muenchen.de> 2004-2006, Seb Ruiz <ruiz@kde.org> 2004-2007, 2009-2010, Adrian Page <adrian@pagenet.plus.com> 2004, 2007, Clarence Dang <dang@kde.org> 2004-2011, Cyrille Berger <cberger@cberger.net> 2004-2014, C.
Boemann <cbo@boemann.dk> 2004-2014, 2016, 2018, Sven Langkamp <sven.langkamp@gmail.com> 2004, Christian Muehlhaeuser <chris@chris.de> 2005-2006, Gábor Lehel <illissius@gmail.com> 2005-2006, Hamish Rodda <rodda@kde.org> 2005-2006, Tim Beaulen <tbscope@gmail.com> 2005-2009, Peter Simonsson <psn@linux.se> 2005-2011, Thomas Zander <zander@kde.org> 2005-2012, Inge Wallin <inge@lysator.liu.se> 2005-2012, Jan Hambrecht <jaham@gmx.net> 2005, Frerich Raabe <raabe@kde.org> 2005, Johannes Schaub <litb_devel@web.de> 2005, Raphael Langerhorst <raphael.langerhorst@kdemail.net> 2005, Tom Albers <tomalbers@kde.nl> 2006, 2008, Brad Hards <bradh@kde.org> 2006-2008, Martin Pfeiffer <hubipete@gmx.net> 2006-2011, Sebastian Sauer <mail@dipe.org> 2006-2013, 2015-2016, Thorsten Zachmann <zachmann@kde.org> 2006-2013, Gilles Caulier <caulier dot gilles at gmail dot com> 2006, Christian Mueller <cmueller@gmx.de> 2006, Frederic Coiffier <fcoiffie@gmail.com> 2006, Gary Cramblitt <garycramblitt@comcast.net> 2007-2008, Emanuele Tamponi <emanuele@valinor.it> 2007-2008, Fredy Yanardi <fyanardi@gmail.com> 2007-2011, Pierre Ducroquet <pinaraf@pinaraf.info> 2007, Aurélien Gâteau <agateau@kde.org> 2007, Dirk Mueller <mueller@kde.org> 2007, Eric Lamarque <eric.lamarque@free.fr> 2007, John Marshall 2007, Marijn Kruisselbrink <mkruisselbrink@kde.org> 2007, Matthias Kretz <kretz@kde.org> 2008-2009, Mark Kretschmann <kretschmann@kde.org> 2008, 2010, Carlos Licea <carlos.licea@kdemail.net> 2008-2012, Pierre Stirnweiss <pstirnweiss@googlemail.com> 2008-2015, Lukáš Tvrdý <lukast.dev@gmail.com> 2008, Benoit Jacob <jacob.benoit.1@gmail.com> 2008, Fela Winkelmolen <fela.kde@gmail.com> 2008, Girish Ramakrishnan <girish@forwardbias.in> 2008, Martin Renold <martinxyz@gmx.ch> 2008, Patrick Spendrin <ps_ml@gmx.de> 2008, Roopesh Chander <roop@forwardbias.in> 2009-2010, Edward Apap <schumifer@hotmail.com> 2009-2010, Johannes Simon <johannes.simon@gmail.com> 2009, 2011-2013, Jean-Nicolas Artaud <jeannicolasartaud@gmail.com> 2009, 2011, Ganesh Paramasivam <ganesh@crystalfab.com> 2009-2011, Nokia Corporation and/or its subsidiary(-ies). 
2009-2013, KO GmbH 2009, Elvis Stansvik <elvstone@gmail.com> 2009, Ilya Portnov 2009, Jos van den Oever <jos@vandenoever.info> 2009, Vera Lukman <shicmap@gmail.com> 2010-2011, 2013-1014, Yue Liu <yue.liu@mail.com> 2010-2011, Geoffry Song <goffrie@gmail.com> 2010, 2012, 2014-2015, Jarosław Staniek <staniek@kde.org> 2010-2012, José Luis Vergara Toloza <pentalis@gmail.com> 2010-2021, Dmitry Kazakov <dimula73@gmail.com> 2010, Adam Celarek <kdedev@xibo.at> 2010, Ajay Pundhir <ajay.pratap@iiitb.net> 2010, Benjamin Port <port.benjamin@gmail.com> 2010, Jean Nicolas Artaud <jean.nicolas.artaud@kogmbh.com> 2010, Jeremy Lugagne <lugagne.jeremy@gmail.com> 2010, Justin Noel <justin@ics.com> 2010, Marc Pegon <pe.marc@free.fr> 2010, Matus Talcik <matus.talcik@gmail.com> 2010, Nandita Suri <suri.nandita@gmail.com> 2010, Ricardo Cabello <hello@mrdoob.com> 2011-2012, Silvio Heinrich <plassy@web.de> 2011-2013, Gopalakrishna Bhat A <gopalakbhat@gmail.com> 2011-2013, Mojtaba Shahi Senobari <mojtaba.shahi3000@gmail.com> 2011-2014, Arjen Hiemstra <ahiemstra@heimr.nl> 2011, Brijesh Patel <brijesh3105@gmail.com> 2011, Hanna Skott <hannaetscott@gmail.com> 2011, Matus Hanzes <matus.hanzes@ixonos.com> 2011, Pavol Korinek <pavol.korinek@ixonos.com> 2011, Robert Mathias Marmorstein <robert@narnia.homeunix.com> 2011, Siddharth Sharma <siddharth.kde@gmail.com> 2011, Smit Patel <smitpatel24@gmail.com> 2011, Srikanth Tiyyagura <srikanth.tulasiram@gmail.com> 2011, Torio Mlshi <mlshi@lavabit.com> 2012-2013, 2015, Friedrich W. H. Kossebau <kossebau@kde.org> 2012-2013, Daniel Nicoletti <dantti12@gmail.com> 2012, 2014, Dan Leinir Turthra Jensen <admin@leinir.dk> 2012, <hanna.et.scott@gmail.com> 2013-2015, Elle Stone 2013, Aman Madaan <madaan.amanmadaan@gmail.com> 2013, David Revoy <info@davidrevoy.com> 2013, Digia Plc and/or its subsidiary(-ies). 2013, Juan Palacios <jpalaciosdev@gmail.com> 2013, Luke De Mouy <lukewolf101010devel@gmail.com> 2013, Sahil Nagpal <nagpal.sahil01@gmail.com> 2013, Sascha Suelzer <s.suelzer@gmail.com> 2013, Somsubhra Bairi <somsubhra.bairi@gmail.com> 2014-2015, Denis Kuplaykov <dener.kup@gmail.com> 2014-2015, Denis Kuplyakov <dener.kup@gmail.com> 2014-2017, 2019-2020, Wolthera van Hövell tot Westerflier <griffinvalley@gmail.com> 2014, Alexander Potashev <aspotashev@gmail.com> 2014, Manuel Riecke <spell1337@gmail.com> 2014, Mohit Goyal <mohit.bits2011@gmail.com> 2014, Nicholas Guttenberg <ngutten@gmail.com> 2014, Timothée Giet <animtim@gmail.com> 2014, Victor Lafon <metabolic.ewilan@hotmail.fr> 2015-2016, 2018, Jouni Pentikäinen <joupent@gmail.com> 2015-2016, Michael Abrahams <miabraha@gmail.com> 2015, Moritz Molch <kde@moritzmolch.de> 2015, Soma Schliszka <soma.schliszka@gmail.com> 2015, Stefano Bonicatti <smjert@gmail.com> 2015-2016, The Qt Company Ltd. 2016-2017, 2021, Alvin Wong <alvinhochun@gmail.com> <alvin@alvinhc.com> 2016-2017, Eugene Ingerman <geneing@gmail.com> 2016, Julian Thijssen <julianthijssen@gmail.com> 2016, Kapustin Alexey <akapust1n@yandex.ru> 2016-2017, 2020, L. E. 
Segovia <amy@amyspark.me> 2016, Laszlo Fazekas <mneko@freemail.hu> 2016, Laurent Valentin Jospin <laurent.valentin@famillejospin.ch> 2016, Miroslav Talasek <miroslav.talasek@seznam.cz> 2016, Nishant Rodrigues <nishantjr@gmail.com> 2016, Spencer Brown <sbrown655@gmail.com> 2017, Bernhard Liebl <poke1024@gmx.de> 2017, Eliakin Costa <eliakim170@gmail.com> 2017, Nikita Smirnov <pakrentos@gmail.com> 2018, Andrey Kamakin <a.kamakin@icloud.com> 2018-2020, Anna Medonosova <anna.medonosova@gmail.com> 2018-2020, Eoin O'Neill <eoinoneill1991@gmail.com> 2018-2019, Emmet O'Neill <emmetoneill.pdx@gmail.com> 2018, 2020, Ivan Santa Maria <ghevan@gmail.com> 2018, Mehmet Salih Çalışkan <msalihcaliskan@gmail.com> 2018, Michael Zhou <simerixh@gmail.com> 2018, 2020, Scott Petrovic <scottpetrovic@gmail.com> 2018, Victor Wåhlström <victor.wahlstrom@initiali.se> 2019, Aaron Boxer <boxerab@gmail.com> 2019-2020, Agata Cacko <cacko.azh@gmail.com> 2019, Carl Olsson <carl.olsson@gmail.com> 2019, Dmitrii Utkin <loentar@gmail.com> 2019, Grum999 2019, Kuntal Majumder <hellozee@disroot.org> 2019, Miguel Lopez <reptillia39@live.com> 2019, Rebecca Breu <rebecca@rbreu.de> 2019-2020, Sharaf Zaman <sharafzaz121@gmail.com> 2019, Tusooa Zhu <tusooa@vista.aero> 2020, Deif Lou <ginoba@gmail.com> 2020, Mathias Wein <lynx.mw+kde@gmail.com> 2020, Peter Schatz <voronwe13@gmail.com>
macaulay2-1.19.1+ds/M2/Macaulay2/e/unit-tests/RingZZpTest.cpp
// Copyright 2013 Michael E. Stillman #include "RingTest.hpp" #include "ZZp.hpp" template <> ring_elem getElement<Z_mod>(const Z_mod& R, int index) { ring_elem a = getElement<RingZZ>(*globalZZ, index); return R.from_int(a.get_mpz()); } TEST(RingZZmod32003, fromStream) { std::istringstream i("+1234 +345 -235*a"); Z_mod* R = Z_mod::create(32003); ring_elem a; while (fromStream(i, *R, a)) { buffer o; R->elem_text_out(o, a); std::cout << o.str() << " peek: " << "." << static_cast<char>(i.peek()) << "." << std::endl; } } /////////////////////////////////////////////// TEST(RingZZmod101, create) { Ring* R = Z_mod::create(101); EXPECT_TRUE(R != 0); EXPECT_TRUE(dynamic_cast<const Z_mod*>(R) != 0); EXPECT_EQ(R->coefficient_type(), Ring::COEFF_BASIC); EXPECT_FALSE(R->is_ZZ()); EXPECT_EQ(ringName(*R), "ZZ/101"); } TEST(RingZZmod101, ones) { Z_mod* R = Z_mod::create(101); EXPECT_TRUE(R->is_equal(R->one(), R->from_long(1))); EXPECT_TRUE(R->is_equal(R->minus_one(), R->from_long(-1))); EXPECT_TRUE(R->is_equal(R->zero(), R->from_long(0))); EXPECT_TRUE(R->is_zero(R->from_long(0))); } TEST(RingZZmod101, negate) { Z_mod* R = Z_mod::create(101); testRingNegate(R, ntrials); } TEST(RingZZmod101, add) { Z_mod* R = Z_mod::create(101); testRingAdd(R, ntrials); } TEST(RingZZmod101, subtract) { Z_mod* R = Z_mod::create(101); testRingSubtract(R, ntrials); } TEST(RingZZmod101, multDivide) { Z_mod* R = Z_mod::create(101); testRingDivide(R, ntrials); } TEST(RingZZmod101, axioms) { Z_mod* R = Z_mod::create(101); testRingAxioms(R, ntrials); } TEST(RingZZmod101, power) { Z_mod* R = Z_mod::create(101); testRingPower(R, ntrials); } TEST(RingZZmod101, syzygy) { Z_mod* R = Z_mod::create(101); testRingSyzygy(R, ntrials); } ////////////////////////////////////////////////////////// TEST(RingZZmod2, create) { Ring* R = Z_mod::create(2); EXPECT_TRUE(R != 0); EXPECT_TRUE(dynamic_cast<const Z_mod*>(R) != 0); EXPECT_EQ(R->coefficient_type(), Ring::COEFF_BASIC); EXPECT_FALSE(R->is_ZZ()); EXPECT_EQ(ringName(*R), "ZZ/2"); } TEST(RingZZmod2, ones) { Z_mod* R = Z_mod::create(2); EXPECT_TRUE(R->is_equal(R->one(), R->from_long(1))); EXPECT_TRUE(R->is_equal(R->minus_one(), R->from_long(-1))); EXPECT_TRUE(R->is_equal(R->zero(), R->from_long(0))); EXPECT_TRUE(R->is_zero(R->from_long(0))); } TEST(RingZZmod2, negate) { Z_mod* R = Z_mod::create(2); testRingNegate(R, ntrials); } TEST(RingZZmod2, add) { Z_mod* R = Z_mod::create(2); testRingAdd(R, ntrials); } TEST(RingZZmod2, subtract) { Z_mod* R = Z_mod::create(2); testRingSubtract(R, ntrials); } TEST(RingZZmod2, multDivide) { Z_mod* R = Z_mod::create(2); testRingDivide(R, ntrials); } TEST(RingZZmod2, axioms) { Z_mod* R = Z_mod::create(2); testRingAxioms(R, ntrials); } TEST(RingZZmod2, power) { Z_mod* R = Z_mod::create(2); testRingPower(R, ntrials); } TEST(RingZZmod2, syzygy) { Z_mod* R = Z_mod::create(2); testRingSyzygy(R, ntrials); } // Local Variables: // compile-command: "make -C $M2BUILDDIR/Macaulay2/e/unit-tests check " // indent-tabs-mode: nil // End:
utf-8
1
GPL-2+
1993-2019 Daniel R. Grayson <dan@math.uiuc.edu> 1993-2020 Michael E. Stillman <mike@math.cornell.edu>
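The axiom tests above exercise Z/p through the Z_mod interface; the same properties can be checked directly on machine integers, as in this self-contained sketch that verifies additive inverses and, via Fermat's little theorem, multiplicative inverses for the primes the record uses (plain asserts stand in for the gtest harness):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Modular exponentiation by repeated squaring: a^e mod p.
uint64_t powmod(uint64_t a, uint64_t e, uint64_t p) {
  uint64_t r = 1 % p;
  for (a %= p; e; e >>= 1, a = a * a % p)
    if (e & 1) r = r * a % p;
  return r;
}

void testAxioms(uint64_t p) {
  for (uint64_t a = 0; a < p; ++a) {
    assert((a + (p - a) % p) % p == 0);  // additive inverse exists
    if (a != 0)                          // a^(p-2) is 1/a mod p when p is prime
      assert(a * powmod(a, p - 2, p) % p == 1);
  }
}

int main() {
  testAxioms(2);
  testAxioms(101);
  std::puts("ring axioms hold for p = 2 and p = 101");
  return 0;
}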
scummvm-2.5.1+dfsg/engines/cruise/sound.cpp
/* ScummVM - Graphic Adventure Engine * * ScummVM is the legal property of its developers, whose names * are too numerous to list here. Please refer to the COPYRIGHT * file distributed with this source distribution. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */ #include "common/endian.h" #include "common/system.h" #include "common/textconsole.h" #include "cruise/cruise.h" #include "cruise/cruise_main.h" #include "cruise/sound.h" #include "cruise/volume.h" #include "audio/fmopl.h" namespace Audio { class Mixer; } namespace Cruise { class PCSoundDriver { public: typedef void (*UpdateCallback)(void *); PCSoundDriver() : _upCb(nullptr), _upRef(nullptr), _musicVolume(0), _sfxVolume(0) {} virtual ~PCSoundDriver() {} virtual void setupChannel(int channel, const byte *data, int instrument, int volume) = 0; virtual void setChannelFrequency(int channel, int frequency) = 0; virtual void stopChannel(int channel) = 0; virtual void playSample(const byte *data, int size, int channel, int volume) = 0; virtual void stopAll() = 0; virtual const char *getInstrumentExtension() const { return ""; } virtual void syncSounds(); void setUpdateCallback(UpdateCallback upCb, void *ref); void resetChannel(int channel); void findNote(int freq, int *note, int *oct) const; protected: UpdateCallback _upCb; void *_upRef; uint8 _musicVolume; uint8 _sfxVolume; static const int _noteTable[]; static const int _noteTableCount; }; const int PCSoundDriver::_noteTable[] = { 0xEEE, 0xE17, 0xD4D, 0xC8C, 0xBD9, 0xB2F, 0xA8E, 0x9F7, 0x967, 0x8E0, 0x861, 0x7E8, 0x777, 0x70B, 0x6A6, 0x647, 0x5EC, 0x597, 0x547, 0x4FB, 0x4B3, 0x470, 0x430, 0x3F4, 0x3BB, 0x385, 0x353, 0x323, 0x2F6, 0x2CB, 0x2A3, 0x27D, 0x259, 0x238, 0x218, 0x1FA, 0x1DD, 0x1C2, 0x1A9, 0x191, 0x17B, 0x165, 0x151, 0x13E, 0x12C, 0x11C, 0x10C, 0x0FD, 0x0EE, 0x0E1, 0x0D4, 0x0C8, 0x0BD, 0x0B2, 0x0A8, 0x09F, 0x096, 0x08E, 0x086, 0x07E, 0x077, 0x070, 0x06A, 0x064, 0x05E, 0x059, 0x054, 0x04F, 0x04B, 0x047, 0x043, 0x03F, 0x03B, 0x038, 0x035, 0x032, 0x02F, 0x02C, 0x02A, 0x027, 0x025, 0x023, 0x021, 0x01F, 0x01D, 0x01C, 0x01A, 0x019, 0x017, 0x016, 0x015, 0x013, 0x012, 0x011, 0x010, 0x00F }; const int PCSoundDriver::_noteTableCount = ARRAYSIZE(_noteTable); struct AdLibRegisterSoundInstrument { uint8 vibrato; uint8 attackDecay; uint8 sustainRelease; uint8 feedbackStrength; uint8 keyScaling; uint8 outputLevel; uint8 freqMod; }; struct AdLibSoundInstrument { byte mode; byte channel; AdLibRegisterSoundInstrument regMod; AdLibRegisterSoundInstrument regCar; byte waveSelectMod; byte waveSelectCar; byte amDepth; }; struct VolumeEntry { int original; int adjusted; }; class AdLibSoundDriver : public PCSoundDriver { public: AdLibSoundDriver(Audio::Mixer *mixer); ~AdLibSoundDriver() override; // PCSoundDriver interface void setupChannel(int channel, const byte *data, int instrument, int volume) override; void stopChannel(int channel) override; 
void stopAll() override; void initCard(); void onTimer(); void setupInstrument(const byte *data, int channel); void setupInstrument(const AdLibSoundInstrument *ins, int channel); void loadRegisterInstrument(const byte *data, AdLibRegisterSoundInstrument *reg); virtual void loadInstrument(const byte *data, AdLibSoundInstrument *asi) = 0; void syncSounds() override; void adjustVolume(int channel, int volume); protected: OPL::OPL *_opl; Audio::Mixer *_mixer; byte _vibrato; VolumeEntry _channelsVolumeTable[5]; AdLibSoundInstrument _instrumentsTable[5]; static const int _freqTable[]; static const int _freqTableCount; static const int _operatorsTable[]; static const int _operatorsTableCount; static const int _voiceOperatorsTable[]; static const int _voiceOperatorsTableCount; }; const int AdLibSoundDriver::_freqTable[] = { 0x157, 0x16C, 0x181, 0x198, 0x1B1, 0x1CB, 0x1E6, 0x203, 0x222, 0x243, 0x266, 0x28A }; const int AdLibSoundDriver::_freqTableCount = ARRAYSIZE(_freqTable); const int AdLibSoundDriver::_operatorsTable[] = { 0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20, 21 }; const int AdLibSoundDriver::_operatorsTableCount = ARRAYSIZE(_operatorsTable); const int AdLibSoundDriver::_voiceOperatorsTable[] = { 0, 3, 1, 4, 2, 5, 6, 9, 7, 10, 8, 11, 12, 15, 16, 16, 14, 14, 17, 17, 13, 13 }; const int AdLibSoundDriver::_voiceOperatorsTableCount = ARRAYSIZE(_voiceOperatorsTable); class AdLibSoundDriverADL : public AdLibSoundDriver { public: AdLibSoundDriverADL(Audio::Mixer *mixer) : AdLibSoundDriver(mixer) {} const char *getInstrumentExtension() const override { return ".ADL"; } void loadInstrument(const byte *data, AdLibSoundInstrument *asi) override; void setChannelFrequency(int channel, int frequency) override; void playSample(const byte *data, int size, int channel, int volume) override; }; class PCSoundFxPlayer { private: enum { NUM_INSTRUMENTS = 15, NUM_CHANNELS = 4 }; void update(); void handleEvents(); void handlePattern(int channel, const byte *patternData); char _musicName[33]; bool _playing; bool _songPlayed; int _currentPos; int _currentOrder; int _numOrders; int _eventsDelay; bool _looping; int _fadeOutCounter; int _updateTicksCounter; int _instrumentsChannelTable[NUM_CHANNELS]; byte *_sfxData; byte *_instrumentsData[NUM_INSTRUMENTS]; PCSoundDriver *_driver; public: PCSoundFxPlayer(PCSoundDriver *driver); ~PCSoundFxPlayer(); bool load(const char *song); void play(); void stop(); void unload(); void fadeOut(); void doSync(Common::Serializer &s); static void updateCallback(void *ref); bool songLoaded() const { return _sfxData != NULL; } bool songPlayed() const { return _songPlayed; } bool playing() const { return _playing; } uint8 numOrders() const { assert(_sfxData); return _sfxData[470]; } void setNumOrders(uint8 v) { assert(_sfxData); _sfxData[470] = v; } void setPattern(int offset, uint8 value) { assert(_sfxData); _sfxData[472 + offset] = value; } const char *musicName() { return _musicName; } // Note: Original game never actually uses looping variable. 
Songs are hardcoded to loop bool looping() const { return _looping; } void setLooping(bool v) { _looping = v; } }; byte *readBundleSoundFile(const char *name) { // Load the correct file int fileIdx = findFileInDisks(name); if (fileIdx < 0) return NULL; int unpackedSize = volumePtrToFileDescriptor[fileIdx].extSize + 2; byte *data = (byte *)MemAlloc(unpackedSize); assert(data); if (volumePtrToFileDescriptor[fileIdx].size + 2 != unpackedSize) { uint8 *packedBuffer = (uint8 *)mallocAndZero(volumePtrToFileDescriptor[fileIdx].size + 2); loadPackedFileToMem(fileIdx, packedBuffer); //uint32 realUnpackedSize = READ_BE_UINT32(packedBuffer + volumePtrToFileDescriptor[fileIdx].size - 4); delphineUnpack(data, packedBuffer, volumePtrToFileDescriptor[fileIdx].size); MemFree(packedBuffer); } else { loadPackedFileToMem(fileIdx, data); } return data; } void PCSoundDriver::setUpdateCallback(UpdateCallback upCb, void *ref) { _upCb = upCb; _upRef = ref; } void PCSoundDriver::findNote(int freq, int *note, int *oct) const { *note = _noteTableCount - 1; for (int i = 0; i < _noteTableCount; ++i) { if (_noteTable[i] <= freq) { *note = i; break; } } *oct = *note / 12; *note %= 12; } void PCSoundDriver::resetChannel(int channel) { stopChannel(channel); stopAll(); } void PCSoundDriver::syncSounds() { bool mute = false; if (ConfMan.hasKey("mute")) mute = ConfMan.getBool("mute"); bool music_mute = mute; bool sfx_mute = mute; if (!mute) { music_mute = ConfMan.getBool("music_mute"); sfx_mute = ConfMan.getBool("sfx_mute"); } // Get the new music and sfx volumes _musicVolume = music_mute ? 0 : MIN(255, ConfMan.getInt("music_volume")); _sfxVolume = sfx_mute ? 0 : MIN(255, ConfMan.getInt("sfx_volume")); } AdLibSoundDriver::AdLibSoundDriver(Audio::Mixer *mixer) : _mixer(mixer) { _opl = OPL::Config::create(); if (!_opl || !_opl->init()) error("Failed to create OPL"); for (int i = 0; i < 5; ++i) { _channelsVolumeTable[i].original = 0; _channelsVolumeTable[i].adjusted = 0; } memset(_instrumentsTable, 0, sizeof(_instrumentsTable)); initCard(); _musicVolume = ConfMan.getBool("music_mute") ? 0 : MIN(255, ConfMan.getInt("music_volume")); _sfxVolume = ConfMan.getBool("sfx_mute") ? 0 : MIN(255, ConfMan.getInt("sfx_volume")); _opl->start(new Common::Functor0Mem<void, AdLibSoundDriver>(this, &AdLibSoundDriver::onTimer), 50); } AdLibSoundDriver::~AdLibSoundDriver() { delete _opl; } void AdLibSoundDriver::syncSounds() { PCSoundDriver::syncSounds(); // Force all instruments to reload on the next playing point for (int i = 0; i < 5; ++i) { adjustVolume(i, _channelsVolumeTable[i].original); AdLibSoundInstrument *ins = &_instrumentsTable[i]; setupInstrument(ins, i); } } void AdLibSoundDriver::adjustVolume(int channel, int volume) { _channelsVolumeTable[channel].original = volume; volume = CLIP(volume, 0, 80); volume += volume / 4; // The higher possible value for volume is 100 int volAdjust = (channel == 4) ? 
_sfxVolume : _musicVolume; volume = (volume * volAdjust) / 128; if (volume > 127) volume = 127; _channelsVolumeTable[channel].adjusted = volume; } void AdLibSoundDriver::setupChannel(int channel, const byte *data, int instrument, int volume) { assert(channel < 5); if (data) { adjustVolume(channel, volume); setupInstrument(data, channel); } } void AdLibSoundDriver::stopChannel(int channel) { assert(channel < 5); AdLibSoundInstrument *ins = &_instrumentsTable[channel]; if (ins->mode != 0 && ins->channel == 6) { channel = 6; } if (ins->mode == 0 || channel == 6) { _opl->writeReg(0xB0 | channel, 0); } if (ins->mode != 0) { _vibrato &= ~(1 << (10 - ins->channel)); _opl->writeReg(0xBD, _vibrato); } } void AdLibSoundDriver::stopAll() { for (int i = 0; i < 18; ++i) _opl->writeReg(0x40 | _operatorsTable[i], 63); for (int i = 0; i < 9; ++i) _opl->writeReg(0xB0 | i, 0); _opl->writeReg(0xBD, 0); } void AdLibSoundDriver::initCard() { _vibrato = 0x20; _opl->writeReg(0xBD, _vibrato); _opl->writeReg(0x08, 0x40); static const int oplRegs[] = { 0x40, 0x60, 0x80, 0x20, 0xE0 }; for (int i = 0; i < 9; ++i) { _opl->writeReg(0xB0 | i, 0); } for (int i = 0; i < 9; ++i) { _opl->writeReg(0xC0 | i, 0); } for (int j = 0; j < 5; j++) { for (int i = 0; i < 18; ++i) { _opl->writeReg(oplRegs[j] | _operatorsTable[i], 0); } } _opl->writeReg(1, 0x20); _opl->writeReg(1, 0); } void AdLibSoundDriver::onTimer() { if (_upCb) { (*_upCb)(_upRef); } } void AdLibSoundDriver::setupInstrument(const byte *data, int channel) { assert(channel < 5); AdLibSoundInstrument *ins = &_instrumentsTable[channel]; loadInstrument(data, ins); setupInstrument(ins, channel); } void AdLibSoundDriver::setupInstrument(const AdLibSoundInstrument *ins, int channel) { int mod, car, tmp; const AdLibRegisterSoundInstrument *reg; if (ins->mode != 0) { mod = _operatorsTable[_voiceOperatorsTable[2 * ins->channel + 0]]; car = _operatorsTable[_voiceOperatorsTable[2 * ins->channel + 1]]; } else { mod = _operatorsTable[_voiceOperatorsTable[2 * channel + 0]]; car = _operatorsTable[_voiceOperatorsTable[2 * channel + 1]]; } if (ins->mode == 0 || ins->channel == 6) { reg = &ins->regMod; _opl->writeReg(0x20 | mod, reg->vibrato); if (reg->freqMod) { tmp = reg->outputLevel & 0x3F; } else { tmp = (63 - (reg->outputLevel & 0x3F)) * _channelsVolumeTable[channel].adjusted; tmp = 63 - (2 * tmp + 127) / (2 * 127); } _opl->writeReg(0x40 | mod, tmp | (reg->keyScaling << 6)); _opl->writeReg(0x60 | mod, reg->attackDecay); _opl->writeReg(0x80 | mod, reg->sustainRelease); if (ins->mode != 0) { _opl->writeReg(0xC0 | ins->channel, reg->feedbackStrength); } else { _opl->writeReg(0xC0 | channel, reg->feedbackStrength); } _opl->writeReg(0xE0 | mod, ins->waveSelectMod); } reg = &ins->regCar; _opl->writeReg(0x20 | car, reg->vibrato); tmp = (63 - (reg->outputLevel & 0x3F)) * _channelsVolumeTable[channel].adjusted; tmp = 63 - (2 * tmp + 127) / (2 * 127); _opl->writeReg(0x40 | car, tmp | (reg->keyScaling << 6)); _opl->writeReg(0x60 | car, reg->attackDecay); _opl->writeReg(0x80 | car, reg->sustainRelease); _opl->writeReg(0xE0 | car, ins->waveSelectCar); } void AdLibSoundDriver::loadRegisterInstrument(const byte *data, AdLibRegisterSoundInstrument *reg) { reg->vibrato = 0; if (READ_LE_UINT16(data + 18)) { // amplitude vibrato reg->vibrato |= 0x80; } if (READ_LE_UINT16(data + 20)) { // frequency vibrato reg->vibrato |= 0x40; } if (READ_LE_UINT16(data + 10)) { // sustaining sound reg->vibrato |= 0x20; } if (READ_LE_UINT16(data + 22)) { // envelope scaling reg->vibrato |= 0x10; } reg->vibrato |= 
READ_LE_UINT16(data + 2) & 0xF; // frequency multiplier reg->attackDecay = READ_LE_UINT16(data + 6) << 4; // attack rate reg->attackDecay |= READ_LE_UINT16(data + 12) & 0xF; // decay rate reg->sustainRelease = READ_LE_UINT16(data + 8) << 4; // sustain level reg->sustainRelease |= READ_LE_UINT16(data + 14) & 0xF; // release rate reg->feedbackStrength = READ_LE_UINT16(data + 4) << 1; // feedback if (READ_LE_UINT16(data + 24) == 0) { // frequency modulation reg->feedbackStrength |= 1; } reg->keyScaling = READ_LE_UINT16(data); reg->outputLevel = READ_LE_UINT16(data + 16); reg->freqMod = READ_LE_UINT16(data + 24); } void AdLibSoundDriverADL::loadInstrument(const byte *data, AdLibSoundInstrument *asi) { asi->mode = *data++; asi->channel = *data++; asi->waveSelectMod = *data++ & 3; asi->waveSelectCar = *data++ & 3; asi->amDepth = *data++; ++data; loadRegisterInstrument(data, &asi->regMod); data += 26; loadRegisterInstrument(data, &asi->regCar); data += 26; } void AdLibSoundDriverADL::setChannelFrequency(int channel, int frequency) { assert(channel < 5); AdLibSoundInstrument *ins = &_instrumentsTable[channel]; if (ins->mode != 0) { channel = ins->channel; if (channel == 9) { channel = 8; } else if (channel == 10) { channel = 7; } } int freq, note, oct; findNote(frequency, &note, &oct); note += oct * 12; if (ins->amDepth) { note = ins->amDepth; } if (note < 0) { note = 0; } freq = _freqTable[note % 12]; _opl->writeReg(0xA0 | channel, freq); freq = ((note / 12) << 2) | ((freq & 0x300) >> 8); if (ins->mode == 0) { freq |= 0x20; } _opl->writeReg(0xB0 | channel, freq); if (ins->mode != 0) { _vibrato |= 1 << (10 - channel); _opl->writeReg(0xBD, _vibrato); } } void AdLibSoundDriverADL::playSample(const byte *data, int size, int channel, int volume) { assert(channel < 5); adjustVolume(channel, 127); setupInstrument(data, channel); AdLibSoundInstrument *ins = &_instrumentsTable[channel]; if (ins->mode != 0 && ins->channel == 6) { _opl->writeReg(0xB0 | channel, 0); } if (ins->mode != 0) { _vibrato &= ~(1 << (10 - ins->channel)); _opl->writeReg(0xBD, _vibrato); } if (ins->mode != 0) { channel = ins->channel; if (channel == 9) { channel = 8; } else if (channel == 10) { channel = 7; } } uint16 note = 48; if (ins->amDepth) { note = ins->amDepth; } int freq = _freqTable[note % 12]; _opl->writeReg(0xA0 | channel, freq); freq = ((note / 12) << 2) | ((freq & 0x300) >> 8); if (ins->mode == 0) { freq |= 0x20; } _opl->writeReg(0xB0 | channel, freq); if (ins->mode != 0) { _vibrato |= 1 << (10 - channel); _opl->writeReg(0xBD, _vibrato); } } PCSoundFxPlayer::PCSoundFxPlayer(PCSoundDriver *driver) : _playing(false), _songPlayed(false), _driver(driver) { memset(_instrumentsData, 0, sizeof(_instrumentsData)); _sfxData = NULL; _fadeOutCounter = 0; _driver->setUpdateCallback(updateCallback, this); _currentPos = 0; _currentOrder = 0; _numOrders = 0; _eventsDelay = 0; _looping = false; _updateTicksCounter = 0; } PCSoundFxPlayer::~PCSoundFxPlayer() { _driver->setUpdateCallback(NULL, NULL); stop(); } bool PCSoundFxPlayer::load(const char *song) { debug(9, "PCSoundFxPlayer::load('%s')", song); /* stop (w/ fade out) the previous song */ while (_fadeOutCounter != 0 && _fadeOutCounter < 100) { g_system->delayMillis(50); } _fadeOutCounter = 0; if (_playing) { stop(); } Common::strlcpy(_musicName, song, sizeof(_musicName)); _songPlayed = false; _looping = false; _sfxData = readBundleSoundFile(song); if (!_sfxData) { warning("Unable to load soundfx module '%s'", song); return 0; } for (int i = 0; i < NUM_INSTRUMENTS; ++i) { 
_instrumentsData[i] = NULL; char instrument[64]; memset(instrument, 0, 64); // Clear the data first memcpy(instrument, _sfxData + 20 + i * 30, 12); instrument[63] = '\0'; if (strlen(instrument) != 0) { char *dot = strrchr(instrument, '.'); if (dot) { *dot = '\0'; } Common::strlcat(instrument, _driver->getInstrumentExtension(), sizeof(instrument)); _instrumentsData[i] = readBundleSoundFile(instrument); if (!_instrumentsData[i]) { warning("Unable to load soundfx instrument '%s'", instrument); } } } return 1; } void PCSoundFxPlayer::play() { debug(9, "PCSoundFxPlayer::play()"); if (_sfxData) { for (int i = 0; i < NUM_CHANNELS; ++i) { _instrumentsChannelTable[i] = -1; } _currentPos = 0; _currentOrder = 0; _numOrders = _sfxData[470]; _eventsDelay = (244 - _sfxData[471]) * 100 / 1060; _updateTicksCounter = 0; _playing = true; } } void PCSoundFxPlayer::stop() { if (_playing || _fadeOutCounter != 0) { _fadeOutCounter = 0; _playing = false; for (int i = 0; i < NUM_CHANNELS; ++i) { _driver->stopChannel(i); } _driver->stopAll(); } unload(); } void PCSoundFxPlayer::fadeOut() { if (_playing) { _fadeOutCounter = 1; _playing = false; } } void PCSoundFxPlayer::updateCallback(void *ref) { ((PCSoundFxPlayer *)ref)->update(); } void PCSoundFxPlayer::update() { if (_playing || (_fadeOutCounter != 0 && _fadeOutCounter < 100)) { ++_updateTicksCounter; if (_updateTicksCounter > _eventsDelay) { handleEvents(); _updateTicksCounter = 0; } } } void PCSoundFxPlayer::handleEvents() { const byte *patternData = _sfxData + 600 + 1800; const byte *orderTable = _sfxData + 472; uint16 patternNum = orderTable[_currentOrder] * 1024; for (int i = 0; i < 4; ++i) { handlePattern(i, patternData + patternNum + _currentPos); patternData += 4; } if (_fadeOutCounter != 0 && _fadeOutCounter < 100) { _fadeOutCounter += 2; } if (_fadeOutCounter >= 100) { stop(); return; } _currentPos += 16; if (_currentPos >= 1024) { _currentPos = 0; ++_currentOrder; if (_currentOrder == _numOrders) { _currentOrder = 0; } } debug(7, "_currentOrder=%d/%d _currentPos=%d", _currentOrder, _numOrders, _currentPos); } void PCSoundFxPlayer::handlePattern(int channel, const byte *patternData) { int instrument = patternData[2] >> 4; if (instrument != 0) { --instrument; if (_instrumentsChannelTable[channel] != instrument || _fadeOutCounter != 0) { _instrumentsChannelTable[channel] = instrument; const int volume = _sfxData[instrument] - _fadeOutCounter; _driver->setupChannel(channel, _instrumentsData[instrument], instrument, volume); } } int16 freq = (int16)READ_BE_UINT16(patternData); if (freq > 0) { _driver->stopChannel(channel); _driver->setChannelFrequency(channel, freq); } } void PCSoundFxPlayer::unload() { for (int i = 0; i < NUM_INSTRUMENTS; ++i) { MemFree(_instrumentsData[i]); _instrumentsData[i] = NULL; } MemFree(_sfxData); _sfxData = NULL; _songPlayed = true; } void PCSoundFxPlayer::doSync(Common::Serializer &s) { s.syncBytes((byte *)_musicName, 33); uint16 v = (uint16)songLoaded(); s.syncAsSint16LE(v); if (s.isLoading() && v) { load(_musicName); for (int i = 0; i < NUM_CHANNELS; ++i) { _instrumentsChannelTable[i] = -1; } _numOrders = _sfxData[470]; _eventsDelay = (244 - _sfxData[471]) * 100 / 1060; _updateTicksCounter = 0; } s.syncAsSint16LE(_songPlayed); s.syncAsSint16LE(_looping); s.syncAsSint16LE(_currentPos); s.syncAsSint16LE(_currentOrder); s.syncAsSint16LE(_playing); } PCSound::PCSound(Audio::Mixer *mixer, CruiseEngine *vm) { _vm = vm; _mixer = mixer; _soundDriver = new AdLibSoundDriverADL(_mixer); _player = new PCSoundFxPlayer(_soundDriver); 
_genVolume = 0; } PCSound::~PCSound() { delete _player; delete _soundDriver; } void PCSound::loadMusic(const char *name) { debugC(5, kCruiseDebugSound, "PCSound::loadMusic('%s')", name); _player->load(name); } void PCSound::playMusic() { debugC(5, kCruiseDebugSound, "PCSound::playMusic()"); _player->play(); } void PCSound::stopMusic() { debugC(5, kCruiseDebugSound, "PCSound::stopMusic()"); _player->stop(); } void PCSound::removeMusic() { debugC(5, kCruiseDebugSound, "PCSound::removeMusic()"); _player->unload(); } void PCSound::fadeOutMusic() { debugC(5, kCruiseDebugSound, "PCSound::fadeOutMusic()"); _player->fadeOut(); } void PCSound::playSound(const uint8 *data, int size, int volume) { debugC(5, kCruiseDebugSound, "PCSound::playSound() channel %d size %d", 4, size); _soundDriver->playSample(data, size, 4, volume); } void PCSound::stopSound(int channel) { debugC(5, kCruiseDebugSound, "PCSound::stopSound() channel %d", channel); _soundDriver->resetChannel(channel); } void PCSound::stopChannel(int channel) { debugC(5, kCruiseDebugSound, "PCSound::stopChannel() channel %d", channel); _soundDriver->stopChannel(channel); } bool PCSound::isPlaying() const { return _player->playing(); } bool PCSound::songLoaded() const { return _player->songLoaded(); } bool PCSound::songPlayed() const { return _player->songPlayed(); } void PCSound::fadeSong() { _player->fadeOut(); } uint8 PCSound::numOrders() const { return _player->numOrders(); } void PCSound::setNumOrders(uint8 v) { _player->setNumOrders(v); } void PCSound::setPattern(int offset, uint8 value) { _player->setPattern(offset, value); } bool PCSound::musicLooping() const { return _player->looping(); } void PCSound::musicLoop(bool v) { _player->setLooping(v); } void PCSound::startNote(int channel, int volume, int freq) { warning("TODO: startNote"); // _soundDriver->setVolume(channel, volume); _soundDriver->setChannelFrequency(channel, freq); } void PCSound::doSync(Common::Serializer &s) { _player->doSync(s); s.syncAsSint16LE(_genVolume); } const char *PCSound::musicName() { return _player->musicName(); } void PCSound::syncSounds() { _soundDriver->syncSounds(); } } // End of namespace Cruise
utf-8
1
GPL-2+
2001-2021 The ScummVM Project The ScummVM Team 2002-2011 The DOSBox Team 1994-1998 Revolution Software Ltd. 2001-2004 Andrea Mazzoleni 2003-2005 Andreas 'Sprawl' Karlsso 2002-2008 Jurgen 'SumthinWicked' Braam 2003-2014 Lars 'AnotherGuest' Persso 2013-2020 Fedor Strizhniou 1990-2012 Neil Dodwell 1995-1997 Presto Studios, Inc. and others listed in COPYRIGHT file
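PCSoundDriver::findNote() above scans a descending frequency table and splits the hit index into octave and semitone; a compact sketch of that lookup using only the first octave of the record's 96-entry table:

#include <cstdio>

// First 13 entries of the record's descending note table (one octave + 1).
static const int kNoteTable[] = { 0xEEE, 0xE17, 0xD4D, 0xC8C, 0xBD9, 0xB2F,
                                  0xA8E, 0x9F7, 0x967, 0x8E0, 0x861, 0x7E8,
                                  0x777 };
static const int kCount = sizeof(kNoteTable) / sizeof(kNoteTable[0]);

void findNote(int freq, int* note, int* oct) {
  *note = kCount - 1;  // fallback: the lowest table entry
  for (int i = 0; i < kCount; ++i) {
    if (kNoteTable[i] <= freq) { *note = i; break; }  // first entry <= freq
  }
  *oct = *note / 12;   // 12 semitones per octave
  *note %= 12;
}

int main() {
  int note, oct;
  findNote(0x780, &note, &oct);  // falls between 0x7E8 and 0x777
  std::printf("note=%d oct=%d\n", note, oct);  // prints note=0 oct=1
  return 0;
}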
qtwebengine-opensource-src-5.15.8+dfsg/src/3rdparty/chromium/components/services/app_service/public/cpp/protocol_handler_info.cc
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/services/app_service/public/cpp/protocol_handler_info.h" #include <ostream> namespace apps { ProtocolHandlerInfo::ProtocolHandlerInfo() = default; ProtocolHandlerInfo::ProtocolHandlerInfo(const ProtocolHandlerInfo& other) = default; ProtocolHandlerInfo::~ProtocolHandlerInfo() = default; bool operator==(const ProtocolHandlerInfo& handler1, const ProtocolHandlerInfo& handler2) { return handler1.protocol == handler2.protocol && handler1.url == handler2.url; } std::ostream& operator<<(std::ostream& out, const ProtocolHandlerInfo& handler) { return out << "protocol: " << handler.protocol << " url: " << handler.url; } } // namespace apps
utf-8
1
LGPL-3 or GPL-2
2006-2021 The Chromium Authors 2016-2021 The Qt Company Ltd.
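The record above is the usual Chromium shape for a small value type: defaulted special members plus free operator== and operator<<; the same pattern in a self-contained sketch with a hypothetical struct (not the Chromium type itself):

#include <iostream>
#include <string>

struct HandlerInfo {  // hypothetical analogue of ProtocolHandlerInfo
  std::string protocol;
  std::string url;
};

// Memberwise equality over all fields, mirroring the record's operator==.
bool operator==(const HandlerInfo& a, const HandlerInfo& b) {
  return a.protocol == b.protocol && a.url == b.url;
}

// Stream output for logging and test diagnostics.
std::ostream& operator<<(std::ostream& out, const HandlerInfo& h) {
  return out << "protocol: " << h.protocol << " url: " << h.url;
}

int main() {
  HandlerInfo a{"web+tea", "https://example.org/?q=%s"};
  std::cout << a << " equal: " << (a == a) << '\n';
  return 0;
}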
calligra-3.2.1+dfsg/words/part/tests/TestClipToPage.cpp
#include "TestClipToPage.h" #include <KWDocument.h> #include <KWCanvas.h> #include <MockShapes.h> #include <KWPage.h> #include "MockPart.h" #include <QtTest> void TestClipToPage::testClipToPage() { KWDocument doc(new MockPart); KWPage page1 = doc.appendPage("Standard"); KoPageLayout layout = page1.pageStyle().pageLayout(); layout.width = 300; layout.height = 410; page1.pageStyle().setPageLayout(layout); KWCanvas canvas("bla", &doc, 0, 0); MockShape shape; shape.setPosition(QPointF(50, 50)); shape.setSize(QSizeF(100, 100)); QPointF distance(0, 0); canvas.clipToDocument(&shape, distance); QCOMPARE(distance, QPointF(0, 0)); distance = QPointF(-200, -500); canvas.clipToDocument(&shape, distance); QCOMPARE(distance, QPointF(-145, -145)); distance = QPointF(1000, 2000); canvas.clipToDocument(&shape, distance); QCOMPARE(distance, QPointF(245, 355)); distance = QPointF(50, 50); canvas.clipToDocument(&shape, distance); QCOMPARE(distance, QPointF(50, 50)); // test when we start outside the page shape.setPosition(QPointF(-200, -100)); distance = QPointF(0, 0); canvas.clipToDocument(&shape, distance); QCOMPARE(distance, QPointF(105, 5)); distance = QPointF(120, 120); canvas.clipToDocument(&shape, distance); QCOMPARE(distance, QPointF(120, 120)); shape.setPosition(QPointF(400, 200)); distance = QPointF(0, 0); canvas.clipToDocument(&shape, distance); QCOMPARE(distance, QPointF(-105, 0)); distance = QPointF(-110, -50); canvas.clipToDocument(&shape, distance); QCOMPARE(distance, QPointF(-110, -50)); } QTEST_MAIN(TestClipToPage)
utf-8
1
GPL-2+__and__LGPL-2+__and__LGPL-2.1__and__LGPL-2.1+
2012 <hanna.et.scott@gmail.com> 2007 <hubipete@gmx.net> 2006-2007 Aaron J. Seigo <aseigo@kde.org> 2010 Adam Celarek <kdedev@xibo.at> 2014 Adam Pigg 2003-2012,2014 Adam Pigg <adam@piggz.co.uk> 1999 Adobe Systems Incorporated 2006 Adriaan de Groot <groot@kde.org> 2004-2007,2009-2010 Adrian Page <adrian@pagenet.plus.com> 2010 Ajay Pundhir <ajay.pratap@iiitb.net> 2002,2004 Alexander Dymo <cloudtemple@mskat.net> 2014 Alexander Potashev <aspotashev@gmail.com> 2009 Alexia Allanic <alexia_allanic@yahoo.fr> 2007 Alexis Ménard <darktears31@gmail.com> 2006-2009 Alfredo Beaumont Sainz <alfredo.beaumont@gmail.com> 2013 Aman Madaan <madaan.amanmadaan@gmail.com> 2010 Amit Aggarwal <amit.5.aggarwal@nokia.com> 2002 Anders Lund <anders@alweb.dk> 2003 Andras Mantia <amantia@kde.org> 2001 Andrea Rizzi <rizzi@kde.org> 2006 Andreas Hartmetz <ahartmetz@gmail.com> 2010 Ariya Hidayat <ariya.hidayat@gmail.com> 2002-2007 Ariya Hidayat <ariya@kde.org> 2002 Ariya Hidayat <ariyahidayat@yahoo.de> 2010-2014 Arjen Hiemstra <ahiemstra@heimr.nl> 2013 Arjen-Wander Hiemstra <aw.hiemstra@gmail.com> 2010 Artur Duque de Souza <asouzakde.org> 2011 Aurélien Gâteau <agateau@kde.org> 2004-2006,2008 Bart Coppens <kde@bartcoppens.be> 1994 Basil K. Malyshev 2011-2012 Ben Martin <monkeyiq@users.sourceforge.net> 2009-2010 Benjamin Port <port.benjamin@gmail.com> 2008 Benoit Jacob <jacob.benoit.1@gmail.com> 2002 Benoit Vautrin <benoit.vautrin@free.fr> 2001-2002 Benoît Vautrin <benoit.vautrin@free.fr> 2001 Bernd Gehrmann <bernd@kdevelop.org> 2000 Bernd Johannes Wuebben <wuebben@kde.org> 2000 Bernd Wuebben <wuebben@kde.org> 2002 Bo Thorsen <bo@sonofthor.dk> 1999 Boris Wedl <boris.wedl@kfunigraz.ac.at> 2011 Boudewijn Rempot <boud@valdyas.org> 2004,2009-2010 Boudewijn Rempt 2007-2008,2011,2014 Boudewijn Rempt <boud@kde.org> 2010-2012,2014 Boudewijn Rempt <boud@kogmbh.com> 2003-2015 Boudewijn Rempt <boud@valdyas.org> 2008 Boudewijn Rempt <boud@valdysa.org> 2004,2008,2010-2011 Brad Hards <bradh@frogmouth.net> 2006 Brad Hards <bradh@kde.org> 2005 Bram Schoenmakers <bramschoenmakers@kde.nl> 1998-2004 Brian Bruns 2011-2012 Brijesh Patel <brijesh3105@gmail.com> 2004-2006 Bulia Byak <buliabyak@users.sf.net> 2004-2015 C. Boemann <cbo@boemann.dk> 2010 C. Boemann <cbo@boemannn.dk> 2009-2012 C. Boemann <cbo@kogmbh.com> 2006 C. Boemann Rasmussen <cbo@boemann.dk> 2011 C. Boemann, KO GmbH <cbo@kogmbh.com> 2011-2013 C.Boemann <cbo@boemann.dk> 2001-2011 Calligra developers 2013 Camilla Boemann <cbo@boemann.dk> 2008-2009 Carlos Licea <carlos.licea@kdemail.net> 2007-2008 Carlos Licea <carlos.licea@kdemail.org> 2010-2011 Carlos Licea <carlos@kdab.com> 1999 Carsten Pfeiffer <pfeiffer@kde.org> 2006 Casper Boemann <cbr@boemann.dk> 2004-2005 Cedric Pasteur <cedric.pasteur@free.fr> 2010 Celarek Adam <kdedev@xibo.at> 2004 Christian Muehlhaeuser <chris@chris.de> 2006 Christian Mueller <cmueller@gmx.de> 2005 Christian Nitschkowski <segfault_ii@web.de> 2010 Christoph Cullmann <cullmann@kde.org> 2010 Christoph Goerlich <chgoerlich@gmx.de> 2007 Chusslove Illich <caslav.ilic@gmx.net> 2004 Clarence Dang <dang@k.org> 2003-2004 Clarence Dang <dang@kde.org> 2011 Cuong Le <metacuong@gmail.com> 2007 Cyrille Berger 2006-2008,2010 Cyrille Berger <cberger@cberger.bet> 2004-2011 Cyrille Berger <cberger@cberger.net> 2006-2009 Dag Andersen <calligra-devel@kde.org> 2002-2012 Dag Andersen <danders@get2net.dk> 2008 Dag Andersen <kplato@kde.org> 2011 Daker Fernandes Pinheiro <dakerfp@gmail.com> 2011-2014 Dan Leinir Turthra Jensen <admin@leinir.dk> 2000 Daniel A. 
Atkinson 2002 Daniel Herring <herring@eecs.ku.edu> 2003 Daniel Molkentin <molkentin@kde.org> 2001-2003 Daniel Naber <daniel.naber@t-online.de> 2012-2013 Daniel Nicoletti <dantti12@gmail.com> 1998-2007,2010-2011 David Faure <faure@kde.org> 2000-2002 David Faure <faure@kde.org> Werner Trobin <trobin@kde.org> 2013 David Revoy <info@davidrevoy.com> 2014 Denis Kuplaykov <dener.kup@gmail.com> 2014 Denis Kupluakov <dener.kup@gmail.com> 2014 Denis Kuplyakov <dener.kup@gmail.com> 2013 Digia Plc and/or its subsidiary(-ies) 2012 Dimitrios T. Tanis <dimitrios.tanis@kdemail.net> 2003,2006-2007 Dirk Mueller <mueller@kde.org> 2002 Dirk Schönberger <dirk.schoenberger@sz-online.de> 2009-2015 Dmitry Kazakov <dimula73@gmail.com> 2003 Dominik Seichter <domseichter@web.de> 1982-1989 Donald H. House <x@unknown.com> 2009-2010 Edward Apap <schumifer@hotmail.com> 1997 Eiichi Takamori <taka@ma1.seikyou.ne.jp> 2002 Ellis Whitehead <ellis@kde.org> 2009,2013 Elvis Stansvik <elvstone@gmail.com> 2009 Elvis Stansvik <elvstone@gmail.org> 2007-2008 Emanuele Tamponi <emanuele@valinor.it> 2000-2001 Enno Bartels <ebartels@nwn.de> 2007 Eric Lamarque <eric.lamarque@free.fr> 2001 Eva Brucherseifer <eva@kde.org> 2001 Ewald Snel <ewald@rambo.its.tudelft.nl> 1997 Federico Mena Quintero <federico@nuclecu.unam.mx> 2008 Fela Winkelmolen <fela.kde@gmail.com> 2007 Florian Piquemal <flotueur@yahoo.fr> 2006 Frans Englich <frans.englich@telia.com> 2002 Fred Malabre <fmalabre@yahoo.com> 2006-2007 Frederic BECQUIER <frederic.becquier@gmail.com> 2006 Frederic Coiffier <fcoiffie@gmail.com> 2000 Frederik Fouvry 2004-2006 Fredrik Edemar <f_edemar@linux.se> 2007 Fredrik Höglund <fredrik@kde.org> 2007-2009 Fredy Yanardi <fyanardi@gmail.com> 2008 Fredy Yanardi <fyanardi@kde.org> 1989,1991,1999 Free Software Foundation, Inc 2005 Frerich Raabe <raabe@kde.org> 2004-2007 Fridrich Strba <fridrich.strba@bluewin.ch> 2013 Friedrich W. H. Kossebau <friedrich@kogmbh.com> 2012-2013,2015 Friedrich W. H. Kossebau <kossebau@kde.org> 2005-2006 Gabor Lehel <illissius@gmail.com> 2009,2011 Ganesh Paramasivam <ganesh@crystalfab.com> 2006 Gary Cramblitt <garycramblitt@comcast.net> 2010-2011 Geoffry Song <goffrie@gmail.com> 2006-2013 Gilles Caulier <caulier.gilles@gmail.com> 2008 Girish Ramakrishnan <girish@forwardbias.in> 2010-2013 Gopalakrishna Bhat A <gopalakbhat@gmail.com> 2001 Graham Short <grahshrt@netscape.net> 2001 Graham Short. <grahshrt@netscape.net> 2002 GraphicsMagick Group 2006 Gábor Lehel <illissius@gmail.com> 2003,2005-2006 Hamish Rodda <rodda@kde.org> 2011 Hanna Skott <hannaetscott@gmail.com> 2008 Hans Bakker <hansmbakker@gmail.com> 1999-2003 Hans Petter Bieker <bieker@kde.org> 1999-2002 Harri Porten <porten@kde.org> 2001 Holger Freyther <freyther@kde.org> 2009-2010 Hugo Pereira Da Costa <hugo@oxygen-icons.org> 2008-2009 Hyves (Startphone Ltd.) 
2006 ISaac Clerencia <isaac@warp.es> 2004 Ignacio Castaño <castano@ludicon.com> 2002 Igor Jansen <rm@kde.org> 2009 Ilya Portnov 2004-2014 Inge Wallin <inge@lysator.liu.se> 2007 Inge Wallin <ingwa@kde.org> 2011 Inge Wallin <ingwa@kogmbh.com> 2009 Inge Wallin <ingwa@lysator.liu.se> 2009 Inge wallin <inge@lysator.liu.se> 2006 Isaac Clerencia <isaac@warp.es> 2006 Jaison Lee <lee.jaison@gmail.com> 2008 James Hogan <james@albanarts.com> 2005-2012 Jan Hambrecht <jaham@gmx.net> 2003-2015 Jarosław Staniek <staniek@kde.org> 2003-2007 Jarosław Staniek <staniek@kde.org>/ OpenOffice Software 2003-2007 Jarosław Staniek @ OpenOffice Polska 2001 Jarosław Staniek, MIMUW (www.mimuw.edu.pl) 2010 Jean Nicolas Artaud <jean.nicolas.artaud@kogmbh.com> 2009-2013 Jean-Nicolas Artaud <jeannicolasartaud@gmail.com> 1997-2000 Jens Lautenbacher <jtl@gimp.org> 2009 Jens-Michael Hoffmann <jensmh@gmx.de> 2009 Jeremias Epperlein 2009 Jeremias Epperlein <jeeree@web.de> 2013 Jeremy Bourdiol <jerem.dante@gmail.com> 2010 Jeremy Lugagne <lugagne.jeremy@gmail.com> 2012-2013 Jigar Raisinghani <jigarraisinghani@gmail.com> 2011 Jignesh Kakadiya <jigneshhk1992@gmail.com> 2008 Jim Courtiau <jeremy.courtiau@gmail.com> 2009 Johann Hingue <yoan1703@hotmail.fr> 2005 Johannes Schaub <johannes.schaub@kdemail.net> 2005 Johannes Schaub <litb_devel@web.de> 2008 Johannes Simon <inge@lysator.liu.se> 2007-2010 Johannes Simon <johannes.simon@gmail.com> 2000-2001 John Califf <jcaliff@compuzone.net> 2000 John Califf <jcaliff@comuzone.net> 2000 John Califf <jwcaliff@compuzone.net> 2002-2003 John Dailey <dailey@vt.edu> 2007 John Marshall 2009-2010,2013 Jos van den Oever <jos@vandenoever.info> 2003 Joseph <Wenninger <jowenn@kde.org> 2002-2003 Joseph Wenninger <jowenn@kde.org> 2004-2006 Josh Andler <scislac@users.sf.net> 2010-2012 José Luis Vergara <pentalis@gmail.com> 2010-2011 José Luis Vergara Toloza <pentalis@gmail.com> 2011 Juan Aquino <utcl95@gmail.com> 2013 Juan Palacios <jpalaciosdev@gmail.com> 2010 Justin Noel <justin@ics.com> 2009 Jérémy Lugagne <jejewindsurf@hotmail.com> 2010 KO GmbH <ben.martin@kogmbh.com> 2010 KO GmbH <boud@kogbmh.com> 2010 KO GmbH <boud@kogmbh.com> 2009-2013 KO GmbH <cbo@kogmbh.com> 2009-2010 KO GmbH <jos.van.den.oever@kogmbh.com> 2012 KO GmbH. Contact: Boudewijn Rempt <boud@kogmbh.com> 2010 KO Gmbh <boud@kogmbh.com> 2010-2011 KO Gmbh <cbo@kogmbh.com> 1998-2004 KSpread Team <calligra-devel@kde.org> 1992 Karl Berry <karl@cs.umb.edu> 1992 Kathryn Hargreaves <letters@cs.umb.edu> 2007 Kenneth P. 
Esler, Jr 2003-2010 Kexi Team 2003-2012 Kexi Team <kexi@kde.org> 2011 Ko GmbH <cbo@kogmbh.com> 2010-2011 Ko Gmbh <cbo@kogmbh.com> 2000 Kurt Granroth <granroth@kde.org> 2002 Lars Siebold <khandha5@gmx.net> 2014 Lassi Nieminen <lassniem@gmail.com> 2002-2003 Laurent Montel <lmontel@mandrakesoft.com> 1999-2006 Laurent Montel <montel@kde.org> 2001-2003 Lennart Kudling <kudling@kde.org> 2008 Long Huynh Huu <long.upcase@googlemail.com> 2002-2003 Lucijan Busch <lucijan@gmx.at> 2003-2004 Lucijan Busch <lucijan@kde.org> 2002-2003 Lukas Tinkl <lukas.tinkl@suse.cz> 2002-2003 Lukas Tinkl <lukas@kde.org> 2006,2008-2009 Lukas Tvrdy <lukast.dev@gmail.com> 2013 Luke De Mouy <lukewolf101010devel@gmail.com> 2003 Lukáš Tinkl <lukas@kde.org> 2009-2011 Lukáš Tvrdý <LukasT.dev@gmail.com> 2011 Lukáš Tvrdý <lukas.tvrdy@ixonos.com> 2006,2008-2015 Lukáš Tvrdý <lukast.dev@gmail.com> 1998-2002 Maksym Polyakov 2014 Manuel Riecke <spell1337@gmail.com> 2010 Marc Pegon <pe.marc@free.fr> 2006-2009 Marco Gulino <marco.gulino@gmail.com> 2006-2012 Marijn Kruisselbrink <mkruisselbrink@kde.org> 2008-2009 Mark Kretschmann <kretschmann@kde.org> 1998-2001 Marti Maria 2005 Martin Ellis <kde@martinellis.co.uk> 2004 Martin Ellis <m.a.ellis@ncl.ac.uk> 2004-2006 Martin Ellis <martin.ellis@kdemail.net> 2006-2008 Martin Pfeiffer <hubipete@gmx.net> 2008 Martin Renold <martinxyz@gmx.ch> 2009 Matthew Woehlke 2007,2009 Matthew Woehlke <mw_triad@users.sourceforge.net> 1999-2000 Matthias Elter <elter@kde.org> 1999-2000 Matthias Elter <me@kde.org> 1997-1999 Matthias Kalle Dalheimer <kalle@kde.org> 2007 Matthias Kretz <kretz@kde.org> 2011 Matus Hanzes <matus.hanzes@ixonos.com> 2010 Matus Talcik <matus.talcik@gmail.com> 2011-2012 Matus Uzak <matus.uzak@gmail.com> 2010-2012 Matus Uzak <matus.uzak@ixonos.com> 2004-2005 Max Howell <max.howell@methylblue.com> 2006-2007 Menard Alexis <danders@get2net.dk> 2003-2005 Meni Livne <livne@kde.org> 2015 Michael Abrahams <miabraha@gmail.com> 2000 Michael Johnson <mikej@xnet.com> 1999-2000 Michael Koch <koch@kde.org> 1999 Michael Reiher <michael.reiher@gmx.de> 2005 Michael Thaler 2004 Michael Thaler <michael <Thaler@physik.tu-muenchen.de> 2004-2006 Michael Thaler <michael.thaler@physik.tu-muenchen.de> 2004 Michael Thaler <michael.thaler@physik.tu-muenchen.de> filters 2014 Michał Poteralski <michalpoteralskikde@gmail.com> 1998-2001 Mirko Boehm 1998-2001 Mirko Boehm <mirko@kde.org> 2012-2013 Mohammed Nafees <nafees.technocool@gmail.com> 2012,2014 Mohit Goyal <mohit.bits2011@gmail.com> 2011-2013 Mojtaba Shahi Senobari <mojtaba.shahi3000@gmail.com> 2001-2003,2006 Montel Laurent <lmontel@mandrakesoft.com> 2004,2007 Montel Laurent <montel@kde.org> 2015 Moritz Molch <kde@moritzmolch.de> 2010 Nandita Suri <suri.nandita@gmail.com> 2001-2004 Nicolas GOUTTE <goutte@kde.org> 2004 Nicolas Goutte <goutte@kde.org> 2002-2004 Nicolas Goutte <nicolasg@snafu.de> 2000 Nicolas Hadacek <haadcek@kde.org> 2010 Nokia 2009-2011 Nokia Corporation and/or its subsidiary(-ies) 2010 Nokia, Matus Hanzes 2003 Norbert <Andres <nandres@web.de> 2000,2002-2003 Norbert Andres <nandres@web.de> 2012-2013 Oleg Kukharchuk <oleg.kuh@gmail.com> 2012 Oleg Kukharchuk <oleg.kuh@gmail.org> 2001-2012 OpenMFG, LLC 2001-2007 OpenMFG, LLC <info@openmfg.com> 2003 OpenOffice Polska 2003-2006 OpenOffice Software 2005 Packwood Software 2002-2003 Patrick Julien <freak@codepimps.org> 2008 Patrick Spendrin <ps_ml@gmx.de> 2011-2012 Paul Mendez <paulestebanms@gmail.com> 2011 Pavol Korinek <pavol.korinek@ixonos.com> 2008 Peter Penz <peter.penz19@gmail.com> 2006-2009 Peter 
Simonsson <peter.simonsson@gmail.com> 2002-2006 Peter Simonsson <psn@linux.se> 2012 Philip Van Hoof <philip@codeminded.be> 2001-2003 Philipp Mueller <philipp.mueller@gmx.de> 2003 Philipp Müller <philipp.mueller@gmx.de> 2002 Phillip Mueller <philipp.mueller@gmx.de> 2007-2011 Pierre Ducroquet <pinaraf@gmail.com> 2007-2011 Pierre Ducroquet <pinaraf@pinaraf.info> 2008 Pierre Stirnweiss <\pierre.stirnweiss_calligra@gadz.org> 2010 Pierre Stirnweiss <\pstirnweiss@googlemail.com> 2008 Pierre Stirnweiss <pierre.stirnweiss_calligra@gadz.org> 2008-2012 Pierre Stirnweiss <pstirnweiss@googlemail.com> 2008,2011-2012 Pierre Stirnweiss <pstirnweiss@googlemail.org> 1999 Preston Brown <pbrown@kde.org> 2011-2015 Radoslaw Wicik <radoslaw@wicik.pl> 2011 Radoslaw Wicik <rockford@wicik.pl> 2011,2015 Radosław Wicik <radoslaw@wicik.pl> 2005-2006 Raphael Langerhorst <raphael.langerhorst@kdemail.net> 2009 Red Hat, Inc 1998-2001 Reginald Stadlbauer <reggie@kde.org> 2003 Reinhart Geiser <geiseri@kde.org> 2010 Ricardo Cabello <hello@mrdoob.com> 2001-2008 Rob Buis <buis@kde.org> 1989 Robert Allen <x@unknown.com> 2000-2003 Robert JACOLIN 2000,2002-2003 Robert JACOLIN <rjacolin@ifrance.com> 2006 Robert Knight <robertknight@gmail.com> 2011 Robert Mathias Marmorstein <robert@narnia.homeunix.com> 2014 Roman Shtemberko <shtemberko@gmail.com> 2008 Roopesh Chander <roop@forwardbias.in> 2000 S.R.Haque <shaheedhaque@hotmail.com> 2013 Sahil Nagpal <nagpal.sahil01@gmail.com> 2013 Sahil Nagpal <nagpal.sahil@gmail.com> 2013 Sascha Suelzer <s.suelzer@gmail.com> 2004-2006 Seb Ruiz <ruiz@kde.org> 2011 Sebastian Kügler <sebas@kde.org> 2004-2011 Sebastian Sauer <mail@dipe.org> 2011 Sebastian Sauer <sebastian.sauer@kdab.com> 2009-2011 Sebastian Sauer <sebsauer@kdab.com> 2011-2012 Shantanu Tushar <shaan7in@gmail.com> 2013 Shantanu Tushar <shantanu@kde.org> 2006-2008 Sharan Rao <sharanrao@gmail.com> 2012 Shreya <Pandit <shreya@shreyapandit.com> 2011-2012 Shreya Pandit <shreya@shreyapandit.com> 2011 Siddharth Sharma <siddharth.kde@gmail.com> 2008,2011-2012 Silvio Heinrich <plassy@web.de> 2011 Silvio Heinrich <plassyqweb.de> 1999-2001,2006 Simon Hausmann <hausmann@kde.org> 2002 Simon MacMullen <calligra@babysimon.co.uk> 1998-2002 Skåne Sjælland Linux User Group <bestyrelsen@sslug.dk> 2011 Smit Patel <smitpatel24@gmail.com> 2015 Soma Schliszka <soma.schliszka@gmail.com> 2013 Somsubhra Bairi <somsubhra.bairi@gmail.com> 2011 Srikanth Tiyyagura <srikanth.tulasiram@gmail.com> 2003 Stefan Hetzl <shetzl@chello.at> 2005-2010 Stefan Nikolaus <stefan.nikolaus@kdemail.net> 1998 Stefan Taferner 2015 Stefano Bonicatti <smjert@gmail.com> 2002-2006 Stephan Binner <binner@kde.org> 1998-2000,2002-2003,2006-2007 Stephan Kulow <coolo@kde.org> 2011 Stuart Dickson <stuart@furkinfantasic.net> 2011 Stuart Dickson <stuart@furkinfantastic.net> 2014 Stuart Dickson <stuartmd@kogmbh.com> 2012 Sujith H <sujith.h@gmail.com> 2011-2013 Sujith Haridasan <sujith.h@gmail.com> 2012 Sujith Haridasan <sujith.haridasan@kdemail.net> 2000 Sun Microsystems, Inc 2010,2012 Sven Langkamp 2004-2014 Sven Langkamp <sven.langkamp@gmail.com> 2005-2006 Sven Lüppken <sven@kde.org> 2000 Sven Neumann <sven@gimp.org> 2006 Thomas Braxton <brax108@cox.net> 2002 Thomas Franke and Andreas Pietzowski <andreas@pietzowski.de> 2003 Thomas Nagy <tnagyemail-mail@yahoo.fr> 2006 Thomas Schaap <thomas.schaap@kdemail.net> 2001-2002,2005-2011 Thomas Zander <zander@kde.org> 2001,2007,2009 Thomas zander <zander@kde.org> 2007 Thorsten Zach3n <zachmann@kde.org> 2006-2008,2010-2011 Thorsten Zachmann 
<t.zachmann@zagge.de> 2011 Thorsten Zachmann <zachmann@kde.com> 2007 Thorsten Zachmann <zachmann@kde.okde.org> 2006-2013,2015 Thorsten Zachmann <zachmann@kde.org> 2002 Till Busch <till@bux.at> 2005-2006 Tim Beaulen <tbscope@gmail.com> 2005 Tim Beaulen <tbscope@gmail.org> 1997 Tim D. Gilman 1997 Tim D. Gilman <tdgilman@best.org> 2008 Timothe Lacroix <dakeyras.khan@gmail.com> 2007 Timothee Lacroix <dakeyras.khan@gmail.com> 2008 Timothée Lacroix <dakeyras.khan@gmail.com> 2005 Tom Albers <tomalbers@kde.nl> 2004-2007 Tomas Mecir <mecirt@gmail.com> 2001 Tomasz Grobelny <grotk@poczta.onet.pl> 2002-2003,2005 Tomislav Lukman <tomislav.lukman@ck.t-com.hr> 1998-2000 Torben Weis <weis@kde.org> 2011 Torio Mlshi <mlshi@lavabit.com> 2007 Torsten Rahn <tackat@kde.org> 2003 Ulrich Kuettler <ulrich.kuettler@gmx.de> 2001 Ulrich Kuettler <ulrich.kuettler@mailbox.tu-dresden.de> 2010 Valek Filippov <frob@gnome.org> 2014 Victor Lafon <metabolic.ewilan@hotmail.fr> 2010 Vidhyapria Arunkumar <vidhyapria.arunkumar@nokia.com> 2010 Vidhyapria arunkumar <vidhyapria.arunkumar@nokia.com> 2004 Waldo Bastian <bastian@kde.org> 2000-2002 Werner Trobin <trobin@kde.org> 2000 Wilco Greven <greven@kde.org> 2002-2004 William Lachance <wrlach@gmail.com> 2014 Wojciech Kosowicz <pcellix@gmail.com> 2014 Wolthera van Hövell <griffinvalley@gmail.com> 2014-2015 Wolthera van Hövell tot Westerflier <griffinvalley@gmail.com> 2005 Yann Bodson <yann.bodson@online.fr> 2009 Yannick Motta <yannick.motta@gmail.com> 2005 Yolla Indria <yolla.indria@gmail.com> 2010-2014 Yue Liu <yue.liu@mail.com> 2002 patrick julien <freak@codepimps.org> 1999-2007 the KSpread Team <calligra-devel@kde.org> 2001-2013 the Karbon Developers 2003 the Kexi Team 2002 the calligra team <calligra@kde.org> 2002-2003 thierry lorthiois
kwin-5.23.5/src/kcmkwin/kwindesktop/animationsmodel.h
/*
    KWin - the KDE window manager
    This file is part of the KDE project.

    SPDX-FileCopyrightText: 2018 Vlad Zahorodnii <vlad.zahorodnii@kde.org>

    SPDX-License-Identifier: GPL-2.0-or-later
*/

#pragma once

#include "effectsmodel.h"

namespace KWin
{

class AnimationsModel : public EffectsModel
{
    Q_OBJECT
    Q_PROPERTY(bool animationEnabled READ animationEnabled WRITE setAnimationEnabled NOTIFY animationEnabledChanged)
    Q_PROPERTY(int animationIndex READ animationIndex WRITE setAnimationIndex NOTIFY animationIndexChanged)
    Q_PROPERTY(bool currentConfigurable READ currentConfigurable NOTIFY currentConfigurableChanged)
    Q_PROPERTY(bool defaultAnimationEnabled READ defaultAnimationEnabled NOTIFY defaultAnimationEnabledChanged)
    Q_PROPERTY(int defaultAnimationIndex READ defaultAnimationIndex NOTIFY defaultAnimationIndexChanged)

public:
    explicit AnimationsModel(QObject *parent = nullptr);

    bool animationEnabled() const;
    void setAnimationEnabled(bool enabled);

    int animationIndex() const;
    void setAnimationIndex(int index);

    bool currentConfigurable() const;

    bool defaultAnimationEnabled() const;
    int defaultAnimationIndex() const;

    void load();
    void save();
    void defaults();
    bool isDefaults() const;
    bool needsSave() const;

Q_SIGNALS:
    void animationEnabledChanged();
    void animationIndexChanged();
    void currentConfigurableChanged();
    void defaultAnimationEnabledChanged();
    void defaultAnimationIndexChanged();

protected:
    bool shouldStore(const EffectData &data) const override;

private:
    Status status(int row) const;
    void loadDefaults();
    bool modelAnimationEnabled() const;
    int modelAnimationIndex() const;

    bool m_animationEnabled = false;
    bool m_defaultAnimationEnabled = false;
    int m_animationIndex = -1;
    int m_defaultAnimationIndex = -1;
    bool m_currentConfigurable = false;

    Q_DISABLE_COPY(AnimationsModel)
};

}
utf-8
1
GPL-2+
2010, Alexandre Pereira <pereira.alex@gmail.com> 2019-2020, Aleix Pol Gonzalez <aleixpol@kde.org> 2018, Alex Nemeth <alex.nemeth329@gmail.com> 2010, Alexandre Pereira <pereira.alex@gmail.com> 2013, Antonis Tsiapaliokas <kok3rs@gmail.com> 2011, Arthur Arlt <a.arlt@stud.uni-heidelberg.de> 2020, Benjamin Port <benjamin.port@enioka.com> 2020, Carson Black <uhhadd@gmail.com> 2007, Christian Nitschkowski <christian.nitschkowski@kdemail.net> 2007, Christian Nitschkowski <christian.nitschkowski@kdemail.net> 1997-2002, Cristian Tibirna <tibirna@kde.org> 2020, Cyril Rossi <cyril.rossi@enioka.com> 2011-2012, Cédric Bellegarde <gnumdk@gmail.com> 2008, Cédric Borgese <cedric.borgese@gmail.com> 2018-2021, David Edmundson <davidedmundson@kde.org> 2017, David Edmundson <kde@davidedmundson.co.uk> 2020, David Redondo <kde@david-redondo.de> 2017, Demitrius Belai <demitriusbelai@gmail.com> 2018, Eike Hein <hein@kde.org> 2012, Filip Wieladek <wattos@gmail.com> 2009-2018, Fredrik Höglund <fredrik@kde.org> 2020, Henri Chain <henri.chain@enioka.com> 2014, Hugo Pereira Da Costa <hugo.pereira@free.fr> 2020, Ismael Asensio <isma.af@gmail.com> 2010, Jorge Mata <matamax123@gmail.com> 2017, Kai Uwe Broulik <kde@privat.broulik.de> 2003, Karol Szwed <kszwed@kde.org> 2008, Kristian Høgsberg 2018, Laurent Montel <montel@kde.org> 2011, Lionel Chauvin <megabigbug@yahoo.fr> 2001-2008, Lubos Lunak <l.lunak@kde.org> 2008, Lubos Lunak <l.lunak@suse.cz> 2008-2009, Lucas Murray <lmurray@undefinedfire.com> 2014-2020, Marco Martin <mart@kde.org> 2009, Marco Martin <notmart@gmail.com> 2015-2019, Martin Flöser <mgraesslin@kde.org> 2008-2011, Martin Gräßlin <kde@martin-graesslin.com> 2012, Martin Gräßlin <m.graesslin@kde.org> 2008-2017, Martin Gräßlin <mgraesslin@kde.org> 1998-2000, Matthias Ettrich <ettrich@kde.org> 2009, Michael Zanetti <michael_zanetti@gmx.net> 2015, Mika Allan Rauhala <mika.allan.rauhala@gmail.com> 2011, NVIDIA Corporation 2019, NVIDIA Inc. 2010, Nokia Corporation and/or its subsidiary(-ies) 1997, Patrick Dowler <dowler@morgul.fsh.uvic.ca> 2007, Philip Falkner <philip.falkner@gmail.com> 2011-2012, Philipp Knechtges <philipp-dev@knechtges.com> 2001, Rik Hemsley (rikkus) <rik@kde.org> 2006-2007, Rivo Laks <rivolaks@hot.ee> 2010, Rohan Prabhu <rohan@rohanprabhu.com> 2016-2020, Roman Gilg <subdiff@gmail.com> 2003-2005, Sandro Giessl <sandro@giessl.com> 2010, Sebastian Sauer <sebsauer@kdab.com> 2011, Tamas Krutki <ktamasw@gmail.com> 2014-2015, Thomas Lübking <thomas.luebking@gmail.com> 2010-2011, Thomas Lübking <thomas.luebking@web.de> 2001, Waldo Bastian <bastian@kde.org> 2020, Xaver Hugl <xaver.hugl@gmail.com> 2002-2018, Free Software Foundation 1997-2018, Free Software Foundation, Inc 2000, Jesús Bravo Álvarez 2008-2009, K Desktop Environment 1998, KDE Russian translation Team 2000-2001, KDE Team 2000-2002, KDE e.v. 2007, KDE i18n Project for Vietnamese 1999-2002, Meni Livne <livne@kde.org> 2009-2012, Rosetta Contributors and Canonical Ltd 2002-2021, This_file_is_part_of_KDE 2001, translate.org.za
qt6-webengine-6.2.2+dfsg/src/3rdparty/chromium/third_party/tflite/src/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/compiler/xla/service/hlo_dataflow_analysis.h" #include "tensorflow/compiler/xla/literal.h" #include "tensorflow/compiler/xla/service/flatten_call_graph.h" #include "tensorflow/compiler/xla/service/hlo_computation.h" #include "tensorflow/compiler/xla/service/hlo_creation_utils.h" #include "tensorflow/compiler/xla/service/hlo_graph_dumper.h" #include "tensorflow/compiler/xla/service/hlo_matchers.h" #include "tensorflow/compiler/xla/service/hlo_opcode.h" #include "tensorflow/compiler/xla/service/hlo_ordering.h" #include "tensorflow/compiler/xla/service/instruction_fusion.h" #include "tensorflow/compiler/xla/shape_util.h" #include "tensorflow/compiler/xla/status_macros.h" #include "tensorflow/compiler/xla/test.h" #include "tensorflow/compiler/xla/test_helpers.h" #include "tensorflow/compiler/xla/tests/hlo_test_base.h" #include "tensorflow/compiler/xla/xla_data.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" namespace xla { namespace { using ::testing::ElementsAre; using ::testing::UnorderedElementsAre; // Test is parameterized on a bool which is whether the dataflow analysis is // performed with SSA form. class HloDataflowAnalysisTest : public HloTestBase, public ::testing::WithParamInterface<bool> { protected: HloDataflowAnalysisTest() : module_(CreateNewVerifiedModule()) {} // Run dataflow analysis on the member module. For convenience returns a // reference to the generated analysis stored in analysis_. const HloDataflowAnalysis& RunAnalysis(bool ssa_form, bool bitcast_defines_value = false) { FlattenCallGraph flatten; EXPECT_TRUE(flatten.Run(module_.get()).ok()); analysis_ = HloDataflowAnalysis::Run(*module_, ssa_form, bitcast_defines_value) .ConsumeValueOrDie(); return *analysis_; } // Return a vector of the HloValues at the given program position. std::vector<HloValue> HloValuesAt(const HloInstruction* instruction, const ShapeIndex& index = {}) { CHECK(analysis_ != nullptr); std::vector<HloValue> values; for (const HloValue* value : analysis_->GetValueSet(instruction, index).values()) { values.push_back(*value); } return values; } // Returns true if the top-level values for instructions 'a' and 'b' may // interfere. Precondition: 'a' and 'b' define array-shaped values. bool InstructionsMayInterfere(const HloOrdering& ordering, const HloInstruction* a, const HloInstruction* b) { EXPECT_FALSE(a->shape().IsTuple()); EXPECT_FALSE(b->shape().IsTuple()); return ordering.MayInterfere(analysis_->GetValueDefinedAt(a), analysis_->GetValueDefinedAt(b), *analysis_); } std::unique_ptr<HloComputation> CreateR0F32UnaryOpComputation( HloOpcode opcode) { HloComputation::Builder builder(TestName() + "." 
+ HloOpcodeString(opcode)); HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, scalar_shape_, "param0")); builder.AddInstruction( HloInstruction::CreateUnary(scalar_shape_, opcode, param0)); return builder.Build(); } std::unique_ptr<HloModule> module_; std::unique_ptr<HloDataflowAnalysis> analysis_; const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {}); const Shape vector_shape_ = ShapeUtil::MakeShape(F32, {42}); const Shape tuple_shape_ = ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {})}); }; TEST_P(HloDataflowAnalysisTest, BinaryOperation) { // Test the dataflow for a simple binary operation (Add). auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto add = builder.AddInstruction(HloInstruction::CreateBinary( scalar_shape_, HloOpcode::kAdd, constant1, constant2)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); // Each instruction should define a single value. EXPECT_EQ(analysis.values().size(), 3); EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1)); EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2)); EXPECT_TRUE(analysis.ValueIsDefinedAt(add)); // Verify the positions of the values. These positions are all trivial because // there are no instructions which forward values. EXPECT_THAT(analysis.GetValueDefinedAt(constant1).positions(), UnorderedElementsAre(HloPosition{constant1, {}})); EXPECT_THAT(analysis.GetValueDefinedAt(constant2).positions(), UnorderedElementsAre(HloPosition{constant2, {}})); EXPECT_THAT(analysis.GetValueDefinedAt(add).positions(), UnorderedElementsAre(HloPosition{add, {}})); // Verify the uses of the values. EXPECT_THAT(analysis.GetValueDefinedAt(constant1).uses(), UnorderedElementsAre(HloUse{add, 0, {}})); EXPECT_THAT(analysis.GetValueDefinedAt(constant2).uses(), UnorderedElementsAre(HloUse{add, 1, {}})); EXPECT_TRUE(analysis.GetValueDefinedAt(add).uses().empty()); // Verify liveout values from the module. EXPECT_FALSE(analysis.GetValueDefinedAt(constant1).live_out_of_module()); EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module()); EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module()); } TEST_P(HloDataflowAnalysisTest, TupleAndGtes) { // Verify the dataflow through a Tuple and GetTupleElement instructions. auto builder = HloComputation::Builder(TestName()); auto param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, scalar_shape_, "param0")); auto param1 = builder.AddInstruction( HloInstruction::CreateParameter(1, scalar_shape_, "param1")); auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({param0, param1})); auto gte0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 0)); auto gte1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 1)); auto add = builder.AddInstruction( HloInstruction::CreateBinary(scalar_shape_, HloOpcode::kAdd, gte0, gte1)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); // The two params, tuple, and add should each define one value. 
EXPECT_EQ(analysis.values().size(), 4); EXPECT_TRUE(analysis.ValueIsDefinedAt(param0)); EXPECT_TRUE(analysis.ValueIsDefinedAt(param1)); EXPECT_TRUE(analysis.ValueIsDefinedAt(tuple, /*index=*/{})); EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, /*index=*/{0})); EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, /*index=*/{1})); EXPECT_FALSE(analysis.ValueIsDefinedAt(gte0)); EXPECT_FALSE(analysis.ValueIsDefinedAt(gte1)); EXPECT_TRUE(analysis.ValueIsDefinedAt(add)); // Verify the positions of the values. EXPECT_THAT( analysis.GetValueDefinedAt(param0).positions(), UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}}, HloPosition{gte0, {}})); EXPECT_THAT( analysis.GetValueDefinedAt(param1).positions(), UnorderedElementsAre(HloPosition{param1, {}}, HloPosition{tuple, {1}}, HloPosition{gte1, {}})); EXPECT_THAT(analysis.GetValueDefinedAt(tuple).positions(), UnorderedElementsAre(HloPosition{tuple, {}})); // Verify uses. Of interest is that a GetTupleElement instruction is only a // use of the top-level value in the tuple operand. EXPECT_THAT(analysis.GetValueDefinedAt(param0).uses(), UnorderedElementsAre(HloUse{add, 0, {}})); EXPECT_THAT(analysis.GetValueDefinedAt(param1).uses(), UnorderedElementsAre(HloUse{add, 1, {}})); EXPECT_THAT(analysis.GetValueDefinedAt(tuple, /*index=*/{}).uses(), UnorderedElementsAre(HloUse{gte0, 0, {}}, HloUse{gte1, 0, {}})); EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module()); } TEST_P(HloDataflowAnalysisTest, NestedTuple) { // Verify the dataflow through a nested tuple. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto nested_tuple = builder.AddInstruction( HloInstruction::CreateTuple({tuple, tuple, constant1})); auto gte_tuple = builder.AddInstruction( HloInstruction::CreateGetTupleElement(tuple->shape(), nested_tuple, 1)); auto gte_out = builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, gte_tuple, 0)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); EXPECT_EQ(analysis.values().size(), 4); // Verify positions and uses. EXPECT_THAT( analysis.GetValueDefinedAt(constant1).positions(), UnorderedElementsAre( HloPosition{constant1, {}}, HloPosition{tuple, {0}}, HloPosition{nested_tuple, {0, 0}}, HloPosition{nested_tuple, {1, 0}}, HloPosition{nested_tuple, {2}}, HloPosition{gte_tuple, {0}}, HloPosition{gte_out, {}})); // Constant values should have only a single use, which is the root of the // computation. EXPECT_THAT(analysis.GetValueDefinedAt(constant1, /*index=*/{}).uses(), UnorderedElementsAre(HloUse{gte_out, 0, {0}})); EXPECT_TRUE(analysis.GetValueDefinedAt(constant2).uses().empty()); // The top-level tuple values are used in GTE instructions. 
EXPECT_THAT(analysis.GetValueDefinedAt(tuple, /*index=*/{}).uses(), UnorderedElementsAre(HloUse{gte_out, 0, {}})); EXPECT_THAT(analysis.GetValueDefinedAt(nested_tuple, /*index=*/{}).uses(), UnorderedElementsAre(HloUse{gte_tuple, 0, {}})); EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module()); EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module()); EXPECT_FALSE( analysis.GetValueDefinedAt(tuple, /*index=*/{}).live_out_of_module()); EXPECT_FALSE(analysis.GetValueDefinedAt(nested_tuple, /*index=*/{}) .live_out_of_module()); } TEST_P(HloDataflowAnalysisTest, SingleCall) { // Test a single call of a subcomputation. The subcomputation adds its two // array-shaped parameters. auto subbuilder = HloComputation::Builder("Subcomputation"); auto subparam0 = subbuilder.AddInstruction( HloInstruction::CreateParameter(0, scalar_shape_, "param0")); auto subparam1 = subbuilder.AddInstruction( HloInstruction::CreateParameter(1, scalar_shape_, "param1")); auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary( scalar_shape_, HloOpcode::kAdd, subparam0, subparam1)); HloComputation* called_computation = module_->AddEmbeddedComputation(subbuilder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto call = builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {constant1, constant2}, called_computation)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); EXPECT_EQ(analysis.values().size(), 3); // The parameters of the subcomputation and the call instruction itself should // not define values. Their values flow from elsewhere. EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1)); EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2)); EXPECT_FALSE(analysis.ValueIsDefinedAt(subparam0)); EXPECT_FALSE(analysis.ValueIsDefinedAt(subparam1)); EXPECT_TRUE(analysis.ValueIsDefinedAt(add)); EXPECT_FALSE(analysis.ValueIsDefinedAt(call)); EXPECT_EQ(analysis.GetUniqueValueAt(subparam0), analysis.GetValueDefinedAt(constant1)); EXPECT_EQ(analysis.GetUniqueValueAt(subparam1), analysis.GetValueDefinedAt(constant2)); EXPECT_EQ(analysis.GetUniqueValueAt(call), analysis.GetValueDefinedAt(add)); EXPECT_THAT(analysis.GetValueDefinedAt(constant1).uses(), UnorderedElementsAre(HloUse{call, 0, {}}, HloUse{add, 0, {}})); EXPECT_THAT(analysis.GetValueDefinedAt(constant2).uses(), UnorderedElementsAre(HloUse{call, 1, {}}, HloUse{add, 1, {}})); EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module()); } TEST_P(HloDataflowAnalysisTest, NestedCalls) { // Test a module with nested computations. HLO is: // // F32[] inner_computation(F32[] %param0, F32[] %param1): // %add = Add(%param0, %param1) // // F32[] outer_computation((F32[] %param0, F32[] %param1): // ;; Note that parameters are interchanged in the call. 
// %nested_call = Call(inner_computation, {%param1, %param0}) // // F32[] entry: // %constant1 = Constant(1.0) // %constant2 = Constant(2.0) // %call = Call(outer_computation, {%constant1, %constant2}) // auto inner_builder = HloComputation::Builder("InnerComputation"); auto inner_param0 = inner_builder.AddInstruction( HloInstruction::CreateParameter(0, scalar_shape_, "param0")); auto inner_param1 = inner_builder.AddInstruction( HloInstruction::CreateParameter(1, scalar_shape_, "param1")); auto add = inner_builder.AddInstruction(HloInstruction::CreateBinary( scalar_shape_, HloOpcode::kAdd, inner_param0, inner_param1)); HloComputation* inner_computation = module_->AddEmbeddedComputation(inner_builder.Build()); auto outer_builder = HloComputation::Builder("OuterComputation"); auto outer_param0 = outer_builder.AddInstruction( HloInstruction::CreateParameter(0, scalar_shape_, "param0")); auto outer_param1 = outer_builder.AddInstruction( HloInstruction::CreateParameter(1, scalar_shape_, "param1")); // Swizzle parameters. auto nested_call = outer_builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {outer_param1, outer_param0}, inner_computation)); HloComputation* outer_computation = module_->AddEmbeddedComputation(outer_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto call = builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {constant1, constant2}, outer_computation)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); // Only three values should be defined. Most instructions just pass through // their operand values. EXPECT_EQ(analysis.values().size(), 3); // Verify that the uses of the constants are properly swizzled by parameter // permutation in nested_call. EXPECT_THAT( analysis.GetValueDefinedAt(constant1).uses(), UnorderedElementsAre(HloUse{call, 0, {}}, HloUse{nested_call, 1, {}}, HloUse{add, 1, {}})); EXPECT_THAT( analysis.GetValueDefinedAt(constant2).uses(), UnorderedElementsAre(HloUse{call, 1, {}}, HloUse{nested_call, 0, {}}, HloUse{add, 0, {}})); EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module()); } TEST_P(HloDataflowAnalysisTest, SingleWhile) { // Test a simple single while instruction. The while body includes a // pass-through value. HLO: // // body((F32[], F32[]) %tuple_param): // %add = Add(%tuple_param{0}, %tuple_param{1}) // return Tuple(%tuple_param{0}, %add) // // condition((F32[], F32[]) %tuple_param): // return Constant(false) // // entry: // %constant1 = Constant(1.0) // %constant2 = Constant(2.0) // %tuple = Tuple(%constant1, %constant2) // return While(%tuple, body, condition) // const Shape tuple_shape = ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_}); // Element 0 passes transparently through the body. 
auto body_builder = HloComputation::Builder("body"); auto body_param = body_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto body_element_0 = body_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0)); auto body_element_1 = body_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1)); auto add = body_builder.AddInstruction(HloInstruction::CreateBinary( scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1)); auto body_root = body_builder.AddInstruction( HloInstruction::CreateTuple({body_element_0, add})); HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build()); // Condition computation trivially returns a constant "false". auto cond_builder = HloComputation::Builder("condition"); auto cond_param = cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto cond_constant = cond_builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); HloComputation* condition = module_->AddEmbeddedComputation(cond_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto xla_while = builder.AddInstruction( HloInstruction::CreateWhile(tuple_shape, condition, body, tuple)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); EXPECT_FALSE(analysis.GetValueDefinedAt(cond_constant).live_out_of_module()); if (ssa_form) { // Element 0 of the tuple passed through the body so no phi value is // defined. EXPECT_FALSE(analysis.ValueIsDefinedAt(xla_while, /*index=*/{0})); EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, /*index=*/{0})); EXPECT_FALSE(analysis.ValueIsDefinedAt(cond_param, /*index=*/{0})); // Element 1 of the tuple should be a phi value. EXPECT_TRUE(analysis.ValueIsDefinedAt(xla_while, /*index=*/{1})); EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{1}).is_phi()); EXPECT_TRUE(analysis.ValueIsDefinedAt(body_param, /*index=*/{1})); EXPECT_TRUE(analysis.GetValueDefinedAt(body_param, /*index=*/{1}).is_phi()); EXPECT_TRUE(analysis.ValueIsDefinedAt(cond_param, /*index=*/{1})); EXPECT_TRUE(analysis.GetValueDefinedAt(cond_param, /*index=*/{1}).is_phi()); EXPECT_THAT( analysis.GetValueDefinedAt(constant1).uses(), UnorderedElementsAre(HloUse{add, 0, {}}, HloUse{body_root, 0, {}}, HloUse{xla_while, 0, {0}})); // Constant1 passes through the body and out of the module. EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module()); EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{1}) .live_out_of_module()); EXPECT_FALSE(analysis.GetValueDefinedAt(add).live_out_of_module()); } else { // While instruction and subcomputation parameters should not define values // in non-ssa form. 
EXPECT_FALSE(analysis.ValueIsDefinedAt(xla_while, /*index=*/{0})); EXPECT_FALSE(analysis.ValueIsDefinedAt(xla_while, /*index=*/{1})); EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, /*index=*/{0})); EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, /*index=*/{1})); EXPECT_FALSE(analysis.ValueIsDefinedAt(cond_param, /*index=*/{0})); EXPECT_FALSE(analysis.ValueIsDefinedAt(cond_param, /*index=*/{1})); EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module()); EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module()); } } TEST_P(HloDataflowAnalysisTest, SequentialWhiles) { // Test sequential while instructions. The while body includes a // pass-through value. HLO: // // body((F32[], F32[]) %tuple_param): // %add = Add(%tuple_param{0}, %tuple_param{1}) // return Tuple(%tuple_param{0}, %add) // // condition((F32[], F32[]) %tuple_param): // return Constant(false) // // entry: // %constant1 = Constant(1.0) // %constant2 = Constant(2.0) // %tuple = Tuple(%constant1, %constant2) // %while0 = While(%tuple, body, condition) // %while1 = While(%while0, body, condition) // return While(%while1, body, condition) // const Shape tuple_shape = ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_}); // Element 0 passes transparently through the body. auto body_builder = HloComputation::Builder("body"); auto body_param = body_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto body_element_0 = body_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0)); auto body_element_1 = body_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1)); auto add = body_builder.AddInstruction(HloInstruction::CreateBinary( scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1)); body_builder.AddInstruction( HloInstruction::CreateTuple({body_element_0, add})); HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build()); auto cond_builder = HloComputation::Builder("condition"); cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); HloComputation* condition = module_->AddEmbeddedComputation(cond_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto xla_while0 = builder.AddInstruction( HloInstruction::CreateWhile(tuple_shape, condition, body, tuple)); auto xla_while1 = builder.AddInstruction( HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while0)); auto xla_while2 = builder.AddInstruction( HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while1)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); // Element 0 is passed through all the while instructions and out of the // module.. 
EXPECT_EQ(analysis.GetUniqueValueAt(xla_while0, /*index=*/{0}), analysis.GetValueDefinedAt(constant1)); EXPECT_EQ(analysis.GetUniqueValueAt(xla_while1, /*index=*/{0}), analysis.GetValueDefinedAt(constant1)); EXPECT_EQ(analysis.GetUniqueValueAt(xla_while2, /*index=*/{0}), analysis.GetValueDefinedAt(constant1)); EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module()); } TEST_P(HloDataflowAnalysisTest, MultiLevelNestedWhile) { // Test nested while instructions. The level0 body (most inner while) and // level1 body pass through the parameter, while level2 (most outer while) // modifies it. // // level0_body((F32[]) %tuple_param): // return Tuple(%tuple_param{0}) // // level1_body((F32[]) %tuple_param): // return While(%tuple_param{0}), body=level0 // // level2_body((F32[]) %tuple_param): // while = While(%tuple_param{0}), body=level1 //. return negate(%while{0}) // // entry: // %constant = Constant(1.0) // %tuple = Tuple(%constant) // return While(%tuple), body=level2 // const Shape tuple_shape = ShapeUtil::MakeTupleShape({scalar_shape_}); auto cond_builder = HloComputation::Builder("condition"); cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); HloComputation* condition = module_->AddEmbeddedComputation(cond_builder.Build()); // level 0 passes transparently through the body. auto level0_builder = HloComputation::Builder("level0_body"); auto level0_param = level0_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto level0_element_0 = level0_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, level0_param, 0)); auto level0_root = level0_builder.AddInstruction( HloInstruction::CreateTuple({level0_element_0})); HloComputation* level0_body = module_->AddEmbeddedComputation(level0_builder.Build()); // Element 1 passes transparently through the body. auto level1_builder = HloComputation::Builder("level1_body"); auto level1_param = level1_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto level1_root = level1_builder.AddInstruction(HloInstruction::CreateWhile( tuple_shape, condition, level0_body, level1_param)); HloComputation* level1_body = module_->AddEmbeddedComputation(level1_builder.Build()); // Element 1 passes transparently through the body. 
auto level2_builder = HloComputation::Builder("level2_body"); auto level2_param = level2_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto level2_while = level2_builder.AddInstruction(HloInstruction::CreateWhile( tuple_shape, condition, level1_body, level2_param)); auto level2_element_0 = level2_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, level2_while, 0)); auto negate = level2_builder.AddInstruction(HloInstruction::CreateUnary( scalar_shape_, HloOpcode::kNegate, level2_element_0)); level2_builder.AddInstruction(HloInstruction::CreateTuple({negate})); HloComputation* level2_body = module_->AddEmbeddedComputation(level2_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({constant1})); builder.AddInstruction( HloInstruction::CreateWhile(tuple_shape, condition, level2_body, tuple)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); if (!ssa_form) { return; } const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); // Phi node on inner parameters and roots should have been eliminated. EXPECT_FALSE(analysis.ValueIsDefinedAt(level1_param, /*index=*/{0})); EXPECT_FALSE(analysis.ValueIsDefinedAt(level0_param, /*index=*/{0})); EXPECT_FALSE(analysis.ValueIsDefinedAt(level1_root, /*index=*/{0})); EXPECT_FALSE(analysis.ValueIsDefinedAt(level0_root, /*index=*/{0})); EXPECT_TRUE(analysis.ValueIsDefinedAt(level2_param, /*index=*/{0})); EXPECT_EQ(HloValuesAt(level1_param, /*index=*/{0}), HloValuesAt(level2_param, /*index=*/{0})); EXPECT_EQ(HloValuesAt(level0_param, /*index=*/{0}), HloValuesAt(level2_param, /*index=*/{0})); EXPECT_EQ(HloValuesAt(level1_root, /*index=*/{0}), HloValuesAt(level2_param, /*index=*/{0})); EXPECT_EQ(HloValuesAt(level0_root, /*index=*/{0}), HloValuesAt(level2_param, /*index=*/{0})); } TEST_P(HloDataflowAnalysisTest, NestedWhiles) { // Test nested while instructions. The inner body passes through element 0 of // its parameter, and the outer body passes through element 1. HLO: // // inner_body((F32[], F32[]) %tuple_param): // %add = Add(%tuple_param{0}, %tuple_param{1}) // return Tuple(%tuple_param{0}, %add) // // outer_body((F32[], F32[]) %tuple_param): // %negate = Negate(%tuple_param{0}) // %tuple = Tuple(%negate, %tuple_param{1}) // return While(%tuple, inner_body, condition) // // entry: // %constant1 = Constant(1.0) // %constant2 = Constant(2.0) // %tuple = Tuple(%constant1, %constant2) // return While(%tuple, outer_body, condition) // const Shape tuple_shape = ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_}); auto cond_builder = HloComputation::Builder("condition"); cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); HloComputation* condition = module_->AddEmbeddedComputation(cond_builder.Build()); // Element 0 passes transparently through the body. 
auto inner_builder = HloComputation::Builder("inner_body"); auto inner_param = inner_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto inner_element_0 = inner_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 0)); auto inner_element_1 = inner_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 1)); auto add = inner_builder.AddInstruction(HloInstruction::CreateBinary( scalar_shape_, HloOpcode::kAdd, inner_element_0, inner_element_1)); inner_builder.AddInstruction( HloInstruction::CreateTuple({inner_element_0, add})); HloComputation* inner_body = module_->AddEmbeddedComputation(inner_builder.Build()); // Element 1 passes transparently through the body. auto outer_builder = HloComputation::Builder("outer_body"); auto outer_param = outer_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto outer_element_0 = outer_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 0)); auto negate = outer_builder.AddInstruction(HloInstruction::CreateUnary( scalar_shape_, HloOpcode::kNegate, outer_element_0)); auto outer_element_1 = outer_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 1)); auto outer_tuple = outer_builder.AddInstruction( HloInstruction::CreateTuple({negate, outer_element_1})); auto nested_while = outer_builder.AddInstruction(HloInstruction::CreateWhile( tuple_shape, condition, inner_body, outer_tuple)); HloComputation* outer_body = module_->AddEmbeddedComputation(outer_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto entry_while = builder.AddInstruction( HloInstruction::CreateWhile(tuple_shape, condition, outer_body, tuple)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); EXPECT_THAT(HloValuesAt(inner_param, /*index=*/{0}), UnorderedElementsAre(analysis.GetValueDefinedAt(negate))); if (ssa_form) { EXPECT_TRUE(analysis.ValueIsDefinedAt(inner_param, /*index=*/{1})); EXPECT_TRUE( analysis.GetValueDefinedAt(inner_param, /*index=*/{1}).is_phi()); // Element 0 of the nested while is %negate. EXPECT_FALSE(analysis.ValueIsDefinedAt(nested_while, /*index=*/{0})); EXPECT_THAT(HloValuesAt(inner_param, /*index=*/{0}), UnorderedElementsAre(analysis.GetValueDefinedAt(negate))); // Element 1 is a phi value (join of %add and %constant2). 
EXPECT_TRUE(analysis.ValueIsDefinedAt(nested_while, /*index=*/{1})); EXPECT_TRUE( analysis.GetValueDefinedAt(nested_while, /*index=*/{1}).is_phi()); EXPECT_TRUE(analysis.ValueIsDefinedAt(entry_while, /*index=*/{0})); EXPECT_TRUE( analysis.GetValueDefinedAt(entry_while, /*index=*/{0}).is_phi()); EXPECT_TRUE(analysis.ValueIsDefinedAt(entry_while, /*index=*/{1})); EXPECT_TRUE( analysis.GetValueDefinedAt(entry_while, /*index=*/{1}).is_phi()); } else { EXPECT_THAT(HloValuesAt(inner_param, /*index=*/{1}), UnorderedElementsAre(analysis.GetValueDefinedAt(add), analysis.GetValueDefinedAt(constant2))); EXPECT_THAT(HloValuesAt(nested_while, /*index=*/{0}), UnorderedElementsAre(analysis.GetValueDefinedAt(negate))); EXPECT_THAT(HloValuesAt(nested_while, /*index=*/{1}), UnorderedElementsAre(analysis.GetValueDefinedAt(add), analysis.GetValueDefinedAt(constant2))); EXPECT_THAT(HloValuesAt(entry_while, /*index=*/{0}), UnorderedElementsAre(analysis.GetValueDefinedAt(negate), analysis.GetValueDefinedAt(constant1))); EXPECT_THAT(HloValuesAt(entry_while, /*index=*/{1}), UnorderedElementsAre(analysis.GetValueDefinedAt(add), analysis.GetValueDefinedAt(constant2))); } } TEST_P(HloDataflowAnalysisTest, SwizzlingWhileSharedInput) { // Test a while instruction with a body which permutes it's tuple parameter // elements. HLO: // // body((F32[], F32[]) %tuple_param): // return Tuple(%tuple_param{1}, %tuple_param{0}) // // condition((F32[], F32[]) %tuple_param): // return Constant(false) // // entry: // %constant1 = Constant(1.0) // %tuple = Tuple(%constant1, %constant1) // return While(%tuple, body, condition) // const Shape tuple_shape = ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_}); auto body_builder = HloComputation::Builder("body"); auto body_param = body_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto body_element_0 = body_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0)); auto body_element_1 = body_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1)); body_builder.AddInstruction( HloInstruction::CreateTuple({body_element_1, body_element_0})); HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build()); auto cond_builder = HloComputation::Builder("condition"); cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); HloComputation* condition = module_->AddEmbeddedComputation(cond_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant1})); builder.AddInstruction( HloInstruction::CreateWhile(tuple_shape, condition, body, tuple)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, /*index=*/{0})); } TEST_P(HloDataflowAnalysisTest, SwizzlingWhile) { // Test a while instruction with a body which permutes it's tuple parameter // elements. 
  //
  // body((F32[], F32[]) %tuple_param):
  //   return Tuple(%tuple_param{1}, %tuple_param{0})
  //
  // condition((F32[], F32[]) %tuple_param):
  //   return Constant(false)
  //
  // entry:
  //   %constant1 = Constant(1.0)
  //   %constant2 = Constant(2.0)
  //   %tuple = Tuple(%constant1, %constant2)
  //   return While(%tuple, body, condition)
  //
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});

  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  body_builder.AddInstruction(
      HloInstruction::CreateTuple({body_element_1, body_element_0}));
  HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());

  auto cond_builder = HloComputation::Builder("condition");
  auto cond_param = cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module_->AddEmbeddedComputation(cond_builder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);

  if (ssa_form) {
    // Elements 0 and 1 in the while should both be phi values.
    EXPECT_TRUE(analysis.ValueIsDefinedAt(body_param, /*index=*/{0}));
    EXPECT_TRUE(analysis.GetValueDefinedAt(body_param, /*index=*/{0}).is_phi());
    EXPECT_TRUE(analysis.ValueIsDefinedAt(body_param, /*index=*/{1}));
    EXPECT_TRUE(analysis.GetValueDefinedAt(body_param, /*index=*/{1}).is_phi());

    EXPECT_TRUE(analysis.ValueIsDefinedAt(xla_while, /*index=*/{0}));
    EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{0}).is_phi());
    EXPECT_TRUE(analysis.ValueIsDefinedAt(xla_while, /*index=*/{1}));
    EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{1}).is_phi());

    EXPECT_TRUE(analysis.ValueIsDefinedAt(cond_param, /*index=*/{0}));
    EXPECT_TRUE(analysis.GetValueDefinedAt(cond_param, /*index=*/{0}).is_phi());
    EXPECT_TRUE(analysis.ValueIsDefinedAt(cond_param, /*index=*/{1}));
    EXPECT_TRUE(analysis.GetValueDefinedAt(cond_param, /*index=*/{1}).is_phi());

    EXPECT_FALSE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
    EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module());

    EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{})
                    .live_out_of_module());
    EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{0})
                    .live_out_of_module());
    EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{1})
                    .live_out_of_module());
  } else {
    // Elements 0 and 1 have both constants as reaching definitions.
    EXPECT_THAT(HloValuesAt(xla_while, /*index=*/{0}),
                UnorderedElementsAre(analysis.GetValueDefinedAt(constant1),
                                     analysis.GetValueDefinedAt(constant2)));
    EXPECT_THAT(HloValuesAt(xla_while, /*index=*/{1}),
                UnorderedElementsAre(analysis.GetValueDefinedAt(constant1),
                                     analysis.GetValueDefinedAt(constant2)));

    EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
    EXPECT_TRUE(analysis.GetValueDefinedAt(constant2).live_out_of_module());
  }
}

TEST_P(HloDataflowAnalysisTest, ArraySelect) {
  // Test a kSelect of an array value.
  auto builder = HloComputation::Builder(TestName());
  auto pred = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto select = builder.AddInstruction(HloInstruction::CreateTernary(
      scalar_shape_, HloOpcode::kSelect, pred, constant1, constant2));

  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);

  EXPECT_TRUE(analysis.ValueIsDefinedAt(select));
  EXPECT_FALSE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
  EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module());
  EXPECT_TRUE(analysis.GetValueDefinedAt(select).live_out_of_module());
}

TEST_P(HloDataflowAnalysisTest, TupleSelect) {
  // Test a kTupleSelect. Non-top-level elements flow through the instruction.
  auto builder = HloComputation::Builder(TestName());
  auto pred = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto constant3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
  auto constant4 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4.0)));
  auto tuple1 =
      builder.AddInstruction(HloInstruction::CreateTuple({constant1}));
  auto tuple2 =
      builder.AddInstruction(HloInstruction::CreateTuple({constant2}));
  auto tuple3 =
      builder.AddInstruction(HloInstruction::CreateTuple({constant3}));
  auto tuple4 =
      builder.AddInstruction(HloInstruction::CreateTuple({constant4}));
  const Shape tuple_shape = tuple1->shape();
  auto select11 = builder.AddInstruction(HloInstruction::CreateTernary(
      tuple_shape, HloOpcode::kTupleSelect, pred, tuple1, tuple1));
  auto select12 = builder.AddInstruction(HloInstruction::CreateTernary(
      tuple_shape, HloOpcode::kTupleSelect, pred, tuple1, tuple2));
  auto select34 = builder.AddInstruction(HloInstruction::CreateTernary(
      tuple_shape, HloOpcode::kTupleSelect, pred, tuple3, tuple4));
  auto select1234 = builder.AddInstruction(HloInstruction::CreateTernary(
      tuple_shape, HloOpcode::kTupleSelect, pred, select12, select34));

  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);

  // Top-level value is always defined by a kTupleSelect.
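  // (Editor's gloss: each %selectNN below therefore defines a fresh top-level
  // tuple value, while its element {0} is simply the set of values reachable
  // from either operand's element {0}.)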
  EXPECT_TRUE(analysis.ValueIsDefinedAt(select11));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(select12));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(select34));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(select1234));

  EXPECT_FALSE(analysis.ValueIsDefinedAt(select11, /*index=*/{0}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(select12, /*index=*/{0}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(select34, /*index=*/{0}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(select1234, /*index=*/{0}));

  EXPECT_THAT(HloValuesAt(select11, /*index=*/{0}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(constant1)));
  EXPECT_THAT(HloValuesAt(select12, /*index=*/{0}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(constant1),
                                   analysis.GetValueDefinedAt(constant2)));
  EXPECT_THAT(HloValuesAt(select34, /*index=*/{0}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(constant3),
                                   analysis.GetValueDefinedAt(constant4)));
  EXPECT_THAT(HloValuesAt(select1234, /*index=*/{0}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(constant1),
                                   analysis.GetValueDefinedAt(constant2),
                                   analysis.GetValueDefinedAt(constant3),
                                   analysis.GetValueDefinedAt(constant4)));

  EXPECT_THAT(
      analysis.GetValueDefinedAt(tuple1, /*index=*/{}).uses(),
      UnorderedElementsAre(HloUse{select11, 1, {}}, HloUse{select11, 2, {}},
                           HloUse{select12, 1, {}}));

  // The two constant values just pass through the Selects and are not used
  // except at the root. They are live out, however.
  EXPECT_THAT(analysis.GetValueDefinedAt(constant1).uses(),
              UnorderedElementsAre(HloUse{select1234, 1, {0}}));
  EXPECT_THAT(analysis.GetValueDefinedAt(constant2).uses(),
              UnorderedElementsAre(HloUse{select1234, 1, {0}}));
  EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
  EXPECT_TRUE(analysis.GetValueDefinedAt(constant2).live_out_of_module());
}

TEST_P(HloDataflowAnalysisTest, NestedTupleSelect) {
  // Test kTupleSelect of a nested tuple.
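  // Roughly the following HLO is built (an informal sketch derived from the
  // builder calls below; names follow the C++ variables):
  //
  // entry:
  //   %pred = Constant(false)
  //   %tuple1 = Tuple(%constant1, Tuple(%constant2, %constant3))
  //   %tuple2 = Tuple(%constant4, Tuple(%constant5, %constant3))
  //   return TupleSelect(%pred, %tuple1, %tuple2)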
  auto builder = HloComputation::Builder(TestName());
  auto pred = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto constant3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
  auto constant4 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4.0)));
  auto constant5 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(5.0)));
  auto inner_tuple1 = builder.AddInstruction(
      HloInstruction::CreateTuple({constant2, constant3}));
  auto tuple1 = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, inner_tuple1}));
  auto inner_tuple2 = builder.AddInstruction(
      HloInstruction::CreateTuple({constant5, constant3}));
  auto tuple2 = builder.AddInstruction(
      HloInstruction::CreateTuple({constant4, inner_tuple2}));
  auto select = builder.AddInstruction(HloInstruction::CreateTernary(
      tuple1->shape(), HloOpcode::kTupleSelect, pred, tuple1, tuple2));

  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);

  EXPECT_TRUE(analysis.ValueIsDefinedAt(select));

  EXPECT_THAT(HloValuesAt(select, /*index=*/{0}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(constant1),
                                   analysis.GetValueDefinedAt(constant4)));
  EXPECT_THAT(HloValuesAt(select, /*index=*/{1}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(inner_tuple1),
                                   analysis.GetValueDefinedAt(inner_tuple2)));
  EXPECT_THAT(HloValuesAt(select, /*index=*/{1, 0}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(constant2),
                                   analysis.GetValueDefinedAt(constant5)));
  EXPECT_THAT(HloValuesAt(select, /*index=*/{1, 1}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(constant3)));
}

TEST_P(HloDataflowAnalysisTest, TupleSelectToWhile) {
  // Test a tuple-shaped kTupleSelect feeding a kWhile instruction. HLO:
  //
  // body((F32[], F32[]) %tuple_param):
  //   %add = Add(%tuple_param{0}, %tuple_param{1})
  //   return Tuple(%tuple_param{0}, %add)
  //
  // condition((F32[], F32[]) %tuple_param):
  //   return Constant(false)
  //
  // entry:
  //   %pred = Constant(false)
  //   %constant1 = Constant(1.0)
  //   %constant2 = Constant(2.0)
  //   %constant3 = Constant(3.0)
  //   %tuple1 = Tuple(%constant1)
  //   %tuple2 = Tuple(%constant2)
  //   %select = TupleSelect(%pred, %tuple1, %tuple2)
  //   %gte = GetTupleElement(%select, 0)
  //   %tuple = Tuple(%gte, %constant3)
  //   return While(%tuple, body, condition)
  //
  auto builder = HloComputation::Builder(TestName());

  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});

  // Element 0 passes transparently through the body.
auto body_builder = HloComputation::Builder("body"); auto body_param = body_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto body_element_0 = body_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0)); auto body_element_1 = body_builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1)); auto add = body_builder.AddInstruction(HloInstruction::CreateBinary( scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1)); body_builder.AddInstruction( HloInstruction::CreateTuple({body_element_0, add})); HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build()); auto cond_builder = HloComputation::Builder("condition"); cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); HloComputation* condition = module_->AddEmbeddedComputation(cond_builder.Build()); auto pred = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto constant3 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); auto tuple1 = builder.AddInstruction(HloInstruction::CreateTuple({constant1})); auto tuple2 = builder.AddInstruction(HloInstruction::CreateTuple({constant2})); auto select = builder.AddInstruction(HloInstruction::CreateTernary( tuple1->shape(), HloOpcode::kTupleSelect, pred, tuple1, tuple2)); auto gte = builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape_, select, 0)); auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({gte, constant3})); auto xla_while = builder.AddInstruction( HloInstruction::CreateWhile(tuple->shape(), condition, body, tuple)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); if (ssa_form) { EXPECT_TRUE(analysis.ValueIsDefinedAt(xla_while, /*index=*/{0})); EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{0}).is_phi()); EXPECT_TRUE(analysis.ValueIsDefinedAt(xla_while, /*index=*/{1})); EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{1}).is_phi()); EXPECT_FALSE(analysis.ValueIsDefinedAt(select, /*index=*/{0})); EXPECT_FALSE(analysis.GetValueDefinedAt(constant1).live_out_of_module()); EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module()); EXPECT_FALSE(analysis.GetValueDefinedAt(constant3).live_out_of_module()); EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{1}) .live_out_of_module()); } else { EXPECT_THAT(HloValuesAt(gte), UnorderedElementsAre(analysis.GetValueDefinedAt(constant1), analysis.GetValueDefinedAt(constant2))); EXPECT_THAT(HloValuesAt(xla_while, /*index=*/{0}), UnorderedElementsAre(analysis.GetValueDefinedAt(constant1), analysis.GetValueDefinedAt(constant2))); EXPECT_THAT(HloValuesAt(xla_while, /*index=*/{1}), UnorderedElementsAre(analysis.GetValueDefinedAt(add), analysis.GetValueDefinedAt(constant3))); EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module()); EXPECT_TRUE(analysis.GetValueDefinedAt(constant2).live_out_of_module()); EXPECT_TRUE(analysis.GetValueDefinedAt(constant3).live_out_of_module()); } } 
TEST_P(HloDataflowAnalysisTest, BitcastDefinesValue) {
  // Test the bitcast_defines_value flag to the dataflow analysis.
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto bitcast = builder.AddInstruction(
      HloInstruction::CreateBitcast(scalar_shape_, constant));

  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  {
    const HloDataflowAnalysis& analysis =
        RunAnalysis(ssa_form, /*bitcast_defines_value=*/true);
    EXPECT_EQ(analysis.values().size(), 2);

    EXPECT_TRUE(analysis.ValueIsDefinedAt(constant));
    EXPECT_TRUE(analysis.ValueIsDefinedAt(bitcast));
    EXPECT_FALSE(analysis.GetValueDefinedAt(constant).live_out_of_module());
    EXPECT_TRUE(analysis.GetValueDefinedAt(bitcast).live_out_of_module());
  }
  {
    const HloDataflowAnalysis& analysis =
        RunAnalysis(ssa_form, /*bitcast_defines_value=*/false);
    EXPECT_EQ(analysis.values().size(), 1);

    EXPECT_TRUE(analysis.ValueIsDefinedAt(constant));
    EXPECT_FALSE(analysis.ValueIsDefinedAt(bitcast));
    EXPECT_TRUE(analysis.GetValueDefinedAt(constant).live_out_of_module());
  }
}

TEST_P(HloDataflowAnalysisTest, TupleCopy) {
  // Test that a tuple-shaped copy only copies (defines) the top-level value.
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({param0, param1}));
  auto copy = builder.AddInstruction(
      HloInstruction::CreateUnary(tuple->shape(), HloOpcode::kCopy, tuple));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);

  EXPECT_EQ(analysis.values().size(), 4);

  EXPECT_TRUE(analysis.ValueIsDefinedAt(param0));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(param1));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(tuple, /*index=*/{}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, /*index=*/{0}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, /*index=*/{1}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(copy, /*index=*/{}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(copy, /*index=*/{0}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(copy, /*index=*/{1}));

  EXPECT_THAT(HloValuesAt(copy, /*index=*/{0}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(param0)));
  EXPECT_THAT(HloValuesAt(copy, /*index=*/{1}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(param1)));
  EXPECT_TRUE(
      analysis.GetValueDefinedAt(copy, /*index=*/{}).live_out_of_module());
}

TEST_P(HloDataflowAnalysisTest, CopyStartAndCopyDone) {
  // Test that a CopyDone forwards its operand tuple element at {0} to the
  // output.
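  // (Editor's informal reading of the shapes constructed below, not a spec:
  // copy-start produces a (dest, src, context) tuple with a U32 scalar
  // context, so {0} and {2} are freshly defined while {1} forwards the
  // operand; copy-done then yields the dest element.)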
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto copy_start = builder.AddInstruction(HloInstruction::CreateUnary(
      ShapeUtil::MakeTupleShape({constant->shape(), constant->shape(),
                                 ShapeUtil::MakeShape(U32, {})}),
      HloOpcode::kCopyStart, constant));
  auto copy_done = builder.AddInstruction(HloInstruction::CreateUnary(
      constant->shape(), HloOpcode::kCopyDone, copy_start));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);

  EXPECT_EQ(analysis.values().size(), 4);

  EXPECT_TRUE(analysis.ValueIsDefinedAt(copy_start, /*index=*/{}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(copy_start, /*index=*/{0}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(copy_start, /*index=*/{1}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(copy_start, /*index=*/{2}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(copy_done, /*index=*/{}));
  EXPECT_THAT(
      HloValuesAt(copy_done, /*index=*/{}),
      UnorderedElementsAre(analysis.GetValueDefinedAt(copy_start, {0})));
  EXPECT_TRUE(analysis.GetValueDefinedAt(copy_start, /*index=*/{0})
                  .live_out_of_module());
}

TEST_P(HloDataflowAnalysisTest, SendAndSendDone) {
  // Test that a Send forwards its operand to the output tuple at {0}.
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  auto send = builder.AddInstruction(
      HloInstruction::CreateSend(param, token, /*channel_id=*/0));
  auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);

  EXPECT_EQ(analysis.values().size(), 6);

  EXPECT_TRUE(analysis.ValueIsDefinedAt(param));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(send, /*index=*/{}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(send, /*index=*/{0}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(send, /*index=*/{1}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(send, /*index=*/{2}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(send_done));
  EXPECT_THAT(HloValuesAt(send, /*index=*/{0}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(param)));
}

TEST_P(HloDataflowAnalysisTest, SetDimensionSizeForwardsValue) {
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, vector_shape_, "param"));
  auto size = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(3)));
  auto sds = builder.AddInstruction(
      HloInstruction::CreateSetDimensionSize(vector_shape_, param, size, 0));

  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  {
    const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
    EXPECT_EQ(analysis.values().size(), 2);

    EXPECT_TRUE(analysis.ValueIsDefinedAt(param));
    EXPECT_FALSE(analysis.ValueIsDefinedAt(sds));

    EXPECT_TRUE(analysis.GetValueDefinedAt(param).live_out_of_module());
  }
}

TEST_P(HloDataflowAnalysisTest, RecvAndRecvDone) {
  // Test that a RecvDone forwards its operand tuple element at {0} to element
  // {0} of the output.
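  // (Editor's informal gloss of the checks below: recv defines all three of
  // its tuple elements, while recv-done forwards the received data at {0} and
  // defines a fresh element at {1}.)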
  auto builder = HloComputation::Builder(TestName());
  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  auto recv = builder.AddInstruction(
      HloInstruction::CreateRecv(scalar_shape_, token, /*channel_id=*/0));
  auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);

  EXPECT_EQ(analysis.values().size(), 7);

  EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, /*index=*/{}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, /*index=*/{0}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, /*index=*/{1}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, /*index=*/{2}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(recv_done, /*index=*/{}));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(recv_done, /*index=*/{0}));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(recv_done, /*index=*/{1}));
  EXPECT_THAT(HloValuesAt(recv_done, /*index=*/{0}),
              UnorderedElementsAre(analysis.GetValueDefinedAt(recv, {0})));
  EXPECT_TRUE(
      analysis.GetValueDefinedAt(recv, /*index=*/{0}).live_out_of_module());
}

TEST_P(HloDataflowAnalysisTest, ElementwiseChainInterference) {
  // A simple chain of elementwise operations. No values should interfere.
  //
  // param --> negate -> exp -> log
  //
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, vector_shape_, "param"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, param));
  auto exp = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, negate));
  auto log = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kLog, exp));

  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  RunAnalysis(GetParam());

  DependencyHloOrdering ordering(module_.get());

  // No values should interfere.
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param, exp));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param, log));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, negate, exp));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, negate, log));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, log));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, log, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, log, exp));

  // A value should always interfere with itself.
  EXPECT_TRUE(InstructionsMayInterfere(ordering, exp, exp));
}

TEST_P(HloDataflowAnalysisTest, MultipleEntryParameters_Sequential) {
  // Two entry params, which interfere with each other.
  //
  // param0 --> negate ---------------\
  // param1 --> exp --> add
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, vector_shape_, "param0"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, vector_shape_, "param1"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, param0));
  auto exp = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, param1));
  auto add = builder.AddInstruction(HloInstruction::CreateBinary(
      vector_shape_, HloOpcode::kAdd, negate, exp));

  auto entry = module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  RunAnalysis(GetParam());

  HloSchedule schedule(module_.get());
  schedule.set_sequence(entry, {param0, negate, param1, exp, add});
  TF_ASSERT_OK(schedule.Verify());
  SequentialHloOrdering ordering(schedule);

  // Entry parameters interfere as if they are defined simultaneously at
  // the very beginning.
  EXPECT_TRUE(InstructionsMayInterfere(ordering, param0, param1));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param0, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param0, exp));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param0, add));
  EXPECT_TRUE(InstructionsMayInterfere(ordering, param1, param0));
  EXPECT_TRUE(InstructionsMayInterfere(ordering, param1, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param1, exp));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param1, add));

  // Negate and exp still interfere.
  EXPECT_TRUE(InstructionsMayInterfere(ordering, negate, exp));
  EXPECT_TRUE(InstructionsMayInterfere(ordering, exp, negate));

  // But {negate, add} and {exp, add} don't interfere.
  EXPECT_FALSE(InstructionsMayInterfere(ordering, negate, add));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, add, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, add));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, add, exp));
}

TEST_P(HloDataflowAnalysisTest, WhileParameters_Sequential) {
  // Similar to MultipleEntryParameters_Sequential, but the parameter belongs
  // to a while body computation. Body computation in the sequential order:
  //
  //   %constant = Constant(...)
  //   %exp = Exp(%constant)
  //   %param = Param(0)
  //   %add = Add(%param, %exp)  ;; Root of body
  //   %dead_constant = Constant(...)
  //   %dead_negate = Negate(%dead_constant)
  //
  // %constant and its only use %exp are ordered before 'param'. However, the
  // %constant and %param values still interfere because the parameter is
  // considered live into the while body.
  //
  // Similarly, %dead_constant and %dead_negate are ordered after the root of
  // the body computation %add. However, %add is live out of the computation,
  // so %dead_constant and %add interfere.
  auto body_builder = HloComputation::Builder(TestName());
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "body_param"));
  auto constant = body_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto exp = body_builder.AddInstruction(
      HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kExp, constant));
  auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, exp, body_param));
  auto dead_constant = body_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto dead_negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, dead_constant));
  HloComputation* body = module_->AddEmbeddedComputation(
      body_builder.Build(/*root_instruction=*/add));

  auto cond_builder = HloComputation::Builder("condition");
  auto cond_param = cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "cond_param"));
  auto cond_constant = cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module_->AddEmbeddedComputation(cond_builder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param"));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(scalar_shape_, condition, body, param));

  auto entry = module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  bool ssa_form = GetParam();
  RunAnalysis(ssa_form);

  HloSchedule schedule(module_.get());
  schedule.set_sequence(entry, {param, xla_while});
  schedule.set_sequence(condition, {cond_param, cond_constant});
  // Construct the order such that 'constant' and its use 'exp' are before
  // body_param.
  schedule.set_sequence(
      body, {constant, exp, body_param, add, dead_constant, dead_negate});
  TF_ASSERT_OK(schedule.Verify());

  SequentialHloOrdering ordering(schedule);

  // 'add' is live out of the body and will interfere with later instructions
  // such as 'dead_constant' and 'dead_negate'.
  EXPECT_TRUE(InstructionsMayInterfere(ordering, add, dead_constant));
  EXPECT_TRUE(InstructionsMayInterfere(ordering, add, dead_negate));

  // The remaining checks test phi values defined by body and condition
  // parameters which only occur in the SSA form of the analysis.
  if (ssa_form) {
    // Though the ordering suggests 'constant' and 'param' should not
    // interfere, 'param' is live in and thus interferes with any earlier
    // instruction of the computation in the order (e.g. 'constant').
    EXPECT_TRUE(InstructionsMayInterfere(ordering, body_param, constant));
    EXPECT_TRUE(InstructionsMayInterfere(ordering, body_param, exp));
    EXPECT_FALSE(InstructionsMayInterfere(ordering, body_param, add));

    // The following values end up in the same buffer:
    //   (1) the init value: 'param'
    //   (2) the body parameter: 'body_param'
    //   (3) the condition parameter: 'cond_param'
    //   (4) the root value of the while body: 'add'
    //   (5) the while value: 'xla_while'
    // None should interfere.
    EXPECT_FALSE(InstructionsMayInterfere(ordering, param, body_param));
    EXPECT_FALSE(InstructionsMayInterfere(ordering, param, cond_param));
    EXPECT_FALSE(InstructionsMayInterfere(ordering, param, add));
    EXPECT_FALSE(InstructionsMayInterfere(ordering, param, xla_while));

    EXPECT_FALSE(InstructionsMayInterfere(ordering, body_param, cond_param));
    EXPECT_FALSE(InstructionsMayInterfere(ordering, body_param, add));
    EXPECT_FALSE(InstructionsMayInterfere(ordering, body_param, xla_while));

    EXPECT_FALSE(InstructionsMayInterfere(ordering, cond_param, add));
    EXPECT_FALSE(InstructionsMayInterfere(ordering, cond_param, xla_while));

    EXPECT_FALSE(InstructionsMayInterfere(ordering, add, xla_while));
  }
}

TEST_P(HloDataflowAnalysisTest, NonElementwiseOperand) {
  // A chain of operations with two elementwise and one non-elementwise. The
  // elementwise op should not interfere with its operand, while the
  // non-elementwise op should interfere. Entry params always interfere.
  //
  // param --> exp -> negate -> reverse
  //
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, vector_shape_, "param"));
  auto exp = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, param));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, exp));
  auto reverse = builder.AddInstruction(
      HloInstruction::CreateReverse(vector_shape_, negate, {0}));

  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  RunAnalysis(GetParam());

  DependencyHloOrdering ordering(module_.get());

  EXPECT_FALSE(InstructionsMayInterfere(ordering, param, exp));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param, reverse));

  // Negate is elementwise, so doesn't interfere with its operand.
  // Reverse is non-elementwise, so does interfere with its operand.
  EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, negate));
  EXPECT_TRUE(InstructionsMayInterfere(ordering, negate, reverse));
}

TEST_P(HloDataflowAnalysisTest, OverlappedValues) {
  // Verify simultaneously live values interfere (exp and negate).
  //
  // param --> negate -> add
  //      \---> exp -----/
  //
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, vector_shape_, "param"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, param));
  auto exp = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, param));
  auto add = builder.AddInstruction(HloInstruction::CreateBinary(
      vector_shape_, HloOpcode::kAdd, negate, exp));

  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  RunAnalysis(GetParam());

  DependencyHloOrdering ordering(module_.get());

  EXPECT_TRUE(InstructionsMayInterfere(ordering, param, negate));
  EXPECT_TRUE(InstructionsMayInterfere(ordering, param, exp));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param, add));

  // Negate and exp interfere with each other, but not with add.
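  // (Editor's gloss: add is elementwise in both operands, so its buffer may
  // be shared with either input; that is why no interference with negate or
  // exp is expected.)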
  EXPECT_TRUE(InstructionsMayInterfere(ordering, negate, exp));
  EXPECT_TRUE(InstructionsMayInterfere(ordering, exp, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, negate, add));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, add, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, add));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, add, exp));
}

TEST_P(HloDataflowAnalysisTest, OverlappedValuesSequentialOrder) {
  // Identical to the test OverlappedValues but using a sequential ordering of
  // HLO instructions.
  //
  // param --> negate -> add
  //      \---> exp -----/
  //
  // Sequential order:
  //   param, negate, exp, add
  //
  // Liveness is identical to the DependencyHloOrdering.
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, vector_shape_, "param"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, param));
  auto exp = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, param));
  auto add = builder.AddInstruction(HloInstruction::CreateBinary(
      vector_shape_, HloOpcode::kAdd, negate, exp));

  auto entry = module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  RunAnalysis(GetParam());

  HloSchedule schedule(module_.get());
  schedule.set_sequence(entry, {param, negate, exp, add});
  TF_ASSERT_OK(schedule.Verify());
  SequentialHloOrdering ordering(schedule);

  EXPECT_TRUE(InstructionsMayInterfere(ordering, param, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param, exp));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, param, add));

  // Negate and exp interfere with each other, but not with add.
  EXPECT_TRUE(InstructionsMayInterfere(ordering, negate, exp));
  EXPECT_TRUE(InstructionsMayInterfere(ordering, exp, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, negate, add));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, add, negate));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, add));
  EXPECT_FALSE(InstructionsMayInterfere(ordering, add, exp));
}

TEST_P(HloDataflowAnalysisTest, EmbeddedComputationInterference) {
  // Test MayInterfere() for an embedded computation, specifically the
  // interference of values in different computations.
  //
  // embedded_computation:
  //   %embedded_param = Param(0)
  //   %embedded_log = Log(%embedded_param)
  //
  // entry computation:
  //   %param = Param(0)
  //   %negate = Negate(%param)
  //   %exp = Exp(%param)
  //   %call = Call(embedded_computation, {%exp})
  //   %add = Add(%negate, %call)
  //
  // Note %negate is live across the call and should interfere with all values
  // in the embedded computation.
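  // (Editor's note: a DependencyHloOrdering, built from data dependence alone,
  // is used below; no explicit schedule is needed to establish liveness
  // across the kCall boundary.)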
  auto embedded_builder = HloComputation::Builder(TestName() + "_embedded");
  auto embedded_param = embedded_builder.AddInstruction(
      HloInstruction::CreateParameter(0, vector_shape_, "embedded_param"));
  auto embedded_log =
      embedded_builder.AddInstruction(HloInstruction::CreateUnary(
          vector_shape_, HloOpcode::kLog, embedded_param));
  auto embedded_computation =
      module_->AddEmbeddedComputation(embedded_builder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, vector_shape_, "param"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, param));
  auto exp = builder.AddInstruction(
      HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, param));
  auto call = builder.AddInstruction(
      HloInstruction::CreateCall(vector_shape_, {exp}, embedded_computation));
  builder.AddInstruction(HloInstruction::CreateBinary(
      vector_shape_, HloOpcode::kAdd, negate, call));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  RunAnalysis(GetParam());

  DependencyHloOrdering ordering(module_.get());

  // Exp's only use is the call, so it should not interfere with values inside
  // the embedded computation.
  EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, embedded_log));
  // Negate is live across the call and should interfere with values in the
  // embedded computation.
  EXPECT_TRUE(InstructionsMayInterfere(ordering, negate, embedded_log));
}

TEST_P(HloDataflowAnalysisTest, GetFlattenedValueSet) {
  const char* hlo_text = R"(
HloModule test_aliasing_module

ENTRY root {
  param = s32[1000] parameter(0)
  p0 = s32[1000] copy(param)
  p1 = s32[1000] copy(param)
  ROOT t = (s32[1000], s32[1000]) tuple(p0, p1)
})";
  TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_text));
  auto entry = module_->entry_computation();
  entry->GetInstructionWithName("t");
  auto& dataflow_analysis = RunAnalysis(GetParam());
  auto set = dataflow_analysis.GetFlattenedValueSet(
      entry->GetInstructionWithName("t"));
  EXPECT_EQ(set.values().size(), 3);
}

TEST_P(HloDataflowAnalysisTest, ConditionalWithIdentity) {
  // Test conditional with identity computations in both true and false cases.
  //
  // true_computation(F32[] %true_param):
  //   return %true_param
  //
  // false_computation(F32[] %false_param):
  //   return %false_param
  //
  // entry:
  //   %pred = Constant(true)
  //   %constant1 = Constant(56.0)
  //   %constant2 = Constant(12.0)
  //   return Conditional(%pred, %constant1, true_computation,
  //                      %constant2, false_computation)
  auto true_builder = HloComputation::Builder(TestName() + "_true");
  auto true_param = true_builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "true_param"));
  HloComputation* true_computation =
      module_->AddEmbeddedComputation(true_builder.Build());

  auto false_builder = HloComputation::Builder(TestName() + "_false");
  auto false_param = false_builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "false_param"));
  HloComputation* false_computation =
      module_->AddEmbeddedComputation(false_builder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto pred = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.0f)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.0f)));
  auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
      scalar_shape_, pred, constant1, true_computation, constant2,
      false_computation));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloDataflowAnalysis& analysis = RunAnalysis(GetParam());

  EXPECT_TRUE(analysis.ValueIsDefinedAt(pred));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2));

  EXPECT_FALSE(analysis.ValueIsDefinedAt(true_param));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(false_param));

  EXPECT_EQ(analysis.GetUniqueValueAt(true_param),
            analysis.GetValueDefinedAt(constant1));
  EXPECT_EQ(analysis.GetUniqueValueAt(false_param),
            analysis.GetValueDefinedAt(constant2));

  EXPECT_THAT(analysis.GetValueDefinedAt(pred).uses(),
              ElementsAre(HloUse{conditional, 0, {}}));
  EXPECT_THAT(analysis.GetValueDefinedAt(constant1).uses(),
              ElementsAre(HloUse{conditional, 1, {}}));
  EXPECT_THAT(analysis.GetValueDefinedAt(constant2).uses(),
              ElementsAre(HloUse{conditional, 2, {}}));

  bool ssa_form = GetParam();
  if (ssa_form) {
    EXPECT_EQ(analysis.values().size(), 4);
    EXPECT_TRUE(analysis.ValueIsDefinedAt(conditional));
  } else {
    EXPECT_EQ(analysis.values().size(), 3);
    EXPECT_FALSE(analysis.ValueIsDefinedAt(conditional));
    EXPECT_THAT(HloValuesAt(conditional),
                UnorderedElementsAre(analysis.GetValueDefinedAt(constant1),
                                     analysis.GetValueDefinedAt(constant2)));
  }
}

TEST_P(HloDataflowAnalysisTest, ConditionalTakingTupleOperand) {
  // Test conditional with true and false computations taking a tuple operand.
  //
  // true_computation((F32[], F32[]) %true_param):
  //   %true_x = GetTupleElement(%true_param, 0)
  //   %true_y = GetTupleElement(%true_param, 1)
  //   return Add(%true_x, %true_y)
  //
  // false_computation((F32[], F32[]) %false_param):
  //   %false_x = GetTupleElement(%false_param, 0)
  //   %false_y = GetTupleElement(%false_param, 1)
  //   return Subtract(%false_x, %false_y)
  //
  // entry:
  //   %pred = Constant(true)
  //   %constant1 = Constant(56.0)
  //   %constant2 = Constant(12.0)
  //   %tuple_operand = Tuple(%constant1, %constant2)
  //   return Conditional(%pred, %tuple_operand, true_computation,
  //                      %tuple_operand, false_computation)
  auto true_builder = HloComputation::Builder(TestName() + "_true");
  auto true_param = true_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape_, "true_param"));
  auto true_x = true_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, true_param, 0));
  auto true_y = true_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, true_param, 1));
  auto add = true_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, true_x, true_y));
  HloComputation* true_computation =
      module_->AddEmbeddedComputation(true_builder.Build());

  auto false_builder = HloComputation::Builder(TestName() + "_false");
  auto false_param = false_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape_, "false_param"));
  auto false_x = false_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, false_param, 0));
  auto false_y = false_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, false_param, 1));
  auto sub = false_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kSubtract, false_x, false_y));
  HloComputation* false_computation =
      module_->AddEmbeddedComputation(false_builder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto pred = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.0f)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.0f)));
  auto tuple_operand = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
      scalar_shape_, pred, tuple_operand, true_computation, tuple_operand,
      false_computation));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloDataflowAnalysis& analysis = RunAnalysis(GetParam());

  EXPECT_TRUE(analysis.ValueIsDefinedAt(pred));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(tuple_operand));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(add));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(sub));

  EXPECT_FALSE(analysis.ValueIsDefinedAt(true_param));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(false_param));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(true_x));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(true_y));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(false_x));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(false_y));

  EXPECT_EQ(analysis.GetUniqueValueAt(true_param),
            analysis.GetValueDefinedAt(tuple_operand));
  EXPECT_EQ(analysis.GetUniqueValueAt(false_param),
            analysis.GetValueDefinedAt(tuple_operand));

  EXPECT_EQ(analysis.GetUniqueValueAt(true_x),
            analysis.GetValueDefinedAt(constant1));
  EXPECT_EQ(analysis.GetUniqueValueAt(true_y),
            analysis.GetValueDefinedAt(constant2));
  EXPECT_EQ(analysis.GetUniqueValueAt(false_x),
            analysis.GetValueDefinedAt(constant1));
  EXPECT_EQ(analysis.GetUniqueValueAt(false_y),
            analysis.GetValueDefinedAt(constant2));

  EXPECT_THAT(analysis.GetValueDefinedAt(pred).uses(),
              ElementsAre(HloUse{conditional, 0, {}}));
  EXPECT_THAT(analysis.GetValueDefinedAt(constant1).uses(),
              UnorderedElementsAre(HloUse{conditional, 1, {0}},
                                   HloUse{conditional, 2, {0}},
                                   HloUse{add, 0, {}}, HloUse{sub, 0, {}}));
  EXPECT_THAT(analysis.GetValueDefinedAt(constant2).uses(),
              UnorderedElementsAre(HloUse{conditional, 1, {1}},
                                   HloUse{conditional, 2, {1}},
                                   HloUse{add, 1, {}}, HloUse{sub, 1, {}}));
  EXPECT_THAT(analysis.GetValueDefinedAt(tuple_operand).uses(),
              UnorderedElementsAre(
                  HloUse{conditional, 1, {}}, HloUse{conditional, 2, {}},
                  HloUse{true_x, 0, {}}, HloUse{true_y, 0, {}},
                  HloUse{false_x, 0, {}}, HloUse{false_y, 0, {}}));

  bool ssa_form = GetParam();
  if (ssa_form) {
    EXPECT_EQ(analysis.values().size(), 7);
    EXPECT_TRUE(analysis.ValueIsDefinedAt(conditional));
  } else {
    EXPECT_EQ(analysis.values().size(), 6);
    EXPECT_FALSE(analysis.ValueIsDefinedAt(conditional));
    EXPECT_THAT(HloValuesAt(conditional),
                UnorderedElementsAre(analysis.GetValueDefinedAt(add),
                                     analysis.GetValueDefinedAt(sub)));
  }
}

TEST_P(HloDataflowAnalysisTest, NestedConditionals) {
  // computation1(F32[] %param1):
  //   %ceil = Ceil(%param1)
  //   return %ceil
  //
  // computation2(F32[] %param2):
  //   %floor = Floor(%param2)
  //   return %floor
  //
  // computation3(F32[] %param3):
  //   %negate = Negate(%param3)
  //   return %negate
  //
  // inner_conditional((PRED, F32[], F32[]) %param_cond):
  //   %pred_cond = GetTupleElement(%param_cond, 0)
  //   %true_operand_cond = GetTupleElement(%param_cond, 1)
  //   %false_operand_cond = GetTupleElement(%param_cond, 2)
  //   return Conditional(%pred_cond, %true_operand_cond, computation1,
  //                      %false_operand_cond, computation2)
  //
  // entry:
  //   %pred1 = Constant(true)
  //   %pred2 = Constant(false)
  //   %constant1 = Constant(1.1)
  //   %constant2 = Constant(2.2)
  //   %constant3 = Constant(3.3)
  //   return Conditional(%pred1, (%pred2, %constant1, %constant2),
  //                      inner_conditional, %constant3, computation3)
  auto computation1 = module_->AddEmbeddedComputation(
      CreateR0F32UnaryOpComputation(HloOpcode::kCeil));
  auto computation2 = module_->AddEmbeddedComputation(
      CreateR0F32UnaryOpComputation(HloOpcode::kFloor));
  auto computation3 = module_->AddEmbeddedComputation(
      CreateR0F32UnaryOpComputation(HloOpcode::kNegate));

  // Build inner_conditional computation.
  const Shape scalar_bool_shape = ShapeUtil::MakeShape(PRED, {});
  const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
      {scalar_bool_shape, scalar_shape_, scalar_shape_});
  auto inner_builder =
      HloComputation::Builder(TestName() + "_inner_conditional");
  auto param_cond = inner_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_param_shape, "param_cond"));
  auto pred_cond = inner_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_bool_shape, param_cond, 0));
  auto true_operand_cond = inner_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param_cond, 1));
  auto false_operand_cond = inner_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param_cond, 2));
  auto inner_conditional =
      inner_builder.AddInstruction(HloInstruction::CreateConditional(
          scalar_shape_, pred_cond, true_operand_cond, computation1,
          false_operand_cond, computation2));
  auto inner_conditional_computation =
      module_->AddEmbeddedComputation(inner_builder.Build());

  // Build entry computation.
  auto builder = HloComputation::Builder(TestName());
  auto pred1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  auto pred2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.2f)));
  auto constant3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.3f)));
  auto tuple_operand = builder.AddInstruction(
      HloInstruction::CreateTuple({pred2, constant1, constant2}));
  auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
      scalar_shape_, pred1, tuple_operand, inner_conditional_computation,
      constant3, computation3));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloDataflowAnalysis& analysis = RunAnalysis(GetParam());

  EXPECT_TRUE(analysis.ValueIsDefinedAt(pred1));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(pred2));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(constant3));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(tuple_operand));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(computation1->root_instruction()));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(computation2->root_instruction()));
  EXPECT_TRUE(analysis.ValueIsDefinedAt(computation3->root_instruction()));

  auto computation1_param = computation1->parameter_instruction(0);
  auto computation2_param = computation2->parameter_instruction(0);
  auto computation3_param = computation3->parameter_instruction(0);
  EXPECT_FALSE(analysis.ValueIsDefinedAt(computation1_param));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(computation2_param));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(computation3_param));
  EXPECT_EQ(analysis.GetUniqueValueAt(computation1_param),
            analysis.GetValueDefinedAt(constant1));
  EXPECT_EQ(analysis.GetUniqueValueAt(computation2_param),
            analysis.GetValueDefinedAt(constant2));
  EXPECT_EQ(analysis.GetUniqueValueAt(computation3_param),
            analysis.GetValueDefinedAt(constant3));

  EXPECT_FALSE(analysis.ValueIsDefinedAt(param_cond));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(pred_cond));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(true_operand_cond));
  EXPECT_FALSE(analysis.ValueIsDefinedAt(false_operand_cond));
  EXPECT_EQ(analysis.GetUniqueValueAt(param_cond),
            analysis.GetValueDefinedAt(tuple_operand));
  EXPECT_EQ(analysis.GetUniqueValueAt(pred_cond),
            analysis.GetValueDefinedAt(pred2));
  EXPECT_EQ(analysis.GetUniqueValueAt(true_operand_cond),
            analysis.GetValueDefinedAt(constant1));
  EXPECT_EQ(analysis.GetUniqueValueAt(false_operand_cond),
            analysis.GetValueDefinedAt(constant2));

  bool ssa_form = GetParam();
  if (ssa_form) {
    EXPECT_EQ(analysis.values().size(), 11);
    EXPECT_TRUE(analysis.ValueIsDefinedAt(inner_conditional));
    EXPECT_TRUE(analysis.ValueIsDefinedAt(conditional));
  } else {
    EXPECT_EQ(analysis.values().size(), 9);
    EXPECT_FALSE(analysis.ValueIsDefinedAt(inner_conditional));
    EXPECT_FALSE(analysis.ValueIsDefinedAt(conditional));
    EXPECT_THAT(
        HloValuesAt(inner_conditional),
        UnorderedElementsAre(
            analysis.GetValueDefinedAt(computation1->root_instruction()),
            analysis.GetValueDefinedAt(computation2->root_instruction())));
    EXPECT_THAT(
        HloValuesAt(conditional),
        UnorderedElementsAre(
            analysis.GetValueDefinedAt(computation1->root_instruction()),
            analysis.GetValueDefinedAt(computation2->root_instruction()),
            analysis.GetValueDefinedAt(computation3->root_instruction())));
  }
}

TEST_P(HloDataflowAnalysisTest, AddDependency) {
  string module_string = R"(
HloModule AddDependency
ENTRY %AddDependency (p: f32[3]) -> f32[3] {
  %p = f32[3] parameter(0)
  %token0 = token[] after-all()
  ROOT %add_dep = f32[3] add-dependency(f32[3] %p, token[] %token0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(module_string, GetModuleConfigForTest()));

  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloDataflowAnalysis> analysis,
                          HloDataflowAnalysis::Run(*module));
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kAddDependency);

  // The after-all and parameter should define a value. Add-dependency should
  // not.
EXPECT_EQ(analysis->values().size(), 2); EXPECT_FALSE(analysis->ValueIsDefinedAt(root)); } INSTANTIATE_TEST_SUITE_P(HloDataflowAnalysisInstantiation, HloDataflowAnalysisTest, ::testing::Values(false, true)); class HloDataflowAnalysisTestBase : public HloTestBase { protected: void BuildModule(std::unique_ptr<HloComputation> computation) { module_ = CreateNewVerifiedModule(); computation_ = module_->AddEntryComputation(std::move(computation)); } void RunAnalysis( const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr) { CHECK_NOTNULL(module_.get()); dataflow_analysis_ = HloDataflowAnalysis::Run( *module_, /*ssa_form=*/false, /*bitcast_defines_value=*/false, can_share_buffer) .ConsumeValueOrDie(); } void BuildModuleAndRunAnalysis(std::unique_ptr<HloComputation> computation) { BuildModule(std::move(computation)); RunAnalysis(); } std::unique_ptr<HloModule> module_; HloComputation* computation_ = nullptr; std::unique_ptr<HloDataflowAnalysis> dataflow_analysis_; }; class DoesNotUseOperandBufferTest : public HloDataflowAnalysisTestBase {}; TEST_F(DoesNotUseOperandBufferTest, GetTupleElement) { auto builder = HloComputation::Builder(TestName()); Shape elem_shape = ShapeUtil::MakeShape(F32, {8}); auto tuple = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeTupleShape({elem_shape, elem_shape}), "tuple")); auto gte0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(elem_shape, tuple, 0)); auto gte1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(elem_shape, tuple, 1)); builder.AddInstruction( HloInstruction::CreateBinary(elem_shape, HloOpcode::kAdd, gte0, gte1)); BuildModuleAndRunAnalysis(builder.Build()); // GetTupleElement instructions only access the top-level buffer of their // operand. EXPECT_TRUE(dataflow_analysis_->DoesNotUseOperandBuffer(tuple, {0}, gte0)); EXPECT_TRUE(dataflow_analysis_->DoesNotUseOperandBuffer(tuple, {1}, gte1)); EXPECT_FALSE(dataflow_analysis_->DoesNotUseOperandBuffer(tuple, {}, gte0)); EXPECT_FALSE(dataflow_analysis_->DoesNotUseOperandBuffer(tuple, {}, gte1)); } TEST_F(DoesNotUseOperandBufferTest, FusedDynamicUpdateSlice) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {8}); auto tuple = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple")); auto gte0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple, 0)); auto gte1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple, 1)); // Create a DynamicUpdateSlice instruction of tuple element 1. auto starts = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(2))); auto update = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f}))); auto dynamic_update_slice = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape, gte1, update, std::initializer_list<HloInstruction*>({starts}))); builder.AddInstruction( HloInstruction::CreateTuple({gte0, dynamic_update_slice})); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {dynamic_update_slice, starts, update, gte1}, HloInstruction::FusionKind::kLoop); RunAnalysis(); // The fusion instruction never uses tuple element 0, but does use element 1. 
EXPECT_TRUE(dataflow_analysis_->DoesNotUseOperandBuffer(tuple, {0}, fusion)); EXPECT_FALSE(dataflow_analysis_->DoesNotUseOperandBuffer(tuple, {1}, fusion)); } // Similar to FusedDynamicUpdateSlice above, but tests indirect uses of the // parameter tuple. TEST_F(DoesNotUseOperandBufferTest, IndirectUses) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {8}); auto tuple_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple")); auto t0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple_param, 0)); auto t1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple_param, 1)); // Swap the tuple elements. auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({t1, t0})); auto gte0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple, 0)); auto gte1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple, 1)); // Create a DynamicUpdateSlice instruction of tuple element 1. auto starts = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(2))); auto update = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f}))); auto dynamic_update_slice = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape, gte1, update, std::initializer_list<HloInstruction*>({starts}))); builder.AddInstruction( HloInstruction::CreateTuple({gte0, dynamic_update_slice})); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {dynamic_update_slice, starts, update, gte1}, HloInstruction::FusionKind::kLoop); RunAnalysis(); // The fusion instruction never uses tuple element 0, but does use element 1. EXPECT_TRUE(dataflow_analysis_->DoesNotUseOperandBuffer(tuple, {0}, fusion)); EXPECT_FALSE(dataflow_analysis_->DoesNotUseOperandBuffer(tuple, {1}, fusion)); // The same holds for the parameter tuple, except that the tuple elements // are swapped in 'tuple'. 
EXPECT_TRUE( dataflow_analysis_->DoesNotUseOperandBuffer(tuple_param, {1}, fusion)); EXPECT_FALSE( dataflow_analysis_->DoesNotUseOperandBuffer(tuple_param, {0}, fusion)); } class CanShareOperandBufferWithUserTest : public HloDataflowAnalysisTestBase {}; TEST_F(CanShareOperandBufferWithUserTest, ElementWiseSameShape) { auto builder = HloComputation::Builder(TestName()); Shape shape = ShapeUtil::MakeShape(F32, {8}); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, shape, "param")); auto exp = builder.AddInstruction( HloInstruction::CreateUnary(shape, HloOpcode::kExp, param)); auto log = builder.AddInstruction( HloInstruction::CreateUnary(shape, HloOpcode::kLog, exp)); BuildModuleAndRunAnalysis(builder.Build()); EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(param, {}, exp, {})); EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(exp, {}, log, {})); } TEST_F(CanShareOperandBufferWithUserTest, NonElementwiseLoopFusionCantAliasOperandBuffer) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2}); auto param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape, "param0")); auto neg = builder.AddInstruction( HloInstruction::CreateUnary(data_shape, HloOpcode::kNegate, param0)); auto reverse = builder.AddInstruction( HloInstruction::CreateReverse(data_shape, neg, {0, 1})); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {reverse, neg}, HloInstruction::FusionKind::kLoop); RunAnalysis(); EXPECT_FALSE(dataflow_analysis_->CanShareOperandBufferWithUser(param0, {}, fusion, {})); } TEST_F(CanShareOperandBufferWithUserTest, MultiOutputFusionCanAliasOperandBuffer) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2}); Shape in_shape = ShapeUtil::MakeShape(F32, {8}); Shape out_shape = ShapeUtil::MakeShape(PRED, {8}); auto param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, in_shape, "param0")); auto param1 = builder.AddInstruction( HloInstruction::CreateParameter(1, in_shape, "param1")); auto copy0 = builder.AddInstruction( HloInstruction::CreateUnary(in_shape, HloOpcode::kCopy, param0)); auto copy1 = builder.AddInstruction( HloInstruction::CreateUnary(in_shape, HloOpcode::kCopy, param1)); auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({copy1, copy0})); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {tuple, copy1, copy0}, HloInstruction::FusionKind::kLoop); RunAnalysis(); EXPECT_TRUE(dataflow_analysis_->CanShareOperandBufferWithUser(param0, {}, fusion, {0})); EXPECT_TRUE(dataflow_analysis_->CanShareOperandBufferWithUser(param0, {}, fusion, {1})); EXPECT_TRUE(dataflow_analysis_->CanShareOperandBufferWithUser(param1, {}, fusion, {0})); EXPECT_TRUE(dataflow_analysis_->CanShareOperandBufferWithUser(param1, {}, fusion, {1})); } TEST_F(CanShareOperandBufferWithUserTest, ElementwiseLoopFusionCantAliasOperandBuffer) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2}); auto one = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto operand = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape, one, {})); auto neg = builder.AddInstruction( HloInstruction::CreateUnary(data_shape, HloOpcode::kNegate, operand)); auto exp = builder.AddInstruction( HloInstruction::CreateUnary(data_shape, HloOpcode::kExp, neg)); 
BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {exp, neg}, HloInstruction::FusionKind::kLoop); RunAnalysis(); EXPECT_TRUE(dataflow_analysis_->CanShareOperandBufferWithUser(operand, {}, fusion, {})); } TEST_F(CanShareOperandBufferWithUserTest, CanShareOperandWhenDynamicUpdateSliceIsFedByDynamicSliceWithSameIndex) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2}); Shape slice_shape = ShapeUtil::MakeShape(F32, {1, 2}); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape, "param0")); auto zero = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64>(0))); auto ds = builder.AddInstruction(HloInstruction::CreateDynamicSlice( slice_shape, param, {zero, zero}, {1, 2})); auto dus = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape, param, ds, {zero, zero})); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {dus, ds, zero}, HloInstruction::FusionKind::kLoop); RunAnalysis(); EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(param, {}, fusion, {})); } TEST_F(CanShareOperandBufferWithUserTest, DUSWithSliceWithDifferentIndices) { const char* kModule = R"( HloModule test fused_computation { p0 = f32[10,20,30] parameter(0) p1 = s32[] parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) slice = f32[1,1,30] dynamic-slice(p0, p1, p2, p3), dynamic_slice_sizes={1,1,30} ROOT dus = f32[10,20,30] dynamic-update-slice(p0, slice, p1, p3, p2) } ENTRY test { p0 = f32[10,20,30] parameter(0) p1 = s32[] parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) ROOT fusion = f32[10,20,30] fusion(p0, p1, p2, p3), kind=kLoop, calls=fused_computation } )"; TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(kModule)); auto* fusion = module_->entry_computation()->root_instruction(); auto* param = module_->entry_computation()->parameter_instruction(0); RunAnalysis(); EXPECT_FALSE( dataflow_analysis_->CanShareOperandBufferWithUser(param, {}, fusion, {})); } TEST_F(CanShareOperandBufferWithUserTest, DUSWithSliceWithSameIndices) { const char* kModule = R"( HloModule test fused_computation { p0 = f32[10,20,30] parameter(0) p1 = s32[] parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) slice = f32[1,1,30] dynamic-slice(p0, p1, p2, p3), dynamic_slice_sizes={1,1,30} ROOT dus = f32[10,20,30] dynamic-update-slice(p0, slice, p1, p2, p3) } ENTRY test { p0 = f32[10,20,30] parameter(0) p1 = s32[] parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) ROOT fusion = f32[10,20,30] fusion(p0, p1, p2, p3), kind=kLoop, calls=fused_computation } )"; TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(kModule)); auto* fusion = module_->entry_computation()->root_instruction(); auto* param = module_->entry_computation()->parameter_instruction(0); RunAnalysis(); EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(param, {}, fusion, {})); } TEST_F(CanShareOperandBufferWithUserTest, ElementWiseDifferentShape) { auto builder = HloComputation::Builder(TestName()); Shape in_shape = ShapeUtil::MakeShape(F32, {8}); Shape out_shape = ShapeUtil::MakeShape(PRED, {8}); auto param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, in_shape, "param0")); auto param1 = builder.AddInstruction( HloInstruction::CreateParameter(1, in_shape, "param1")); auto result = builder.AddInstruction(HloInstruction::CreateCompare( out_shape, param0, param1, ComparisonDirection::kEq)); 
BuildModuleAndRunAnalysis(builder.Build()); EXPECT_FALSE(dataflow_analysis_->CanShareOperandBufferWithUser(param0, {}, result, {})); EXPECT_FALSE(dataflow_analysis_->CanShareOperandBufferWithUser(param1, {}, result, {})); } TEST_F(CanShareOperandBufferWithUserTest, CopyShares) { auto builder = HloComputation::Builder(TestName()); Shape shape = ShapeUtil::MakeShape(F32, {8}); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, shape, "param")); auto exp = builder.AddInstruction( HloInstruction::CreateUnary(shape, HloOpcode::kExp, param)); auto copy = builder.AddInstruction( HloInstruction::CreateUnary(shape, HloOpcode::kCopy, exp)); BuildModuleAndRunAnalysis(builder.Build()); EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(param, {}, exp, {})); EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(exp, {}, copy, {})); } TEST_F(CanShareOperandBufferWithUserTest, FusedDynamicUpdateSlice) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {8}); auto tuple = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple")); auto gte0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple, 0)); auto gte1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple, 1)); // Create a DynamicUpdateSlice instruction of tuple element 1. auto starts = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(2))); auto update = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f}))); auto dynamic_update_slice = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape, gte1, update, std::initializer_list<HloInstruction*>({starts}))); builder.AddInstruction( HloInstruction::CreateTuple({gte0, dynamic_update_slice})); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {dynamic_update_slice, starts, update, gte1}, HloInstruction::FusionKind::kLoop); RunAnalysis(); // The fusion instruction can share with tuple element 1. EXPECT_FALSE(dataflow_analysis_->CanShareOperandBufferWithUser(tuple, {0}, fusion, {})); EXPECT_TRUE(dataflow_analysis_->CanShareOperandBufferWithUser(tuple, {1}, fusion, {})); } TEST_F(CanShareOperandBufferWithUserTest, FusedDynamicUpdateSliceWithConvertCanShare) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {8}); Shape data_shape_bf16 = ShapeUtil::MakeShape(BF16, {8}); auto tuple = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple")); auto gte0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple, 0)); auto gte1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple, 1)); auto convert1 = builder.AddInstruction( HloInstruction::CreateConvert(data_shape_bf16, gte1)); // Create a DynamicUpdateSlice instruction of tuple element 1. 
auto starts = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(2))); auto update = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f}))); auto dynamic_update_slice = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape_bf16, convert1, update, std::initializer_list<HloInstruction*>({starts}))); auto convert2 = builder.AddInstruction( HloInstruction::CreateConvert(data_shape, dynamic_update_slice)); builder.AddInstruction(HloInstruction::CreateTuple({gte0, convert2})); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {convert2, dynamic_update_slice, starts, update, convert1}, HloInstruction::FusionKind::kLoop); RunAnalysis(); EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(gte1, {}, fusion, {})); } TEST_F(CanShareOperandBufferWithUserTest, DynamicUpdateSliceCanShare) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {1, 8}); Shape update_shape = ShapeUtil::MakeShape(F32, {1, 4}); Shape starts_shape = ShapeUtil::MakeShape(S32, {2}); auto data = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape, "data")); auto update = builder.AddInstruction( HloInstruction::CreateParameter(1, update_shape, "update")); auto start = builder.AddInstruction( HloInstruction::CreateParameter(2, starts_shape, "start")); auto dus = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape, data, update, {start})); BuildModuleAndRunAnalysis(builder.Build()); // The DynamicUpdateSlice instruction can share with the data operand, but not // with update or start. EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(data, {}, dus, {})); EXPECT_FALSE( dataflow_analysis_->CanShareOperandBufferWithUser(update, {}, dus, {})); EXPECT_FALSE( dataflow_analysis_->CanShareOperandBufferWithUser(start, {}, dus, {})); } TEST_F(CanShareOperandBufferWithUserTest, ScatterCanShare) { const char* hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) ROOT rhs = s32[] parameter(1) } ENTRY main { operand = s32[3,3] parameter(0) indices = s32[2] parameter(1) updates = s32[2,3] parameter(2) ROOT scatter = s32[3,3] scatter(operand, indices, updates), to_apply=update_s32, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 } )"; TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_text)); computation_ = module_->entry_computation(); RunAnalysis(); HloInstruction* operand_param = computation_->parameter_instruction(0); HloInstruction* indices_param = computation_->parameter_instruction(1); HloInstruction* updates_param = computation_->parameter_instruction(2); HloInstruction* scatter = computation_->root_instruction(); EXPECT_TRUE(dataflow_analysis_->CanShareOperandBufferWithUser( operand_param, {}, scatter, {})); EXPECT_FALSE(dataflow_analysis_->CanShareOperandBufferWithUser( indices_param, {}, scatter, {})); EXPECT_FALSE(dataflow_analysis_->CanShareOperandBufferWithUser( updates_param, {}, scatter, {})); } TEST_F(CanShareOperandBufferWithUserTest, TriangularSolveCanShare) { const char* hlo_text = R"( HloModule TensorFlowTriangularSolve ENTRY main { a = f32[4,4]{1,0} parameter(0) b = f32[3,4]{1,0} parameter(1) ROOT triangular-solve = f32[3,4]{1,0} triangular-solve(a, b), lower=true, transpose_a=NO_TRANSPOSE } )"; TF_ASSERT_OK_AND_ASSIGN(module_, 
ParseAndReturnVerifiedModule(hlo_text)); computation_ = module_->entry_computation(); RunAnalysis(); HloInstruction* lhs_param = computation_->parameter_instruction(0); HloInstruction* rhs_param = computation_->parameter_instruction(1); HloInstruction* triangular_solve = computation_->root_instruction(); EXPECT_FALSE(dataflow_analysis_->CanShareOperandBufferWithUser( lhs_param, {}, triangular_solve, {})); EXPECT_TRUE(dataflow_analysis_->CanShareOperandBufferWithUser( rhs_param, {}, triangular_solve, {})); } TEST_F(CanShareOperandBufferWithUserTest, SortCanShare) { auto builder = HloComputation::Builder(TestName()); module_ = CreateNewVerifiedModule(); Shape keys_shape = ShapeUtil::MakeShape(F32, {8}); auto keys = builder.AddInstruction( HloInstruction::CreateParameter(0, keys_shape, "keys")); TF_ASSERT_OK_AND_ASSIGN( auto* sort, MakeSortHlo(keys_shape, {keys}, -1, /*is_stable=*/false, &builder, module_.get())); computation_ = module_->AddEntryComputation(builder.Build()); RunAnalysis(); EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(keys, {}, sort, {})); } TEST_F(CanShareOperandBufferWithUserTest, SortCanShareWithTupleUser) { auto builder = HloComputation::Builder(TestName()); module_ = CreateNewVerifiedModule(); Shape keys_shape = ShapeUtil::MakeShape(F32, {8}); Shape values_shape = ShapeUtil::MakeShape(F32, {8}); auto keys = builder.AddInstruction( HloInstruction::CreateParameter(0, keys_shape, "keys")); auto values = builder.AddInstruction( HloInstruction::CreateParameter(1, values_shape, "values")); TF_ASSERT_OK_AND_ASSIGN( auto* sort, MakeSortHlo(ShapeUtil::MakeTupleShape({keys_shape, values_shape}), {keys, values}, 0, /*is_stable=*/false, &builder, module_.get())); computation_ = module_->AddEntryComputation(builder.Build()); RunAnalysis(); // The buffer for the keys can be shared with the first tuple entry. EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(keys, {}, sort, {0})); // The buffer for the values can be shared with the second tuple entry. EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(values, {}, sort, {1})); // Verify that the buffers are not shared with the "wrong" tuple entry. 
EXPECT_FALSE( dataflow_analysis_->CanShareOperandBufferWithUser(keys, {}, sort, {1})); EXPECT_FALSE( dataflow_analysis_->CanShareOperandBufferWithUser(values, {}, sort, {0})); } TEST_F(CanShareOperandBufferWithUserTest, FusedDotAdd) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2}); auto a = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}}))); auto b = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}}))); DotDimensionNumbers dot_dnums; dot_dnums.add_lhs_contracting_dimensions(1); dot_dnums.add_rhs_contracting_dimensions(0); PrecisionConfig precision_config; precision_config.mutable_operand_precision()->Resize( 2, PrecisionConfig::DEFAULT); auto dot = builder.AddInstruction( HloInstruction::CreateDot(data_shape, a, b, dot_dnums, precision_config)); auto one = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto add_operand = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape, one, {})); auto add = builder.AddInstruction(HloInstruction::CreateBinary( data_shape, HloOpcode::kAdd, dot, add_operand)); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {add, dot}, HloInstruction::FusionKind::kOutput); RunAnalysis(); // Output fused dot add should be able to share buffer with 'add_operand'. EXPECT_TRUE(dataflow_analysis_->CanShareOperandBufferWithUser(add_operand, {}, fusion, {})); } TEST_F(CanShareOperandBufferWithUserTest, OutputFusionCantAliasOperandBuffer) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2}); auto one = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto operand = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape, one, {})); auto reverse = builder.AddInstruction( HloInstruction::CreateReverse(data_shape, operand, {0, 1})); auto two = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}}))); auto add = builder.AddInstruction( HloInstruction::CreateBinary(data_shape, HloOpcode::kAdd, reverse, two)); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {add, two, reverse}, HloInstruction::FusionKind::kOutput); RunAnalysis(); // Output fused operand->reverse->add cannot alias operand buffer 'operand'. 
EXPECT_FALSE(dataflow_analysis_->CanShareOperandBufferWithUser(operand, {}, fusion, {})); } TEST_F(CanShareOperandBufferWithUserTest, FusionCanShareBufferCustomized) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2}); auto one = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto operand = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape, one, {})); auto mul = builder.AddInstruction(HloInstruction::CreateBinary( data_shape, HloOpcode::kMultiply, operand, operand)); auto two = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}}))); auto add = builder.AddInstruction( HloInstruction::CreateBinary(data_shape, HloOpcode::kAdd, mul, two)); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {add, two, mul}, HloInstruction::FusionKind::kInput); RunAnalysis(/*can_share_buffer=*/[](const HloInstruction* fusion, const HloInstruction*, const ShapeIndex&) { return fusion->IsLoopFusion(); }); EXPECT_FALSE(dataflow_analysis_->CanShareOperandBufferWithUser(operand, {}, fusion, {})); } TEST_F(CanShareOperandBufferWithUserTest, WhileCanShare) { module_ = CreateNewVerifiedModule(); Shape data_shape = ShapeUtil::MakeShape(F32, {8}); Shape pred_scalar_shape = ShapeUtil::MakeShape(PRED, {}); auto b = HloComputation::Builder(TestName() + ".And"); auto p0 = b.AddInstruction( HloInstruction::CreateParameter(0, pred_scalar_shape, "p0")); auto p1 = b.AddInstruction( HloInstruction::CreateParameter(1, pred_scalar_shape, "p1")); b.AddInstruction( HloInstruction::CreateBinary(pred_scalar_shape, HloOpcode::kAnd, p0, p1)); auto and_computation = module_->AddEmbeddedComputation(b.Build()); auto make_cond = [&data_shape, &and_computation]() { auto builder = HloComputation::Builder(TestName() + ".Cond"); auto data = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape, "data")); auto compare = builder.AddInstruction(HloInstruction::CreateCompare( ShapeUtil::MakeShape(PRED, {8}), data, data, ComparisonDirection::kEq)); auto true_value = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))); builder.AddInstruction( HloInstruction::CreateReduce(ShapeUtil::MakeShape(PRED, {}), compare, true_value, {0}, and_computation)); return builder.Build(); }; auto make_body = [&data_shape]() { auto builder = HloComputation::Builder(TestName() + ".Body"); auto data = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape, "data")); builder.AddInstruction( HloInstruction::CreateBinary(data_shape, HloOpcode::kAdd, data, data)); return builder.Build(); }; HloComputation* cond_computation = module_->AddEmbeddedComputation(make_cond()); HloComputation* body_computation = module_->AddEmbeddedComputation(make_body()); auto builder = HloComputation::Builder(TestName()); auto data = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape, "data")); auto whil = builder.AddInstruction(HloInstruction::CreateWhile( data_shape, cond_computation, body_computation, data)); computation_ = module_->AddEntryComputation(builder.Build()); RunAnalysis(); // The While instruction can share with the data operand. EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(data, {}, whil, {})); } // Tests that Call can alias operand buffer if the only use of the operand // in the called computation is an elementwise instruction. 
TEST_F(CanShareOperandBufferWithUserTest, CallToComputationWithFusionRoot) { Shape shape = ShapeUtil::MakeShape(F32, {8}); // Build sub-computation with fusion root. auto sub_builder = HloComputation::Builder(TestName() + "_sub"); auto sub_param = sub_builder.AddInstruction( HloInstruction::CreateParameter(0, shape, "sub_param")); auto one = sub_builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto ones = sub_builder.AddInstruction( HloInstruction::CreateBroadcast(shape, one, {})); auto add = sub_builder.AddInstruction( HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sub_param, ones)); module_ = CreateNewVerifiedModule(); auto sub_computation = module_->AddEmbeddedComputation(sub_builder.Build()); sub_computation->CreateFusionInstruction({add, ones}, HloInstruction::FusionKind::kLoop); // Build entry-computation with kCall which calls 'sub_computation'. auto builder = HloComputation::Builder(TestName()); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, shape, "param")); auto reverse = builder.AddInstruction(HloInstruction::CreateReverse(shape, param, {0})); auto call = builder.AddInstruction( HloInstruction::CreateCall(shape, {reverse}, sub_computation)); computation_ = module_->AddEntryComputation(builder.Build()); RunAnalysis(); EXPECT_TRUE( dataflow_analysis_->CanShareOperandBufferWithUser(reverse, {}, call, {})); } } // namespace } // namespace xla
utf-8
1
LGPL-3 or GPL-2
2006-2021 The Chromium Authors 2016-2021 The Qt Company Ltd.
qt6-webengine-6.2.2+dfsg/src/3rdparty/chromium/chrome/browser/extensions/api/extension_action/page_action_interactive_test.cc
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/extensions/api/extension_action/extension_action_api.h" #include "chrome/browser/extensions/extension_apitest.h" #include "chrome/browser/ui/browser.h" #include "chrome/browser/ui/browser_window.h" #include "chrome/test/base/interactive_test_utils.h" #include "content/public/test/browser_test.h" #include "extensions/test/result_catcher.h" namespace extensions { namespace { using PageActionInteractiveTest = ExtensionApiTest; // Tests popups in page actions. IN_PROC_BROWSER_TEST_F(PageActionInteractiveTest, ShowPageActionPopup) { ASSERT_TRUE(RunExtensionTest("page_action/popup")) << message_; const Extension* extension = GetSingleLoadedExtension(); ASSERT_TRUE(extension) << message_; ASSERT_TRUE(WaitForPageActionVisibilityChangeTo(1)); ASSERT_TRUE(ui_test_utils::BringBrowserWindowToFront(browser())); ASSERT_TRUE(browser()->window()->IsActive()); ResultCatcher catcher; ASSERT_TRUE(ExtensionActionAPI::Get(browser()->profile()) ->ShowExtensionActionPopupForAPICall(extension, browser())); ASSERT_TRUE(catcher.GetNextResult()); } } // namespace } // namespace extensions
utf-8
1
LGPL-3 or GPL-2
2006-2021 The Chromium Authors 2016-2021 The Qt Company Ltd.
gnome-boxes-41.3/subprojects/libhandy/src/gtk-window-private.h
/* * Copyright (C) 2019 Purism SPC * * SPDX-License-Identifier: LGPL-2.1+ */ #pragma once #include <gtk/gtk.h> G_BEGIN_DECLS void hdy_gtk_window_toggle_maximized (GtkWindow *window); GdkPixbuf *hdy_gtk_window_get_icon_for_size (GtkWindow *window, gint size); GdkWindowState hdy_gtk_window_get_state (GtkWindow *window); G_END_DECLS
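/*
 * Illustrative usage sketch (not part of the upstream header; `win' is a
 * hypothetical GtkWindow*): toggle the maximized state from e.g. a
 * header-bar double-click handler, then query the current window state.
 *
 *   hdy_gtk_window_toggle_maximized (win);
 *
 *   GdkWindowState state = hdy_gtk_window_get_state (win);
 *   if (state & GDK_WINDOW_STATE_MAXIMIZED)
 *     g_debug ("window is now maximized");
 */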
utf-8
1
LGPL-2+
2011 Red Hat, Inc.
chkservice-0.3/include/chk-ctl.h
/*
 * chkservice is a tool for managing systemd units.
 * more information at https://github.com/linuxenko/chkservice
 *
 * Copyright (C) 2017 Svetlana Linuxenko
 *
 * chkservice program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * chkservice program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _CHK_CTL_H
#define _CHK_CTL_H

#include "chk-systemd.h"

typedef struct UnitItem {
  std::string id;
  std::string target;
  std::string description;
  int sub;
  int state;
} UnitItem;

enum {
  UNIT_STATE_DISABLED = 0x01,
  UNIT_STATE_ENABLED = 0x02,
  UNIT_STATE_STATIC = 0x03,
  UNIT_STATE_BAD = 0x04,
  UNIT_STATE_MASKED = 0x05,
  UNIT_STATE_TMP = 0x06
};

enum {
  UNIT_SUBSTATE_RUNNING = 0x1a,
  UNIT_SUBSTATE_CONNECTED = 0x2a,
  UNIT_SUBSTATE_INVALID = 0x3a,
  UNIT_SUBSTATE_TMP = 0x4a
};

class ChkCTL {
  public:
    ChkCTL();
    ~ChkCTL();

    ChkBus *bus;

    std::vector<UnitItem *> getItemsSorted();
    std::vector<UnitItem *> getByTarget(const char *target);
    std::vector<UnitItem *> getItems();
    void toggleUnitState(UnitItem *item);
    void toggleUnitSubState(UnitItem *item);
    void fetch();

  private:
    std::vector<UnitItem *> items;
    void pushItem(UnitInfo *unit);
    void sortByName(std::vector<UnitItem *> *sortable);
};

#endif
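// Illustrative usage sketch (not part of upstream chkservice; error handling
// is omitted, and the effect of toggleUnitState() on a given unit is inferred
// from its name): fetch the unit list over D-Bus, then enable every disabled
// unit.
//
//   ChkCTL ctl;
//   ctl.fetch();                            // populate the unit list
//   for (UnitItem *u : ctl.getItemsSorted()) {
//     if (u->state == UNIT_STATE_DISABLED)
//       ctl.toggleUnitState(u);             // flip enabled/disabled
//   }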
utf-8
1
GPL-3+
2017 Svetlana Linuxenko <svetlana@linuxenko.pro>
nettle-3.7.3/sec-tabselect.c
/* sec-tabselect.c

   Copyright (C) 2013 Niels Möller

   This file is part of GNU Nettle.

   GNU Nettle is free software: you can redistribute it and/or
   modify it under the terms of either:

     * the GNU Lesser General Public License as published by the Free
       Software Foundation; either version 3 of the License, or (at your
       option) any later version.

   or

     * the GNU General Public License as published by the Free
       Software Foundation; either version 2 of the License, or (at your
       option) any later version.

   or both in parallel, as here.

   GNU Nettle is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   General Public License for more details.

   You should have received copies of the GNU General Public License and
   the GNU Lesser General Public License along with this program. If
   not, see http://www.gnu.org/licenses/.
*/

/* Development of Nettle's ECC support was funded by the .SE Internet Fund. */

#if HAVE_CONFIG_H
# include "config.h"
#endif

#include <assert.h>

#include "ecc-internal.h"

/* Copy the k'th element of the table out of tn elements, each of size
   rn. Always reads the complete table. Similar to gmp's
   mpn_tabselect. */
/* FIXME: Do we need to declare anything volatile? */
void
sec_tabselect (mp_limb_t *rp, mp_size_t rn,
	       const mp_limb_t *table, unsigned tn,
	       unsigned k)
{
  const mp_limb_t *end = table + tn * rn;
  const mp_limb_t *p;
  mp_size_t i;

  assert (k < tn);
  mpn_zero (rp, rn);
  for (p = table; p < end; p += rn, k--)
    {
      mp_limb_t mask = - (mp_limb_t) (k == 0);
      for (i = 0; i < rn; i++)
	rp[i] += mask & p[i];
    }
}
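/* Illustrative usage sketch (not part of upstream Nettle; TN, RN and the
   variable names are hypothetical). Selecting entry `secret_k' this way
   touches every limb of the table, so the memory access pattern does not
   leak the secret index:

     mp_limb_t r[RN];
     const mp_limb_t table[TN * RN] = { ... };

     sec_tabselect (r, RN, table, TN, secret_k);
*/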
utf-8
1
LGPL-3+ or GPL-2+
2001-2020 Niels Möller
scilab-6.1.1+dfsg2/scilab/modules/call_scilab/examples/call_scilab/cpp/common/ccmatrix1.cc
#include <iostream> #include "ccmatrix.h" class SciError {}; // CMatrix A(name,job); extern "C" { #include <stdio.h> #include <string.h> #undef _PARAMS #include "stack-c.h" int SendScilabJob(char *); void cc_test(); } static char buf[256]; CMatrix::CMatrix(char *name1,char *job) { sprintf(buf,"%s=%s;",name1,job); if ( SendScilabJob(buf) != 0) throw SciError(); else { int lp; C2F(cmatptr)(name1, &m, &n, &lp,strlen(name1)); v = stk(lp); } name = new char[strlen(name1)+1]; strcpy(name,name1); } void CMatrix::scijob(char *jobname) { sprintf(buf,jobname,name); if ( SendScilabJob(buf) != 0) throw SciError(); } CMatrix& CMatrix::plus(CMatrix &B) { sprintf(buf,"%s=%s+%s",this->name,this->name,B.name); if ( SendScilabJob(buf) != 0) throw SciError(); int lp; C2F(cmatptr)(name, &m, &n, &lp,strlen(name)); v = stk(lp); return *this; } void CMatrix::print() { fprintf(stdout,"Matrix %s=\n",name); scijob("disp(%s)"); } void CMatrix::inv() { sprintf(buf,"%s=inv(%s)",name,name); if ( SendScilabJob(buf) != 0) throw SciError(); int lp; C2F(cmatptr)(name, &m, &n, &lp,strlen(name)); v = stk(lp); } void cc_test() { CMatrix A("a","ones(2,2)"); CMatrix B("b","8"); A.plus(B); A.scijob("disp(%s);"); A.scijob("%s=rand(2,2)"); A.print(); A.inv(); A.print(); }
utf-8
1
GPL-2+
1989-2008 INRIA 1989-2007 ENPC 2008-2012 DIGITEO 2012-2016 Scilab Enterprises 2017-2018 ESI Group
chromium-98.0.4758.102/remoting/host/chromoting_host_services_client.cc
// Copyright 2021 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "remoting/host/chromoting_host_services_client.h" #include "base/bind.h" #include "base/environment.h" #include "base/notreached.h" #include "base/sequence_checker.h" #include "build/build_config.h" #include "mojo/public/cpp/bindings/pending_remote.h" #include "mojo/public/cpp/system/isolated_connection.h" #include "remoting/host/ipc_constants.h" #include "remoting/host/mojom/chromoting_host_services.mojom.h" #if defined(OS_WIN) #include <windows.h> #include "remoting/host/win/acl_util.h" #endif namespace remoting { namespace { #if defined(OS_LINUX) constexpr char kChromeRemoteDesktopSessionEnvVar[] = "CHROME_REMOTE_DESKTOP_SESSION"; #endif bool g_initialized = false; } // namespace ChromotingHostServicesClient::ChromotingHostServicesClient() : environment_(base::Environment::Create()), server_name_(GetChromotingHostServicesServerName()) { DCHECK(g_initialized) << "ChromotingHostServicesClient::Initialize() has not been called."; } ChromotingHostServicesClient::~ChromotingHostServicesClient() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); } // static bool ChromotingHostServicesClient::Initialize() { DCHECK(!g_initialized); #if defined(OS_WIN) // The ChromotingHostServices server runs under the LocalService account, // which normally isn't allowed to query process info like session ID of a // process running under a different account, so we add an ACL to allow it. g_initialized = AddProcessAccessRightForWellKnownSid( WinLocalServiceSid, PROCESS_QUERY_LIMITED_INFORMATION); #else // Other platforms don't need initialization. g_initialized = true; #endif return g_initialized; } mojom::ChromotingSessionServices* ChromotingHostServicesClient::GetSessionServices() const { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (!const_cast<ChromotingHostServicesClient*>(this) ->EnsureSessionServicesBinding()) { return nullptr; } return session_services_remote_.get(); } bool ChromotingHostServicesClient::EnsureConnection() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (remote_.is_bound()) { return true; } auto endpoint = mojo::NamedPlatformChannel::ConnectToServer(server_name_); if (!endpoint.is_valid()) { LOG(WARNING) << "Cannot connect to IPC through server name " << server_name_ << ". 
Endpoint is invalid."; return false; } connection_ = std::make_unique<mojo::IsolatedConnection>(); mojo::PendingRemote<mojom::ChromotingHostServices> pending_remote( connection_->Connect(std::move(endpoint)), /* version= */ 0); if (!pending_remote.is_valid()) { LOG(WARNING) << "Invalid message pipe."; connection_.reset(); return false; } remote_.Bind(std::move(pending_remote)); remote_.set_disconnect_handler(base::BindOnce( &ChromotingHostServicesClient::OnDisconnected, base::Unretained(this))); return true; } bool ChromotingHostServicesClient::EnsureSessionServicesBinding() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (session_services_remote_.is_bound()) { return true; } #if defined(OS_LINUX) if (!environment_->HasVar(kChromeRemoteDesktopSessionEnvVar)) { LOG(WARNING) << "Current desktop environment is not remotable."; return false; } #endif if (!EnsureConnection()) { return false; } remote_->BindSessionServices( session_services_remote_.BindNewPipeAndPassReceiver()); session_services_remote_.reset_on_disconnect(); return true; } void ChromotingHostServicesClient::OnDisconnected() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); remote_.reset(); connection_.reset(); } } // namespace remoting
utf-8
1
BSD-3-clause
The Chromium Authors. All rights reserved.
nettle-3.7.3/cast128.h
/* cast128.h The CAST-128 block cipher. Copyright (C) 2001, 2014 Niels Möller This file is part of GNU Nettle. GNU Nettle is free software: you can redistribute it and/or modify it under the terms of either: * the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. or * the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. or both in parallel, as here. GNU Nettle is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received copies of the GNU General Public License and the GNU Lesser General Public License along with this program. If not, see http://www.gnu.org/licenses/. */ #ifndef NETTLE_CAST128_H_INCLUDED #define NETTLE_CAST128_H_INCLUDED #include "nettle-types.h" #ifdef __cplusplus extern "C" { #endif /* Name mangling */ #define cast5_set_key nettle_cast5_set_key #define cast128_set_key nettle_cast128_set_key #define cast128_encrypt nettle_cast128_encrypt #define cast128_decrypt nettle_cast128_decrypt #define CAST128_BLOCK_SIZE 8 /* Variable key size between 40 and 128. */ #define CAST5_MIN_KEY_SIZE 5 #define CAST5_MAX_KEY_SIZE 16 #define CAST128_KEY_SIZE 16 struct cast128_ctx { unsigned rounds; /* Number of rounds to use, 12 or 16 */ /* Expanded key, rotations (5 bits only) and 32-bit masks. */ unsigned char Kr[16]; uint32_t Km[16]; }; /* Using variable key size. */ void cast5_set_key(struct cast128_ctx *ctx, size_t length, const uint8_t *key); void cast128_set_key(struct cast128_ctx *ctx, const uint8_t *key); void cast128_encrypt(const struct cast128_ctx *ctx, size_t length, uint8_t *dst, const uint8_t *src); void cast128_decrypt(const struct cast128_ctx *ctx, size_t length, uint8_t *dst, const uint8_t *src); #ifdef __cplusplus } #endif #endif /* NETTLE_CAST128_H_INCLUDED */
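/* Illustrative usage sketch (not part of upstream Nettle; key and buffer
   contents are hypothetical). Encrypt and then decrypt a single 8-byte
   block:

     struct cast128_ctx ctx;
     uint8_t key[CAST128_KEY_SIZE];   // fixed 16-byte key
     uint8_t pt[CAST128_BLOCK_SIZE], ct[CAST128_BLOCK_SIZE];

     cast128_set_key (&ctx, key);
     cast128_encrypt (&ctx, CAST128_BLOCK_SIZE, ct, pt);
     cast128_decrypt (&ctx, CAST128_BLOCK_SIZE, pt, ct);

   For the variable 40..128 bit key sizes, use cast5_set_key with an explicit
   length between CAST5_MIN_KEY_SIZE and CAST5_MAX_KEY_SIZE. */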
utf-8
1
LGPL-3+ or GPL-2+
2001-2020 Niels Möller
pulseaudio-15.0+dfsg1/src/pulsecore/semaphore-posix.c
/***
  This file is part of PulseAudio.

  Copyright 2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <errno.h>
#include <pthread.h>
#include <semaphore.h>

#include <pulse/xmalloc.h>
#include <pulsecore/macro.h>

#include "semaphore.h"

struct pa_semaphore {
    sem_t sem;
};

pa_semaphore* pa_semaphore_new(unsigned value) {
    pa_semaphore *s;

    s = pa_xnew(pa_semaphore, 1);
    pa_assert_se(sem_init(&s->sem, 0, value) == 0);
    return s;
}

void pa_semaphore_free(pa_semaphore *s) {
    pa_assert(s);
    pa_assert_se(sem_destroy(&s->sem) == 0);
    pa_xfree(s);
}

void pa_semaphore_post(pa_semaphore *s) {
    pa_assert(s);
    pa_assert_se(sem_post(&s->sem) == 0);
}

void pa_semaphore_wait(pa_semaphore *s) {
    int ret;
    pa_assert(s);

    do {
        ret = sem_wait(&s->sem);
    } while (ret < 0 && errno == EINTR);

    pa_assert(ret == 0);
}

pa_semaphore* pa_static_semaphore_get(pa_static_semaphore *s, unsigned value) {
    pa_semaphore *m;

    pa_assert(s);

    /* First, check if already initialized and short cut */
    if ((m = pa_atomic_ptr_load(&s->ptr)))
        return m;

    /* OK, not initialized, so let's allocate, and fill in */
    m = pa_semaphore_new(value);
    if ((pa_atomic_ptr_cmpxchg(&s->ptr, NULL, m)))
        return m;

    pa_semaphore_free(m);

    /* Hmm, filling in failed, so someone else must have filled in
     * already */
    pa_assert_se(m = pa_atomic_ptr_load(&s->ptr));
    return m;
}
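/* Illustrative usage sketch (not part of upstream PulseAudio; the two-thread
 * scenario is hypothetical). A consumer blocks until a producer posts:
 *
 *   pa_semaphore *s = pa_semaphore_new(0);
 *
 *   // consumer thread:
 *   pa_semaphore_wait(s);    // EINTR-safe, returns once posted
 *
 *   // producer thread:
 *   pa_semaphore_post(s);
 *
 *   pa_semaphore_free(s);
 */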
utf-8
1
LGPL-2.1+
Copyright (C) 2004-2009 Lennart Poettering Copyright (C) 2006-2007 Pierre Ossman <ossman@cendio.se> for Cendio AB
angelscript-2.35.1+ds/angelscript/source/as_callfunc_x64_mingw.cpp
/* AngelCode Scripting Library Copyright (c) 2003-2015 Andreas Jonsson This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. The original version of this library can be located at: http://www.angelcode.com/angelscript/ Andreas Jonsson andreas@angelcode.com */ // // This code was adapted from as_callfunc_x64_msvc by _Vicious_ on August 20th, 2011. // // Added support for functor methods by Jordi Oliveras Rovira in April, 2014. // #include <stdio.h> #include "as_config.h" #ifndef AS_MAX_PORTABILITY #ifdef AS_X64_MINGW #include "as_callfunc.h" #include "as_scriptengine.h" #include "as_texts.h" #include "as_context.h" BEGIN_AS_NAMESPACE static asQWORD __attribute__((noinline)) CallX64(const asQWORD *args, const asQWORD *floatArgs, const int paramSize, asQWORD func) { volatile asQWORD ret = 0; __asm__ __volatile__ ( "# Move the parameters into registers before the rsp is modified\n" "mov %1, %%r10\n" // r10 = args "mov %2, %%r11\n" // r11 = floatArgs "xor %%r12, %%r12\n" "mov %3, %%r12d\n" "mov %4, %%r14\n" // r14 = func "# Store the stack pointer in r15 since it is guaranteed not to change over a function call\n" "mov %%rsp, %%r15\n" "# Allocate space on the stack for the arguments\n" "# Make room for at least 4 arguments even if there are less. 
When\n" "# the compiler does optimizations for speed it may use these for \n" "# temporary storage.\n" "mov %%r12, %%rdi\n" "add $32,%%edi\n" "# Make sure the stack pointer is 16byte aligned so the\n" "# whole program optimizations will work properly\n" "# TODO: runtime optimize: Can this be optimized with fewer instructions?\n" "mov %%rsp,%%rsi\n" "sub %%rdi,%%rsi\n" "and $0x8,%%rsi\n" "add %%rsi,%%rdi\n" "sub %%rdi,%%rsp\n" "# Jump straight to calling the function if no parameters\n" "cmp $0,%%r12 # Compare paramSize with 0\n" "je callfunc # Jump to call funtion if (paramSize == 0)\n" "# Copy arguments from script stack to application stack\n" "# Order is (first to last):\n" "# rcx, rdx, r8, r9 & everything else goes on stack\n" "movq (%%r10),%%rcx\n" "movq 8(%%r10),%%rdx\n" "movq 16(%%r10),%%r8\n" "movq 24(%%r10),%%r9\n" "# Negate the 4 params from the size to be copied\n" "sub $32,%%r12d\n" "js copyfloat # Jump if negative result\n" "jz copyfloat # Jump if zero result\n" "# Now copy all remaining params onto stack allowing space for first four\n" "# params to be flushed back to the stack if required by the callee.\n" "add $32,%%r10 # Position input pointer 4 args ahead\n" "mov %%rsp,%%r13 # Put the stack pointer into r13\n" "add $32,%%r13 # Leave space for first 4 args on stack\n" "copyoverflow:\n" "movq (%%r10),%%rdi # Read param from source stack into rdi\n" "movq %%rdi,(%%r13) # Copy param to real stack\n" "add $8,%%r13 # Move virtual stack pointer\n" "add $8,%%r10 # Move source stack pointer\n" "sub $8,%%r12d # Decrement remaining count\n" "jnz copyoverflow # Continue if more params\n" "copyfloat:\n" "# Any floating point params?\n" "cmp $0,%%r11\n" "je callfunc\n" "movlpd (%%r11),%%xmm0\n" "movlpd 8(%%r11),%%xmm1\n" "movlpd 16(%%r11),%%xmm2\n" "movlpd 24(%%r11),%%xmm3\n" "callfunc:\n" "call *%%r14\n" "# restore stack pointer\n" "mov %%r15, %%rsp\n" "lea %0, %%rbx\n" // Load the address of the ret variable into rbx "movq %%rax,(%%rbx)\n" // Copy the returned value into the ret variable : // no output : "m" (ret), "r" (args), "r" (floatArgs), "r" (paramSize), "r" (func) : "rdi", "rsi", "rsp", "rbx", "r10", "r11", "%r12", "r13", "r14", "r15" ); return ret; } static asDWORD GetReturnedFloat() { volatile asDWORD ret = 0; __asm__ __volatile__ ( "lea %0, %%rax\n" "movss %%xmm0, (%%rax)" : /* no output */ : "m" (ret) : "%rax" ); return ret; } static asQWORD GetReturnedDouble() { volatile asQWORD ret = 0; __asm__ __volatile__ ( "lea %0, %%rax\n" "movlpd %%xmm0, (%%rax)" : /* no optput */ : "m" (ret) : "%rax" ); return ret; } asQWORD CallSystemFunctionNative(asCContext *context, asCScriptFunction *descr, void *obj, asDWORD *args, void *retPointer, asQWORD &/*retQW2*/, void *secondObject) { asCScriptEngine *engine = context->m_engine; asSSystemFunctionInterface *sysFunc = descr->sysFuncIntf; asQWORD retQW = 0; void *func = (void*)sysFunc->func; asUINT paramSize = 0; // QWords void **vftable; asQWORD allArgBuffer[64]; asQWORD floatArgBuffer[4]; int callConv = sysFunc->callConv; if( sysFunc->hostReturnInMemory ) { // The return is made in memory callConv++; // Set the return pointer as the first argument allArgBuffer[paramSize++] = (asQWORD)retPointer; } #ifdef AS_NO_THISCALL_FUNCTOR_METHOD if( callConv == ICC_THISCALL || callConv == ICC_THISCALL_RETURNINMEM || callConv == ICC_VIRTUAL_THISCALL || callConv == ICC_VIRTUAL_THISCALL_RETURNINMEM ) #else // Optimization to avoid check 12 values (all ICC_ that contains THISCALL) if( (callConv >= ICC_THISCALL && callConv <= 
ICC_VIRTUAL_THISCALL_RETURNINMEM) || (callConv >= ICC_THISCALL_OBJLAST && callConv <= ICC_VIRTUAL_THISCALL_OBJFIRST_RETURNINMEM) ) #endif { // Add the object pointer as the first parameter allArgBuffer[paramSize++] = (asQWORD)obj; } if( callConv == ICC_CDECL_OBJFIRST || callConv == ICC_CDECL_OBJFIRST_RETURNINMEM ) { // Add the object pointer as the first parameter allArgBuffer[paramSize++] = (asQWORD)obj; } #ifndef AS_NO_THISCALL_FUNCTOR_METHOD else if( callConv == ICC_THISCALL_OBJFIRST || callConv == ICC_THISCALL_OBJFIRST_RETURNINMEM || callConv == ICC_VIRTUAL_THISCALL_OBJFIRST || callConv == ICC_VIRTUAL_THISCALL_OBJFIRST_RETURNINMEM ) { // Add the object pointer as the first parameter allArgBuffer[paramSize++] = (asQWORD)secondObject; } #endif #ifdef AS_NO_THISCALL_FUNCTOR_METHOD if( callConv == ICC_VIRTUAL_THISCALL || callConv == ICC_VIRTUAL_THISCALL_RETURNINMEM ) #else if( callConv == ICC_VIRTUAL_THISCALL || callConv == ICC_VIRTUAL_THISCALL_RETURNINMEM || callConv == ICC_VIRTUAL_THISCALL_OBJFIRST || callConv == ICC_VIRTUAL_THISCALL_OBJFIRST_RETURNINMEM || callConv == ICC_VIRTUAL_THISCALL_OBJLAST || callConv == ICC_VIRTUAL_THISCALL_OBJLAST_RETURNINMEM ) #endif { // Get the true function pointer from the virtual function table vftable = *(void***)obj; func = vftable[asPWORD(func)>>3]; } // Move the arguments to the buffer asUINT dpos = paramSize; asUINT spos = 0; for( asUINT n = 0; n < descr->parameterTypes.GetLength(); n++ ) { if( descr->parameterTypes[n].IsObject() && !descr->parameterTypes[n].IsObjectHandle() && !descr->parameterTypes[n].IsReference() ) { if( descr->parameterTypes[n].GetSizeInMemoryDWords() >= AS_LARGE_OBJ_MIN_SIZE || (descr->parameterTypes[n].GetTypeInfo()->flags & COMPLEX_MASK) ) { allArgBuffer[dpos++] = *(asQWORD*)&args[spos]; spos += AS_PTR_SIZE; paramSize++; } else { // Copy the object's memory to the buffer memcpy(&allArgBuffer[dpos], *(void**)(args+spos), descr->parameterTypes[n].GetSizeInMemoryBytes()); // Delete the original memory engine->CallFree(*(char**)(args+spos)); spos += AS_PTR_SIZE; asUINT dwords = descr->parameterTypes[n].GetSizeInMemoryDWords(); asUINT qwords = (dwords >> 1) + (dwords & 1); dpos += qwords; paramSize += qwords; } } else if( descr->parameterTypes[n].GetTokenType() == ttQuestion ) { // Copy the reference and the type id allArgBuffer[dpos++] = *(asQWORD*)&args[spos]; spos += 2; allArgBuffer[dpos++] = args[spos++]; paramSize += 2; } else { // Copy the value directly asUINT dwords = descr->parameterTypes[n].GetSizeOnStackDWords(); if( dwords > 1 ) { allArgBuffer[dpos] = *(asQWORD*)&args[spos]; // Double arguments are moved to a separate buffer in order to be placed in the XMM registers, // though this is only done for first 4 arguments, the rest are placed on the stack if( paramSize < 4 && descr->parameterTypes[n].IsDoubleType() ) floatArgBuffer[dpos] = *(asQWORD*)&args[spos]; dpos++; spos += 2; } else { allArgBuffer[dpos] = args[spos]; // Float arguments are moved to a separate buffer in order to be placed in the XMM registers, // though this is only done for first 4 arguments, the rest are placed on the stack if( paramSize < 4 && descr->parameterTypes[n].IsFloatType() ) floatArgBuffer[dpos] = args[spos]; dpos++; spos++; } paramSize++; } } if( callConv == ICC_CDECL_OBJLAST || callConv == ICC_CDECL_OBJLAST_RETURNINMEM ) { // Add the object pointer as the last parameter allArgBuffer[paramSize++] = (asQWORD)obj; } #ifndef AS_NO_THISCALL_FUNCTOR_METHOD else if( callConv == ICC_THISCALL_OBJLAST || callConv == ICC_THISCALL_OBJLAST_RETURNINMEM 
|| callConv == ICC_VIRTUAL_THISCALL_OBJLAST ||
			 callConv == ICC_VIRTUAL_THISCALL_OBJLAST_RETURNINMEM )
	{
		// Add the object pointer as the last parameter
		allArgBuffer[paramSize++] = (asQWORD)secondObject;
	}
#endif

	retQW = CallX64(allArgBuffer, floatArgBuffer, paramSize*8, (asPWORD)func);

	// If the return is a float value we need to get the value from the FP register
	if( sysFunc->hostReturnFloat )
	{
		if( sysFunc->hostReturnSize == 1 )
			*(asDWORD*)&retQW = GetReturnedFloat();
		else
			retQW = GetReturnedDouble();
	}

	return retQW;
}

END_AS_NAMESPACE

#endif // AS_X64_MINGW
#endif // AS_MAX_PORTABILITY
utf-8
1
Zlib
2003-2020 Andreas Jönsson <andreas@angelcode.com>
xdg-desktop-portal-gtk-1.12.0/src/lockdown.c
/* * Copyright © 2018 Red Hat, Inc * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library. If not, see <http://www.gnu.org/licenses/>. * * Authors: * Matthias Clasen <mclasen@redhat.com> */ #define _GNU_SOURCE 1 #include "config.h" #include <string.h> #include <gio/gio.h> #include "xdg-desktop-portal-dbus.h" #include "lockdown.h" #include "utils.h" static GSettings *lockdown; static GSettings *location; static GSettings *privacy; gboolean lockdown_init (GDBusConnection *bus, GError **error) { GDBusInterfaceSkeleton *helper; GSettingsSchemaSource *source; GSettingsSchema *schema; helper = G_DBUS_INTERFACE_SKELETON (xdp_impl_lockdown_skeleton_new ()); lockdown = g_settings_new ("org.gnome.desktop.lockdown"); g_settings_bind (lockdown, "disable-printing", helper, "disable-printing", G_SETTINGS_BIND_DEFAULT); g_settings_bind (lockdown, "disable-save-to-disk", helper, "disable-save-to-disk", G_SETTINGS_BIND_DEFAULT); g_settings_bind (lockdown, "disable-application-handlers", helper, "disable-application-handlers", G_SETTINGS_BIND_DEFAULT); location = g_settings_new ("org.gnome.system.location"); g_settings_bind (location, "enabled", helper, "disable-location", G_SETTINGS_BIND_INVERT_BOOLEAN); source = g_settings_schema_source_get_default (); schema = g_settings_schema_source_lookup (source, "org.gnome.desktop.privacy", TRUE); privacy = g_settings_new ("org.gnome.desktop.privacy"); if (g_settings_schema_has_key (schema, "disable-camera")) g_settings_bind (privacy, "disable-camera", helper, "disable-camera", G_SETTINGS_BIND_DEFAULT); if (g_settings_schema_has_key (schema, "disable-microphone")) g_settings_bind (privacy, "disable-microphone", helper, "disable-microphone", G_SETTINGS_BIND_DEFAULT); if (g_settings_schema_has_key (schema, "disable-sound-output")) g_settings_bind (privacy, "disable-sound-output", helper, "disable-sound-output", G_SETTINGS_BIND_DEFAULT); g_settings_schema_unref (schema); if (!g_dbus_interface_skeleton_export (helper, bus, DESKTOP_PORTAL_OBJECT_PATH, error)) return FALSE; g_debug ("providing %s", g_dbus_interface_skeleton_get_info (helper)->name); return TRUE; }
utf-8
1
GPL-2+ and LGPL-2+
© 2008-2019 Red Hat, Inc © 2016-2018 Free Software Foundation, Inc. © 2016 Christian Kirbach © 2016 Mario Sanchez Prada © 2016 Piotr Drag © 2016 Aviary.pl © 2017 Jan Alexander Steffens © 2018 Igalia S.L. © 2019 Alberto Fanjul
inkscape-1.1.1/src/live_effects/lpe-tangent_to_curve.cpp
// SPDX-License-Identifier: GPL-2.0-or-later /** \file * Implementation of tangent-to-curve LPE. */ /* * Authors: * Johan Engelen * Maximilian Albert * * Copyright (C) Johan Engelen 2007 <j.b.c.engelen@utwente.nl> * Copyright (C) Maximilian Albert 2008 <maximilian.albert@gmail.com> * * Released under GNU GPL v2+, read the file 'COPYING' for more information. */ #include "lpe-tangent_to_curve.h" #include "display/curve.h" #include "object/sp-shape.h" #include "object/sp-object-group.h" #include "ui/knot/knot-holder.h" #include "ui/knot/knot-holder-entity.h" // TODO due to internal breakage in glibmm headers, this must be last: #include <glibmm/i18n.h> namespace Inkscape { namespace LivePathEffect { namespace TtC { class KnotHolderEntityAttachPt : public LPEKnotHolderEntity { public: KnotHolderEntityAttachPt(LPETangentToCurve *effect) : LPEKnotHolderEntity(effect) {}; void knot_set(Geom::Point const &p, Geom::Point const &origin, guint state) override; Geom::Point knot_get() const override; }; class KnotHolderEntityLeftEnd : public LPEKnotHolderEntity { public: KnotHolderEntityLeftEnd(LPETangentToCurve *effect) : LPEKnotHolderEntity(effect) {}; void knot_set(Geom::Point const &p, Geom::Point const &origin, guint state) override; Geom::Point knot_get() const override; }; class KnotHolderEntityRightEnd : public LPEKnotHolderEntity { public: KnotHolderEntityRightEnd(LPETangentToCurve *effect) : LPEKnotHolderEntity(effect) {}; void knot_set(Geom::Point const &p, Geom::Point const &origin, guint state) override; Geom::Point knot_get() const override; }; } // namespace TtC LPETangentToCurve::LPETangentToCurve(LivePathEffectObject *lpeobject) : Effect(lpeobject), angle(_("Angle:"), _("Additional angle between tangent and curve"), "angle", &wr, this, 0.0), t_attach(_("Location along curve:"), _("Location of the point of attachment along the curve (between 0.0 and number-of-segments)"), "t_attach", &wr, this, 0.5), length_left(_("Length left:"), _("Specifies the left end of the tangent"), "length-left", &wr, this, 150), length_right(_("Length right:"), _("Specifies the right end of the tangent"), "length-right", &wr, this, 150) { show_orig_path = true; _provides_knotholder_entities = true; registerParameter(&angle); registerParameter(&t_attach); registerParameter(&length_left); registerParameter(&length_right); } LPETangentToCurve::~LPETangentToCurve() = default; Geom::Piecewise<Geom::D2<Geom::SBasis> > LPETangentToCurve::doEffect_pwd2 (Geom::Piecewise<Geom::D2<Geom::SBasis> > const & pwd2_in) { using namespace Geom; Piecewise<D2<SBasis> > output; ptA = pwd2_in.valueAt(t_attach); derivA = unit_vector(derivative(pwd2_in).valueAt(t_attach)); // TODO: Why are positive angles measured clockwise, not counterclockwise? 
Geom::Rotate rot(Geom::Rotate::from_degrees(-angle)); derivA = derivA * rot; C = ptA - derivA * length_left; D = ptA + derivA * length_right; output = Piecewise<D2<SBasis> >(D2<SBasis>(SBasis(C[X], D[X]), SBasis(C[Y], D[Y]))); return output; } void LPETangentToCurve::addKnotHolderEntities(KnotHolder *knotholder, SPItem *item) { { KnotHolderEntity *e = new TtC::KnotHolderEntityAttachPt(this); e->create(nullptr, item, knotholder, Inkscape::CANVAS_ITEM_CTRL_TYPE_LPE, "LPE:TangentToCurvePT", _("Adjust the point of attachment of the tangent")); knotholder->add(e); } { KnotHolderEntity *e = new TtC::KnotHolderEntityLeftEnd(this); e->create(nullptr, item, knotholder, Inkscape::CANVAS_ITEM_CTRL_TYPE_LPE, "LPE:TangentToCurveLeftEnd", _("Adjust the <b>left</b> end of the tangent")); knotholder->add(e); } { KnotHolderEntity *e = new TtC::KnotHolderEntityRightEnd(this); e->create(nullptr, item, knotholder, Inkscape::CANVAS_ITEM_CTRL_TYPE_LPE, "LPE:TangetToCurveRightEnd", _("Adjust the <b>right</b> end of the tangent")); knotholder->add(e); } }; namespace TtC { void KnotHolderEntityAttachPt::knot_set(Geom::Point const &p, Geom::Point const &/*origin*/, guint state) { using namespace Geom; LPETangentToCurve* lpe = dynamic_cast<LPETangentToCurve *>(_effect); Geom::Point const s = snap_knot_position(p, state); if ( !SP_IS_SHAPE(lpe->sp_lpe_item) ) { //lpe->t_attach.param_set_value(0); g_warning("LPEItem is not a path! %s:%d\n", __FILE__, __LINE__); return; } Piecewise<D2<SBasis> > pwd2 = paths_to_pw( lpe->pathvector_before_effect ); double t0 = nearest_time(s, pwd2); lpe->t_attach.param_set_value(t0); // FIXME: this should not directly ask for updating the item. It should write to SVG, which triggers updating. sp_lpe_item_update_patheffect (SP_LPE_ITEM(item), false, true); } void KnotHolderEntityLeftEnd::knot_set(Geom::Point const &p, Geom::Point const &/*origin*/, guint state) { LPETangentToCurve *lpe = dynamic_cast<LPETangentToCurve *>(_effect); Geom::Point const s = snap_knot_position(p, state); double lambda = Geom::nearest_time(s, lpe->ptA, lpe->derivA); lpe->length_left.param_set_value(-lambda); sp_lpe_item_update_patheffect (SP_LPE_ITEM(item), false, true); } void KnotHolderEntityRightEnd::knot_set(Geom::Point const &p, Geom::Point const &/*origin*/, guint state) { LPETangentToCurve *lpe = dynamic_cast<LPETangentToCurve *>(_effect); Geom::Point const s = snap_knot_position(p, state); double lambda = Geom::nearest_time(s, lpe->ptA, lpe->derivA); lpe->length_right.param_set_value(lambda); sp_lpe_item_update_patheffect (SP_LPE_ITEM(item), false, true); } Geom::Point KnotHolderEntityAttachPt::knot_get() const { LPETangentToCurve const *lpe = dynamic_cast<LPETangentToCurve const*>(_effect); return lpe->ptA; } Geom::Point KnotHolderEntityLeftEnd::knot_get() const { LPETangentToCurve const *lpe = dynamic_cast<LPETangentToCurve const*>(_effect); return lpe->C; } Geom::Point KnotHolderEntityRightEnd::knot_get() const { LPETangentToCurve const *lpe = dynamic_cast<LPETangentToCurve const*>(_effect); return lpe->D; } } // namespace TtC } //namespace LivePathEffect } /* namespace Inkscape */ /* Local Variables: mode:c++ c-file-style:"stroustrup" c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +)) indent-tabs-mode:nil fill-column:99 End: */ // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4 :
utf-8
1
unknown
unknown
qtwebengine-opensource-src-5.15.8+dfsg/src/3rdparty/chromium/third_party/ffmpeg/libavformat/rtpdec_asf.c
/*
 * Microsoft RTP/ASF support.
 * Copyright (c) 2008 Ronald S. Bultje
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * @brief Microsoft RTP/ASF support
 * @author Ronald S. Bultje <rbultje@ronald.bitfreak.net>
 */

#include "libavutil/avassert.h"
#include "libavutil/base64.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "rtp.h"
#include "rtpdec_formats.h"
#include "rtsp.h"
#include "asf.h"
#include "avio_internal.h"
#include "internal.h"

/**
 * From MSDN 2.2.1.4, we learn that ASF data packets over RTP should not
 * contain any padding. Unfortunately, the header min/max_pktsize are not
 * updated (thus making min_pktsize invalid). Here, we "fix" these faulty
 * min_pktsize values in the ASF file header.
 * @return 0 on success, <0 on failure (currently -1).
 */
static int rtp_asf_fix_header(uint8_t *buf, int len)
{
    uint8_t *p = buf, *end = buf + len;

    if (len < sizeof(ff_asf_guid) * 2 + 22 ||
        memcmp(p, ff_asf_header, sizeof(ff_asf_guid))) {
        return -1;
    }
    p += sizeof(ff_asf_guid) + 14;
    do {
        uint64_t chunksize = AV_RL64(p + sizeof(ff_asf_guid));
        int skip = 6 * 8 + 3 * 4 + sizeof(ff_asf_guid) * 2;
        if (memcmp(p, ff_asf_file_header, sizeof(ff_asf_guid))) {
            if (chunksize > end - p)
                return -1;
            p += chunksize;
            continue;
        }

        if (end - p < 8 + skip)
            break;
        /* skip most of the file header, to min_pktsize */
        p += skip;
        if (AV_RL32(p) == AV_RL32(p + 4)) {
            /* and set that to zero */
            AV_WL32(p, 0);
            return 0;
        }
        break;
    } while (end - p >= sizeof(ff_asf_guid) + 8);

    return -1;
}

/**
 * The following code is basically a buffered AVIOContext,
 * with the added benefit of returning -EAGAIN (instead of 0)
 * on packet boundaries, such that the ASF demuxer can return
 * safely and resume business at the next packet.
 */
static int packetizer_read(void *opaque, uint8_t *buf, int buf_size)
{
    return AVERROR(EAGAIN);
}

static void init_packetizer(AVIOContext *pb, uint8_t *buf, int len)
{
    ffio_init_context(pb, buf, len, 0, NULL, packetizer_read, NULL, NULL);

    /* this "fills" the buffer with its current content */
    pb->pos     = len;
    pb->buf_end = buf + len;
}

int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
{
    int ret = 0;
    if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) {
        AVIOContext pb = { 0 };
        RTSPState *rt = s->priv_data;
        AVDictionary *opts = NULL;
        int len = strlen(p) * 6 / 8;
        char *buf = av_mallocz(len);
        ff_const59 AVInputFormat *iformat;

        if (!buf)
            return AVERROR(ENOMEM);
        av_base64_decode(buf, p, len);

        if (rtp_asf_fix_header(buf, len) < 0)
            av_log(s, AV_LOG_ERROR,
                   "Failed to fix invalid RTSP-MS/ASF min_pktsize\n");
        init_packetizer(&pb, buf, len);
        if (rt->asf_ctx) {
            avformat_close_input(&rt->asf_ctx);
        }
        if (!(iformat = av_find_input_format("asf")))
            return AVERROR_DEMUXER_NOT_FOUND;
        rt->asf_ctx = avformat_alloc_context();
        if (!rt->asf_ctx) {
            av_free(buf);
            return AVERROR(ENOMEM);
        }
        rt->asf_ctx->pb = &pb;
        av_dict_set(&opts, "no_resync_search", "1", 0);

        if ((ret = ff_copy_whiteblacklists(rt->asf_ctx, s)) < 0) {
            av_dict_free(&opts);
            return ret;
        }

        ret = avformat_open_input(&rt->asf_ctx, "", iformat, &opts);
        av_dict_free(&opts);
        if (ret < 0) {
            av_free(pb.buffer);
            return ret;
        }
        av_dict_copy(&s->metadata, rt->asf_ctx->metadata, 0);
        rt->asf_pb_pos = avio_tell(&pb);
        av_free(pb.buffer);
        rt->asf_ctx->pb = NULL;
    }
    return ret;
}

static int asfrtp_parse_sdp_line(AVFormatContext *s, int stream_index,
                                 PayloadContext *asf, const char *line)
{
    if (stream_index < 0)
        return 0;
    if (av_strstart(line, "stream:", &line)) {
        RTSPState *rt = s->priv_data;

        s->streams[stream_index]->id = strtol(line, NULL, 10);

        if (rt->asf_ctx) {
            int i;

            for (i = 0; i < rt->asf_ctx->nb_streams; i++) {
                if (s->streams[stream_index]->id == rt->asf_ctx->streams[i]->id) {
                    avcodec_parameters_copy(s->streams[stream_index]->codecpar,
                                            rt->asf_ctx->streams[i]->codecpar);
                    s->streams[stream_index]->need_parsing =
                        rt->asf_ctx->streams[i]->need_parsing;
                    avpriv_set_pts_info(s->streams[stream_index], 32, 1, 1000);
                }
            }
        }
    }

    return 0;
}

struct PayloadContext {
    AVIOContext *pktbuf, pb;
    uint8_t *buf;
};

/**
 * @return 0 when a packet was written into \p pkt, and no more data is left;
 *         1 when a packet was written into \p pkt, and more packets might be left;
 *        <0 when not enough data was provided to return a full packet, or on error.
 */
static int asfrtp_parse_packet(AVFormatContext *s, PayloadContext *asf,
                               AVStream *st, AVPacket *pkt,
                               uint32_t *timestamp,
                               const uint8_t *buf, int len, uint16_t seq,
                               int flags)
{
    AVIOContext *pb = &asf->pb;
    int res, mflags, len_off;
    RTSPState *rt = s->priv_data;

    if (!rt->asf_ctx)
        return -1;

    if (len > 0) {
        int off, out_len = 0;

        if (len < 4)
            return -1;

        av_freep(&asf->buf);

        ffio_init_context(pb, (uint8_t *)buf, len, 0, NULL, NULL, NULL, NULL);

        while (avio_tell(pb) + 4 < len) {
            int start_off = avio_tell(pb);

            mflags = avio_r8(pb);
            len_off = avio_rb24(pb);
            if (mflags & 0x20)   /**< relative timestamp */
                avio_skip(pb, 4);
            if (mflags & 0x10)   /**< has duration */
                avio_skip(pb, 4);
            if (mflags & 0x8)    /**< has location ID */
                avio_skip(pb, 4);
            off = avio_tell(pb);

            if (!(mflags & 0x40)) {
                /**
                 * If 0x40 is not set, the len_off field specifies an offset
                 * of this packet's payload data in the complete (reassembled)
                 * ASF packet. This is used to spread one ASF packet over
                 * multiple RTP packets.
                 */
                if (asf->pktbuf && len_off != avio_tell(asf->pktbuf)) {
                    ffio_free_dyn_buf(&asf->pktbuf);
                }
                if (!len_off && !asf->pktbuf &&
                    (res = avio_open_dyn_buf(&asf->pktbuf)) < 0)
                    return res;
                if (!asf->pktbuf)
                    return AVERROR(EIO);

                avio_write(asf->pktbuf, buf + off, len - off);
                avio_skip(pb, len - off);
                if (!(flags & RTP_FLAG_MARKER))
                    return -1;

                out_len     = avio_close_dyn_buf(asf->pktbuf, &asf->buf);
                asf->pktbuf = NULL;
            } else {
                /**
                 * If 0x40 is set, the len_off field specifies the length of
                 * the next ASF packet that can be read from this payload
                 * data alone. This is commonly the same as the payload size,
                 * but could be less in case of packet splitting (i.e.
                 * multiple ASF packets in one RTP packet).
                 */
                int cur_len = start_off + len_off - off;
                int prev_len = out_len;
                out_len += cur_len;
                if (FFMIN(cur_len, len - off) < 0)
                    return -1;
                if ((res = av_reallocp(&asf->buf, out_len)) < 0)
                    return res;
                memcpy(asf->buf + prev_len, buf + off,
                       FFMIN(cur_len, len - off));
                avio_skip(pb, cur_len);
            }
        }

        init_packetizer(pb, asf->buf, out_len);
        pb->pos += rt->asf_pb_pos;
        pb->eof_reached = 0;
        rt->asf_ctx->pb = pb;
    }

    for (;;) {
        int i;

        res = ff_read_packet(rt->asf_ctx, pkt);
        rt->asf_pb_pos = avio_tell(pb);
        if (res != 0)
            break;
        for (i = 0; i < s->nb_streams; i++) {
            if (s->streams[i]->id == rt->asf_ctx->streams[pkt->stream_index]->id) {
                pkt->stream_index = i;
                return 1; // FIXME: return 0 if last packet
            }
        }
        av_packet_unref(pkt);
    }

    return res == 1 ? -1 : res;
}

static void asfrtp_close_context(PayloadContext *asf)
{
    ffio_free_dyn_buf(&asf->pktbuf);
    av_freep(&asf->buf);
}

#define RTP_ASF_HANDLER(n, s, t) \
const RTPDynamicProtocolHandler ff_ms_rtp_ ## n ## _handler = { \
    .enc_name         = s, \
    .codec_type       = t, \
    .codec_id         = AV_CODEC_ID_NONE, \
    .priv_data_size   = sizeof(PayloadContext), \
    .parse_sdp_a_line = asfrtp_parse_sdp_line, \
    .close            = asfrtp_close_context, \
    .parse_packet     = asfrtp_parse_packet, \
}

RTP_ASF_HANDLER(asf_pfv, "x-asf-pf", AVMEDIA_TYPE_VIDEO);
RTP_ASF_HANDLER(asf_pfa, "x-asf-pf", AVMEDIA_TYPE_AUDIO);
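[Editor's note] The hot path above is the per-payload header walk in asfrtp_parse_packet(): one flags byte, a 24-bit big-endian length-or-offset, then optional 4-byte timestamp/duration/location fields gated by bits 0x20, 0x10 and 0x8, with bit 0x40 deciding whether len_off is a packet length or a reassembly offset. Below is a minimal standalone sketch of just that layout; the struct and function names are hypothetical, not FFmpeg API, and only the flag values and field order are taken from the code above.

/* Sketch: decode the RTP/ASF payload header walked by
 * asfrtp_parse_packet() above, with no libavformat dependency. */
#include <stdint.h>
#include <stddef.h>

struct asf_payload_hdr {
    uint8_t  flags;    /* 0x40: len_off is a length, else a reassembly offset */
    uint32_t len_off;  /* 24-bit big-endian length-or-offset */
    size_t   data_off; /* where payload data starts within the RTP payload */
};

/* Returns 0 on success, -1 if the buffer is too short. */
static int parse_asf_payload_hdr(const uint8_t *buf, size_t len,
                                 struct asf_payload_hdr *hdr)
{
    size_t pos = 0;

    if (len < 4)
        return -1;
    hdr->flags   = buf[pos++];
    hdr->len_off = (uint32_t)buf[pos] << 16 | buf[pos + 1] << 8 | buf[pos + 2];
    pos += 3;
    if (hdr->flags & 0x20) pos += 4; /* relative timestamp present */
    if (hdr->flags & 0x10) pos += 4; /* duration present */
    if (hdr->flags & 0x8)  pos += 4; /* location ID present */
    if (pos > len)
        return -1;
    hdr->data_off = pos;
    return 0;
}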
utf-8
1
LGPL-3 or GPL-2
2006-2021 The Chromium Authors 2016-2021 The Qt Company Ltd.
xen-4.14.3+32-g9de3671772/tools/libs/toolcore/include/xentoolcore.h
/*
 * xentoolcore.h
 *
 * Copyright (c) 2017 Citrix
 *
 * Common features used/provided by all Xen tools libraries
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef XENTOOLCORE_H
#define XENTOOLCORE_H

#include <stdint.h>
#include <xen/xen.h>

/*
 * int xentoolcore_restrict_all(domid_t domid);
 *
 * Arranges that Xen library handles (fds etc.) which are currently held
 * by Xen libraries, can no longer be used other than to affect domid.
 *
 * Does not prevent effects that amount only to
 *  - denial of service, possibly host-wide, by resource exhaustion etc.
 *
 * If this cannot be achieved, returns -1 and sets errno.
 * If called again with the same domid, it may succeed, or it may
 * fail (even though such a call is potentially meaningful).
 * (If called again with a different domid, it will necessarily fail.)
 *
 * Note for multi-threaded programs: If xentoolcore_restrict_all is
 * called concurrently with a function which opens and/or closes Xen
 * library handles (e.g. libxl_ctx_free, xs_close), the restriction is
 * only guaranteed to be effective after all of the closing functions
 * have returned, even if that is later than the return from
 * xentoolcore_restrict_all. (Of course if xentoolcore_restrict_all
 * is called concurrently with opening functions, the new handles
 * might or might not be restricted.)
 *
 * ====================================================================
 * IMPORTANT - IMPLEMENTATION STATUS
 *
 * This function has been implemented insofar as it appears necessary
 * for the purposes of running a deprivileged qemu, and is believed to
 * be sufficient (subject to the caveats discussed in the appropriate
 * libxl documentation for this feature).
 *
 * However, this function is NOT implemented for all Xen libraries.
 * For each use case of this function, the designer must evaluate and
 * audit whether the implementation is sufficient in their specific
 * context.
 *
 * Of course, patches to extend the implementation are very welcome.
 * ====================================================================
 *
 * Thread safe.
 *
 * We expect that no callers do the following:
 *  - in one thread call xen_somelibrary_open|close
 *  - in another thread call fork
 *  - in the child of the fork, before exec, call
 *    xen_some[other]library_open|close or xentoolcore_restrict_all
 */
int xentoolcore_restrict_all(domid_t domid);

#endif /* XENTOOLCORE_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
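[Editor's note] A minimal, hypothetical caller of the API declared above. Only xentoolcore_restrict_all() and domid_t come from the header; the program scaffolding and the <xentoolcore.h> include path are assumptions. Per the comment above, all Xen library handles should be opened before the call, which returns -1 and sets errno on failure.

/* Sketch: drop a process's Xen handles down to a single domain. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <xentoolcore.h>

int main(int argc, char **argv)
{
    domid_t domid;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <domid>\n", argv[0]);
        return EXIT_FAILURE;
    }
    domid = (domid_t)atoi(argv[1]);

    /* ... open all Xen library handles (libxl, xenstore, ...) first;
     * handles opened after this call might not be restricted. */

    if (xentoolcore_restrict_all(domid) < 0) {
        fprintf(stderr, "restrict to domid %u failed: %s\n",
                (unsigned)domid, strerror(errno));
        return EXIT_FAILURE;
    }

    /* From here on, held Xen handles can only affect 'domid'. */
    return EXIT_SUCCESS;
}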
utf-8
1
unknown
unknown