#!/usr/bin/env perl
#
# ====================================================================
# Written by David Mosberger <David.Mosberger@acm.org> based on the
# Itanium optimized Crypto code which was released by HP Labs at
# http://www.hpl.hp.com/research/linux/crypto/.
#
# Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */

# This is a little helper program which generates a software-pipelined
# loop for RC4 encryption.  The basic algorithm looks like this:
#
#   for (counter = 0; counter < len; ++counter)
#     {
#       in = inp[counter];
#       SI = S[I];
#       J = (SI + J) & 0xff;
#       SJ = S[J];
#       T = (SI + SJ) & 0xff;
#       S[I] = SJ, S[J] = SI;
#       ST = S[T];
#       outp[counter] = in ^ ST;
#       I = (I + 1) & 0xff;
#     }
#
# Pipelining this loop isn't easy, because the stores to the S[] array
# need to be observed in the right order.  The loop generated by the
# code below has the following pipeline diagram:
#
#      cycle
#       | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |16 |17 |
# iter
#   1:   xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx
#   2:               xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx
#   3:                           xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx
#
# where:
#       LDI = load of S[I]
#       LDJ = load of S[J]
#       SWP = swap of S[I] and S[J]
#       LDT = load of S[T]
#
# Note that in the above diagram, the major trouble-spot is that LDI
# of the 2nd iteration is performed BEFORE the SWP of the first
# iteration.  Fortunately, this is easy to detect (I of the 1st
# iteration will be equal to J of the 2nd iteration) and when this
# happens, we simply forward the proper value from the 1st iteration
# to the 2nd one.  The proper value in this case is simply the value
# of S[I] from the first iteration (thanks to the fact that SWP
# simply swaps the contents of S[I] and S[J]).
#
# Another potential trouble-spot is in cycle 7, where SWP of the 1st
# iteration issues at the same time as the LDI of the 3rd iteration.
# However, thanks to IA-64 execution semantics, this can be taken
# care of simply by placing LDI later in the instruction-group than
# SWP.  IA-64 CPUs will automatically forward the value if they
# detect that the SWP and LDI are accessing the same memory-location.
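#
# As the staggered diagram shows, a new iteration is started every 3
# cycles while each iteration spans 12 cycles, so up to four iterations
# are in flight at any given time; this is where the 4 pipeline stages
# (and the $phases = 4 setting further down) come from.
#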
# The core-loop that can be pipelined then looks like this (annotated
# with McKinley/Madison issue port & latency numbers, assuming L1
# cache hits for the most part):
#
#   operation:            instruction:                      issue-ports:    latency
#   ------------------    -----------------------------    -------------   -------
#
#   Data = *inp++         ld1 data = [inp], 1               M0-M1           1 cyc   c0
#                         shladd Iptr = I, KeyTable, 3      M0-M3, I0, I1   1 cyc
#   I = (I + 1) & 0xff    padd1 nextI = I, one              M0-M3, I0, I1   3 cyc
#                         ;;
#   SI = S[I]             ld8 SI = [Iptr]                   M0-M1           1 cyc   c1 * after SWAP!
#                         ;;
#                         cmp.eq.unc pBypass = I, J                            * after J is valid!
#   J = SI + J            add J = J, SI                     M0-M3, I0, I1   1 cyc   c2
#   (pBypass)             br.cond.spnt Bypass
#                         ;;
#   ---------------------------------------------------------------------------------------
#   J = J & 0xff          zxt1 J = J                        I0, I1          1 cyc   c3
#                         ;;
#                         shladd Jptr = J, KeyTable, 3      M0-M3, I0, I1   1 cyc   c4
#                         ;;
#   SJ = S[J]             ld8 SJ = [Jptr]                   M0-M1           1 cyc   c5
#                         ;;
#   ---------------------------------------------------------------------------------------
#   T = (SI + SJ)         add T = SI, SJ                    M0-M3, I0, I1   1 cyc   c6
#                         ;;
#   T = T & 0xff          zxt1 T = T                        I0, I1          1 cyc
#   S[I] = SJ             st8 [Iptr] = SJ                   M2-M3                   c7
#   S[J] = SI             st8 [Jptr] = SI                   M2-M3
#                         ;;
#                         shladd Tptr = T, KeyTable, 3      M0-M3, I0, I1   1 cyc   c8
#                         ;;
#   ---------------------------------------------------------------------------------------
#   T = S[T]              ld8 T = [Tptr]                    M0-M1           1 cyc   c9
#                         ;;
#   data ^= T             xor data = data, T                M0-M3, I0, I1   1 cyc   c10
#                         ;;
#   *out++ = Data ^ T     dep word = word, data, 8, POS     I0, I1          1 cyc   c11
#                         ;;
#   ---------------------------------------------------------------------------------------
#
# There are several points worth making here:
#
#    - Note that due to the bypass/forwarding-path, the first two
#      phases of the loop are strangely mingled together.  In
#      particular, note that the first stage of the pipeline is
#      using the value of "J", as calculated by the second stage.
#    - Each bundle-pair will have exactly 6 instructions.
#    - Pipelined, the loop can execute in 3 cycles/iteration and
#      4 stages.  However, McKinley/Madison can issue "st1" to
#      the same bank at a rate of at most one per 4 cycles.  Thus,
#      instead of storing each byte, we accumulate them in a word
#      and then write them back at once with a single "st8" (this
#      implies that the setup code needs to ensure that the output
#      buffer is properly aligned, if need be, by encoding the
#      first few bytes separately).
#    - There is no space for a "br.ctop" instruction.  For this
#      reason we can't use modulo-loop support in IA-64 and have
#      to do a traditional, purely software-pipelined loop.
#    - We can't replace any of the remaining "add/zxt1" pairs with
#      "padd1" because the latency for that instruction is too high
#      and would push the loop to the point where more bypasses
#      would be needed, which we don't have space for.
#    - The above loop runs at around 3.26 cycles/byte, or roughly
#      440 MByte/sec on a 1.5GHz Madison.  This is well below the
#      system bus bandwidth and hence with judicious use of
#      "lfetch" this loop can run at (almost) peak speed even when
#      the input and output data reside in memory.  The
#      max. latency that can be tolerated is (PREFETCH_DISTANCE *
#      L2_LINE_SIZE * 3 cyc), or about 384 cycles assuming (at
#      least) 1-ahead prefetching of 128 byte cache-lines.  Note
#      that we do NOT prefetch into L1, since that would only
#      interfere with the S[] table values stored there.  This is
#      acceptable because there is a 10 cycle latency between
#      load and first use of the input data.
#    - We use a branch to out-of-line bypass-code because of cycle-pressure:
#      we calculate the next J, check for the need to activate the
#      bypass path, and activate the bypass path ALL IN THE SAME
#      CYCLE.  If we didn't have these constraints, we could do
#      the bypass with a simple conditional move instruction.
#      Fortunately, the bypass paths get activated relatively
#      infrequently, so the extra branches don't cost all that much
#      (about 0.04 cycles/byte, measured on a 16396 byte file with
#      random input data).
#
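
# For reference, the byte-at-a-time loop described at the top of this file
# can be written directly in Perl.  The helper below is only an illustrative
# sketch (it is never called by this generator) and assumes the caller passes
# a reference to the 256-entry state array plus the running I and J indices:

sub rc4_scalar_reference {
    my ($S, $I, $J, $in) = @_;
    my $out = '';
    foreach my $byte (unpack("C*", $in)) {
        my $SI = $S->[$I];
        $J = ($SI + $J) & 0xff;
        my $SJ = $S->[$J];
        my $T = ($SI + $SJ) & 0xff;
        ($S->[$I], $S->[$J]) = ($SJ, $SI);      # the SWP step
        $out .= chr($byte ^ $S->[$T]);          # ST is read after the swap
        $I = ($I + 1) & 0xff;
    }
    return ($out, $I, $J);
}
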
$phases = 4;            # number of stages/phases in the pipelined-loop
$unroll_count = 6;      # number of times we unrolled it
$pComI = (1 << 0);
$pComJ = (1 << 1);
$pComT = (1 << 2);
$pOut  = (1 << 3);

$NData = 4;
$NIP = 3;
$NJP = 2;
$NI = 2;
$NSI = 3;
$NSJ = 2;
$NT = 2;
$NOutWord = 2;

#
# $threshold is the minimum length before we attempt to use the
# big software-pipelined loop.  It MUST be greater-or-equal
# to:
#               PHASES * (UNROLL_COUNT + 1) + 7
#
# The "+ 7" comes from the fact we may have to encode up to
# 7 bytes separately before the output pointer is aligned.
#
$threshold = (3 * ($phases * ($unroll_count + 1)) + 7);
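
# With the defaults above ($phases = 4, $unroll_count = 6) the expression
# evaluates to 3 * (4 * (6 + 1)) + 7 = 91, i.e. buffers shorter than 91
# bytes are handled entirely by the compact modulo-scheduled loop emitted
# at .rc4Remainder instead of the big unrolled loop.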

sub I {
    local *code = shift;
    local $format = shift;
    $code .= sprintf ("\t\t".$format."\n", @_);
}

sub P {
    local *code = shift;
    local $format = shift;
    $code .= sprintf ($format."\n", @_);
}

sub STOP {
    local *code = shift;
    $code .=<<___;
	;;
___
}
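
# A quick illustration of the three helpers above (the generator itself never
# uses a variable called $demo; this is just an example of the emitted text):
#
#       my $demo = '';
#       &I(\$demo, "add J = J, SI[%u]", 3);     # appends "\t\tadd J = J, SI[3]\n"
#       &STOP(\$demo);                          # appends a ";;" stop-bit line
#       &P(\$demo, ".rc4Resume%u:", 1);         # appends ".rc4Resume1:\n", unindented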

sub emit_body {
    local *c = shift;
    local *bypass = shift;
    local ($iteration, $p) = @_;

    local $i0 = $iteration;
    local $i1 = $iteration - 1;
    local $i2 = $iteration - 2;
    local $i3 = $iteration - 3;
    local $iw0 = ($iteration - 3) / 8;
    local $iw1 = ($iteration > 3) ? ($iteration - 4) / 8 : 1;
    local $byte_num = ($iteration - 3) % 8;
    local $label = $iteration + 1;
    local $pAny = ($p & 0xf) == 0xf;
    local $pByp = (($p & $pComI) && ($iteration > 0));

    $c.=<<___;
//////////////////////////////////////////////////
___

    if (($p & 0xf) == 0) {
        $c.="#ifdef HOST_IS_BIG_ENDIAN\n";
        &I(\$c,"shr.u OutWord[%u] = OutWord[%u], 32;;",
                $iw1 % $NOutWord, $iw1 % $NOutWord);
        $c.="#endif\n";
        &I(\$c, "st4 [OutPtr] = OutWord[%u], 4", $iw1 % $NOutWord);
        return;
    }

    # Cycle 0
    &I(\$c, "{ .mmi")                                           if ($pAny);
    &I(\$c, "ld1 Data[%u] = [InPtr], 1", $i0 % $NData)          if ($p & $pComI);
    &I(\$c, "padd1 I[%u] = One, I[%u]", $i0 % $NI, $i1 % $NI)   if ($p & $pComI);
    &I(\$c, "zxt1 J = J")                                       if ($p & $pComJ);
    &I(\$c, "}")                                                if ($pAny);
    &I(\$c, "{ .mmi")                                           if ($pAny);
    &I(\$c, "LKEY T[%u] = [T[%u]]", $i1 % $NT, $i1 % $NT)       if ($p & $pOut);
    &I(\$c, "add T[%u] = SI[%u], SJ[%u]",
            $i0 % $NT, $i2 % $NSI, $i1 % $NSJ)                  if ($p & $pComT);
    &I(\$c, "KEYADDR(IPr[%u], I[%u])", $i0 % $NIP, $i1 % $NI)   if ($p & $pComI);
    &I(\$c, "}")                                                if ($pAny);
    &STOP(\$c);

    # Cycle 1
    &I(\$c, "{ .mmi")                                           if ($pAny);
    &I(\$c, "SKEY [IPr[%u]] = SJ[%u]", $i2 % $NIP, $i1 % $NSJ)  if ($p & $pComT);
    &I(\$c, "SKEY [JP[%u]] = SI[%u]", $i1 % $NJP, $i2 % $NSI)   if ($p & $pComT);
    &I(\$c, "zxt1 T[%u] = T[%u]", $i0 % $NT, $i0 % $NT)         if ($p & $pComT);
    &I(\$c, "}")                                                if ($pAny);
    &I(\$c, "{ .mmi")                                           if ($pAny);
    &I(\$c, "LKEY SI[%u] = [IPr[%u]]", $i0 % $NSI, $i0 % $NIP)  if ($p & $pComI);
    &I(\$c, "KEYADDR(JP[%u], J)", $i0 % $NJP)                   if ($p & $pComJ);
    &I(\$c, "xor Data[%u] = Data[%u], T[%u]",
            $i3 % $NData, $i3 % $NData, $i1 % $NT)              if ($p & $pOut);
    &I(\$c, "}")                                                if ($pAny);
    &STOP(\$c);

    # Cycle 2
    &I(\$c, "{ .mmi")                                           if ($pAny);
    &I(\$c, "LKEY SJ[%u] = [JP[%u]]", $i0 % $NSJ, $i0 % $NJP)   if ($p & $pComJ);
    &I(\$c, "cmp.eq pBypass, p0 = I[%u], J", $i1 % $NI)         if ($pByp);
    &I(\$c, "dep OutWord[%u] = Data[%u], OutWord[%u], BYTE_POS(%u), 8",
            $iw0 % $NOutWord, $i3 % $NData, $iw1 % $NOutWord, $byte_num) if ($p & $pOut);
    &I(\$c, "}")                                                if ($pAny);
    &I(\$c, "{ .mmb")                                           if ($pAny);
    &I(\$c, "add J = J, SI[%u]", $i0 % $NSI)                    if ($p & $pComI);
    &I(\$c, "KEYADDR(T[%u], T[%u])", $i0 % $NT, $i0 % $NT)      if ($p & $pComT);
    &P(\$c, "(pBypass)\tbr.cond.spnt.many .rc4Bypass%u", $label) if ($pByp);
    &I(\$c, "}")                                                if ($pAny);
    &STOP(\$c);

    &P(\$c, ".rc4Resume%u:", $label)                            if ($pByp);
    if ($byte_num == 0 && $iteration >= $phases) {
        &I(\$c, "st8 [OutPtr] = OutWord[%u], 8",
                $iw1 % $NOutWord)                               if ($p & $pOut);
        if ($iteration == (1 + $unroll_count) * $phases - 1) {
            if ($unroll_count == 6) {
                &I(\$c, "mov OutWord[%u] = OutWord[%u]",
                        $iw1 % $NOutWord, $iw0 % $NOutWord);
            }
            &I(\$c, "lfetch.nt1 [InPrefetch], %u",
                    $unroll_count * $phases);
            &I(\$c, "lfetch.excl.nt1 [OutPrefetch], %u",
                    $unroll_count * $phases);
            &I(\$c, "br.cloop.sptk.few .rc4Loop");
        }
    }

    if ($pByp) {
        &P(\$bypass, ".rc4Bypass%u:", $label);
        &I(\$bypass, "sub J = J, SI[%u]", $i0 % $NSI);
        &I(\$bypass, "nop 0");
        &I(\$bypass, "nop 0");
        &I(\$bypass, ";;");
        &I(\$bypass, "add J = J, SI[%u]", $i1 % $NSI);
        &I(\$bypass, "mov SI[%u] = SI[%u]", $i0 % $NSI, $i1 % $NSI);
        &I(\$bypass, "br.sptk.many .rc4Resume%u\n", $label);
        &I(\$bypass, ";;");
    }
}
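
# A worked example of the index bookkeeping in emit_body(): for $iteration = 11
# the output phase finishes the byte that was loaded in iteration 8 ($i3 = 8),
# $byte_num = (11 - 3) % 8 = 0, $iw0 = 1 and $iw1 = 0.  The byte therefore
# starts a fresh output word in OutWord[1], while the word accumulated over
# iterations 3..10 sits in OutWord[0] and, because $iteration >= $phases and
# $byte_num == 0, is flushed with the emitted "st8 [OutPtr] = OutWord[0], 8".
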
$code=<<___;
	.ident \"rc4-ia64.s, version 3.0\"
	.ident \"Copyright (c) 2005 Hewlett-Packard Development Company, L.P.\"

#define LCSave		r8
#define PRSave		r9

/* Inputs become invalid once rotation begins! */

#define StateTable	in0
#define DataLen		in1
#define InputBuffer	in2
#define OutputBuffer	in3

#define KTable		r14
#define J		r15
#define InPtr		r16
#define OutPtr		r17
#define InPrefetch	r18
#define OutPrefetch	r19
#define One		r20
#define LoopCount	r21
#define Remainder	r22
#define IFinal		r23
#define EndPtr		r24

#define tmp0		r25
#define tmp1		r26

#define pBypass		p6
#define pDone		p7
#define pSmall		p8
#define pAligned	p9
#define pUnaligned	p10

#define pComputeI	pPhase[0]
#define pComputeJ	pPhase[1]
#define pComputeT	pPhase[2]
#define pOutput		pPhase[3]

#define RetVal		r8
#define L_OK		p7
#define L_NOK		p8

#define _NINPUTS	4
#define _NOUTPUT	0

#define _NROTATE	24
#define _NLOCALS	(_NROTATE - _NINPUTS - _NOUTPUT)

#ifndef SZ
# define SZ	4	// this must be set to sizeof(RC4_INT)
#endif

#if SZ == 1
# define LKEY			ld1
# define SKEY			st1
# define KEYADDR(dst, i)	add dst = i, KTable
#elif SZ == 2
# define LKEY			ld2
# define SKEY			st2
# define KEYADDR(dst, i)	shladd dst = i, 1, KTable
#elif SZ == 4
# define LKEY			ld4
# define SKEY			st4
# define KEYADDR(dst, i)	shladd dst = i, 2, KTable
#else
# define LKEY			ld8
# define SKEY			st8
# define KEYADDR(dst, i)	shladd dst = i, 3, KTable
#endif

#if defined(_HPUX_SOURCE) && !defined(_LP64)
# define ADDP	addp4
#else
# define ADDP	add
#endif

/* Define a macro for the bit number of the n-th byte: */

#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
# define HOST_IS_BIG_ENDIAN
# define BYTE_POS(n)	(56 - (8 * (n)))
#else
# define BYTE_POS(n)	(8 * (n))
#endif

/*
   We must perform the first phase of the pipeline explicitly since
   we will always load from the state table the first time.  The
   br.cexit will never be taken, regardless of the number of bytes,
   because the epilogue count is 4.
 */

/* MODSCHED_RC4 macro was split to _PROLOGUE and _LOOP, because HP-UX
   assembler failed on original macro with syntax error. <appro> */
#define MODSCHED_RC4_PROLOGUE \\
   { \\
	ld1		Data[0] = [InPtr], 1; \\
	add		IFinal = 1, I[1]; \\
	KEYADDR(IPr[0], I[1]); \\
   } ;; \\
   { \\
	LKEY		SI[0] = [IPr[0]]; \\
	mov		pr.rot = 0x10000; \\
	mov		ar.ec = 4; \\
   } ;; \\
   { \\
	add		J = J, SI[0]; \\
	zxt1		I[0] = IFinal; \\
	br.cexit.spnt.few .+16; /* never taken */ \\
   } ;;
#define MODSCHED_RC4_LOOP(label) \\
label: \\
   { .mmi; \\
	(pComputeI)	ld1	Data[0] = [InPtr], 1; \\
	(pComputeI)	add	IFinal = 1, I[1]; \\
	(pComputeJ)	zxt1	J = J; \\
   }{ .mmi; \\
	(pOutput)	LKEY	T[1] = [T[1]]; \\
	(pComputeT)	add	T[0] = SI[2], SJ[1]; \\
	(pComputeI)	KEYADDR(IPr[0], I[1]); \\
   } ;; \\
   { .mmi; \\
	(pComputeT)	SKEY	[IPr[2]] = SJ[1]; \\
	(pComputeT)	SKEY	[JP[1]] = SI[2]; \\
	(pComputeT)	zxt1	T[0] = T[0]; \\
   }{ .mmi; \\
	(pComputeI)	LKEY	SI[0] = [IPr[0]]; \\
	(pComputeJ)	KEYADDR(JP[0], J); \\
	(pComputeI)	cmp.eq.unc pBypass, p0 = I[1], J; \\
   } ;; \\
   { .mmi; \\
	(pComputeJ)	LKEY	SJ[0] = [JP[0]]; \\
	(pOutput)	xor	Data[3] = Data[3], T[1]; \\
			nop	0x0; \\
   }{ .mmi; \\
	(pComputeT)	KEYADDR(T[0], T[0]); \\
	(pBypass)	mov	SI[0] = SI[1]; \\
	(pComputeI)	zxt1	I[0] = IFinal; \\
   } ;; \\
   { .mmb; \\
	(pOutput)	st1	[OutPtr] = Data[3], 1; \\
	(pComputeI)	add	J = J, SI[0]; \\
			br.ctop.sptk.few label; \\
   } ;;

	.text

	.align	32

	.type	RC4, \@function
	.global	RC4

	.proc	RC4
	.prologue

RC4:
	{
		.mmi
		alloc	r2 = ar.pfs, _NINPUTS, _NLOCALS, _NOUTPUT, _NROTATE

		.rotr Data[4], I[2], IPr[3], SI[3], JP[2], SJ[2], T[2], \\
		      OutWord[2]
		.rotp pPhase[4]

		ADDP	InPrefetch = 0, InputBuffer
		ADDP	KTable = 0, StateTable
	}
	{
		.mmi
		ADDP	InPtr = 0, InputBuffer
		ADDP	OutPtr = 0, OutputBuffer
		mov	RetVal = r0
	}
	;;
	{
		.mmi
		lfetch.nt1	[InPrefetch], 0x80
		ADDP	OutPrefetch = 0, OutputBuffer
	}
	{	// Return 0 if the input length is nonsensical
		.mib
		ADDP	StateTable = 0, StateTable
		cmp.ge.unc	L_NOK, L_OK = r0, DataLen
	(L_NOK)	br.ret.sptk.few rp
	}
	;;
	{
		.mib
		cmp.eq.or	L_NOK, L_OK = r0, InPtr
		cmp.eq.or	L_NOK, L_OK = r0, OutPtr
		nop	0x0
	}
	{
		.mib
		cmp.eq.or	L_NOK, L_OK = r0, StateTable
		nop	0x0
	(L_NOK)	br.ret.sptk.few rp
	}
	;;
	LKEY	I[1] = [KTable], SZ
	/* Prefetch the state-table. It contains 256 elements of size SZ */

#if SZ == 1
	ADDP	tmp0 = 1*128, StateTable
#elif SZ == 2
	ADDP	tmp0 = 3*128, StateTable
	ADDP	tmp1 = 2*128, StateTable
#elif SZ == 4
	ADDP	tmp0 = 7*128, StateTable
	ADDP	tmp1 = 6*128, StateTable
#elif SZ == 8
	ADDP	tmp0 = 15*128, StateTable
	ADDP	tmp1 = 14*128, StateTable
#endif
	;;
#if SZ >= 8
	lfetch.fault.nt1	[tmp0], -256	// 15
	lfetch.fault.nt1	[tmp1], -256;;
	lfetch.fault.nt1	[tmp0], -256	// 13
	lfetch.fault.nt1	[tmp1], -256;;
	lfetch.fault.nt1	[tmp0], -256	// 11
	lfetch.fault.nt1	[tmp1], -256;;
	lfetch.fault.nt1	[tmp0], -256	// 9
	lfetch.fault.nt1	[tmp1], -256;;
#endif
#if SZ >= 4
	lfetch.fault.nt1	[tmp0], -256	// 7
	lfetch.fault.nt1	[tm1], -256;;
	lfetch.fault.nt1	[tmp0], -256	// 5
	lfetch.fault.nt1	[tmp1], -256;;
#endif
#if SZ >= 2
	lfetch.fault.nt1	[tmp0], -256	// 3
	lfetch.fault.nt1	[tmp1], -256;;
#endif
	{
		.mii
		lfetch.fault.nt1	[tmp0]	// 1
		add	I[1]=1,I[1];;
		zxt1	I[1]=I[1]
	}
	{
		.mmi
		lfetch.nt1	[InPrefetch], 0x80
		lfetch.excl.nt1	[OutPrefetch], 0x80
		.save	pr, PRSave
		mov	PRSave = pr
	} ;;
	{
		.mmi
		lfetch.excl.nt1	[OutPrefetch], 0x80
		LKEY	J = [KTable], SZ
		ADDP	EndPtr = DataLen, InPtr
	} ;;
	{
		.mmi
		ADDP	EndPtr = -1, EndPtr	// Make it point to
						// last data byte.
		mov	One = 1
		.save	ar.lc, LCSave
		mov	LCSave = ar.lc
		.body
	} ;;
	{
		.mmb
		sub	Remainder = 0, OutPtr
		cmp.gtu	pSmall, p0 = $threshold, DataLen
(pSmall)	br.cond.dpnt .rc4Remainder	// Data too small for
						// big loop.
	} ;;
	{
		.mmi
		and	Remainder = 0x7, Remainder
		;;
		cmp.eq	pAligned, pUnaligned = Remainder, r0
		nop	0x0
	} ;;
	{
		.mmb
		.pred.rel "mutex",pUnaligned,pAligned
(pUnaligned)	add	Remainder = -1, Remainder
(pAligned)	sub	Remainder = EndPtr, InPtr
(pAligned)	br.cond.dptk.many .rc4Aligned
	} ;;
	{
		.mmi
		nop	0x0
		nop	0x0
		mov.i	ar.lc = Remainder
	}

	/* Do the initial few bytes via the compact, modulo-scheduled loop
	   until the output pointer is 8-byte-aligned. */

	MODSCHED_RC4_PROLOGUE
	MODSCHED_RC4_LOOP(.RC4AlignLoop)

	{
		.mib
		sub	Remainder = EndPtr, InPtr
		zxt1	IFinal = IFinal
		clrrrb				// Clear CFM.rrb.pr so
	;;					// next "mov pr.rot = N"
						// does the right thing.
	}
	{
		.mmi
		mov	I[1] = IFinal
		nop	0x0
		nop	0x0
	} ;;


.rc4Aligned:

	/*
	   Unrolled loop count = (Remainder - ($unroll_count+1)*$phases)/($unroll_count*$phases)
	 */

	{
		.mlx
		add	LoopCount = 1 - ($unroll_count + 1)*$phases, Remainder
		movl	Remainder = 0xaaaaaaaaaaaaaaab
	} ;;
	{
		.mmi
		setf.sig	f6 = LoopCount		// M2, M3 6 cyc
		setf.sig	f7 = Remainder		// M2, M3 6 cyc
		nop	0x0
	} ;;
	{
		.mfb
		nop	0x0
		xmpy.hu	f6 = f6, f7
		nop	0x0
	} ;;
	{
		.mmi
		getf.sig	LoopCount = f6;;	// M2 5 cyc
		nop	0x0
		shr.u	LoopCount = LoopCount, 4
	} ;;
	{
		.mmi
		nop	0x0
		nop	0x0
		mov.i	ar.lc = LoopCount
	} ;;

	/* Now comes the unrolled loop: */

.rc4Prologue:
___
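
# The three loops below drive emit_body() through pipeline fill, steady state
# and drain.  The prologue emits iterations 0..3 with predicate masks 0b0001,
# 0b0011, 0b0111 and 0b1111; the body emits $unroll_count * $phases iterations
# with all four phases enabled; the epilogue then shifts the mask left
# (0b1110, 0b1100, 0b1000 and finally 0), and the all-zero mask triggers the
# final "st4" flush path at the top of emit_body().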

$iteration = 0;

# Generate the prologue:
$predicates = 1;
for ($i = 0; $i < $phases; ++$i) {
    &emit_body (\$code, \$bypass, $iteration++, $predicates);
    $predicates = ($predicates << 1) | 1;
}

$code.=<<___;
.rc4Loop:
___

# Generate the body:
for ($i = 0; $i < $unroll_count*$phases; ++$i) {
    &emit_body (\$code, \$bypass, $iteration++, $predicates);
}

$code.=<<___;
.rc4Epilogue:
___

# Generate the epilogue:
for ($i = 0; $i < $phases; ++$i) {
    $predicates <<= 1;
    &emit_body (\$code, \$bypass, $iteration++, $predicates);
}
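
# At this point $phases + $unroll_count*$phases + $phases = 4 + 24 + 4 = 32
# pipeline stages have been emitted.  The final template below handles the
# tail bytes that did not fit the big unrolled loop, reusing the compact
# modulo-scheduled loop at .rc4Remainder, and then restores ar.lc/pr and
# writes I and J back to the state table before returning.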

$code.=<<___;
	{
		.mmi
		lfetch.nt1	[EndPtr]	// fetch line with last byte
		mov	IFinal = I[1]
		nop	0x0
	}

.rc4Remainder:
	{
		.mmi
		sub	Remainder = EndPtr, InPtr	// Calculate
							// # of bytes
							// left - 1
		nop	0x0
		nop	0x0
	} ;;
	{
		.mib
		cmp.eq	pDone, p0 = -1, Remainder	// done already?
		mov.i	ar.lc = Remainder
(pDone)		br.cond.dptk.few .rc4Complete
	}

	/* Do the remaining bytes via the compact, modulo-scheduled loop */

	MODSCHED_RC4_PROLOGUE
	MODSCHED_RC4_LOOP(.RC4RestLoop)

.rc4Complete:
	{
		.mmi
		add	KTable = -SZ, KTable
		add	IFinal = -1, IFinal
		mov	ar.lc = LCSave
	} ;;
	{
		.mii
		SKEY	[KTable] = J,-SZ
		zxt1	IFinal = IFinal
		mov	pr = PRSave, 0x1FFFF
	} ;;
	{
		.mib
		SKEY	[KTable] = IFinal
		add	RetVal = 1, r0
		br.ret.sptk.few rp
	} ;;
___

# Last but not least, emit the code for the bypass-code of the unrolled loop:

$code.=$bypass;

$code.=<<___;
	.endp RC4
___

print $code;
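
# The generated assembly is written to stdout, so a typical invocation (an
# assumption; adjust the file names to whatever the build system actually
# uses) would be something like:
#
#       perl rc4-ia64.pl > rc4-ia64.s
#
# with SZ defined when the .s file is preprocessed/assembled so that it
# matches sizeof(RC4_INT).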