#!/usr/bin/env perl
#
# ====================================================================
# Written by David Mosberger <David.Mosberger@acm.org> based on the
# Itanium optimized Crypto code which was released by HP Labs at
# http://www.hpl.hp.com/research/linux/crypto/.
#
# Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.



# This is a little helper program which generates a software-pipelined
# loop for RC4 encryption.  The basic algorithm looks like this:
#
#   for (counter = 0; counter < len; ++counter)
#     {
#       in = inp[counter];
#       SI = S[I];
#       J = (SI + J) & 0xff;
#       SJ = S[J];
#       T = (SI + SJ) & 0xff;
#       S[I] = SJ, S[J] = SI;
#       ST = S[T];
#       outp[counter] = in ^ ST;
#       I = (I + 1) & 0xff;
#     }
#
# Pipelining this loop isn't easy, because the stores to the S[] array
# need to be observed in the right order.  The loop generated by the
# code below has the following pipeline diagram:
#
#      cycle
#     | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |16 |17 |
# iter
#   1: xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx
#   2:             xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx
#   3:                         xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx
#
#   where:
# 	LDI = load of S[I]
# 	LDJ = load of S[J]
# 	SWP = swap of S[I] and S[J]
# 	LDT = load of S[T]
#
# Note that in the above diagram, the major trouble-spot is that LDI
# of the 2nd iteration is performed BEFORE the SWP of the first
# iteration.  Fortunately, this is easy to detect (J of the 1st
# iteration will be equal to I of the 2nd iteration) and when this
# happens, we simply forward the proper value from the 1st iteration
# to the 2nd one.  The proper value in this case is simply the value
# of S[I] from the first iteration (thanks to the fact that SWP
# simply swaps the contents of S[I] and S[J]).
#
# Another potential trouble-spot is in cycle 7, where SWP of the 1st
# iteration issues at the same time as the LDI of the 3rd iteration.
# However, thanks to IA-64 execution semantics, this can be taken
# care of simply by placing LDI later in the instruction-group than
# SWP.  IA-64 CPUs will automatically forward the value if they
# detect that the SWP and LDI are accessing the same memory-location.
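#
# In pseudocode, the bypass amounts to the following (the "prev"/"curr"
# names are illustrative only and do not appear in the generated code):
#
#   if (J_prev == I_curr)       /* LDI of curr ran ahead of SWP of prev  */
#     SI_curr = SI_prev;        /* after SWP, S[I_curr] == old S[I_prev] */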

# The core-loop that can be pipelined then looks like this (annotated
# with McKinley/Madison issue port & latency numbers, assuming L1
# cache hits for the most part):

# operation:	    instruction:		    issue-ports:  latency
# ------------------  -----------------------------   ------------- -------

# Data = *inp++       ld1 data = [inp], 1             M0-M1         1 cyc     c0
#                     shladd Iptr = I, KeyTable, 3    M0-M3, I0, I1 1 cyc
# I = (I + 1) & 0xff  padd1 nextI = I, one            M0-M3, I0, I1 3 cyc
#                     ;;
# SI = S[I]           ld8 SI = [Iptr]                 M0-M1         1 cyc     c1 * after SWAP!
#                     ;;
#                     cmp.eq.unc pBypass = I, J                                  * after J is valid!
# J = SI + J          add J = J, SI                   M0-M3, I0, I1 1 cyc     c2
#                     (pBypass) br.cond.spnt Bypass
#                     ;;
# ---------------------------------------------------------------------------------------
# J = J & 0xff        zxt1 J = J                      I0, I1        1 cyc     c3
#                     ;;
#                     shladd Jptr = J, KeyTable, 3    M0-M3, I0, I1 1 cyc     c4
#                     ;;
# SJ = S[J]           ld8 SJ = [Jptr]                 M0-M1         1 cyc     c5
#                     ;;
# ---------------------------------------------------------------------------------------
# T = (SI + SJ)       add T = SI, SJ                  M0-M3, I0, I1 1 cyc     c6
#                     ;;
# T = T & 0xff        zxt1 T = T                      I0, I1        1 cyc
# S[I] = SJ           st8 [Iptr] = SJ                 M2-M3                   c7
# S[J] = SI           st8 [Jptr] = SI                 M2-M3
#                     ;;
#                     shladd Tptr = T, KeyTable, 3    M0-M3, I0, I1 1 cyc     c8
#                     ;;
# ---------------------------------------------------------------------------------------
# T = S[T]            ld8 T = [Tptr]                  M0-M1         1 cyc     c9
#                     ;;
# data ^= T           xor data = data, T              M0-M3, I0, I1 1 cyc     c10
#                     ;;
# *out++ = Data ^ T   dep word = word, data, 8, POS   I0, I1        1 cyc     c11
#                     ;;
# ---------------------------------------------------------------------------------------

# There are several points worth making here:

#   - Note that due to the bypass/forwarding-path, the first two
#     phases of the loop are strangely mingled together.  In
#     particular, note that the first stage of the pipeline is
#     using the value of "J", as calculated by the second stage.
#   - Each bundle-pair will have exactly 6 instructions.
#   - Pipelined, the loop can execute in 3 cycles/iteration and
#     4 stages.  However, McKinley/Madison can issue "st1" to
#     the same bank at a rate of at most one per 4 cycles.  Thus,
#     instead of storing each byte, we accumulate them in a word
#     and then write them back at once with a single "st8", as sketched
#     after these notes (this
#     implies that the setup code needs to ensure that the output
#     buffer is properly aligned, if need be, by encoding the
#     first few bytes separately).
#   - There is no space for a "br.ctop" instruction.  For this
#     reason we can't use modulo-loop support in IA-64 and have
#     to do a traditional, purely software-pipelined loop.
#   - We can't replace any of the remaining "add/zxt1" pairs with
#     "padd1" because the latency for that instruction is too high
#     and would push the loop to the point where more bypasses
#     would be needed, which we don't have space for.
#   - The above loop runs at around 3.26 cycles/byte, or roughly
#     440 MByte/sec on a 1.5GHz Madison.  This is well below the
#     system bus bandwidth and hence with judicious use of
#     "lfetch" this loop can run at (almost) peak speed even when
#     the input and output data reside in memory.  The
#     max. latency that can be tolerated is (PREFETCH_DISTANCE *
#     L2_LINE_SIZE * 3 cyc), or about 384 cycles assuming (at
#     least) 1-ahead prefetching of 128 byte cache-lines.  Note
#     that we do NOT prefetch into L1, since that would only
#     interfere with the S[] table values stored there.  This is
#     acceptable because there is a 10 cycle latency between
#     load and first use of the input data.
#   - We use a branch to out-of-line bypass-code because of cycle-pressure:
#     we calculate the next J, check for the need to activate the
#     bypass path, and activate the bypass path ALL IN THE SAME
#     CYCLE.  If we didn't have these constraints, we could do
#     the bypass with a simple conditional move instruction.
#     Fortunately, the bypass paths get activated relatively
#     infrequently, so the extra branches don't cost all that much
#     (about 0.04 cycles/byte, measured on a 16396 byte file with
#     random input data).
#
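# A minimal sketch of the byte-accumulation scheme mentioned in the notes
# above (illustrative C only, assuming a little-endian host; the generated
# code uses the "dep" instruction and the BYTE_POS() macro instead):
#
#   out_word |= (uint64_t) out_byte << (8 * byte_num);  /* byte_num = 0..7 */
#   if (byte_num == 7)
#     *(uint64_t *) out_ptr = out_word, out_ptr += 8;   /* one st8 per 8 bytes */
#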

$phases = 4;		# number of stages/phases in the pipelined-loop
$unroll_count = 6;	# number of times we unrolled it
$pComI = (1 << 0);
$pComJ = (1 << 1);
$pComT = (1 << 2);
$pOut  = (1 << 3);

$NData = 4;
$NIP = 3;
$NJP = 2;
$NI = 2;
$NSI = 3;
$NSJ = 2;
$NT = 2;
$NOutWord = 2;

#
# $threshold is the minimum length before we attempt to use the
# big software-pipelined loop.  It MUST be greater-or-equal
# to:
#  		PHASES * (UNROLL_COUNT + 1) + 7
#
# The "+ 7" comes from the fact we may have to encode up to
#   7 bytes separately before the output pointer is aligned.
#
$threshold = (3 * ($phases * ($unroll_count + 1)) + 7);
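# With the values above ($phases = 4, $unroll_count = 6) this evaluates to
# 3 * (4 * 7) + 7 = 91 bytes, comfortably above the 4 * 7 + 7 = 35 byte
# minimum required by the formula.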

sub I {
    local *code = shift;
    local $format = shift;
    $code .= sprintf ("\t\t".$format."\n", @_);
}

sub P {
    local *code = shift;
    local $format = shift;
    $code .= sprintf ($format."\n", @_);
}

sub STOP {
    local *code = shift;
    $code .=<<___;
		;;
___
}
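
# For example (illustrative call only), &I(\$code, "add J = J, SI[%u]", 2)
# appends the line "\t\tadd J = J, SI[2]" to $code, &P emits the same text
# without the leading tabs, and &STOP appends a lone ";;" stop-bit line.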

sub emit_body {
    local *c = shift;
    local *bypass = shift;
    local ($iteration, $p) = @_;

    local $i0 = $iteration;
    local $i1 = $iteration - 1;
    local $i2 = $iteration - 2;
    local $i3 = $iteration - 3;
    local $iw0 = ($iteration - 3) / 8;
    local $iw1 = ($iteration > 3) ? ($iteration - 4) / 8 : 1;
    local $byte_num = ($iteration - 3) % 8;
    local $label = $iteration + 1;
    local $pAny = ($p & 0xf) == 0xf;
    local $pByp = (($p & $pComI) && ($iteration > 0));

    $c.=<<___;
//////////////////////////////////////////////////
___

    if (($p & 0xf) == 0) {
	$c.="#ifdef HOST_IS_BIG_ENDIAN\n";
	&I(\$c,"shr.u	OutWord[%u] = OutWord[%u], 32;;",
				$iw1 % $NOutWord, $iw1 % $NOutWord);
	$c.="#endif\n";
	&I(\$c, "st4 [OutPtr] = OutWord[%u], 4", $iw1 % $NOutWord);
	return;
    }

    # Cycle 0
    &I(\$c, "{ .mmi")					      if ($pAny);
    &I(\$c, "ld1    Data[%u] = [InPtr], 1", $i0 % $NData)     if ($p & $pComI);
    &I(\$c, "padd1  I[%u] = One, I[%u]", $i0 % $NI, $i1 % $NI)if ($p & $pComI);
    &I(\$c, "zxt1   J = J")				      if ($p & $pComJ);
    &I(\$c, "}")					      if ($pAny);
    &I(\$c, "{ .mmi")					      if ($pAny);
    &I(\$c, "LKEY   T[%u] = [T[%u]]", $i1 % $NT, $i1 % $NT)   if ($p & $pOut);
    &I(\$c, "add    T[%u] = SI[%u], SJ[%u]",
       $i0 % $NT, $i2 % $NSI, $i1 % $NSJ)		      if ($p & $pComT);
    &I(\$c, "KEYADDR(IPr[%u], I[%u])", $i0 % $NIP, $i1 % $NI) if ($p & $pComI);
    &I(\$c, "}")					      if ($pAny);
    &STOP(\$c);

    # Cycle 1
    &I(\$c, "{ .mmi")					      if ($pAny);
    &I(\$c, "SKEY   [IPr[%u]] = SJ[%u]", $i2 % $NIP, $i1%$NSJ)if ($p & $pComT);
    &I(\$c, "SKEY   [JP[%u]] = SI[%u]", $i1 % $NJP, $i2%$NSI) if ($p & $pComT);
    &I(\$c, "zxt1   T[%u] = T[%u]", $i0 % $NT, $i0 % $NT)     if ($p & $pComT);
    &I(\$c, "}")					      if ($pAny);
    &I(\$c, "{ .mmi")					      if ($pAny);
    &I(\$c, "LKEY   SI[%u] = [IPr[%u]]", $i0 % $NSI, $i0%$NIP)if ($p & $pComI);
    &I(\$c, "KEYADDR(JP[%u], J)", $i0 % $NJP)		      if ($p & $pComJ);
    &I(\$c, "xor    Data[%u] = Data[%u], T[%u]",
       $i3 % $NData, $i3 % $NData, $i1 % $NT)		      if ($p & $pOut);
    &I(\$c, "}")					      if ($pAny);
    &STOP(\$c);

    # Cycle 2
    &I(\$c, "{ .mmi")					      if ($pAny);
    &I(\$c, "LKEY   SJ[%u] = [JP[%u]]", $i0 % $NSJ, $i0%$NJP) if ($p & $pComJ);
    &I(\$c, "cmp.eq pBypass, p0 = I[%u], J", $i1 % $NI)	      if ($pByp);
    &I(\$c, "dep OutWord[%u] = Data[%u], OutWord[%u], BYTE_POS(%u), 8",
       $iw0%$NOutWord, $i3%$NData, $iw1%$NOutWord, $byte_num) if ($p & $pOut);
    &I(\$c, "}")					      if ($pAny);
    &I(\$c, "{ .mmb")					      if ($pAny);
    &I(\$c, "add    J = J, SI[%u]", $i0 % $NSI)		      if ($p & $pComI);
    &I(\$c, "KEYADDR(T[%u], T[%u])", $i0 % $NT, $i0 % $NT)    if ($p & $pComT);
    &P(\$c, "(pBypass)\tbr.cond.spnt.many .rc4Bypass%u",$label)if ($pByp);
    &I(\$c, "}") if ($pAny);
    &STOP(\$c);

    &P(\$c, ".rc4Resume%u:", $label)			      if ($pByp);
    if ($byte_num == 0 && $iteration >= $phases) {
	&I(\$c, "st8 [OutPtr] = OutWord[%u], 8",
	   $iw1 % $NOutWord)				      if ($p & $pOut);
	if ($iteration == (1 + $unroll_count) * $phases - 1) {
	    if ($unroll_count == 6) {
		&I(\$c, "mov OutWord[%u] = OutWord[%u]",
		   $iw1 % $NOutWord, $iw0 % $NOutWord);
	    }
	    &I(\$c, "lfetch.nt1 [InPrefetch], %u",
	       $unroll_count * $phases);
	    &I(\$c, "lfetch.excl.nt1 [OutPrefetch], %u",
	       $unroll_count * $phases);
	    &I(\$c, "br.cloop.sptk.few .rc4Loop");
	}
    }

    if ($pByp) {
	&P(\$bypass, ".rc4Bypass%u:", $label);
	&I(\$bypass, "sub J = J, SI[%u]", $i0 % $NSI);
	&I(\$bypass, "nop 0");
	&I(\$bypass, "nop 0");
	&I(\$bypass, ";;");
	&I(\$bypass, "add J = J, SI[%u]", $i1 % $NSI);
	&I(\$bypass, "mov SI[%u] = SI[%u]", $i0 % $NSI, $i1 % $NSI);
	&I(\$bypass, "br.sptk.many .rc4Resume%u\n", $label);
	&I(\$bypass, ";;");
    }
}

$code=<<___;
.ident \"rc4-ia64.s, version 3.0\"
.ident \"Copyright (c) 2005 Hewlett-Packard Development Company, L.P.\"

#define LCSave		r8
#define PRSave		r9

/* Inputs become invalid once rotation begins!  */

#define StateTable	in0
#define DataLen		in1
#define InputBuffer	in2
#define OutputBuffer	in3

#define KTable		r14
#define J		r15
#define InPtr		r16
#define OutPtr		r17
#define InPrefetch	r18
#define OutPrefetch	r19
#define One		r20
#define LoopCount	r21
#define Remainder	r22
#define IFinal		r23
#define EndPtr		r24

#define tmp0		r25
#define tmp1		r26

#define pBypass		p6
#define pDone		p7
#define pSmall		p8
#define pAligned	p9
#define pUnaligned	p10

#define pComputeI	pPhase[0]
#define pComputeJ	pPhase[1]
#define pComputeT	pPhase[2]
#define pOutput		pPhase[3]

#define RetVal		r8
#define L_OK		p7
#define L_NOK		p8

#define	_NINPUTS	4
#define	_NOUTPUT	0

#define	_NROTATE	24
#define	_NLOCALS	(_NROTATE - _NINPUTS - _NOUTPUT)

#ifndef SZ
# define SZ	4	// this must be set to sizeof(RC4_INT)
#endif

#if SZ == 1
# define LKEY			ld1
# define SKEY			st1
# define KEYADDR(dst, i)	add dst = i, KTable
#elif SZ == 2
# define LKEY			ld2
# define SKEY			st2
# define KEYADDR(dst, i)	shladd dst = i, 1, KTable
#elif SZ == 4
# define LKEY			ld4
# define SKEY			st4
# define KEYADDR(dst, i)	shladd dst = i, 2, KTable
#else
# define LKEY			ld8
# define SKEY			st8
# define KEYADDR(dst, i)	shladd dst = i, 3, KTable
#endif
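
/* For example, with the default SZ == 4, KEYADDR(dst, i) expands to
   "shladd dst = i, 2, KTable", i.e. dst = KTable + 4*i.  */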

#if defined(_HPUX_SOURCE) && !defined(_LP64)
# define ADDP	addp4
#else
# define ADDP	add
#endif

/* Define a macro for the bit number of the n-th byte: */

#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
# define HOST_IS_BIG_ENDIAN
# define BYTE_POS(n)	(56 - (8 * (n)))
#else
# define BYTE_POS(n)	(8 * (n))
#endif
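
/* For example, byte n = 2 is deposited at bit position 16 on a
   little-endian host and at bit position 56 - 16 = 40 on a big-endian
   (HP-UX) host.  */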

/*
   We must perform the first phase of the pipeline explicitly since
   we will always load from the state table the first time.  The br.cexit
   will never be taken, regardless of the number of bytes, because
   the epilogue count is 4.
*/
/* MODSCHED_RC4 macro was split to _PROLOGUE and _LOOP, because HP-UX
   assembler failed on original macro with syntax error. <appro> */
#define MODSCHED_RC4_PROLOGUE						   \\
	{								   \\
				ld1		Data[0] = [InPtr], 1;	   \\
				add		IFinal = 1, I[1];	   \\
				KEYADDR(IPr[0], I[1]);			   \\
	} ;;								   \\
	{								   \\
				LKEY		SI[0] = [IPr[0]];	   \\
				mov		pr.rot = 0x10000;	   \\
				mov		ar.ec = 4;		   \\
	} ;;								   \\
	{								   \\
				add		J = J, SI[0];		   \\
				zxt1		I[0] = IFinal;		   \\
				br.cexit.spnt.few .+16; /* never taken */  \\
	} ;;
#define MODSCHED_RC4_LOOP(label)					   \\
label:									   \\
	{	.mmi;							   \\
		(pComputeI)	ld1		Data[0] = [InPtr], 1;	   \\
		(pComputeI)	add		IFinal = 1, I[1];	   \\
		(pComputeJ)	zxt1		J = J;			   \\
	}{	.mmi;							   \\
		(pOutput)	LKEY		T[1] = [T[1]];		   \\
		(pComputeT)	add		T[0] = SI[2], SJ[1];	   \\
		(pComputeI)	KEYADDR(IPr[0], I[1]);			   \\
	} ;;								   \\
	{	.mmi;							   \\
		(pComputeT)	SKEY		[IPr[2]] = SJ[1];	   \\
		(pComputeT)	SKEY		[JP[1]] = SI[2];	   \\
		(pComputeT)	zxt1		T[0] = T[0];		   \\
	}{	.mmi;							   \\
		(pComputeI)	LKEY		SI[0] = [IPr[0]];	   \\
		(pComputeJ)	KEYADDR(JP[0], J);			   \\
		(pComputeI)	cmp.eq.unc	pBypass, p0 = I[1], J;	   \\
	} ;;								   \\
	{	.mmi;							   \\
		(pComputeJ)	LKEY		SJ[0] = [JP[0]];	   \\
		(pOutput)	xor		Data[3] = Data[3], T[1];   \\
				nop		0x0;			   \\
	}{	.mmi;							   \\
		(pComputeT)	KEYADDR(T[0], T[0]);			   \\
		(pBypass)	mov		SI[0] = SI[1];		   \\
		(pComputeI)	zxt1		I[0] = IFinal;		   \\
	} ;;								   \\
	{	.mmb;							   \\
		(pOutput)	st1		[OutPtr] = Data[3], 1;	   \\
		(pComputeI)	add		J = J, SI[0];		   \\
				br.ctop.sptk.few label;			   \\
	} ;;
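
/* Note on the modulo-scheduled loop above: "mov pr.rot = 0x10000" enables
   only the first rotating predicate (pComputeI), "mov ar.ec = 4" sets the
   epilogue count to the number of pipeline phases, and "br.ctop" rotates
   the registers and predicates each iteration so that the four phases
   drain cleanly once ar.lc reaches zero.  */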

	.text

	.align	32

	.type	RC4, \@function
	.global	RC4

	.proc	RC4
	.prologue

RC4:
	{
	  	.mmi
		alloc	r2 = ar.pfs, _NINPUTS, _NLOCALS, _NOUTPUT, _NROTATE

		.rotr Data[4], I[2], IPr[3], SI[3], JP[2], SJ[2], T[2], \\
		      OutWord[2]
		.rotp pPhase[4]

		ADDP		InPrefetch = 0, InputBuffer
		ADDP		KTable = 0, StateTable
	}
	{
		.mmi
		ADDP		InPtr = 0, InputBuffer
		ADDP		OutPtr = 0, OutputBuffer
		mov		RetVal = r0
	}
	;;
	{
		.mmi
		lfetch.nt1	[InPrefetch], 0x80
		ADDP		OutPrefetch = 0, OutputBuffer
	}
	{               // Return 0 if the input length is nonsensical
        	.mib
		ADDP		StateTable = 0, StateTable
        	cmp.ge.unc  	L_NOK, L_OK = r0, DataLen
	(L_NOK) br.ret.sptk.few rp
	}
	;;
	{
        	.mib
        	cmp.eq.or  	L_NOK, L_OK = r0, InPtr
        	cmp.eq.or  	L_NOK, L_OK = r0, OutPtr
		nop		0x0
	}
	{
		.mib
        	cmp.eq.or  	L_NOK, L_OK = r0, StateTable
		nop		0x0
	(L_NOK) br.ret.sptk.few rp
	}
	;;
		LKEY		I[1] = [KTable], SZ
/* Prefetch the state-table. It contains 256 elements of size SZ */
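/* For example, with SZ == 4 the table spans 8 cache lines of 128 bytes:
   tmp0 and tmp1 start at lines 7 and 6 (counting from 0) and the lfetch
   pairs below walk backwards in 256-byte steps, touching lines 7 down
   to 1; line 0 is brought in by the I and J loads themselves.  */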

#if SZ == 1
		ADDP		tmp0 = 1*128, StateTable
#elif SZ == 2
		ADDP		tmp0 = 3*128, StateTable
		ADDP		tmp1 = 2*128, StateTable
#elif SZ == 4
		ADDP		tmp0 = 7*128, StateTable
		ADDP		tmp1 = 6*128, StateTable
#elif SZ == 8
		ADDP		tmp0 = 15*128, StateTable
		ADDP		tmp1 = 14*128, StateTable
#endif
		;;
#if SZ >= 8
		lfetch.fault.nt1		[tmp0], -256	// 15
		lfetch.fault.nt1		[tmp1], -256;;
		lfetch.fault.nt1		[tmp0], -256	// 13
		lfetch.fault.nt1		[tmp1], -256;;
		lfetch.fault.nt1		[tmp0], -256	// 11
		lfetch.fault.nt1		[tmp1], -256;;
		lfetch.fault.nt1		[tmp0], -256	//  9
		lfetch.fault.nt1		[tmp1], -256;;
#endif
#if SZ >= 4
		lfetch.fault.nt1		[tmp0], -256	//  7
		lfetch.fault.nt1		[tmp1], -256;;
		lfetch.fault.nt1		[tmp0], -256	//  5
		lfetch.fault.nt1		[tmp1], -256;;
#endif
#if SZ >= 2
		lfetch.fault.nt1		[tmp0], -256	//  3
		lfetch.fault.nt1		[tmp1], -256;;
#endif
	{
		.mii
		lfetch.fault.nt1		[tmp0]		//  1
		add		I[1]=1,I[1];;
		zxt1		I[1]=I[1]
	}
	{
		.mmi
		lfetch.nt1	[InPrefetch], 0x80
		lfetch.excl.nt1	[OutPrefetch], 0x80
		.save		pr, PRSave
		mov		PRSave = pr
	} ;;
	{
		.mmi
		lfetch.excl.nt1	[OutPrefetch], 0x80
		LKEY		J = [KTable], SZ
		ADDP		EndPtr = DataLen, InPtr
	}  ;;
	{
		.mmi
		ADDP		EndPtr = -1, EndPtr	// Make it point to
							// last data byte.
		mov		One = 1
		.save		ar.lc, LCSave
		mov		LCSave = ar.lc
		.body
	} ;;
	{
		.mmb
		sub		Remainder = 0, OutPtr
		cmp.gtu		pSmall, p0 = $threshold, DataLen
(pSmall)	br.cond.dpnt	.rc4Remainder		// Data too small for
							// big loop.
	} ;;
	{
		.mmi
		and		Remainder = 0x7, Remainder
		;;
		cmp.eq		pAligned, pUnaligned = Remainder, r0
		nop		0x0
	} ;;
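
/* Remainder now holds (0 - OutPtr) & 7: the number of bytes that must be
   handled by the byte-at-a-time loop below before OutPtr reaches 8-byte
   alignment (zero if it is already aligned).  */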
	{
		.mmb
.pred.rel	"mutex",pUnaligned,pAligned
(pUnaligned)	add		Remainder = -1, Remainder
(pAligned)	sub		Remainder = EndPtr, InPtr
(pAligned)	br.cond.dptk.many .rc4Aligned
	} ;;
	{
		.mmi
		nop		0x0
		nop		0x0
		mov.i		ar.lc = Remainder
	}

/* Do the initial few bytes via the compact, modulo-scheduled loop
   until the output pointer is 8-byte-aligned.  */

		MODSCHED_RC4_PROLOGUE
		MODSCHED_RC4_LOOP(.RC4AlignLoop)

	{
		.mib
		sub		Remainder = EndPtr, InPtr
		zxt1		IFinal = IFinal
		clrrrb				// Clear CFM.rrb.pr so
		;;				// next "mov pr.rot = N"
						// does the right thing.
	}
	{
		.mmi
		mov		I[1] = IFinal
		nop		0x0
		nop		0x0
	} ;;


.rc4Aligned:

/*
   Unrolled loop count = (Remainder - ($unroll_count+1)*$phases)/($unroll_count*$phases)
 */
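
/* The movl/xmpy.hu/shr.u sequence below implements this division by
   24 (= unroll_count * phases) without an integer divide instruction:
   0xaaaaaaaaaaaaaaab is roughly 2^65/3, so the upper 64 bits of the
   product are about LoopCount * 2/3, and the final right shift by 4
   completes the division by 3 * 8 = 24.  */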

	{
		.mlx
		add	LoopCount = 1 - ($unroll_count + 1)*$phases, Remainder
		movl		Remainder = 0xaaaaaaaaaaaaaaab
	} ;;
	{
		.mmi
		setf.sig	f6 = LoopCount		// M2, M3	6 cyc
		setf.sig	f7 = Remainder		// M2, M3	6 cyc
		nop		0x0
	} ;;
	{
		.mfb
		nop		0x0
		xmpy.hu		f6 = f6, f7
		nop		0x0
	} ;;
	{
		.mmi
		getf.sig	LoopCount = f6;;	// M2		5 cyc
		nop		0x0
		shr.u		LoopCount = LoopCount, 4
	} ;;
	{
		.mmi
		nop		0x0
		nop		0x0
		mov.i		ar.lc = LoopCount
	} ;;

/* Now comes the unrolled loop: */

.rc4Prologue:
___

$iteration = 0;

# Generate the prologue:
$predicates = 1;
for ($i = 0; $i < $phases; ++$i) {
    &emit_body (\$code, \$bypass, $iteration++, $predicates);
    $predicates = ($predicates << 1) | 1;
}

$code.=<<___;
.rc4Loop:
___

# Generate the body:
for ($i = 0; $i < $unroll_count*$phases; ++$i) {
    &emit_body (\$code, \$bypass, $iteration++, $predicates);
}

$code.=<<___;
.rc4Epilogue:
___

# Generate the epilogue:
for ($i = 0; $i < $phases; ++$i) {
    $predicates <<= 1;
    &emit_body (\$code, \$bypass, $iteration++, $predicates);
}

$code.=<<___;
	{
		.mmi
		lfetch.nt1	[EndPtr]	// fetch line with last byte
		mov		IFinal = I[1]
		nop		0x0
	}

.rc4Remainder:
	{
		.mmi
		sub		Remainder = EndPtr, InPtr	// Calculate
								// # of bytes
								// left - 1
		nop		0x0
		nop		0x0
	} ;;
	{
		.mib
		cmp.eq		pDone, p0 = -1, Remainder // done already?
		mov.i		ar.lc = Remainder
(pDone)		br.cond.dptk.few .rc4Complete
	}

/* Do the remaining bytes via the compact, modulo-scheduled loop */

		MODSCHED_RC4_PROLOGUE
		MODSCHED_RC4_LOOP(.RC4RestLoop)

.rc4Complete:
	{
		.mmi
		add		KTable = -SZ, KTable
		add		IFinal = -1, IFinal
		mov		ar.lc = LCSave
	} ;;
	{
		.mii
		SKEY		[KTable] = J,-SZ
		zxt1		IFinal = IFinal
		mov		pr = PRSave, 0x1FFFF
	} ;;
	{
		.mib
		SKEY		[KTable] = IFinal
		add		RetVal = 1, r0
		br.ret.sptk.few	rp
	} ;;
___

# Last but not least, emit the code for the bypass-code of the unrolled loop:

$code.=$bypass;

$code.=<<___;
	.endp RC4
___

print $code;