
/arch/xtensa/lib/checksum.S

http://github.com/mirrors/linux
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Xtensa version:  Copyright (C) 2001 Tensilica, Inc. by Kevin Chea
 *                  Optimized by Joe Taylor
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

/*
 * unsigned int csum_partial(const unsigned char *buf, int len,
 *                           unsigned int sum);
 *    a2 = buf
 *    a3 = len
 *    a4 = sum
 *
 * This function assumes 2- or 4-byte alignment.  Other alignments will fail!
 */
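
/*
 * Typical use (illustrative sketch, not taken from this file): callers keep
 * a 32-bit partial sum across buffer fragments and fold it down to the final
 * 16-bit Internet checksum only at the end, e.g. with the kernel's
 * csum_fold() helper:
 *
 *	sum = csum_partial(frag1, len1, sum);
 *	sum = csum_partial(frag2, len2, sum);
 *	check = csum_fold(sum);
 */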

/* ONES_ADD converts twos-complement math to ones-complement. */
#define ONES_ADD(sum, val)	  \
	add	sum, sum, val	; \
	bgeu	sum, val, 99f	; \
	addi	sum, sum, 1	; \
99:				;
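
/*
 * Worked example (illustrative): with sum = 0xffffffff and val = 0x00000002,
 * the add wraps to 0x00000001; because the result is now below val, the
 * carry out of bit 31 is added back in, giving 0x00000002.  This end-around
 * carry is what turns the 32-bit accumulator into a ones'-complement sum.
 */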

.text
ENTRY(csum_partial)

	/*
	 * Experiments with Ethernet and SLIP connections show that buf
	 * is aligned on either a 2-byte or 4-byte boundary.
	 */
	abi_entry_default
	extui	a5, a2, 0, 2
	bnez	a5, 8f		/* branch if 2-byte aligned */
	/* Fall-through on common case, 4-byte alignment */
1:
	srli	a5, a3, 5	/* 32-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a5, 2f
#else
	beqz	a5, 2f
	slli	a5, a5, 5
	add	a5, a5, a2	/* a5 = end of last 32-byte chunk */
.Loop1:
#endif
	l32i	a6, a2, 0
	l32i	a7, a2, 4
	ONES_ADD(a4, a6)
	ONES_ADD(a4, a7)
	l32i	a6, a2, 8
	l32i	a7, a2, 12
	ONES_ADD(a4, a6)
	ONES_ADD(a4, a7)
	l32i	a6, a2, 16
	l32i	a7, a2, 20
	ONES_ADD(a4, a6)
	ONES_ADD(a4, a7)
	l32i	a6, a2, 24
	l32i	a7, a2, 28
	ONES_ADD(a4, a6)
	ONES_ADD(a4, a7)
	addi	a2, a2, 4*8
#if !XCHAL_HAVE_LOOPS
	blt	a2, a5, .Loop1
#endif
2:
	extui	a5, a3, 2, 3	/* remaining 4-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a5, 3f
#else
	beqz	a5, 3f
	slli	a5, a5, 2
	add	a5, a5, a2	/* a5 = end of last 4-byte chunk */
.Loop2:
#endif
	l32i	a6, a2, 0
	ONES_ADD(a4, a6)
	addi	a2, a2, 4
#if !XCHAL_HAVE_LOOPS
	blt	a2, a5, .Loop2
#endif
3:
	_bbci.l	a3, 1, 5f	/* remaining 2-byte chunk */
	l16ui	a6, a2, 0
	ONES_ADD(a4, a6)
	addi	a2, a2, 2
5:
	_bbci.l	a3, 0, 7f	/* remaining 1-byte chunk */
6:	l8ui	a6, a2, 0
#ifdef __XTENSA_EB__
	slli	a6, a6, 8	/* load byte into bits 8..15 */
#endif
	ONES_ADD(a4, a6)
7:
	mov	a2, a4
	abi_ret_default

	/* uncommon case, buf is 2-byte aligned */
8:
	beqz	a3, 7b		/* branch if len == 0 */
	beqi	a3, 1, 6b	/* branch if len == 1 */

	extui	a5, a2, 0, 1
	bnez	a5, 8f		/* branch if 1-byte aligned */

	l16ui	a6, a2, 0	/* common case, len >= 2 */
	ONES_ADD(a4, a6)
	addi	a2, a2, 2	/* adjust buf */
	addi	a3, a3, -2	/* adjust len */
	j	1b		/* now buf is 4-byte aligned */

	/* case: odd-byte aligned, len > 1
	 * This case is dog slow, so don't give us an odd address.
	 * (I don't think this ever happens, but just in case.)
	 */
8:
	srli	a5, a3, 2	/* 4-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a5, 2f
#else
	beqz	a5, 2f
	slli	a5, a5, 2
	add	a5, a5, a2	/* a5 = end of last 4-byte chunk */
.Loop3:
#endif
	l8ui	a6, a2, 0	/* bits 24..31 */
	l16ui	a7, a2, 1	/* bits  8..23 */
	l8ui	a8, a2, 3	/* bits  0.. 7 */
#ifdef	__XTENSA_EB__
	slli	a6, a6, 24
#else
	slli	a8, a8, 24
#endif
	slli	a7, a7, 8
	or	a7, a7, a6
	or	a7, a7, a8
	ONES_ADD(a4, a7)
	addi	a2, a2, 4
#if !XCHAL_HAVE_LOOPS
	blt	a2, a5, .Loop3
#endif
2:
	_bbci.l	a3, 1, 3f	/* remaining 2-byte chunk, still odd addr */
	l8ui	a6, a2, 0
	l8ui	a7, a2, 1
#ifdef	__XTENSA_EB__
	slli	a6, a6, 8
#else
	slli	a7, a7, 8
#endif
	or	a7, a7, a6
	ONES_ADD(a4, a7)
	addi	a2, a2, 2
3:
	j	5b		/* branch to handle the remaining byte */

ENDPROC(csum_partial)

/*
 * Copy from src to dst while checksumming, otherwise like csum_partial
 */

/*
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
					int sum, int *src_err_ptr, int *dst_err_ptr)
	a2  = src
	a3  = dst
	a4  = len
	a5  = sum
	a6  = src_err_ptr
	a7  = dst_err_ptr
	a8  = temp
	a9  = temp
	a10 = temp
	a11 = original len for exception handling
	a12 = original dst for exception handling

    This function is optimized for 4-byte aligned addresses.  Other
    alignments work, but not nearly as efficiently.
 */
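
/*
 * Error handling (see the .fixup handlers at the end of this file): if a
 * source access faults, -EFAULT is stored through src_err_ptr and the whole
 * destination buffer is cleared; if a destination access faults, -EFAULT is
 * stored through dst_err_ptr.  In either case 0 is returned instead of a
 * checksum.
 */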

ENTRY(csum_partial_copy_generic)

	abi_entry_default
	mov	a12, a3
	mov	a11, a4
	or	a10, a2, a3

	/* We optimize the following alignment tests for the 4-byte
	aligned case.  Two bbsi.l instructions might seem more optimal
	(commented out below).  However, both labels 5: and 3: are out
	of the imm8 range, so the assembler relaxes them into
	equivalent bbci.l, j combinations, which is actually
	slower. */

	extui	a9, a10, 0, 2
	beqz	a9, 1f		/* branch if both are 4-byte aligned */
	bbsi.l	a10, 0, 5f	/* branch if one address is odd */
	j	3f		/* one address is 2-byte aligned */

/*	_bbsi.l	a10, 0, 5f */	/* branch if odd address */
/*	_bbsi.l	a10, 1, 3f */	/* branch if 2-byte-aligned address */

1:
	/* src and dst are both 4-byte aligned */
	srli	a10, a4, 5	/* 32-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a10, 2f
#else
	beqz	a10, 2f
	slli	a10, a10, 5
	add	a10, a10, a2	/* a10 = end of last 32-byte src chunk */
.Loop5:
#endif
EX(10f)	l32i	a9, a2, 0
EX(10f)	l32i	a8, a2, 4
EX(11f)	s32i	a9, a3, 0
EX(11f)	s32i	a8, a3, 4
	ONES_ADD(a5, a9)
	ONES_ADD(a5, a8)
EX(10f)	l32i	a9, a2, 8
EX(10f)	l32i	a8, a2, 12
EX(11f)	s32i	a9, a3, 8
EX(11f)	s32i	a8, a3, 12
	ONES_ADD(a5, a9)
	ONES_ADD(a5, a8)
EX(10f)	l32i	a9, a2, 16
EX(10f)	l32i	a8, a2, 20
EX(11f)	s32i	a9, a3, 16
EX(11f)	s32i	a8, a3, 20
	ONES_ADD(a5, a9)
	ONES_ADD(a5, a8)
EX(10f)	l32i	a9, a2, 24
EX(10f)	l32i	a8, a2, 28
EX(11f)	s32i	a9, a3, 24
EX(11f)	s32i	a8, a3, 28
	ONES_ADD(a5, a9)
	ONES_ADD(a5, a8)
	addi	a2, a2, 32
	addi	a3, a3, 32
#if !XCHAL_HAVE_LOOPS
	blt	a2, a10, .Loop5
#endif
2:
	extui	a10, a4, 2, 3	/* remaining 4-byte chunks */
	extui	a4, a4, 0, 2	/* reset len for general-case, 2-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a10, 3f
#else
	beqz	a10, 3f
	slli	a10, a10, 2
	add	a10, a10, a2	/* a10 = end of last 4-byte src chunk */
.Loop6:
#endif
EX(10f)	l32i	a9, a2, 0
EX(11f)	s32i	a9, a3, 0
	ONES_ADD(a5, a9)
	addi	a2, a2, 4
	addi	a3, a3, 4
#if !XCHAL_HAVE_LOOPS
	blt	a2, a10, .Loop6
#endif
3:
	/*
	Control comes to here in two cases: (1) It may fall through
	to here from the 4-byte alignment case to process, at most,
	one 2-byte chunk.  (2) It branches to here from above if
	either src or dst is 2-byte aligned, and we process all bytes
	here, except for perhaps a trailing odd byte.  It's
	inefficient, so align your addresses to 4-byte boundaries.

	a2 = src
	a3 = dst
	a4 = len
	a5 = sum
	*/
	srli	a10, a4, 1	/* 2-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a10, 4f
#else
	beqz	a10, 4f
	slli	a10, a10, 1
	add	a10, a10, a2	/* a10 = end of last 2-byte src chunk */
.Loop7:
#endif
EX(10f)	l16ui	a9, a2, 0
EX(11f)	s16i	a9, a3, 0
	ONES_ADD(a5, a9)
	addi	a2, a2, 2
	addi	a3, a3, 2
#if !XCHAL_HAVE_LOOPS
	blt	a2, a10, .Loop7
#endif
4:
	/* This section processes a possible trailing odd byte. */
	_bbci.l	a4, 0, 8f	/* 1-byte chunk */
EX(10f)	l8ui	a9, a2, 0
EX(11f)	s8i	a9, a3, 0
#ifdef __XTENSA_EB__
	slli	a9, a9, 8	/* shift byte to bits 8..15 */
#endif
	ONES_ADD(a5, a9)
8:
	mov	a2, a5
	abi_ret_default

5:
	/* Control branches to here when either src or dst is odd.  We
	process all bytes using 8-bit accesses.  Grossly inefficient,
	so don't feed us an odd address. */

	srli	a10, a4, 1	/* handle in pairs for 16-bit csum */
#if XCHAL_HAVE_LOOPS
	loopgtz	a10, 6f
#else
	beqz	a10, 6f
	slli	a10, a10, 1
	add	a10, a10, a2	/* a10 = end of last odd-aligned, 2-byte src chunk */
.Loop8:
#endif
EX(10f)	l8ui	a9, a2, 0
EX(10f)	l8ui	a8, a2, 1
EX(11f)	s8i	a9, a3, 0
EX(11f)	s8i	a8, a3, 1
#ifdef __XTENSA_EB__
	slli	a9, a9, 8	/* combine into a single 16-bit value */
#else				/* for checksum computation */
	slli	a8, a8, 8
#endif
	or	a9, a9, a8
	ONES_ADD(a5, a9)
	addi	a2, a2, 2
	addi	a3, a3, 2
#if !XCHAL_HAVE_LOOPS
	blt	a2, a10, .Loop8
#endif
6:
	j	4b		/* process the possible trailing odd byte */

ENDPROC(csum_partial_copy_generic)


# Exception handler:
.section .fixup, "ax"
/*
	a6  = src_err_ptr
	a7  = dst_err_ptr
	a11 = original len for exception handling
	a12 = original dst for exception handling
*/

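/*
 * 10: a source (load) access faulted.
 * 11: a destination (store) access faulted.
 */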
10:
	_movi	a2, -EFAULT
	s32i	a2, a6, 0	/* src_err_ptr */

	# clear the complete destination - computing the rest
	# is too much work
	movi	a2, 0
#if XCHAL_HAVE_LOOPS
	loopgtz	a11, 2f
#else
	beqz	a11, 2f
	add	a11, a11, a12	/* a11 = ending address */
.Leloop:
#endif
	s8i	a2, a12, 0
	addi	a12, a12, 1
#if !XCHAL_HAVE_LOOPS
	blt	a12, a11, .Leloop
#endif
2:
	abi_ret_default

11:
	movi	a2, -EFAULT
	s32i	a2, a7, 0	/* dst_err_ptr */
	movi	a2, 0
	abi_ret_default

.previous