Bug Summary

File: numpy/core/src/_simd/_simd_convert.inc
Warning:line 87, column 25
PyObject ownership leak with reference count of 1

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name _simd.dispatch.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-output=html -analyzer-checker=python -analyzer-disable-checker=deadcode -analyzer-config prune-paths=true,suppress-c++-stdlib=true,suppress-null-return-paths=false,crosscheck-with-z3=true,model-path=/opt/pyrefcon/lib/pyrefcon/models/models -analyzer-config experimental-enable-naive-ctu-analysis=true,ctu-dir=/tmp/pyrefcon/numpy/csa-scan,ctu-index-name=/tmp/pyrefcon/numpy/csa-scan/externalDefMap.txt,ctu-invocation-list=/tmp/pyrefcon/numpy/csa-scan/invocations.yaml,display-ctu-progress=false -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +sse -target-feature +sse2 -target-feature +sse3 -tune-cpu generic -debug-info-kind=limited -dwarf-version=4 -debugger-tuning=gdb -fcoverage-compilation-dir=/tmp/pyrefcon/numpy -resource-dir /opt/pyrefcon/lib/clang/13.0.0 -isystem /opt/pyrefcon/lib/pyrefcon/models/python3.8 -D NDEBUG -D _FORTIFY_SOURCE=2 -D NPY_INTERNAL_BUILD=1 -D HAVE_NPY_CONFIG_H=1 -D _FILE_OFFSET_BITS=64 -D _LARGEFILE_SOURCE=1 -D _LARGEFILE64_SOURCE=1 -I build/src.linux-x86_64-3.8/numpy/core/src/_simd -I numpy/core/include -I 
build/src.linux-x86_64-3.8/numpy/core/include/numpy -I build/src.linux-x86_64-3.8/numpy/distutils/include -I numpy/core/src/common -I numpy/core/src -I numpy/core -I numpy/core/src/npymath -I numpy/core/src/multiarray -I numpy/core/src/umath -I numpy/core/src/npysort -I numpy/core/src/_simd -I build/src.linux-x86_64-3.8/numpy/core/src/common -I build/src.linux-x86_64-3.8/numpy/core/src/npymath -internal-isystem /opt/pyrefcon/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-result -Wsign-compare -Wall -Wformat -Werror=format-security -Wformat -Werror=format-security -Wdate-time -fdebug-compilation-dir=/tmp/pyrefcon/numpy -ferror-limit 19 -fwrapv -pthread -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/pyrefcon/numpy/csa-scan/reports -x c build/src.linux-x86_64-3.8/numpy/core/src/_simd/_simd.dispatch.c

build/src.linux-x86_64-3.8/numpy/core/src/_simd/_simd.dispatch.c

1#line 1 "numpy/core/src/_simd/_simd.dispatch.c.src"
2
3/*
4 *****************************************************************************
5 ** This file was autogenerated from a template DO NOT EDIT!!!! **
6 ** Changes should be made to the original source (.src) file **
7 *****************************************************************************
8 */
9
10#line 1
11/*@targets #simd_test*/
12#include "_simd.h"
13#include "_simd_inc.h"
14
15#if NPY_SIMD128
16#include "_simd_data.inc"
17#include "_simd_convert.inc"
18#include "_simd_vector.inc"
19#include "_simd_arg.inc"
20#include "_simd_easyintrin.inc"
21
22//#########################################################################
23//## Defining NPYV intrinsics as module functions
24//#########################################################################
25#line 34
26#if 1
27/***************************
28 * Memory
29 ***************************/
30#line 41
31SIMD_IMPL_INTRIN_1(load_u8, vu8, qu8)static PyObject *simd__intrin_load_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu8}; if (!PyArg_ParseTuple( args, "O&:"
"load_u8", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vu8 = npyv_load_u8( arg.data.qu8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
32
33#line 41
34SIMD_IMPL_INTRIN_1(loada_u8, vu8, qu8)static PyObject *simd__intrin_loada_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu8}; if (!PyArg_ParseTuple( args, "O&:"
"loada_u8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vu8 = npyv_loada_u8( arg.data.qu8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
35
36#line 41
37SIMD_IMPL_INTRIN_1(loads_u8, vu8, qu8)static PyObject *simd__intrin_loads_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu8}; if (!PyArg_ParseTuple( args, "O&:"
"loads_u8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vu8 = npyv_loads_u8( arg.data.qu8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
38
39#line 41
40SIMD_IMPL_INTRIN_1(loadl_u8, vu8, qu8)static PyObject *simd__intrin_loadl_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu8}; if (!PyArg_ParseTuple( args, "O&:"
"loadl_u8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vu8 = npyv_loadl_u8( arg.data.qu8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
41
42#line 46
43// special definition due to the nature of store
44static PyObject *
45simd__intrin_store_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
46{
47 simd_arg seq_arg = {.dtype = simd_data_qu8};
48 simd_arg vec_arg = {.dtype = simd_data_vu8};
49 if (!PyArg_ParseTuple(
50 args, "O&O&:store_u8",
51 simd_arg_converter, &seq_arg,
52 simd_arg_converter, &vec_arg
53 )) {
54 return NULL((void*)0);
55 }
56 npyv_store_u8(seq_arg.data.qu8, vec_arg.data.vu8);
57 // write-back
58 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu8, simd_data_qu8)) {
59 simd_arg_free(&seq_arg);
60 return NULL((void*)0);
61 }
62 simd_arg_free(&seq_arg);
63 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
64}
65
66#line 46
67// special definition due to the nature of storea
68static PyObject *
69simd__intrin_storea_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
70{
71 simd_arg seq_arg = {.dtype = simd_data_qu8};
72 simd_arg vec_arg = {.dtype = simd_data_vu8};
73 if (!PyArg_ParseTuple(
74 args, "O&O&:storea_u8",
75 simd_arg_converter, &seq_arg,
76 simd_arg_converter, &vec_arg
77 )) {
78 return NULL((void*)0);
79 }
80 npyv_storea_u8(seq_arg.data.qu8, vec_arg.data.vu8);
81 // write-back
82 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu8, simd_data_qu8)) {
83 simd_arg_free(&seq_arg);
84 return NULL((void*)0);
85 }
86 simd_arg_free(&seq_arg);
87 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
88}
89
90#line 46
91// special definition due to the nature of stores
92static PyObject *
93simd__intrin_stores_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
94{
95 simd_arg seq_arg = {.dtype = simd_data_qu8};
96 simd_arg vec_arg = {.dtype = simd_data_vu8};
97 if (!PyArg_ParseTuple(
98 args, "O&O&:stores_u8",
99 simd_arg_converter, &seq_arg,
100 simd_arg_converter, &vec_arg
101 )) {
102 return NULL((void*)0);
103 }
104 npyv_stores_u8(seq_arg.data.qu8, vec_arg.data.vu8);
105 // write-back
106 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu8, simd_data_qu8)) {
107 simd_arg_free(&seq_arg);
108 return NULL((void*)0);
109 }
110 simd_arg_free(&seq_arg);
111 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
112}
113
114#line 46
115// special definition due to the nature of storel
116static PyObject *
117simd__intrin_storel_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
118{
119 simd_arg seq_arg = {.dtype = simd_data_qu8};
120 simd_arg vec_arg = {.dtype = simd_data_vu8};
121 if (!PyArg_ParseTuple(
122 args, "O&O&:storel_u8",
123 simd_arg_converter, &seq_arg,
124 simd_arg_converter, &vec_arg
125 )) {
126 return NULL((void*)0);
127 }
128 npyv_storel_u8(seq_arg.data.qu8, vec_arg.data.vu8);
129 // write-back
130 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu8, simd_data_qu8)) {
131 simd_arg_free(&seq_arg);
132 return NULL((void*)0);
133 }
134 simd_arg_free(&seq_arg);
135 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
136}
137
138#line 46
139// special definition due to the nature of storeh
140static PyObject *
141simd__intrin_storeh_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
142{
143 simd_arg seq_arg = {.dtype = simd_data_qu8};
144 simd_arg vec_arg = {.dtype = simd_data_vu8};
145 if (!PyArg_ParseTuple(
146 args, "O&O&:storeh_u8",
147 simd_arg_converter, &seq_arg,
148 simd_arg_converter, &vec_arg
149 )) {
150 return NULL((void*)0);
151 }
152 npyv_storeh_u8(seq_arg.data.qu8, vec_arg.data.vu8);
153 // write-back
154 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu8, simd_data_qu8)) {
155 simd_arg_free(&seq_arg);
156 return NULL((void*)0);
157 }
158 simd_arg_free(&seq_arg);
159 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
160}
161
162
163/****************************************
164 * Non-contiguous/Partial Memory access
165 ****************************************/
166#if 0
167// Partial Load
168SIMD_IMPL_INTRIN_3(load_till_u8, vu8, qu8, u32, u8)static PyObject *simd__intrin_load_till_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qu8}; simd_arg arg2 = {.dtype = simd_data_u32
}; simd_arg arg3 = {.dtype = simd_data_u8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""load_till_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu8 = npyv_load_till_u8
( arg1.data.qu8, arg2.data.u32, arg3.data.u8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
169SIMD_IMPL_INTRIN_2(load_tillz_u8, vu8, qu8, u32)static PyObject *simd__intrin_load_tillz_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qu8}; simd_arg arg2 = {.dtype = simd_data_u32
}; if (!PyArg_ParseTuple( args, "O&O&:""load_tillz_u8"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu8 = npyv_load_tillz_u8
( arg1.data.qu8, arg2.data.u32 )}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
170
171// Partial Store
172static PyObject *
173simd__intrin_store_till_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
174{
175 simd_arg seq_arg = {.dtype = simd_data_qu8};
176 simd_arg nlane_arg = {.dtype = simd_data_u32};
177 simd_arg vec_arg = {.dtype = simd_data_vu8};
178 if (!PyArg_ParseTuple(
179 args, "O&O&O&:store_till_u8",
180 simd_arg_converter, &seq_arg,
181 simd_arg_converter, &nlane_arg,
182 simd_arg_converter, &vec_arg
183 )) {
184 return NULL((void*)0);
185 }
186 npyv_store_till_u8(
187 seq_arg.data.qu8, nlane_arg.data.u32, vec_arg.data.vu8
188 );
189 // write-back
190 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu8, simd_data_qu8)) {
191 simd_arg_free(&seq_arg);
192 return NULL((void*)0);
193 }
194 simd_arg_free(&seq_arg);
195 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
196}
197
198// Non-contiguous Load
199#line 112
200static PyObject *
201simd__intrin_loadn_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
202{
203 simd_arg seq_arg = {.dtype = simd_data_qu8};
204 simd_arg stride_arg = {.dtype = simd_data_s64};
205#if 0
206 simd_arg nlane_arg = {.dtype = simd_data_u32};
207#endif // till
208#if 0
209 simd_arg fill_arg = {.dtype = simd_data_u8};
210#endif
211 if (!PyArg_ParseTuple(
212 args, "O&O&:loadn_u8",
213 simd_arg_converter, &seq_arg,
214 simd_arg_converter, &stride_arg
215#if 0
216 ,simd_arg_converter, &nlane_arg
217#endif
218#if 0
219 ,simd_arg_converter, &fill_arg
220#endif
221 )) {
222 return NULL((void*)0);
223 }
224 npyv_lanetype_u8 *seq_ptr = seq_arg.data.qu8;
225 npy_intp stride = (npy_intp)stride_arg.data.s64;
226 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
227 Py_ssize_t min_seq_len = stride * npyv_nlanes_u816;
228 if (stride < 0) {
229 seq_ptr += cur_seq_len -1;
230 min_seq_len = -min_seq_len;
231 }
232 if (cur_seq_len < min_seq_len) {
233 PyErr_Format(PyExc_ValueError,
234 "loadn_u8(), according to provided stride %d, the "
235 "minimum acceptable size of the required sequence is %d, given(%d)",
236 stride, min_seq_len, cur_seq_len
237 );
238 goto err;
239 }
240 npyv_u8 rvec = npyv_loadn_u8(
241 seq_ptr, stride
242 #if 0
243 , nlane_arg.data.u32
244 #endif
245 #if 0
246 , fill_arg.data.u8
247 #endif
248 );
249 simd_arg ret = {
250 .dtype = simd_data_vu8, .data = {.vu8=rvec}
251 };
252 simd_arg_free(&seq_arg);
253 return simd_arg_to_obj(&ret);
254err:
255 simd_arg_free(&seq_arg);
256 return NULL((void*)0);
257}
258
259#line 112
260static PyObject *
261simd__intrin_loadn_till_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
262{
263 simd_arg seq_arg = {.dtype = simd_data_qu8};
264 simd_arg stride_arg = {.dtype = simd_data_s64};
265#if 1
266 simd_arg nlane_arg = {.dtype = simd_data_u32};
267#endif // till
268#if 1
269 simd_arg fill_arg = {.dtype = simd_data_u8};
270#endif
271 if (!PyArg_ParseTuple(
272 args, "O&O&O&O&:loadn_till_u8",
273 simd_arg_converter, &seq_arg,
274 simd_arg_converter, &stride_arg
275#if 1
276 ,simd_arg_converter, &nlane_arg
277#endif
278#if 1
279 ,simd_arg_converter, &fill_arg
280#endif
281 )) {
282 return NULL((void*)0);
283 }
284 npyv_lanetype_u8 *seq_ptr = seq_arg.data.qu8;
285 npy_intp stride = (npy_intp)stride_arg.data.s64;
286 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
287 Py_ssize_t min_seq_len = stride * npyv_nlanes_u816;
288 if (stride < 0) {
289 seq_ptr += cur_seq_len -1;
290 min_seq_len = -min_seq_len;
291 }
292 if (cur_seq_len < min_seq_len) {
293 PyErr_Format(PyExc_ValueError,
294 "loadn_till_u8(), according to provided stride %d, the "
295 "minimum acceptable size of the required sequence is %d, given(%d)",
296 stride, min_seq_len, cur_seq_len
297 );
298 goto err;
299 }
300 npyv_u8 rvec = npyv_loadn_till_u8(
301 seq_ptr, stride
302 #if 1
303 , nlane_arg.data.u32
304 #endif
305 #if 1
306 , fill_arg.data.u8
307 #endif
308 );
309 simd_arg ret = {
310 .dtype = simd_data_vu8, .data = {.vu8=rvec}
311 };
312 simd_arg_free(&seq_arg);
313 return simd_arg_to_obj(&ret);
314err:
315 simd_arg_free(&seq_arg);
316 return NULL((void*)0);
317}
318
319#line 112
320static PyObject *
321simd__intrin_loadn_tillz_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
322{
323 simd_arg seq_arg = {.dtype = simd_data_qu8};
324 simd_arg stride_arg = {.dtype = simd_data_s64};
325#if 1
326 simd_arg nlane_arg = {.dtype = simd_data_u32};
327#endif // till
328#if 0
329 simd_arg fill_arg = {.dtype = simd_data_u8};
330#endif
331 if (!PyArg_ParseTuple(
332 args, "O&O&O&:loadn_tillz_u8",
333 simd_arg_converter, &seq_arg,
334 simd_arg_converter, &stride_arg
335#if 1
336 ,simd_arg_converter, &nlane_arg
337#endif
338#if 0
339 ,simd_arg_converter, &fill_arg
340#endif
341 )) {
342 return NULL((void*)0);
343 }
344 npyv_lanetype_u8 *seq_ptr = seq_arg.data.qu8;
345 npy_intp stride = (npy_intp)stride_arg.data.s64;
346 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
347 Py_ssize_t min_seq_len = stride * npyv_nlanes_u816;
348 if (stride < 0) {
349 seq_ptr += cur_seq_len -1;
350 min_seq_len = -min_seq_len;
351 }
352 if (cur_seq_len < min_seq_len) {
353 PyErr_Format(PyExc_ValueError,
354 "loadn_tillz_u8(), according to provided stride %d, the "
355 "minimum acceptable size of the required sequence is %d, given(%d)",
356 stride, min_seq_len, cur_seq_len
357 );
358 goto err;
359 }
360 npyv_u8 rvec = npyv_loadn_tillz_u8(
361 seq_ptr, stride
362 #if 1
363 , nlane_arg.data.u32
364 #endif
365 #if 0
366 , fill_arg.data.u8
367 #endif
368 );
369 simd_arg ret = {
370 .dtype = simd_data_vu8, .data = {.vu8=rvec}
371 };
372 simd_arg_free(&seq_arg);
373 return simd_arg_to_obj(&ret);
374err:
375 simd_arg_free(&seq_arg);
376 return NULL((void*)0);
377}
378
379
380// Non-contiguous Store
381#line 178
382static PyObject *
383simd__intrin_storen_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
384{
385 simd_arg seq_arg = {.dtype = simd_data_qu8};
386 simd_arg stride_arg = {.dtype = simd_data_s64};
387 simd_arg vec_arg = {.dtype = simd_data_vu8};
388#if 0
389 simd_arg nlane_arg = {.dtype = simd_data_u32};
390#endif
391 if (!PyArg_ParseTuple(
392 args, "O&O&O&:storen_u8",
393 simd_arg_converter, &seq_arg,
394 simd_arg_converter, &stride_arg
395#if 0
396 ,simd_arg_converter, &nlane_arg
397#endif
398 ,simd_arg_converter, &vec_arg
399 )) {
400 return NULL((void*)0);
401 }
402 npyv_lanetype_u8 *seq_ptr = seq_arg.data.qu8;
403 npy_intp stride = (npy_intp)stride_arg.data.s64;
404 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
405 Py_ssize_t min_seq_len = stride * npyv_nlanes_u816;
406 if (stride < 0) {
407 seq_ptr += cur_seq_len -1;
408 min_seq_len = -min_seq_len;
409 }
410 // overflow guard
411 if (cur_seq_len < min_seq_len) {
412 PyErr_Format(PyExc_ValueError,
413 "storen_u8(), according to provided stride %d, the"
414 "minimum acceptable size of the required sequence is %d, given(%d)",
415 stride, min_seq_len, cur_seq_len
416 );
417 goto err;
418 }
419 npyv_storen_u8(
420 seq_ptr, stride
421 #if 0
422 ,nlane_arg.data.u32
423 #endif
424 ,vec_arg.data.vu8
425 );
426 // write-back
427 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu8, simd_data_qu8)) {
428 goto err;
429 }
430 simd_arg_free(&seq_arg);
431 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
432err:
433 simd_arg_free(&seq_arg);
434 return NULL((void*)0);
435}
436
437#line 178
438static PyObject *
439simd__intrin_storen_till_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
440{
441 simd_arg seq_arg = {.dtype = simd_data_qu8};
442 simd_arg stride_arg = {.dtype = simd_data_s64};
443 simd_arg vec_arg = {.dtype = simd_data_vu8};
444#if 1
445 simd_arg nlane_arg = {.dtype = simd_data_u32};
446#endif
447 if (!PyArg_ParseTuple(
448 args, "O&O&O&O&:storen_u8",
449 simd_arg_converter, &seq_arg,
450 simd_arg_converter, &stride_arg
451#if 1
452 ,simd_arg_converter, &nlane_arg
453#endif
454 ,simd_arg_converter, &vec_arg
455 )) {
456 return NULL((void*)0);
457 }
458 npyv_lanetype_u8 *seq_ptr = seq_arg.data.qu8;
459 npy_intp stride = (npy_intp)stride_arg.data.s64;
460 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
461 Py_ssize_t min_seq_len = stride * npyv_nlanes_u816;
462 if (stride < 0) {
463 seq_ptr += cur_seq_len -1;
464 min_seq_len = -min_seq_len;
465 }
466 // overflow guard
467 if (cur_seq_len < min_seq_len) {
468 PyErr_Format(PyExc_ValueError,
469 "storen_till_u8(), according to provided stride %d, the"
470 "minimum acceptable size of the required sequence is %d, given(%d)",
471 stride, min_seq_len, cur_seq_len
472 );
473 goto err;
474 }
475 npyv_storen_till_u8(
476 seq_ptr, stride
477 #if 1
478 ,nlane_arg.data.u32
479 #endif
480 ,vec_arg.data.vu8
481 );
482 // write-back
483 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu8, simd_data_qu8)) {
484 goto err;
485 }
486 simd_arg_free(&seq_arg);
487 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
488err:
489 simd_arg_free(&seq_arg);
490 return NULL((void*)0);
491}
492
493#endif // 0
494
495/***************************
496 * Misc
497 ***************************/
498SIMD_IMPL_INTRIN_0(zero_u8, vu8)static PyObject *simd__intrin_zero_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "zero_u8") ) return ((void*)0); simd_arg a = { .dtype
= simd_data_vu8, .data = {.vu8 = _mm_setzero_si128()}, }; return
simd_arg_to_obj(&a); }
499SIMD_IMPL_INTRIN_1(setall_u8, vu8, u8)static PyObject *simd__intrin_setall_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_u8}; if (!PyArg_ParseTuple( args, "O&:"
"setall_u8", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu8 = _mm_set1_epi8((char)(arg.data.u8)
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vu8 }; return simd_arg_to_obj(&ret); }
500SIMD_IMPL_INTRIN_3(select_u8, vu8, vb8, vu8, vu8)static PyObject *simd__intrin_select_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; simd_arg arg3 = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""select_u8", simd_arg_converter,
&arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu8 = npyv_select_u8
( arg1.data.vb8, arg2.data.vu8, arg3.data.vu8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
501
502#line 246
503#if 1
504SIMD_IMPL_INTRIN_1(reinterpret_u8_u8, vu8, vu8)static PyObject *simd__intrin_reinterpret_u8_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"reinterpret_u8_u8", simd_arg_converter, &arg )) return (
(void*)0); simd_data data = {.vu8 = arg.data.vu8}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
505#endif // simd_sup2
506
507#line 246
508#if 1
509SIMD_IMPL_INTRIN_1(reinterpret_s8_u8, vs8, vu8)static PyObject *simd__intrin_reinterpret_s8_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"reinterpret_s8_u8", simd_arg_converter, &arg )) return (
(void*)0); simd_data data = {.vs8 = arg.data.vu8}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
510#endif // simd_sup2
511
512#line 246
513#if 1
514SIMD_IMPL_INTRIN_1(reinterpret_u16_u8, vu16, vu8)static PyObject *simd__intrin_reinterpret_u16_u8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u16_u8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu16 = arg.data.
vu8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vu16 }; return simd_arg_to_obj(&ret);
}
515#endif // simd_sup2
516
517#line 246
518#if 1
519SIMD_IMPL_INTRIN_1(reinterpret_s16_u8, vs16, vu8)static PyObject *simd__intrin_reinterpret_s16_u8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s16_u8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs16 = arg.data.
vu8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vs16 }; return simd_arg_to_obj(&ret);
}
520#endif // simd_sup2
521
522#line 246
523#if 1
524SIMD_IMPL_INTRIN_1(reinterpret_u32_u8, vu32, vu8)static PyObject *simd__intrin_reinterpret_u32_u8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u32_u8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu32 = arg.data.
vu8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vu32 }; return simd_arg_to_obj(&ret);
}
525#endif // simd_sup2
526
527#line 246
528#if 1
529SIMD_IMPL_INTRIN_1(reinterpret_s32_u8, vs32, vu8)static PyObject *simd__intrin_reinterpret_s32_u8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s32_u8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs32 = arg.data.
vu8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret);
}
530#endif // simd_sup2
531
532#line 246
533#if 1
534SIMD_IMPL_INTRIN_1(reinterpret_u64_u8, vu64, vu8)static PyObject *simd__intrin_reinterpret_u64_u8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u64_u8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu64 = arg.data.
vu8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vu64 }; return simd_arg_to_obj(&ret);
}
535#endif // simd_sup2
536
537#line 246
538#if 1
539SIMD_IMPL_INTRIN_1(reinterpret_s64_u8, vs64, vu8)static PyObject *simd__intrin_reinterpret_s64_u8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s64_u8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs64 = arg.data.
vu8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vs64 }; return simd_arg_to_obj(&ret);
}
540#endif // simd_sup2
541
542#line 246
543#if 1
544SIMD_IMPL_INTRIN_1(reinterpret_f32_u8, vf32, vu8)static PyObject *simd__intrin_reinterpret_f32_u8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f32_u8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf32 = _mm_castsi128_ps
( arg.data.vu8 )}; simd_arg_free(&arg); simd_arg ret = { .
data = data, .dtype = simd_data_vf32 }; return simd_arg_to_obj
(&ret); }
545#endif // simd_sup2
546
547#line 246
548#if NPY_SIMD_F641
549SIMD_IMPL_INTRIN_1(reinterpret_f64_u8, vf64, vu8)static PyObject *simd__intrin_reinterpret_f64_u8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f64_u8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf64 = _mm_castsi128_pd
( arg.data.vu8 )}; simd_arg_free(&arg); simd_arg ret = { .
data = data, .dtype = simd_data_vf64 }; return simd_arg_to_obj
(&ret); }
550#endif // simd_sup2
551
552
553/**
554 * special definition due to the nature of intrinsics
555 * npyv_setf_u8 and npy_set_u8.
556*/
557#line 258
558static PyObject *
559simd__intrin_setf_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
560{
561 npyv_lanetype_u8 *data = simd_sequence_from_iterable(args, simd_data_qu8, npyv_nlanes_u816);
562 if (data == NULL((void*)0)) {
563 return NULL((void*)0);
564 }
565 simd_data r = {.vu8 = npyv_setf_u8(npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
566 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
567 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
568 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
569 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
570 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
571 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
572 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
573 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
574 data[64] // for setfnpyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
575 )npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
};
576 simd_sequence_free(data);
577 return (PyObject*)PySIMDVector_FromData(r, simd_data_vu8);
578}
579
580#line 258
581static PyObject *
582simd__intrin_set_u8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
583{
584 npyv_lanetype_u8 *data = simd_sequence_from_iterable(args, simd_data_qu8, npyv_nlanes_u816);
585 if (data == NULL((void*)0)) {
586 return NULL((void*)0);
587 }
588 simd_data r = {.vu8 = npyv_set_u8(npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
589 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
590 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
591 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
592 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
593 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
594 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
595 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
596 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
597 data[64] // for setfnpyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
598 )npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
};
599 simd_sequence_free(data);
600 return (PyObject*)PySIMDVector_FromData(r, simd_data_vu8);
601}
602
603
604/***************************
605 * Reorder
606 ***************************/
607#line 287
608SIMD_IMPL_INTRIN_2(combinel_u8, vu8, vu8, vu8)static PyObject *simd__intrin_combinel_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""combinel_u8",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu8 = _mm_unpacklo_epi64
( arg1.data.vu8, arg2.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
609
610#line 287
611SIMD_IMPL_INTRIN_2(combineh_u8, vu8, vu8, vu8)static PyObject *simd__intrin_combineh_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""combineh_u8",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu8 = _mm_unpackhi_epi64
( arg1.data.vu8, arg2.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
612
613
614#line 293
615SIMD_IMPL_INTRIN_2(combine_u8, vu8x2, vu8, vu8)static PyObject *simd__intrin_combine_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""combine_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8x2 = npyv__combine( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8x2 };
return simd_arg_to_obj(&ret); }
616
617#line 293
618SIMD_IMPL_INTRIN_2(zip_u8, vu8x2, vu8, vu8)static PyObject *simd__intrin_zip_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""zip_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8x2 = npyv_zip_u8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8x2 };
return simd_arg_to_obj(&ret); }
619
620
621#if 1
622SIMD_IMPL_INTRIN_1(rev64_u8, vu8, vu8)static PyObject *simd__intrin_rev64_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"rev64_u8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vu8 = npyv_rev64_u8( arg.data.vu8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
623#endif
624
625/***************************
626 * Operators
627 ***************************/
628#if 0 > 0
629SIMD_IMPL_INTRIN_2(shl_u8, vu8, vu8, u8)static PyObject *simd__intrin_shl_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shl_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = npyv_shl_u8( arg1.data.vu8, arg2
.data.u8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
630SIMD_IMPL_INTRIN_2(shr_u8, vu8, vu8, u8)static PyObject *simd__intrin_shr_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shr_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = npyv_shr_u8( arg1.data.vu8, arg2
.data.u8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
631// immediate constant
632SIMD_IMPL_INTRIN_2IMM(shli_u8, vu8, vu8, 0)static PyObject *simd__intrin_shli_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shli_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vu8 = SIMD__IMPL_COUNT_0
( SIMD__REPEAT_2IMM, shli_u8, vu8 ) data.vu8; simd_arg_free(&
arg1); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }
; return simd_arg_to_obj(&ret); }
633SIMD_IMPL_INTRIN_2IMM(shri_u8, vu8, vu8, 0)static PyObject *simd__intrin_shri_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shri_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vu8 = SIMD__IMPL_COUNT_0
( SIMD__REPEAT_2IMM, shri_u8, vu8 ) data.vu8; simd_arg_free(&
arg1); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }
; return simd_arg_to_obj(&ret); }
634#endif // shl_imm
635
636#line 314
637SIMD_IMPL_INTRIN_2(and_u8, vu8, vu8, vu8)static PyObject *simd__intrin_and_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""and_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = _mm_and_si128( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
638
639#line 314
640SIMD_IMPL_INTRIN_2(or_u8, vu8, vu8, vu8)static PyObject *simd__intrin_or_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""or_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = _mm_or_si128( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
641
642#line 314
643SIMD_IMPL_INTRIN_2(xor_u8, vu8, vu8, vu8)static PyObject *simd__intrin_xor_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = _mm_xor_si128( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
644
645
646SIMD_IMPL_INTRIN_1(not_u8, vu8, vu8)static PyObject *simd__intrin_not_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"not_u8", simd_arg_converter, &arg )) return ((void*)0); simd_data
data = {.vu8 = _mm_xor_si128(arg.data.vu8, _mm_set1_epi32(-1
))}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vu8 }; return simd_arg_to_obj(&ret); }
647
648#line 322
649SIMD_IMPL_INTRIN_2(cmpeq_u8, vb8, vu8, vu8)static PyObject *simd__intrin_cmpeq_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpeq_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_cmpeq_epi8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
650
651#line 322
652SIMD_IMPL_INTRIN_2(cmpneq_u8, vb8, vu8, vu8)static PyObject *simd__intrin_cmpneq_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpneq_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_xor_si128(_mm_cmpeq_epi8(arg1
.data.vu8, arg2.data.vu8), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb8 }; return simd_arg_to_obj(&
ret); }
653
654#line 322
655SIMD_IMPL_INTRIN_2(cmpgt_u8, vb8, vu8, vu8)static PyObject *simd__intrin_cmpgt_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpgt_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = npyv_cmpgt_u8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
656
657#line 322
658SIMD_IMPL_INTRIN_2(cmpge_u8, vb8, vu8, vu8)static PyObject *simd__intrin_cmpge_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpge_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = npyv_cmpge_u8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
659
660#line 322
661SIMD_IMPL_INTRIN_2(cmplt_u8, vb8, vu8, vu8)static PyObject *simd__intrin_cmplt_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmplt_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = npyv_cmpgt_u8(arg2.data.vu8, arg1
.data.vu8)}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
662
663#line 322
664SIMD_IMPL_INTRIN_2(cmple_u8, vb8, vu8, vu8)static PyObject *simd__intrin_cmple_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmple_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = npyv_cmpge_u8(arg2.data.vu8, arg1
.data.vu8)}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
665
666
667/***************************
668 * Conversion
669 ***************************/
670SIMD_IMPL_INTRIN_1(cvt_u8_b8, vu8, vb8)static PyObject *simd__intrin_cvt_u8_b8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb8}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_u8_b8", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu8 = arg.data.vb8}; simd_arg_free(&
arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }
; return simd_arg_to_obj(&ret); }
671SIMD_IMPL_INTRIN_1(cvt_b8_u8, vb8, vu8)static PyObject *simd__intrin_cvt_b8_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_b8_u8", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vb8 = arg.data.vu8}; simd_arg_free(&
arg); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }
; return simd_arg_to_obj(&ret); }
672#if 1
673SIMD_IMPL_INTRIN_1(expand_u16_u8, vu16x2, vu8)static PyObject *simd__intrin_expand_u16_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"expand_u16_u8", simd_arg_converter, &arg )) return ((void
*)0); simd_data data = {.vu16x2 = npyv_expand_u16_u8( arg.data
.vu8 )}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu16x2 }; return simd_arg_to_obj(&ret
); }
674#endif // expand_sup
675/***************************
676 * Arithmetic
677 ***************************/
678#line 339
679SIMD_IMPL_INTRIN_2(add_u8, vu8, vu8, vu8)static PyObject *simd__intrin_add_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""add_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = _mm_add_epi8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
680
681#line 339
682SIMD_IMPL_INTRIN_2(sub_u8, vu8, vu8, vu8)static PyObject *simd__intrin_sub_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""sub_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = _mm_sub_epi8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
683
684
685#if 1
686#line 346
687SIMD_IMPL_INTRIN_2(adds_u8, vu8, vu8, vu8)static PyObject *simd__intrin_adds_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""adds_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = _mm_adds_epu8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
688
689#line 346
690SIMD_IMPL_INTRIN_2(subs_u8, vu8, vu8, vu8)static PyObject *simd__intrin_subs_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""subs_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = _mm_subs_epu8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
691
692#endif // sat_sup
693
694#if 1
695SIMD_IMPL_INTRIN_2(mul_u8, vu8, vu8, vu8)static PyObject *simd__intrin_mul_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""mul_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = npyv_mul_u8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
696#endif // mul_sup
697
698#if 0
699SIMD_IMPL_INTRIN_2(div_u8, vu8, vu8, vu8)static PyObject *simd__intrin_div_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""div_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = npyv_div_u8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
700#endif // div_sup
701
702#if 1
703SIMD_IMPL_INTRIN_1(divisor_u8, vu8x3, u8)static PyObject *simd__intrin_divisor_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_u8}; if (!PyArg_ParseTuple( args, "O&:"
"divisor_u8", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vu8x3 = npyv_divisor_u8( arg.data.u8 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu8x3 }; return simd_arg_to_obj(&ret); }
704SIMD_IMPL_INTRIN_2(divc_u8, vu8, vu8, vu8x3)static PyObject *simd__intrin_divc_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8x3
}; if (!PyArg_ParseTuple( args, "O&O&:""divc_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = npyv_divc_u8( arg1.data.vu8, arg2
.data.vu8x3 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }
; return simd_arg_to_obj(&ret); }
705#endif // intdiv_sup
706
707#if 0
708#line 367
709SIMD_IMPL_INTRIN_3(muladd_u8, vu8, vu8, vu8, vu8)static PyObject *simd__intrin_muladd_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; simd_arg arg3 = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""muladd_u8", simd_arg_converter,
&arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu8 = npyv_muladd_u8
( arg1.data.vu8, arg2.data.vu8, arg3.data.vu8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
710
711#line 367
712SIMD_IMPL_INTRIN_3(mulsub_u8, vu8, vu8, vu8, vu8)static PyObject *simd__intrin_mulsub_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; simd_arg arg3 = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""mulsub_u8", simd_arg_converter,
&arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu8 = npyv_mulsub_u8
( arg1.data.vu8, arg2.data.vu8, arg3.data.vu8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
713
714#line 367
715SIMD_IMPL_INTRIN_3(nmuladd_u8, vu8, vu8, vu8, vu8)static PyObject *simd__intrin_nmuladd_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; simd_arg arg3 = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmuladd_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu8 = npyv_nmuladd_u8
( arg1.data.vu8, arg2.data.vu8, arg3.data.vu8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
716
717#line 367
718SIMD_IMPL_INTRIN_3(nmulsub_u8, vu8, vu8, vu8, vu8)static PyObject *simd__intrin_nmulsub_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; simd_arg arg3 = {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmulsub_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu8 = npyv_nmulsub_u8
( arg1.data.vu8, arg2.data.vu8, arg3.data.vu8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
719
720#endif // fused_sup
721
722#if 0
723SIMD_IMPL_INTRIN_1(sum_u8, u8, vu8)static PyObject *simd__intrin_sum_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"sum_u8", simd_arg_converter, &arg )) return ((void*)0); simd_data
data = {.u8 = npyv_sum_u8( arg.data.vu8 )}; simd_arg_free(&
arg); simd_arg ret = { .data = data, .dtype = simd_data_u8 };
return simd_arg_to_obj(&ret); }
724#endif // sum_sup
725
726#if 1
727SIMD_IMPL_INTRIN_1(sumup_u8, u16, vu8)static PyObject *simd__intrin_sumup_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"sumup_u8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.u16 = npyv_sumup_u8( arg.data.vu8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_u16
}; return simd_arg_to_obj(&ret); }
728#endif // sumup_sup
729
730/***************************
731 * Math
732 ***************************/
733#if 0
734#line 386
735SIMD_IMPL_INTRIN_1(sqrt_u8, vu8, vu8)static PyObject *simd__intrin_sqrt_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"sqrt_u8", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vu8 = npyv_sqrt_u8( arg.data.vu8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
736
737#line 386
738SIMD_IMPL_INTRIN_1(recip_u8, vu8, vu8)static PyObject *simd__intrin_recip_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"recip_u8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vu8 = npyv_recip_u8( arg.data.vu8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
739
740#line 386
741SIMD_IMPL_INTRIN_1(abs_u8, vu8, vu8)static PyObject *simd__intrin_abs_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"abs_u8", simd_arg_converter, &arg )) return ((void*)0); simd_data
data = {.vu8 = npyv_abs_u8( arg.data.vu8 )}; simd_arg_free(&
arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }
; return simd_arg_to_obj(&ret); }
742
743#line 386
744SIMD_IMPL_INTRIN_1(square_u8, vu8, vu8)static PyObject *simd__intrin_square_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&:"
"square_u8", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu8 = npyv_square_u8( arg.data.vu8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
745
746#endif
747
748#line 393
749SIMD_IMPL_INTRIN_2(max_u8, vu8, vu8, vu8)static PyObject *simd__intrin_max_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""max_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = _mm_max_epu8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
750
751#line 393
752SIMD_IMPL_INTRIN_2(min_u8, vu8, vu8, vu8)static PyObject *simd__intrin_min_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""min_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = _mm_min_epu8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
753
754
755#if 0
756#line 400
757SIMD_IMPL_INTRIN_2(maxp_u8, vu8, vu8, vu8)static PyObject *simd__intrin_maxp_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""maxp_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = npyv_maxp_u8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
758
759#line 400
760SIMD_IMPL_INTRIN_2(minp_u8, vu8, vu8, vu8)static PyObject *simd__intrin_minp_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; if (!PyArg_ParseTuple( args, "O&O&:""minp_u8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu8 = npyv_minp_u8( arg1.data.vu8, arg2
.data.vu8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu8 }; return
simd_arg_to_obj(&ret); }
761
762#endif
763
764/***************************
765 * Mask operations
766 ***************************/
767#line 410
768 SIMD_IMPL_INTRIN_4(ifadd_u8, vu8, vb8, vu8, vu8, vu8)static PyObject *simd__intrin_ifadd_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; simd_arg arg3 = {.dtype = simd_data_vu8}; simd_arg arg4 = {
.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifadd_u8", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vu8 = npyv_ifadd_u8
( arg1.data.vb8, arg2.data.vu8, arg3.data.vu8, arg4.data.vu8 )
}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vu8 }; return simd_arg_to_obj(&
ret); }
769
770#line 410
771 SIMD_IMPL_INTRIN_4(ifsub_u8, vu8, vb8, vu8, vu8, vu8)static PyObject *simd__intrin_ifsub_u8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb8}; simd_arg arg2 = {.dtype = simd_data_vu8
}; simd_arg arg3 = {.dtype = simd_data_vu8}; simd_arg arg4 = {
.dtype = simd_data_vu8}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifsub_u8", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vu8 = npyv_ifsub_u8
( arg1.data.vb8, arg2.data.vu8, arg3.data.vu8, arg4.data.vu8 )
}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vu8 }; return simd_arg_to_obj(&
ret); }
772
773
774#endif // simd_sup
775
776#line 34
777#if 1
778/***************************
779 * Memory
780 ***************************/
781#line 41
782SIMD_IMPL_INTRIN_1(load_s8, vs8, qs8)static PyObject *simd__intrin_load_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs8}; if (!PyArg_ParseTuple( args, "O&:"
"load_s8", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vs8 = npyv_load_s8( arg.data.qs8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
783
784#line 41
785SIMD_IMPL_INTRIN_1(loada_s8, vs8, qs8)static PyObject *simd__intrin_loada_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs8}; if (!PyArg_ParseTuple( args, "O&:"
"loada_s8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vs8 = npyv_loada_s8( arg.data.qs8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
786
787#line 41
788SIMD_IMPL_INTRIN_1(loads_s8, vs8, qs8)static PyObject *simd__intrin_loads_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs8}; if (!PyArg_ParseTuple( args, "O&:"
"loads_s8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vs8 = npyv_loads_s8( arg.data.qs8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
789
790#line 41
791SIMD_IMPL_INTRIN_1(loadl_s8, vs8, qs8)static PyObject *simd__intrin_loadl_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs8}; if (!PyArg_ParseTuple( args, "O&:"
"loadl_s8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vs8 = npyv_loadl_s8( arg.data.qs8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
792
793#line 46
794// special definition due to the nature of store
795static PyObject *
796simd__intrin_store_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
797{
798 simd_arg seq_arg = {.dtype = simd_data_qs8};
799 simd_arg vec_arg = {.dtype = simd_data_vs8};
800 if (!PyArg_ParseTuple(
801 args, "O&O&:store_s8",
802 simd_arg_converter, &seq_arg,
803 simd_arg_converter, &vec_arg
804 )) {
805 return NULL((void*)0);
806 }
807 npyv_store_s8(seq_arg.data.qs8, vec_arg.data.vs8);
808 // write-back
809 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs8, simd_data_qs8)) {
810 simd_arg_free(&seq_arg);
811 return NULL((void*)0);
812 }
813 simd_arg_free(&seq_arg);
814 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
815}
816
817#line 46
818// special definition due to the nature of storea
819static PyObject *
820simd__intrin_storea_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
821{
822 simd_arg seq_arg = {.dtype = simd_data_qs8};
823 simd_arg vec_arg = {.dtype = simd_data_vs8};
824 if (!PyArg_ParseTuple(
825 args, "O&O&:storea_s8",
826 simd_arg_converter, &seq_arg,
827 simd_arg_converter, &vec_arg
828 )) {
829 return NULL((void*)0);
830 }
831 npyv_storea_s8(seq_arg.data.qs8, vec_arg.data.vs8);
832 // write-back
833 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs8, simd_data_qs8)) {
834 simd_arg_free(&seq_arg);
835 return NULL((void*)0);
836 }
837 simd_arg_free(&seq_arg);
838 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
839}
840
841#line 46
842// special definition due to the nature of stores
843static PyObject *
844simd__intrin_stores_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
845{
846 simd_arg seq_arg = {.dtype = simd_data_qs8};
847 simd_arg vec_arg = {.dtype = simd_data_vs8};
848 if (!PyArg_ParseTuple(
849 args, "O&O&:stores_s8",
850 simd_arg_converter, &seq_arg,
851 simd_arg_converter, &vec_arg
852 )) {
853 return NULL((void*)0);
854 }
855 npyv_stores_s8(seq_arg.data.qs8, vec_arg.data.vs8);
856 // write-back
857 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs8, simd_data_qs8)) {
858 simd_arg_free(&seq_arg);
859 return NULL((void*)0);
860 }
861 simd_arg_free(&seq_arg);
862 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
863}
864
865#line 46
866// special definition due to the nature of storel
867static PyObject *
868simd__intrin_storel_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
869{
870 simd_arg seq_arg = {.dtype = simd_data_qs8};
871 simd_arg vec_arg = {.dtype = simd_data_vs8};
872 if (!PyArg_ParseTuple(
873 args, "O&O&:storel_s8",
874 simd_arg_converter, &seq_arg,
875 simd_arg_converter, &vec_arg
876 )) {
877 return NULL((void*)0);
878 }
879 npyv_storel_s8(seq_arg.data.qs8, vec_arg.data.vs8);
880 // write-back
881 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs8, simd_data_qs8)) {
882 simd_arg_free(&seq_arg);
883 return NULL((void*)0);
884 }
885 simd_arg_free(&seq_arg);
886 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
887}
888
889#line 46
890// special definition due to the nature of storeh
891static PyObject *
892simd__intrin_storeh_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
893{
894 simd_arg seq_arg = {.dtype = simd_data_qs8};
895 simd_arg vec_arg = {.dtype = simd_data_vs8};
896 if (!PyArg_ParseTuple(
897 args, "O&O&:storeh_s8",
898 simd_arg_converter, &seq_arg,
899 simd_arg_converter, &vec_arg
900 )) {
901 return NULL((void*)0);
902 }
903 npyv_storeh_s8(seq_arg.data.qs8, vec_arg.data.vs8);
904 // write-back
905 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs8, simd_data_qs8)) {
906 simd_arg_free(&seq_arg);
907 return NULL((void*)0);
908 }
909 simd_arg_free(&seq_arg);
910 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
911}
912
913
914/****************************************
915 * Non-contiguous/Partial Memory access
916 ****************************************/
917#if 0
918// Partial Load
919SIMD_IMPL_INTRIN_3(load_till_s8, vs8, qs8, u32, s8)static PyObject *simd__intrin_load_till_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qs8}; simd_arg arg2 = {.dtype = simd_data_u32
}; simd_arg arg3 = {.dtype = simd_data_s8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""load_till_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs8 = npyv_load_till_s8
( arg1.data.qs8, arg2.data.u32, arg3.data.s8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
920SIMD_IMPL_INTRIN_2(load_tillz_s8, vs8, qs8, u32)static PyObject *simd__intrin_load_tillz_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qs8}; simd_arg arg2 = {.dtype = simd_data_u32
}; if (!PyArg_ParseTuple( args, "O&O&:""load_tillz_s8"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs8 = npyv_load_tillz_s8
( arg1.data.qs8, arg2.data.u32 )}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
921
922// Partial Store
923static PyObject *
924simd__intrin_store_till_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
925{
926 simd_arg seq_arg = {.dtype = simd_data_qs8};
927 simd_arg nlane_arg = {.dtype = simd_data_u32};
928 simd_arg vec_arg = {.dtype = simd_data_vs8};
929 if (!PyArg_ParseTuple(
930 args, "O&O&O&:store_till_s8",
931 simd_arg_converter, &seq_arg,
932 simd_arg_converter, &nlane_arg,
933 simd_arg_converter, &vec_arg
934 )) {
935 return NULL((void*)0);
936 }
937 npyv_store_till_s8(
938 seq_arg.data.qs8, nlane_arg.data.u32, vec_arg.data.vs8
939 );
940 // write-back
941 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs8, simd_data_qs8)) {
942 simd_arg_free(&seq_arg);
943 return NULL((void*)0);
944 }
945 simd_arg_free(&seq_arg);
946 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
947}
948
949// Non-contiguous Load
950#line 112
951static PyObject *
952simd__intrin_loadn_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
953{
954 simd_arg seq_arg = {.dtype = simd_data_qs8};
955 simd_arg stride_arg = {.dtype = simd_data_s64};
956#if 0
957 simd_arg nlane_arg = {.dtype = simd_data_u32};
958#endif // till
959#if 0
960 simd_arg fill_arg = {.dtype = simd_data_s8};
961#endif
962 if (!PyArg_ParseTuple(
963 args, "O&O&:loadn_s8",
964 simd_arg_converter, &seq_arg,
965 simd_arg_converter, &stride_arg
966#if 0
967 ,simd_arg_converter, &nlane_arg
968#endif
969#if 0
970 ,simd_arg_converter, &fill_arg
971#endif
972 )) {
973 return NULL((void*)0);
974 }
975 npyv_lanetype_s8 *seq_ptr = seq_arg.data.qs8;
976 npy_intp stride = (npy_intp)stride_arg.data.s64;
977 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
978 Py_ssize_t min_seq_len = stride * npyv_nlanes_s816;
979 if (stride < 0) {
980 seq_ptr += cur_seq_len -1;
981 min_seq_len = -min_seq_len;
982 }
983 if (cur_seq_len < min_seq_len) {
984 PyErr_Format(PyExc_ValueError,
985 "loadn_s8(), according to provided stride %d, the "
986 "minimum acceptable size of the required sequence is %d, given(%d)",
987 stride, min_seq_len, cur_seq_len
988 );
989 goto err;
990 }
991 npyv_s8 rvec = npyv_loadn_s8(
992 seq_ptr, stride
993 #if 0
994 , nlane_arg.data.u32
995 #endif
996 #if 0
997 , fill_arg.data.s8
998 #endif
999 );
1000 simd_arg ret = {
1001 .dtype = simd_data_vs8, .data = {.vs8=rvec}
1002 };
1003 simd_arg_free(&seq_arg);
1004 return simd_arg_to_obj(&ret);
1005err:
1006 simd_arg_free(&seq_arg);
1007 return NULL((void*)0);
1008}
1009
1010#line 112
1011static PyObject *
1012simd__intrin_loadn_till_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1013{
1014 simd_arg seq_arg = {.dtype = simd_data_qs8};
1015 simd_arg stride_arg = {.dtype = simd_data_s64};
1016#if 1
1017 simd_arg nlane_arg = {.dtype = simd_data_u32};
1018#endif // till
1019#if 1
1020 simd_arg fill_arg = {.dtype = simd_data_s8};
1021#endif
1022 if (!PyArg_ParseTuple(
1023 args, "O&O&O&O&:loadn_till_s8",
1024 simd_arg_converter, &seq_arg,
1025 simd_arg_converter, &stride_arg
1026#if 1
1027 ,simd_arg_converter, &nlane_arg
1028#endif
1029#if 1
1030 ,simd_arg_converter, &fill_arg
1031#endif
1032 )) {
1033 return NULL((void*)0);
1034 }
1035 npyv_lanetype_s8 *seq_ptr = seq_arg.data.qs8;
1036 npy_intp stride = (npy_intp)stride_arg.data.s64;
1037 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
1038 Py_ssize_t min_seq_len = stride * npyv_nlanes_s816;
1039 if (stride < 0) {
1040 seq_ptr += cur_seq_len -1;
1041 min_seq_len = -min_seq_len;
1042 }
1043 if (cur_seq_len < min_seq_len) {
1044 PyErr_Format(PyExc_ValueError,
1045 "loadn_till_s8(), according to provided stride %d, the "
1046 "minimum acceptable size of the required sequence is %d, given(%d)",
1047 stride, min_seq_len, cur_seq_len
1048 );
1049 goto err;
1050 }
1051 npyv_s8 rvec = npyv_loadn_till_s8(
1052 seq_ptr, stride
1053 #if 1
1054 , nlane_arg.data.u32
1055 #endif
1056 #if 1
1057 , fill_arg.data.s8
1058 #endif
1059 );
1060 simd_arg ret = {
1061 .dtype = simd_data_vs8, .data = {.vs8=rvec}
1062 };
1063 simd_arg_free(&seq_arg);
1064 return simd_arg_to_obj(&ret);
1065err:
1066 simd_arg_free(&seq_arg);
1067 return NULL((void*)0);
1068}
1069
1070#line 112
1071static PyObject *
1072simd__intrin_loadn_tillz_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1073{
1074 simd_arg seq_arg = {.dtype = simd_data_qs8};
1075 simd_arg stride_arg = {.dtype = simd_data_s64};
1076#if 1
1077 simd_arg nlane_arg = {.dtype = simd_data_u32};
1078#endif // till
1079#if 0
1080 simd_arg fill_arg = {.dtype = simd_data_s8};
1081#endif
1082 if (!PyArg_ParseTuple(
1083 args, "O&O&O&:loadn_tillz_s8",
1084 simd_arg_converter, &seq_arg,
1085 simd_arg_converter, &stride_arg
1086#if 1
1087 ,simd_arg_converter, &nlane_arg
1088#endif
1089#if 0
1090 ,simd_arg_converter, &fill_arg
1091#endif
1092 )) {
1093 return NULL((void*)0);
1094 }
1095 npyv_lanetype_s8 *seq_ptr = seq_arg.data.qs8;
1096 npy_intp stride = (npy_intp)stride_arg.data.s64;
1097 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
1098 Py_ssize_t min_seq_len = stride * npyv_nlanes_s816;
1099 if (stride < 0) {
1100 seq_ptr += cur_seq_len -1;
1101 min_seq_len = -min_seq_len;
1102 }
1103 if (cur_seq_len < min_seq_len) {
1104 PyErr_Format(PyExc_ValueError,
1105 "loadn_tillz_s8(), according to provided stride %d, the "
1106 "minimum acceptable size of the required sequence is %d, given(%d)",
1107 stride, min_seq_len, cur_seq_len
1108 );
1109 goto err;
1110 }
1111 npyv_s8 rvec = npyv_loadn_tillz_s8(
1112 seq_ptr, stride
1113 #if 1
1114 , nlane_arg.data.u32
1115 #endif
1116 #if 0
1117 , fill_arg.data.s8
1118 #endif
1119 );
1120 simd_arg ret = {
1121 .dtype = simd_data_vs8, .data = {.vs8=rvec}
1122 };
1123 simd_arg_free(&seq_arg);
1124 return simd_arg_to_obj(&ret);
1125err:
1126 simd_arg_free(&seq_arg);
1127 return NULL((void*)0);
1128}
1129
1130
1131// Non-contiguous Store
1132#line 178
1133static PyObject *
1134simd__intrin_storen_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1135{
1136 simd_arg seq_arg = {.dtype = simd_data_qs8};
1137 simd_arg stride_arg = {.dtype = simd_data_s64};
1138 simd_arg vec_arg = {.dtype = simd_data_vs8};
1139#if 0
1140 simd_arg nlane_arg = {.dtype = simd_data_u32};
1141#endif
1142 if (!PyArg_ParseTuple(
1143 args, "O&O&O&:storen_s8",
1144 simd_arg_converter, &seq_arg,
1145 simd_arg_converter, &stride_arg
1146#if 0
1147 ,simd_arg_converter, &nlane_arg
1148#endif
1149 ,simd_arg_converter, &vec_arg
1150 )) {
1151 return NULL((void*)0);
1152 }
1153 npyv_lanetype_s8 *seq_ptr = seq_arg.data.qs8;
1154 npy_intp stride = (npy_intp)stride_arg.data.s64;
1155 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
1156 Py_ssize_t min_seq_len = stride * npyv_nlanes_s816;
1157 if (stride < 0) {
1158 seq_ptr += cur_seq_len -1;
1159 min_seq_len = -min_seq_len;
1160 }
1161 // overflow guard
1162 if (cur_seq_len < min_seq_len) {
1163 PyErr_Format(PyExc_ValueError,
1164 "storen_s8(), according to provided stride %d, the"
1165 "minimum acceptable size of the required sequence is %d, given(%d)",
1166 stride, min_seq_len, cur_seq_len
1167 );
1168 goto err;
1169 }
1170 npyv_storen_s8(
1171 seq_ptr, stride
1172 #if 0
1173 ,nlane_arg.data.u32
1174 #endif
1175 ,vec_arg.data.vs8
1176 );
1177 // write-back
1178 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs8, simd_data_qs8)) {
1179 goto err;
1180 }
1181 simd_arg_free(&seq_arg);
1182 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
1183err:
1184 simd_arg_free(&seq_arg);
1185 return NULL((void*)0);
1186}
1187
1188#line 178
1189static PyObject *
1190simd__intrin_storen_till_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1191{
1192 simd_arg seq_arg = {.dtype = simd_data_qs8};
1193 simd_arg stride_arg = {.dtype = simd_data_s64};
1194 simd_arg vec_arg = {.dtype = simd_data_vs8};
1195#if 1
1196 simd_arg nlane_arg = {.dtype = simd_data_u32};
1197#endif
1198 if (!PyArg_ParseTuple(
1199 args, "O&O&O&O&:storen_s8",
1200 simd_arg_converter, &seq_arg,
1201 simd_arg_converter, &stride_arg
1202#if 1
1203 ,simd_arg_converter, &nlane_arg
1204#endif
1205 ,simd_arg_converter, &vec_arg
1206 )) {
1207 return NULL((void*)0);
1208 }
1209 npyv_lanetype_s8 *seq_ptr = seq_arg.data.qs8;
1210 npy_intp stride = (npy_intp)stride_arg.data.s64;
1211 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
1212 Py_ssize_t min_seq_len = stride * npyv_nlanes_s816;
1213 if (stride < 0) {
1214 seq_ptr += cur_seq_len -1;
1215 min_seq_len = -min_seq_len;
1216 }
1217 // overflow guard
1218 if (cur_seq_len < min_seq_len) {
1219 PyErr_Format(PyExc_ValueError,
1220 "storen_till_s8(), according to provided stride %d, the"
1221 "minimum acceptable size of the required sequence is %d, given(%d)",
1222 stride, min_seq_len, cur_seq_len
1223 );
1224 goto err;
1225 }
1226 npyv_storen_till_s8(
1227 seq_ptr, stride
1228 #if 1
1229 ,nlane_arg.data.u32
1230 #endif
1231 ,vec_arg.data.vs8
1232 );
1233 // write-back
1234 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs8, simd_data_qs8)) {
1235 goto err;
1236 }
1237 simd_arg_free(&seq_arg);
1238 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
1239err:
1240 simd_arg_free(&seq_arg);
1241 return NULL((void*)0);
1242}
1243
1244#endif // 0
1245
1246/***************************
1247 * Misc
1248 ***************************/
1249SIMD_IMPL_INTRIN_0(zero_s8, vs8)static PyObject *simd__intrin_zero_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "zero_s8") ) return ((void*)0); simd_arg a = { .dtype
= simd_data_vs8, .data = {.vs8 = _mm_setzero_si128()}, }; return
simd_arg_to_obj(&a); }
1250SIMD_IMPL_INTRIN_1(setall_s8, vs8, s8)static PyObject *simd__intrin_setall_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_s8}; if (!PyArg_ParseTuple( args, "O&:"
"setall_s8", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs8 = _mm_set1_epi8((char)(arg.data.s8)
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vs8 }; return simd_arg_to_obj(&ret); }
1251SIMD_IMPL_INTRIN_3(select_s8, vs8, vb8, vs8, vs8)static PyObject *simd__intrin_select_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; simd_arg arg3 = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""select_s8", simd_arg_converter,
&arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs8 = npyv_select_u8
( arg1.data.vb8, arg2.data.vs8, arg3.data.vs8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1252
1253#line 246
1254#if 1
1255SIMD_IMPL_INTRIN_1(reinterpret_u8_s8, vu8, vs8)static PyObject *simd__intrin_reinterpret_u8_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"reinterpret_u8_s8", simd_arg_converter, &arg )) return (
(void*)0); simd_data data = {.vu8 = arg.data.vs8}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu8
}; return simd_arg_to_obj(&ret); }
1256#endif // simd_sup2
1257
1258#line 246
1259#if 1
1260SIMD_IMPL_INTRIN_1(reinterpret_s8_s8, vs8, vs8)static PyObject *simd__intrin_reinterpret_s8_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"reinterpret_s8_s8", simd_arg_converter, &arg )) return (
(void*)0); simd_data data = {.vs8 = arg.data.vs8}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
1261#endif // simd_sup2
1262
1263#line 246
1264#if 1
1265SIMD_IMPL_INTRIN_1(reinterpret_u16_s8, vu16, vs8)static PyObject *simd__intrin_reinterpret_u16_s8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u16_s8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu16 = arg.data.
vs8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vu16 }; return simd_arg_to_obj(&ret);
}
1266#endif // simd_sup2
1267
1268#line 246
1269#if 1
1270SIMD_IMPL_INTRIN_1(reinterpret_s16_s8, vs16, vs8)static PyObject *simd__intrin_reinterpret_s16_s8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s16_s8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs16 = arg.data.
vs8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vs16 }; return simd_arg_to_obj(&ret);
}
1271#endif // simd_sup2
1272
1273#line 246
1274#if 1
1275SIMD_IMPL_INTRIN_1(reinterpret_u32_s8, vu32, vs8)static PyObject *simd__intrin_reinterpret_u32_s8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u32_s8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu32 = arg.data.
vs8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vu32 }; return simd_arg_to_obj(&ret);
}
1276#endif // simd_sup2
1277
1278#line 246
1279#if 1
1280SIMD_IMPL_INTRIN_1(reinterpret_s32_s8, vs32, vs8)static PyObject *simd__intrin_reinterpret_s32_s8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s32_s8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs32 = arg.data.
vs8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret);
}
1281#endif // simd_sup2
1282
1283#line 246
1284#if 1
1285SIMD_IMPL_INTRIN_1(reinterpret_u64_s8, vu64, vs8)static PyObject *simd__intrin_reinterpret_u64_s8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u64_s8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu64 = arg.data.
vs8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vu64 }; return simd_arg_to_obj(&ret);
}
1286#endif // simd_sup2
1287
1288#line 246
1289#if 1
1290SIMD_IMPL_INTRIN_1(reinterpret_s64_s8, vs64, vs8)static PyObject *simd__intrin_reinterpret_s64_s8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s64_s8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs64 = arg.data.
vs8}; simd_arg_free(&arg); simd_arg ret = { .data = data,
.dtype = simd_data_vs64 }; return simd_arg_to_obj(&ret);
}
1291#endif // simd_sup2
1292
1293#line 246
1294#if 1
1295SIMD_IMPL_INTRIN_1(reinterpret_f32_s8, vf32, vs8)static PyObject *simd__intrin_reinterpret_f32_s8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f32_s8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf32 = _mm_castsi128_ps
( arg.data.vs8 )}; simd_arg_free(&arg); simd_arg ret = { .
data = data, .dtype = simd_data_vf32 }; return simd_arg_to_obj
(&ret); }
1296#endif // simd_sup2
1297
1298#line 246
1299#if NPY_SIMD_F641
1300SIMD_IMPL_INTRIN_1(reinterpret_f64_s8, vf64, vs8)static PyObject *simd__intrin_reinterpret_f64_s8 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f64_s8", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf64 = _mm_castsi128_pd
( arg.data.vs8 )}; simd_arg_free(&arg); simd_arg ret = { .
data = data, .dtype = simd_data_vf64 }; return simd_arg_to_obj
(&ret); }
1301#endif // simd_sup2
1302
1303
1304/**
1305 * special definition due to the nature of intrinsics
1306 * npyv_setf_s8 and npy_set_s8.
1307*/
1308#line 258
1309static PyObject *
1310simd__intrin_setf_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1311{
1312 npyv_lanetype_s8 *data = simd_sequence_from_iterable(args, simd_data_qs8, npyv_nlanes_s816);
1313 if (data == NULL((void*)0)) {
1314 return NULL((void*)0);
1315 }
1316 simd_data r = {.vs8 = npyv_setf_s8(npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
1317 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
1318 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
1319 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
1320 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
1321 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
1322 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
1323 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
1324 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
1325 data[64] // for setfnpyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
1326 )npyv__setr_epi8((char)(data[1]), (char)(data[2]), (char)(data
[3]), (char)(data[4]), (char)(data[5]), (char)(data[6]), (char
)(data[7]), (char)(data[8]), (char)(data[9]), (char)(data[10]
), (char)(data[11]), (char)(data[12]), (char)(data[13]), (char
)(data[14]), (char)(data[15]), (char)(data[16]))
};
1327 simd_sequence_free(data);
1328 return (PyObject*)PySIMDVector_FromData(r, simd_data_vs8);
1329}
1330
1331#line 258
1332static PyObject *
1333simd__intrin_set_s8(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1334{
1335 npyv_lanetype_s8 *data = simd_sequence_from_iterable(args, simd_data_qs8, npyv_nlanes_s816);
1336 if (data == NULL((void*)0)) {
1337 return NULL((void*)0);
1338 }
1339 simd_data r = {.vs8 = npyv_set_s8(npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
1340 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
1341 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
1342 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
1343 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
1344 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
1345 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
1346 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
1347 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
1348 data[64] // for setfnpyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
1349 )npyv__setr_epi8((char)(data[0]), (char)(data[1]), (char)(data
[2]), (char)(data[3]), (char)(data[4]), (char)(data[5]), (char
)(data[6]), (char)(data[7]), (char)(data[8]), (char)(data[9])
, (char)(data[10]), (char)(data[11]), (char)(data[12]), (char
)(data[13]), (char)(data[14]), (char)(data[15]))
};
1350 simd_sequence_free(data);
1351 return (PyObject*)PySIMDVector_FromData(r, simd_data_vs8);
1352}
1353
1354
1355/***************************
1356 * Reorder
1357 ***************************/
1358#line 287
1359SIMD_IMPL_INTRIN_2(combinel_s8, vs8, vs8, vs8)static PyObject *simd__intrin_combinel_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""combinel_s8",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs8 = _mm_unpacklo_epi64
( arg1.data.vs8, arg2.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
1360
1361#line 287
1362SIMD_IMPL_INTRIN_2(combineh_s8, vs8, vs8, vs8)static PyObject *simd__intrin_combineh_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""combineh_s8",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs8 = _mm_unpackhi_epi64
( arg1.data.vs8, arg2.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
1363
1364
1365#line 293
1366SIMD_IMPL_INTRIN_2(combine_s8, vs8x2, vs8, vs8)static PyObject *simd__intrin_combine_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""combine_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8x2 = npyv__combine( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8x2 };
return simd_arg_to_obj(&ret); }
1367
1368#line 293
1369SIMD_IMPL_INTRIN_2(zip_s8, vs8x2, vs8, vs8)static PyObject *simd__intrin_zip_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""zip_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8x2 = npyv_zip_s8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8x2 };
return simd_arg_to_obj(&ret); }
1370
1371
1372#if 1
1373SIMD_IMPL_INTRIN_1(rev64_s8, vs8, vs8)static PyObject *simd__intrin_rev64_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"rev64_s8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vs8 = npyv_rev64_u8( arg.data.vs8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
1374#endif
1375
1376/***************************
1377 * Operators
1378 ***************************/
1379#if 0 > 0
1380SIMD_IMPL_INTRIN_2(shl_s8, vs8, vs8, u8)static PyObject *simd__intrin_shl_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shl_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = npyv_shl_s8( arg1.data.vs8, arg2
.data.u8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1381SIMD_IMPL_INTRIN_2(shr_s8, vs8, vs8, u8)static PyObject *simd__intrin_shr_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shr_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = npyv_shr_s8( arg1.data.vs8, arg2
.data.u8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1382// immediate constant
1383SIMD_IMPL_INTRIN_2IMM(shli_s8, vs8, vs8, 0)static PyObject *simd__intrin_shli_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shli_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vs8 = SIMD__IMPL_COUNT_0
( SIMD__REPEAT_2IMM, shli_s8, vs8 ) data.vs8; simd_arg_free(&
arg1); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }
; return simd_arg_to_obj(&ret); }
1384SIMD_IMPL_INTRIN_2IMM(shri_s8, vs8, vs8, 0)static PyObject *simd__intrin_shri_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shri_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vs8 = SIMD__IMPL_COUNT_0
( SIMD__REPEAT_2IMM, shri_s8, vs8 ) data.vs8; simd_arg_free(&
arg1); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }
; return simd_arg_to_obj(&ret); }
1385#endif // shl_imm
1386
1387#line 314
1388SIMD_IMPL_INTRIN_2(and_s8, vs8, vs8, vs8)static PyObject *simd__intrin_and_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""and_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = _mm_and_si128( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1389
1390#line 314
1391SIMD_IMPL_INTRIN_2(or_s8, vs8, vs8, vs8)static PyObject *simd__intrin_or_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""or_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = _mm_or_si128( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1392
1393#line 314
1394SIMD_IMPL_INTRIN_2(xor_s8, vs8, vs8, vs8)static PyObject *simd__intrin_xor_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = _mm_xor_si128( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1395
1396
1397SIMD_IMPL_INTRIN_1(not_s8, vs8, vs8)static PyObject *simd__intrin_not_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"not_s8", simd_arg_converter, &arg )) return ((void*)0); simd_data
data = {.vs8 = _mm_xor_si128(arg.data.vs8, _mm_set1_epi32(-1
))}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vs8 }; return simd_arg_to_obj(&ret); }
1398
1399#line 322
1400SIMD_IMPL_INTRIN_2(cmpeq_s8, vb8, vs8, vs8)static PyObject *simd__intrin_cmpeq_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpeq_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_cmpeq_epi8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
1401
1402#line 322
1403SIMD_IMPL_INTRIN_2(cmpneq_s8, vb8, vs8, vs8)static PyObject *simd__intrin_cmpneq_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpneq_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_xor_si128(_mm_cmpeq_epi8(arg1
.data.vs8, arg2.data.vs8), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb8 }; return simd_arg_to_obj(&
ret); }
1404
1405#line 322
1406SIMD_IMPL_INTRIN_2(cmpgt_s8, vb8, vs8, vs8)static PyObject *simd__intrin_cmpgt_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpgt_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_cmpgt_epi8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
1407
1408#line 322
1409SIMD_IMPL_INTRIN_2(cmpge_s8, vb8, vs8, vs8)static PyObject *simd__intrin_cmpge_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpge_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_xor_si128(_mm_cmpgt_epi8(arg2
.data.vs8, arg1.data.vs8), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb8 }; return simd_arg_to_obj(&
ret); }
1410
1411#line 322
1412SIMD_IMPL_INTRIN_2(cmplt_s8, vb8, vs8, vs8)static PyObject *simd__intrin_cmplt_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmplt_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_cmpgt_epi8(arg2.data.vs8, arg1
.data.vs8)}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
1413
1414#line 322
1415SIMD_IMPL_INTRIN_2(cmple_s8, vb8, vs8, vs8)static PyObject *simd__intrin_cmple_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""cmple_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_xor_si128(_mm_cmpgt_epi8(arg1
.data.vs8, arg2.data.vs8), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb8 }; return simd_arg_to_obj(&
ret); }
1416
1417
1418/***************************
1419 * Conversion
1420 ***************************/
1421SIMD_IMPL_INTRIN_1(cvt_s8_b8, vs8, vb8)static PyObject *simd__intrin_cvt_s8_b8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb8}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_s8_b8", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs8 = arg.data.vb8}; simd_arg_free(&
arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }
; return simd_arg_to_obj(&ret); }
1422SIMD_IMPL_INTRIN_1(cvt_b8_s8, vb8, vs8)static PyObject *simd__intrin_cvt_b8_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_b8_s8", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vb8 = arg.data.vs8}; simd_arg_free(&
arg); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }
; return simd_arg_to_obj(&ret); }
1423#if 0
1424SIMD_IMPL_INTRIN_1(expand_s8_s8, vs8x2, vs8)static PyObject *simd__intrin_expand_s8_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"expand_s8_s8", simd_arg_converter, &arg )) return ((void
*)0); simd_data data = {.vs8x2 = npyv_expand_s8_s8( arg.data.
vs8 )}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs8x2 }; return simd_arg_to_obj(&ret
); }
1425#endif // expand_sup
1426/***************************
1427 * Arithmetic
1428 ***************************/
1429#line 339
1430SIMD_IMPL_INTRIN_2(add_s8, vs8, vs8, vs8)static PyObject *simd__intrin_add_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""add_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = _mm_add_epi8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1431
1432#line 339
1433SIMD_IMPL_INTRIN_2(sub_s8, vs8, vs8, vs8)static PyObject *simd__intrin_sub_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""sub_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = _mm_sub_epi8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1434
1435
1436#if 1
1437#line 346
1438SIMD_IMPL_INTRIN_2(adds_s8, vs8, vs8, vs8)static PyObject *simd__intrin_adds_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""adds_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = _mm_adds_epi8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1439
1440#line 346
1441SIMD_IMPL_INTRIN_2(subs_s8, vs8, vs8, vs8)static PyObject *simd__intrin_subs_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""subs_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = _mm_subs_epi8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1442
1443#endif // sat_sup
1444
1445#if 1
1446SIMD_IMPL_INTRIN_2(mul_s8, vs8, vs8, vs8)static PyObject *simd__intrin_mul_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""mul_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = npyv_mul_u8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1447#endif // mul_sup
1448
1449#if 0
1450SIMD_IMPL_INTRIN_2(div_s8, vs8, vs8, vs8)static PyObject *simd__intrin_div_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""div_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = npyv_div_s8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1451#endif // div_sup
1452
1453#if 1
1454SIMD_IMPL_INTRIN_1(divisor_s8, vs8x3, s8)static PyObject *simd__intrin_divisor_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_s8}; if (!PyArg_ParseTuple( args, "O&:"
"divisor_s8", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vs8x3 = npyv_divisor_s8( arg.data.s8 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs8x3 }; return simd_arg_to_obj(&ret); }
1455SIMD_IMPL_INTRIN_2(divc_s8, vs8, vs8, vs8x3)static PyObject *simd__intrin_divc_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8x3
}; if (!PyArg_ParseTuple( args, "O&O&:""divc_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = npyv_divc_s8( arg1.data.vs8, arg2
.data.vs8x3 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }
; return simd_arg_to_obj(&ret); }
1456#endif // intdiv_sup
1457
1458#if 0
1459#line 367
1460SIMD_IMPL_INTRIN_3(muladd_s8, vs8, vs8, vs8, vs8)static PyObject *simd__intrin_muladd_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; simd_arg arg3 = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""muladd_s8", simd_arg_converter,
&arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs8 = npyv_muladd_s8
( arg1.data.vs8, arg2.data.vs8, arg3.data.vs8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1461
1462#line 367
1463SIMD_IMPL_INTRIN_3(mulsub_s8, vs8, vs8, vs8, vs8)static PyObject *simd__intrin_mulsub_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; simd_arg arg3 = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""mulsub_s8", simd_arg_converter,
&arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs8 = npyv_mulsub_s8
( arg1.data.vs8, arg2.data.vs8, arg3.data.vs8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1464
1465#line 367
1466SIMD_IMPL_INTRIN_3(nmuladd_s8, vs8, vs8, vs8, vs8)static PyObject *simd__intrin_nmuladd_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; simd_arg arg3 = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmuladd_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs8 = npyv_nmuladd_s8
( arg1.data.vs8, arg2.data.vs8, arg3.data.vs8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1467
1468#line 367
1469SIMD_IMPL_INTRIN_3(nmulsub_s8, vs8, vs8, vs8, vs8)static PyObject *simd__intrin_nmulsub_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; simd_arg arg3 = {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmulsub_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs8 = npyv_nmulsub_s8
( arg1.data.vs8, arg2.data.vs8, arg3.data.vs8 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1470
1471#endif // fused_sup
1472
1473#if 0
1474SIMD_IMPL_INTRIN_1(sum_s8, s8, vs8)static PyObject *simd__intrin_sum_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"sum_s8", simd_arg_converter, &arg )) return ((void*)0); simd_data
data = {.s8 = npyv_sum_s8( arg.data.vs8 )}; simd_arg_free(&
arg); simd_arg ret = { .data = data, .dtype = simd_data_s8 };
return simd_arg_to_obj(&ret); }
1475#endif // sum_sup
1476
1477#if 0
1478SIMD_IMPL_INTRIN_1(sumup_s8, s8, vs8)static PyObject *simd__intrin_sumup_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"sumup_s8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.s8 = npyv_sumup_s8( arg.data.vs8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_s8
}; return simd_arg_to_obj(&ret); }
1479#endif // sumup_sup
1480
1481/***************************
1482 * Math
1483 ***************************/
1484#if 0
1485#line 386
1486SIMD_IMPL_INTRIN_1(sqrt_s8, vs8, vs8)static PyObject *simd__intrin_sqrt_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"sqrt_s8", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vs8 = npyv_sqrt_s8( arg.data.vs8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
1487
1488#line 386
1489SIMD_IMPL_INTRIN_1(recip_s8, vs8, vs8)static PyObject *simd__intrin_recip_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"recip_s8", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vs8 = npyv_recip_s8( arg.data.vs8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
1490
1491#line 386
1492SIMD_IMPL_INTRIN_1(abs_s8, vs8, vs8)static PyObject *simd__intrin_abs_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"abs_s8", simd_arg_converter, &arg )) return ((void*)0); simd_data
data = {.vs8 = npyv_abs_s8( arg.data.vs8 )}; simd_arg_free(&
arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }
; return simd_arg_to_obj(&ret); }
1493
1494#line 386
1495SIMD_IMPL_INTRIN_1(square_s8, vs8, vs8)static PyObject *simd__intrin_square_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&:"
"square_s8", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs8 = npyv_square_s8( arg.data.vs8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs8
}; return simd_arg_to_obj(&ret); }
1496
1497#endif
1498
1499#line 393
1500SIMD_IMPL_INTRIN_2(max_s8, vs8, vs8, vs8)static PyObject *simd__intrin_max_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""max_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = npyv_max_s8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1501
1502#line 393
1503SIMD_IMPL_INTRIN_2(min_s8, vs8, vs8, vs8)static PyObject *simd__intrin_min_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""min_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = npyv_min_s8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1504
1505
1506#if 0
1507#line 400
1508SIMD_IMPL_INTRIN_2(maxp_s8, vs8, vs8, vs8)static PyObject *simd__intrin_maxp_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""maxp_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = npyv_maxp_s8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1509
1510#line 400
1511SIMD_IMPL_INTRIN_2(minp_s8, vs8, vs8, vs8)static PyObject *simd__intrin_minp_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; if (!PyArg_ParseTuple( args, "O&O&:""minp_s8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs8 = npyv_minp_s8( arg1.data.vs8, arg2
.data.vs8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs8 }; return
simd_arg_to_obj(&ret); }
1512
1513#endif
1514
1515/***************************
1516 * Mask operations
1517 ***************************/
1518#line 410
1519 SIMD_IMPL_INTRIN_4(ifadd_s8, vs8, vb8, vs8, vs8, vs8)static PyObject *simd__intrin_ifadd_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; simd_arg arg3 = {.dtype = simd_data_vs8}; simd_arg arg4 = {
.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifadd_s8", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vs8 = npyv_ifadd_s8
( arg1.data.vb8, arg2.data.vs8, arg3.data.vs8, arg4.data.vs8 )
}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vs8 }; return simd_arg_to_obj(&
ret); }
1520
1521#line 410
1522 SIMD_IMPL_INTRIN_4(ifsub_s8, vs8, vb8, vs8, vs8, vs8)static PyObject *simd__intrin_ifsub_s8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb8}; simd_arg arg2 = {.dtype = simd_data_vs8
}; simd_arg arg3 = {.dtype = simd_data_vs8}; simd_arg arg4 = {
.dtype = simd_data_vs8}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifsub_s8", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vs8 = npyv_ifsub_s8
( arg1.data.vb8, arg2.data.vs8, arg3.data.vs8, arg4.data.vs8 )
}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vs8 }; return simd_arg_to_obj(&
ret); }
1523
1524
1525#endif // simd_sup
1526
1527#line 34
1528#if 1
1529/***************************
1530 * Memory
1531 ***************************/
1532#line 41
1533SIMD_IMPL_INTRIN_1(load_u16, vu16, qu16)static PyObject *simd__intrin_load_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu16}; if (!PyArg_ParseTuple( args, "O&:"
"load_u16", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vu16 = npyv_load_u16( arg.data.qu16 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu16
}; return simd_arg_to_obj(&ret); }
1534
1535#line 41
1536SIMD_IMPL_INTRIN_1(loada_u16, vu16, qu16)static PyObject *simd__intrin_loada_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu16}; if (!PyArg_ParseTuple( args, "O&:"
"loada_u16", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu16 = npyv_loada_u16( arg.data.qu16 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu16 }; return simd_arg_to_obj(&ret); }
1537
1538#line 41
1539SIMD_IMPL_INTRIN_1(loads_u16, vu16, qu16)static PyObject *simd__intrin_loads_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu16}; if (!PyArg_ParseTuple( args, "O&:"
"loads_u16", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu16 = npyv_loads_u16( arg.data.qu16 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu16 }; return simd_arg_to_obj(&ret); }
1540
1541#line 41
1542SIMD_IMPL_INTRIN_1(loadl_u16, vu16, qu16)static PyObject *simd__intrin_loadl_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu16}; if (!PyArg_ParseTuple( args, "O&:"
"loadl_u16", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu16 = npyv_loadl_u16( arg.data.qu16 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu16 }; return simd_arg_to_obj(&ret); }
1543
1544#line 46
1545// special definition due to the nature of store
1546static PyObject *
1547simd__intrin_store_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1548{
1549 simd_arg seq_arg = {.dtype = simd_data_qu16};
1550 simd_arg vec_arg = {.dtype = simd_data_vu16};
1551 if (!PyArg_ParseTuple(
1552 args, "O&O&:store_u16",
1553 simd_arg_converter, &seq_arg,
1554 simd_arg_converter, &vec_arg
1555 )) {
1556 return NULL((void*)0);
1557 }
1558 npyv_store_u16(seq_arg.data.qu16, vec_arg.data.vu16);
1559 // write-back
1560 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu16, simd_data_qu16)) {
1561 simd_arg_free(&seq_arg);
1562 return NULL((void*)0);
1563 }
1564 simd_arg_free(&seq_arg);
1565 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
1566}
1567
1568#line 46
1569// special definition due to the nature of storea
1570static PyObject *
1571simd__intrin_storea_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1572{
1573 simd_arg seq_arg = {.dtype = simd_data_qu16};
1574 simd_arg vec_arg = {.dtype = simd_data_vu16};
1575 if (!PyArg_ParseTuple(
1576 args, "O&O&:storea_u16",
1577 simd_arg_converter, &seq_arg,
1578 simd_arg_converter, &vec_arg
1579 )) {
1580 return NULL((void*)0);
1581 }
1582 npyv_storea_u16(seq_arg.data.qu16, vec_arg.data.vu16);
1583 // write-back
1584 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu16, simd_data_qu16)) {
1585 simd_arg_free(&seq_arg);
1586 return NULL((void*)0);
1587 }
1588 simd_arg_free(&seq_arg);
1589 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
1590}
1591
1592#line 46
1593// special definition due to the nature of stores
1594static PyObject *
1595simd__intrin_stores_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1596{
1597 simd_arg seq_arg = {.dtype = simd_data_qu16};
1598 simd_arg vec_arg = {.dtype = simd_data_vu16};
1599 if (!PyArg_ParseTuple(
1600 args, "O&O&:stores_u16",
1601 simd_arg_converter, &seq_arg,
1602 simd_arg_converter, &vec_arg
1603 )) {
1604 return NULL((void*)0);
1605 }
1606 npyv_stores_u16(seq_arg.data.qu16, vec_arg.data.vu16);
1607 // write-back
1608 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu16, simd_data_qu16)) {
1609 simd_arg_free(&seq_arg);
1610 return NULL((void*)0);
1611 }
1612 simd_arg_free(&seq_arg);
1613 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
1614}
1615
1616#line 46
1617// special definition due to the nature of storel
1618static PyObject *
1619simd__intrin_storel_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1620{
1621 simd_arg seq_arg = {.dtype = simd_data_qu16};
1622 simd_arg vec_arg = {.dtype = simd_data_vu16};
1623 if (!PyArg_ParseTuple(
1624 args, "O&O&:storel_u16",
1625 simd_arg_converter, &seq_arg,
1626 simd_arg_converter, &vec_arg
1627 )) {
1628 return NULL((void*)0);
1629 }
1630 npyv_storel_u16(seq_arg.data.qu16, vec_arg.data.vu16);
1631 // write-back
1632 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu16, simd_data_qu16)) {
1633 simd_arg_free(&seq_arg);
1634 return NULL((void*)0);
1635 }
1636 simd_arg_free(&seq_arg);
1637 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
1638}
1639
1640#line 46
1641// special definition due to the nature of storeh
1642static PyObject *
1643simd__intrin_storeh_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1644{
1645 simd_arg seq_arg = {.dtype = simd_data_qu16};
1646 simd_arg vec_arg = {.dtype = simd_data_vu16};
1647 if (!PyArg_ParseTuple(
1648 args, "O&O&:storeh_u16",
1649 simd_arg_converter, &seq_arg,
1650 simd_arg_converter, &vec_arg
1651 )) {
1652 return NULL((void*)0);
1653 }
1654 npyv_storeh_u16(seq_arg.data.qu16, vec_arg.data.vu16);
1655 // write-back
1656 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu16, simd_data_qu16)) {
1657 simd_arg_free(&seq_arg);
1658 return NULL((void*)0);
1659 }
1660 simd_arg_free(&seq_arg);
1661 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
1662}
1663
1664
1665/****************************************
1666 * Non-contiguous/Partial Memory access
1667 ****************************************/
1668#if 0
1669// Partial Load
1670SIMD_IMPL_INTRIN_3(load_till_u16, vu16, qu16, u32, u16)static PyObject *simd__intrin_load_till_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qu16}; simd_arg arg2 = {.dtype = simd_data_u32
}; simd_arg arg3 = {.dtype = simd_data_u16}; if (!PyArg_ParseTuple
( args, "O&O&O&:""load_till_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu16 = npyv_load_till_u16
( arg1.data.qu16, arg2.data.u32, arg3.data.u16 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
1671SIMD_IMPL_INTRIN_2(load_tillz_u16, vu16, qu16, u32)static PyObject *simd__intrin_load_tillz_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qu16}; simd_arg arg2 = {.dtype = simd_data_u32
}; if (!PyArg_ParseTuple( args, "O&O&:""load_tillz_u16"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu16 = npyv_load_tillz_u16
( arg1.data.qu16, arg2.data.u32 )}; simd_arg_free(&arg1);
simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu16 }; return simd_arg_to_obj(&ret); }
1672
1673// Partial Store
1674static PyObject *
1675simd__intrin_store_till_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1676{
1677 simd_arg seq_arg = {.dtype = simd_data_qu16};
1678 simd_arg nlane_arg = {.dtype = simd_data_u32};
1679 simd_arg vec_arg = {.dtype = simd_data_vu16};
1680 if (!PyArg_ParseTuple(
1681 args, "O&O&O&:store_till_u16",
1682 simd_arg_converter, &seq_arg,
1683 simd_arg_converter, &nlane_arg,
1684 simd_arg_converter, &vec_arg
1685 )) {
1686 return NULL((void*)0);
1687 }
1688 npyv_store_till_u16(
1689 seq_arg.data.qu16, nlane_arg.data.u32, vec_arg.data.vu16
1690 );
1691 // write-back
1692 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu16, simd_data_qu16)) {
1693 simd_arg_free(&seq_arg);
1694 return NULL((void*)0);
1695 }
1696 simd_arg_free(&seq_arg);
1697 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
1698}
1699
1700// Non-contiguous Load
1701#line 112
1702static PyObject *
1703simd__intrin_loadn_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1704{
1705 simd_arg seq_arg = {.dtype = simd_data_qu16};
1706 simd_arg stride_arg = {.dtype = simd_data_s64};
1707#if 0
1708 simd_arg nlane_arg = {.dtype = simd_data_u32};
1709#endif // till
1710#if 0
1711 simd_arg fill_arg = {.dtype = simd_data_u16};
1712#endif
1713 if (!PyArg_ParseTuple(
1714 args, "O&O&:loadn_u16",
1715 simd_arg_converter, &seq_arg,
1716 simd_arg_converter, &stride_arg
1717#if 0
1718 ,simd_arg_converter, &nlane_arg
1719#endif
1720#if 0
1721 ,simd_arg_converter, &fill_arg
1722#endif
1723 )) {
1724 return NULL((void*)0);
1725 }
1726 npyv_lanetype_u16 *seq_ptr = seq_arg.data.qu16;
1727 npy_intp stride = (npy_intp)stride_arg.data.s64;
1728 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
1729 Py_ssize_t min_seq_len = stride * npyv_nlanes_u168;
1730 if (stride < 0) {
1731 seq_ptr += cur_seq_len -1;
1732 min_seq_len = -min_seq_len;
1733 }
1734 if (cur_seq_len < min_seq_len) {
1735 PyErr_Format(PyExc_ValueError,
1736 "loadn_u16(), according to provided stride %d, the "
1737 "minimum acceptable size of the required sequence is %d, given(%d)",
1738 stride, min_seq_len, cur_seq_len
1739 );
1740 goto err;
1741 }
1742 npyv_u16 rvec = npyv_loadn_u16(
1743 seq_ptr, stride
1744 #if 0
1745 , nlane_arg.data.u32
1746 #endif
1747 #if 0
1748 , fill_arg.data.u16
1749 #endif
1750 );
1751 simd_arg ret = {
1752 .dtype = simd_data_vu16, .data = {.vu16=rvec}
1753 };
1754 simd_arg_free(&seq_arg);
1755 return simd_arg_to_obj(&ret);
1756err:
1757 simd_arg_free(&seq_arg);
1758 return NULL((void*)0);
1759}
1760
1761#line 112
1762static PyObject *
1763simd__intrin_loadn_till_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1764{
1765 simd_arg seq_arg = {.dtype = simd_data_qu16};
1766 simd_arg stride_arg = {.dtype = simd_data_s64};
1767#if 1
1768 simd_arg nlane_arg = {.dtype = simd_data_u32};
1769#endif // till
1770#if 1
1771 simd_arg fill_arg = {.dtype = simd_data_u16};
1772#endif
1773 if (!PyArg_ParseTuple(
1774 args, "O&O&O&O&:loadn_till_u16",
1775 simd_arg_converter, &seq_arg,
1776 simd_arg_converter, &stride_arg
1777#if 1
1778 ,simd_arg_converter, &nlane_arg
1779#endif
1780#if 1
1781 ,simd_arg_converter, &fill_arg
1782#endif
1783 )) {
1784 return NULL((void*)0);
1785 }
1786 npyv_lanetype_u16 *seq_ptr = seq_arg.data.qu16;
1787 npy_intp stride = (npy_intp)stride_arg.data.s64;
1788 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
1789 Py_ssize_t min_seq_len = stride * npyv_nlanes_u168;
1790 if (stride < 0) {
1791 seq_ptr += cur_seq_len -1;
1792 min_seq_len = -min_seq_len;
1793 }
1794 if (cur_seq_len < min_seq_len) {
1795 PyErr_Format(PyExc_ValueError,
1796 "loadn_till_u16(), according to provided stride %d, the "
1797 "minimum acceptable size of the required sequence is %d, given(%d)",
1798 stride, min_seq_len, cur_seq_len
1799 );
1800 goto err;
1801 }
1802 npyv_u16 rvec = npyv_loadn_till_u16(
1803 seq_ptr, stride
1804 #if 1
1805 , nlane_arg.data.u32
1806 #endif
1807 #if 1
1808 , fill_arg.data.u16
1809 #endif
1810 );
1811 simd_arg ret = {
1812 .dtype = simd_data_vu16, .data = {.vu16=rvec}
1813 };
1814 simd_arg_free(&seq_arg);
1815 return simd_arg_to_obj(&ret);
1816err:
1817 simd_arg_free(&seq_arg);
1818 return NULL((void*)0);
1819}
1820
1821#line 112
1822static PyObject *
1823simd__intrin_loadn_tillz_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1824{
1825 simd_arg seq_arg = {.dtype = simd_data_qu16};
1826 simd_arg stride_arg = {.dtype = simd_data_s64};
1827#if 1
1828 simd_arg nlane_arg = {.dtype = simd_data_u32};
1829#endif // till
1830#if 0
1831 simd_arg fill_arg = {.dtype = simd_data_u16};
1832#endif
1833 if (!PyArg_ParseTuple(
1834 args, "O&O&O&:loadn_tillz_u16",
1835 simd_arg_converter, &seq_arg,
1836 simd_arg_converter, &stride_arg
1837#if 1
1838 ,simd_arg_converter, &nlane_arg
1839#endif
1840#if 0
1841 ,simd_arg_converter, &fill_arg
1842#endif
1843 )) {
1844 return NULL((void*)0);
1845 }
1846 npyv_lanetype_u16 *seq_ptr = seq_arg.data.qu16;
1847 npy_intp stride = (npy_intp)stride_arg.data.s64;
1848 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
1849 Py_ssize_t min_seq_len = stride * npyv_nlanes_u168;
1850 if (stride < 0) {
1851 seq_ptr += cur_seq_len -1;
1852 min_seq_len = -min_seq_len;
1853 }
1854 if (cur_seq_len < min_seq_len) {
1855 PyErr_Format(PyExc_ValueError,
1856 "loadn_tillz_u16(), according to provided stride %d, the "
1857 "minimum acceptable size of the required sequence is %d, given(%d)",
1858 stride, min_seq_len, cur_seq_len
1859 );
1860 goto err;
1861 }
1862 npyv_u16 rvec = npyv_loadn_tillz_u16(
1863 seq_ptr, stride
1864 #if 1
1865 , nlane_arg.data.u32
1866 #endif
1867 #if 0
1868 , fill_arg.data.u16
1869 #endif
1870 );
1871 simd_arg ret = {
1872 .dtype = simd_data_vu16, .data = {.vu16=rvec}
1873 };
1874 simd_arg_free(&seq_arg);
1875 return simd_arg_to_obj(&ret);
1876err:
1877 simd_arg_free(&seq_arg);
1878 return NULL((void*)0);
1879}
1880
1881
1882// Non-contiguous Store
1883#line 178
1884static PyObject *
1885simd__intrin_storen_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1886{
1887 simd_arg seq_arg = {.dtype = simd_data_qu16};
1888 simd_arg stride_arg = {.dtype = simd_data_s64};
1889 simd_arg vec_arg = {.dtype = simd_data_vu16};
1890#if 0
1891 simd_arg nlane_arg = {.dtype = simd_data_u32};
1892#endif
1893 if (!PyArg_ParseTuple(
1894 args, "O&O&O&:storen_u16",
1895 simd_arg_converter, &seq_arg,
1896 simd_arg_converter, &stride_arg
1897#if 0
1898 ,simd_arg_converter, &nlane_arg
1899#endif
1900 ,simd_arg_converter, &vec_arg
1901 )) {
1902 return NULL((void*)0);
1903 }
1904 npyv_lanetype_u16 *seq_ptr = seq_arg.data.qu16;
1905 npy_intp stride = (npy_intp)stride_arg.data.s64;
1906 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
1907 Py_ssize_t min_seq_len = stride * npyv_nlanes_u168;
1908 if (stride < 0) {
1909 seq_ptr += cur_seq_len -1;
1910 min_seq_len = -min_seq_len;
1911 }
1912 // overflow guard
1913 if (cur_seq_len < min_seq_len) {
1914 PyErr_Format(PyExc_ValueError,
1915 "storen_u16(), according to provided stride %d, the"
1916 "minimum acceptable size of the required sequence is %d, given(%d)",
1917 stride, min_seq_len, cur_seq_len
1918 );
1919 goto err;
1920 }
1921 npyv_storen_u16(
1922 seq_ptr, stride
1923 #if 0
1924 ,nlane_arg.data.u32
1925 #endif
1926 ,vec_arg.data.vu16
1927 );
1928 // write-back
1929 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu16, simd_data_qu16)) {
1930 goto err;
1931 }
1932 simd_arg_free(&seq_arg);
1933 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
1934err:
1935 simd_arg_free(&seq_arg);
1936 return NULL((void*)0);
1937}
1938
1939#line 178
1940static PyObject *
1941simd__intrin_storen_till_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
1942{
1943 simd_arg seq_arg = {.dtype = simd_data_qu16};
1944 simd_arg stride_arg = {.dtype = simd_data_s64};
1945 simd_arg vec_arg = {.dtype = simd_data_vu16};
1946#if 1
1947 simd_arg nlane_arg = {.dtype = simd_data_u32};
1948#endif
1949 if (!PyArg_ParseTuple(
1950 args, "O&O&O&O&:storen_u16",
1951 simd_arg_converter, &seq_arg,
1952 simd_arg_converter, &stride_arg
1953#if 1
1954 ,simd_arg_converter, &nlane_arg
1955#endif
1956 ,simd_arg_converter, &vec_arg
1957 )) {
1958 return NULL((void*)0);
1959 }
1960 npyv_lanetype_u16 *seq_ptr = seq_arg.data.qu16;
1961 npy_intp stride = (npy_intp)stride_arg.data.s64;
1962 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
1963 Py_ssize_t min_seq_len = stride * npyv_nlanes_u168;
1964 if (stride < 0) {
1965 seq_ptr += cur_seq_len -1;
1966 min_seq_len = -min_seq_len;
1967 }
1968 // overflow guard
1969 if (cur_seq_len < min_seq_len) {
1970 PyErr_Format(PyExc_ValueError,
1971 "storen_till_u16(), according to provided stride %d, the"
1972 "minimum acceptable size of the required sequence is %d, given(%d)",
1973 stride, min_seq_len, cur_seq_len
1974 );
1975 goto err;
1976 }
1977 npyv_storen_till_u16(
1978 seq_ptr, stride
1979 #if 1
1980 ,nlane_arg.data.u32
1981 #endif
1982 ,vec_arg.data.vu16
1983 );
1984 // write-back
1985 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu16, simd_data_qu16)) {
1986 goto err;
1987 }
1988 simd_arg_free(&seq_arg);
1989 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
1990err:
1991 simd_arg_free(&seq_arg);
1992 return NULL((void*)0);
1993}
1994
1995#endif // 0
1996
1997/***************************
1998 * Misc
1999 ***************************/
2000SIMD_IMPL_INTRIN_0(zero_u16, vu16)static PyObject *simd__intrin_zero_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "zero_u16") ) return ((void*)0); simd_arg a = { .
dtype = simd_data_vu16, .data = {.vu16 = _mm_setzero_si128()}
, }; return simd_arg_to_obj(&a); }
2001SIMD_IMPL_INTRIN_1(setall_u16, vu16, u16)static PyObject *simd__intrin_setall_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_u16}; if (!PyArg_ParseTuple( args, "O&:"
"setall_u16", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vu16 = _mm_set1_epi16((short)(arg.data
.u16))}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&ret)
; }
2002SIMD_IMPL_INTRIN_3(select_u16, vu16, vb16, vu16, vu16)static PyObject *simd__intrin_select_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; simd_arg arg3 = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&O&O&:""select_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu16 = npyv_select_u8
( arg1.data.vb16, arg2.data.vu16, arg3.data.vu16 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2003
2004#line 246
2005#if 1
2006SIMD_IMPL_INTRIN_1(reinterpret_u8_u16, vu8, vu16)static PyObject *simd__intrin_reinterpret_u8_u16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u8_u16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu8 = arg.data.vu16
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu8 }; return simd_arg_to_obj(&ret); }
2007#endif // simd_sup2
2008
2009#line 246
2010#if 1
2011SIMD_IMPL_INTRIN_1(reinterpret_s8_u16, vs8, vu16)static PyObject *simd__intrin_reinterpret_s8_u16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s8_u16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs8 = arg.data.vu16
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs8 }; return simd_arg_to_obj(&ret); }
2012#endif // simd_sup2
2013
2014#line 246
2015#if 1
2016SIMD_IMPL_INTRIN_1(reinterpret_u16_u16, vu16, vu16)static PyObject *simd__intrin_reinterpret_u16_u16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u16_u16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu16 = arg.data.
vu16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&ret)
; }
2017#endif // simd_sup2
2018
2019#line 246
2020#if 1
2021SIMD_IMPL_INTRIN_1(reinterpret_s16_u16, vs16, vu16)static PyObject *simd__intrin_reinterpret_s16_u16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s16_u16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs16 = arg.data.
vu16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs16 }; return simd_arg_to_obj(&ret)
; }
2022#endif // simd_sup2
2023
2024#line 246
2025#if 1
2026SIMD_IMPL_INTRIN_1(reinterpret_u32_u16, vu32, vu16)static PyObject *simd__intrin_reinterpret_u32_u16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u32_u16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu32 = arg.data.
vu16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu32 }; return simd_arg_to_obj(&ret)
; }
2027#endif // simd_sup2
2028
2029#line 246
2030#if 1
2031SIMD_IMPL_INTRIN_1(reinterpret_s32_u16, vs32, vu16)static PyObject *simd__intrin_reinterpret_s32_u16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s32_u16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs32 = arg.data.
vu16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret)
; }
2032#endif // simd_sup2
2033
2034#line 246
2035#if 1
2036SIMD_IMPL_INTRIN_1(reinterpret_u64_u16, vu64, vu16)static PyObject *simd__intrin_reinterpret_u64_u16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u64_u16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu64 = arg.data.
vu16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&ret)
; }
2037#endif // simd_sup2
2038
2039#line 246
2040#if 1
2041SIMD_IMPL_INTRIN_1(reinterpret_s64_u16, vs64, vu16)static PyObject *simd__intrin_reinterpret_s64_u16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s64_u16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs64 = arg.data.
vu16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&ret)
; }
2042#endif // simd_sup2
2043
2044#line 246
2045#if 1
2046SIMD_IMPL_INTRIN_1(reinterpret_f32_u16, vf32, vu16)static PyObject *simd__intrin_reinterpret_f32_u16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f32_u16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf32 = _mm_castsi128_ps
( arg.data.vu16 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf32 }; return simd_arg_to_obj
(&ret); }
2047#endif // simd_sup2
2048
2049#line 246
2050#if NPY_SIMD_F641
2051SIMD_IMPL_INTRIN_1(reinterpret_f64_u16, vf64, vu16)static PyObject *simd__intrin_reinterpret_f64_u16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f64_u16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf64 = _mm_castsi128_pd
( arg.data.vu16 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf64 }; return simd_arg_to_obj
(&ret); }
2052#endif // simd_sup2
2053
2054
2055/**
2056 * special definition due to the nature of intrinsics
2057 * npyv_setf_u16 and npy_set_u16.
2058*/
2059#line 258
2060static PyObject *
2061simd__intrin_setf_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2062{
2063 npyv_lanetype_u16 *data = simd_sequence_from_iterable(args, simd_data_qu16, npyv_nlanes_u168);
2064 if (data == NULL((void*)0)) {
2065 return NULL((void*)0);
2066 }
2067 simd_data r = {.vu16 = npyv_setf_u16(npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2068 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2069 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2070 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2071 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2072 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2073 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2074 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2075 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2076 data[64] // for setfnpyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2077 )npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
};
2078 simd_sequence_free(data);
2079 return (PyObject*)PySIMDVector_FromData(r, simd_data_vu16);
2080}
2081
2082#line 258
2083static PyObject *
2084simd__intrin_set_u16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2085{
2086 npyv_lanetype_u16 *data = simd_sequence_from_iterable(args, simd_data_qu16, npyv_nlanes_u168);
2087 if (data == NULL((void*)0)) {
2088 return NULL((void*)0);
2089 }
2090 simd_data r = {.vu16 = npyv_set_u16(npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2091 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2092 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2093 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2094 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2095 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2096 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2097 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2098 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2099 data[64] // for setfnpyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2100 )npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
};
2101 simd_sequence_free(data);
2102 return (PyObject*)PySIMDVector_FromData(r, simd_data_vu16);
2103}
2104
2105
2106/***************************
2107 * Reorder
2108 ***************************/
2109#line 287
2110SIMD_IMPL_INTRIN_2(combinel_u16, vu16, vu16, vu16)static PyObject *simd__intrin_combinel_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""combinel_u16"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu16 = _mm_unpacklo_epi64
( arg1.data.vu16, arg2.data.vu16 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu16 }; return simd_arg_to_obj(&ret); }
2111
2112#line 287
2113SIMD_IMPL_INTRIN_2(combineh_u16, vu16, vu16, vu16)static PyObject *simd__intrin_combineh_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""combineh_u16"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu16 = _mm_unpackhi_epi64
( arg1.data.vu16, arg2.data.vu16 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu16 }; return simd_arg_to_obj(&ret); }
2114
2115
2116#line 293
2117SIMD_IMPL_INTRIN_2(combine_u16, vu16x2, vu16, vu16)static PyObject *simd__intrin_combine_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""combine_u16",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu16x2 = npyv__combine
( arg1.data.vu16, arg2.data.vu16 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu16x2 }; return simd_arg_to_obj(&ret); }
2118
2119#line 293
2120SIMD_IMPL_INTRIN_2(zip_u16, vu16x2, vu16, vu16)static PyObject *simd__intrin_zip_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""zip_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16x2 = npyv_zip_u16( arg1.data.vu16,
arg2.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu16x2
}; return simd_arg_to_obj(&ret); }
2121
2122
2123#if 1
2124SIMD_IMPL_INTRIN_1(rev64_u16, vu16, vu16)static PyObject *simd__intrin_rev64_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&:"
"rev64_u16", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu16 = npyv_rev64_u16( arg.data.vu16 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu16 }; return simd_arg_to_obj(&ret); }
2125#endif
2126
2127/***************************
2128 * Operators
2129 ***************************/
2130#if 15 > 0
2131SIMD_IMPL_INTRIN_2(shl_u16, vu16, vu16, u8)static PyObject *simd__intrin_shl_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shl_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = _mm_sll_epi16(arg1.data.vu16, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu16
}; return simd_arg_to_obj(&ret); }
2132SIMD_IMPL_INTRIN_2(shr_u16, vu16, vu16, u8)static PyObject *simd__intrin_shr_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shr_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = _mm_srl_epi16(arg1.data.vu16, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu16
}; return simd_arg_to_obj(&ret); }
2133// immediate constant
2134SIMD_IMPL_INTRIN_2IMM(shli_u16, vu16, vu16, 15)static PyObject *simd__intrin_shli_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shli_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vu16 = 0 == arg2.data.u8
? _mm_slli_epi16(arg1.data.vu16, 0) : 1 == arg2.data.u8 ? _mm_slli_epi16
(arg1.data.vu16, 1) : 2 == arg2.data.u8 ? _mm_slli_epi16(arg1
.data.vu16, 2) : 3 == arg2.data.u8 ? _mm_slli_epi16(arg1.data
.vu16, 3) : 4 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vu16
, 4) : 5 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vu16, 5) :
6 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vu16, 6) : 7 ==
arg2.data.u8 ? _mm_slli_epi16(arg1.data.vu16, 7) : 8 == arg2
.data.u8 ? _mm_slli_epi16(arg1.data.vu16, 8) : 9 == arg2.data
.u8 ? _mm_slli_epi16(arg1.data.vu16, 9) : 10 == arg2.data.u8 ?
_mm_slli_epi16(arg1.data.vu16, 10) : 11 == arg2.data.u8 ? _mm_slli_epi16
(arg1.data.vu16, 11) : 12 == arg2.data.u8 ? _mm_slli_epi16(arg1
.data.vu16, 12) : 13 == arg2.data.u8 ? _mm_slli_epi16(arg1.data
.vu16, 13) : 14 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vu16
, 14) : 15 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vu16, 15
) : data.vu16; simd_arg_free(&arg1); simd_arg ret = { .data
= data, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&
ret); }
2135SIMD_IMPL_INTRIN_2IMM(shri_u16, vu16, vu16, 16)static PyObject *simd__intrin_shri_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shri_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vu16 = 1 == arg2.data.u8
? _mm_srli_epi16(arg1.data.vu16, 1) : 2 == arg2.data.u8 ? _mm_srli_epi16
(arg1.data.vu16, 2) : 3 == arg2.data.u8 ? _mm_srli_epi16(arg1
.data.vu16, 3) : 4 == arg2.data.u8 ? _mm_srli_epi16(arg1.data
.vu16, 4) : 5 == arg2.data.u8 ? _mm_srli_epi16(arg1.data.vu16
, 5) : 6 == arg2.data.u8 ? _mm_srli_epi16(arg1.data.vu16, 6) :
7 == arg2.data.u8 ? _mm_srli_epi16(arg1.data.vu16, 7) : 8 ==
arg2.data.u8 ? _mm_srli_epi16(arg1.data.vu16, 8) : 9 == arg2
.data.u8 ? _mm_srli_epi16(arg1.data.vu16, 9) : 10 == arg2.data
.u8 ? _mm_srli_epi16(arg1.data.vu16, 10) : 11 == arg2.data.u8
? _mm_srli_epi16(arg1.data.vu16, 11) : 12 == arg2.data.u8 ? _mm_srli_epi16
(arg1.data.vu16, 12) : 13 == arg2.data.u8 ? _mm_srli_epi16(arg1
.data.vu16, 13) : 14 == arg2.data.u8 ? _mm_srli_epi16(arg1.data
.vu16, 14) : 15 == arg2.data.u8 ? _mm_srli_epi16(arg1.data.vu16
, 15) : 16 == arg2.data.u8 ? _mm_srli_epi16(arg1.data.vu16, 16
) : data.vu16; simd_arg_free(&arg1); simd_arg ret = { .data
= data, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&
ret); }
2136#endif // shl_imm
2137
2138#line 314
2139SIMD_IMPL_INTRIN_2(and_u16, vu16, vu16, vu16)static PyObject *simd__intrin_and_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""and_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = _mm_and_si128( arg1.data.vu16, arg2
.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2140
2141#line 314
2142SIMD_IMPL_INTRIN_2(or_u16, vu16, vu16, vu16)static PyObject *simd__intrin_or_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""or_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = _mm_or_si128( arg1.data.vu16, arg2
.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2143
2144#line 314
2145SIMD_IMPL_INTRIN_2(xor_u16, vu16, vu16, vu16)static PyObject *simd__intrin_xor_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = _mm_xor_si128( arg1.data.vu16, arg2
.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2146
2147
2148SIMD_IMPL_INTRIN_1(not_u16, vu16, vu16)static PyObject *simd__intrin_not_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&:"
"not_u16", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vu16 = _mm_xor_si128(arg.data.vu16, _mm_set1_epi32
(-1))}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&ret)
; }
2149
2150#line 322
2151SIMD_IMPL_INTRIN_2(cmpeq_u16, vb16, vu16, vu16)static PyObject *simd__intrin_cmpeq_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpeq_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb16 = _mm_cmpeq_epi16( arg1.data.vu16
, arg2.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(
&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb16
}; return simd_arg_to_obj(&ret); }
2152
2153#line 322
2154SIMD_IMPL_INTRIN_2(cmpneq_u16, vb16, vu16, vu16)static PyObject *simd__intrin_cmpneq_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpneq_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb16 = _mm_xor_si128(_mm_cmpeq_epi16(arg1
.data.vu16, arg2.data.vu16), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb16 }; return simd_arg_to_obj(&
ret); }
2155
2156#line 322
2157SIMD_IMPL_INTRIN_2(cmpgt_u16, vb16, vu16, vu16)static PyObject *simd__intrin_cmpgt_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpgt_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb16 = npyv_cmpgt_u16( arg1.data.vu16,
arg2.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb16
}; return simd_arg_to_obj(&ret); }
2158
2159#line 322
2160SIMD_IMPL_INTRIN_2(cmpge_u16, vb16, vu16, vu16)static PyObject *simd__intrin_cmpge_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpge_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb16 = _mm_cmpeq_epi16(_mm_subs_epu16(
arg2.data.vu16, arg1.data.vu16), _mm_setzero_si128())}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb16 }; return simd_arg_to_obj(&
ret); }
2161
2162#line 322
2163SIMD_IMPL_INTRIN_2(cmplt_u16, vb16, vu16, vu16)static PyObject *simd__intrin_cmplt_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""cmplt_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb16 = npyv_cmpgt_u16(arg2.data.vu16, arg1
.data.vu16)}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb16 }; return
simd_arg_to_obj(&ret); }
2164
2165#line 322
2166SIMD_IMPL_INTRIN_2(cmple_u16, vb16, vu16, vu16)static PyObject *simd__intrin_cmple_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""cmple_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb16 = _mm_cmpeq_epi16(_mm_subs_epu16(
arg1.data.vu16, arg2.data.vu16), _mm_setzero_si128())}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb16 }; return simd_arg_to_obj(&
ret); }
2167
2168
2169/***************************
2170 * Conversion
2171 ***************************/
2172SIMD_IMPL_INTRIN_1(cvt_u16_b16, vu16, vb16)static PyObject *simd__intrin_cvt_u16_b16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb16}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_u16_b16", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vu16 = arg.data.vb16}; simd_arg_free(
&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu16
}; return simd_arg_to_obj(&ret); }
2173SIMD_IMPL_INTRIN_1(cvt_b16_u16, vb16, vu16)static PyObject *simd__intrin_cvt_b16_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_b16_u16", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vb16 = arg.data.vu16}; simd_arg_free(
&arg); simd_arg ret = { .data = data, .dtype = simd_data_vb16
}; return simd_arg_to_obj(&ret); }
2174#if 1
2175SIMD_IMPL_INTRIN_1(expand_u32_u16, vu32x2, vu16)static PyObject *simd__intrin_expand_u32_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&:"
"expand_u32_u16", simd_arg_converter, &arg )) return ((void
*)0); simd_data data = {.vu32x2 = npyv_expand_u32_u16( arg.data
.vu16 )}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu32x2 }; return simd_arg_to_obj(&ret
); }
2176#endif // expand_sup
2177/***************************
2178 * Arithmetic
2179 ***************************/
2180#line 339
2181SIMD_IMPL_INTRIN_2(add_u16, vu16, vu16, vu16)static PyObject *simd__intrin_add_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""add_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = _mm_add_epi16( arg1.data.vu16, arg2
.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2182
2183#line 339
2184SIMD_IMPL_INTRIN_2(sub_u16, vu16, vu16, vu16)static PyObject *simd__intrin_sub_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""sub_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = _mm_sub_epi16( arg1.data.vu16, arg2
.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2185
2186
2187#if 1
2188#line 346
2189SIMD_IMPL_INTRIN_2(adds_u16, vu16, vu16, vu16)static PyObject *simd__intrin_adds_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""adds_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = _mm_adds_epu16( arg1.data.vu16,
arg2.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu16
}; return simd_arg_to_obj(&ret); }
2190
2191#line 346
2192SIMD_IMPL_INTRIN_2(subs_u16, vu16, vu16, vu16)static PyObject *simd__intrin_subs_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""subs_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = _mm_subs_epu16( arg1.data.vu16,
arg2.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu16
}; return simd_arg_to_obj(&ret); }
2193
2194#endif // sat_sup
2195
2196#if 1
2197SIMD_IMPL_INTRIN_2(mul_u16, vu16, vu16, vu16)static PyObject *simd__intrin_mul_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""mul_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = _mm_mullo_epi16( arg1.data.vu16
, arg2.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(
&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu16
}; return simd_arg_to_obj(&ret); }
2198#endif // mul_sup
2199
2200#if 0
2201SIMD_IMPL_INTRIN_2(div_u16, vu16, vu16, vu16)static PyObject *simd__intrin_div_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""div_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = npyv_div_u16( arg1.data.vu16, arg2
.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2202#endif // div_sup
2203
2204#if 1
2205SIMD_IMPL_INTRIN_1(divisor_u16, vu16x3, u16)static PyObject *simd__intrin_divisor_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_u16}; if (!PyArg_ParseTuple( args, "O&:"
"divisor_u16", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vu16x3 = npyv_divisor_u16( arg.data.u16
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vu16x3 }; return simd_arg_to_obj(&ret);
}
2206SIMD_IMPL_INTRIN_2(divc_u16, vu16, vu16, vu16x3)static PyObject *simd__intrin_divc_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16x3
}; if (!PyArg_ParseTuple( args, "O&O&:""divc_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = npyv_divc_u16( arg1.data.vu16, arg2
.data.vu16x3 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu16
}; return simd_arg_to_obj(&ret); }
2207#endif // intdiv_sup
2208
2209#if 0
2210#line 367
2211SIMD_IMPL_INTRIN_3(muladd_u16, vu16, vu16, vu16, vu16)static PyObject *simd__intrin_muladd_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; simd_arg arg3 = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&O&O&:""muladd_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu16 = npyv_muladd_u16
( arg1.data.vu16, arg2.data.vu16, arg3.data.vu16 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2212
2213#line 367
2214SIMD_IMPL_INTRIN_3(mulsub_u16, vu16, vu16, vu16, vu16)static PyObject *simd__intrin_mulsub_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; simd_arg arg3 = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&O&O&:""mulsub_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu16 = npyv_mulsub_u16
( arg1.data.vu16, arg2.data.vu16, arg3.data.vu16 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2215
2216#line 367
2217SIMD_IMPL_INTRIN_3(nmuladd_u16, vu16, vu16, vu16, vu16)static PyObject *simd__intrin_nmuladd_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; simd_arg arg3 = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmuladd_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu16 = npyv_nmuladd_u16
( arg1.data.vu16, arg2.data.vu16, arg3.data.vu16 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2218
2219#line 367
2220SIMD_IMPL_INTRIN_3(nmulsub_u16, vu16, vu16, vu16, vu16)static PyObject *simd__intrin_nmulsub_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; simd_arg arg3 = {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmulsub_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu16 = npyv_nmulsub_u16
( arg1.data.vu16, arg2.data.vu16, arg3.data.vu16 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2221
2222#endif // fused_sup
2223
2224#if 0
2225SIMD_IMPL_INTRIN_1(sum_u16, u16, vu16)static PyObject *simd__intrin_sum_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&:"
"sum_u16", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.u16 = npyv_sum_u16( arg.data.vu16 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_u16
}; return simd_arg_to_obj(&ret); }
2226#endif // sum_sup
2227
2228#if 1
2229SIMD_IMPL_INTRIN_1(sumup_u16, u32, vu16)static PyObject *simd__intrin_sumup_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&:"
"sumup_u16", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.u32 = npyv_sumup_u16( arg.data.vu16 )};
simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_u32 }; return simd_arg_to_obj(&ret); }
2230#endif // sumup_sup
2231
2232/***************************
2233 * Math
2234 ***************************/
2235#if 0
2236#line 386
2237SIMD_IMPL_INTRIN_1(sqrt_u16, vu16, vu16)static PyObject *simd__intrin_sqrt_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&:"
"sqrt_u16", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vu16 = npyv_sqrt_u16( arg.data.vu16 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu16
}; return simd_arg_to_obj(&ret); }
2238
2239#line 386
2240SIMD_IMPL_INTRIN_1(recip_u16, vu16, vu16)static PyObject *simd__intrin_recip_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&:"
"recip_u16", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu16 = npyv_recip_u16( arg.data.vu16 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu16 }; return simd_arg_to_obj(&ret); }
2241
2242#line 386
2243SIMD_IMPL_INTRIN_1(abs_u16, vu16, vu16)static PyObject *simd__intrin_abs_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&:"
"abs_u16", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vu16 = npyv_abs_u16( arg.data.vu16 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu16
}; return simd_arg_to_obj(&ret); }
2244
2245#line 386
2246SIMD_IMPL_INTRIN_1(square_u16, vu16, vu16)static PyObject *simd__intrin_square_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&:"
"square_u16", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vu16 = npyv_square_u16( arg.data.vu16 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu16 }; return simd_arg_to_obj(&ret); }
2247
2248#endif
2249
2250#line 393
2251SIMD_IMPL_INTRIN_2(max_u16, vu16, vu16, vu16)static PyObject *simd__intrin_max_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""max_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = npyv_max_u16( arg1.data.vu16, arg2
.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2252
2253#line 393
2254SIMD_IMPL_INTRIN_2(min_u16, vu16, vu16, vu16)static PyObject *simd__intrin_min_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""min_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = npyv_min_u16( arg1.data.vu16, arg2
.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2255
2256
2257#if 0
2258#line 400
2259SIMD_IMPL_INTRIN_2(maxp_u16, vu16, vu16, vu16)static PyObject *simd__intrin_maxp_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""maxp_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = npyv_maxp_u16( arg1.data.vu16, arg2
.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2260
2261#line 400
2262SIMD_IMPL_INTRIN_2(minp_u16, vu16, vu16, vu16)static PyObject *simd__intrin_minp_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; if (!PyArg_ParseTuple( args, "O&O&:""minp_u16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu16 = npyv_minp_u16( arg1.data.vu16, arg2
.data.vu16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu16 }; return
simd_arg_to_obj(&ret); }
2263
2264#endif
2265
2266/***************************
2267 * Mask operations
2268 ***************************/
2269#line 410
2270 SIMD_IMPL_INTRIN_4(ifadd_u16, vu16, vb16, vu16, vu16, vu16)static PyObject *simd__intrin_ifadd_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; simd_arg arg3 = {.dtype = simd_data_vu16}; simd_arg arg4 =
{.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifadd_u16", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vu16 = npyv_ifadd_u16
( arg1.data.vb16, arg2.data.vu16, arg3.data.vu16, arg4.data.vu16
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&
ret); }
2271
2272#line 410
2273 SIMD_IMPL_INTRIN_4(ifsub_u16, vu16, vb16, vu16, vu16, vu16)static PyObject *simd__intrin_ifsub_u16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb16}; simd_arg arg2 = {.dtype = simd_data_vu16
}; simd_arg arg3 = {.dtype = simd_data_vu16}; simd_arg arg4 =
{.dtype = simd_data_vu16}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifsub_u16", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vu16 = npyv_ifsub_u16
( arg1.data.vb16, arg2.data.vu16, arg3.data.vu16, arg4.data.vu16
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&
ret); }
2274
2275
2276#endif // simd_sup
2277
2278#line 34
2279#if 1
2280/***************************
2281 * Memory
2282 ***************************/
2283#line 41
2284SIMD_IMPL_INTRIN_1(load_s16, vs16, qs16)static PyObject *simd__intrin_load_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs16}; if (!PyArg_ParseTuple( args, "O&:"
"load_s16", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vs16 = npyv_load_s16( arg.data.qs16 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs16
}; return simd_arg_to_obj(&ret); }
2285
2286#line 41
2287SIMD_IMPL_INTRIN_1(loada_s16, vs16, qs16)static PyObject *simd__intrin_loada_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs16}; if (!PyArg_ParseTuple( args, "O&:"
"loada_s16", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs16 = npyv_loada_s16( arg.data.qs16 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs16 }; return simd_arg_to_obj(&ret); }
2288
2289#line 41
2290SIMD_IMPL_INTRIN_1(loads_s16, vs16, qs16)static PyObject *simd__intrin_loads_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs16}; if (!PyArg_ParseTuple( args, "O&:"
"loads_s16", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs16 = npyv_loads_s16( arg.data.qs16 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs16 }; return simd_arg_to_obj(&ret); }
2291
2292#line 41
2293SIMD_IMPL_INTRIN_1(loadl_s16, vs16, qs16)static PyObject *simd__intrin_loadl_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs16}; if (!PyArg_ParseTuple( args, "O&:"
"loadl_s16", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs16 = npyv_loadl_s16( arg.data.qs16 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs16 }; return simd_arg_to_obj(&ret); }
2294
2295#line 46
2296// special definition due to the nature of store
2297static PyObject *
2298simd__intrin_store_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2299{
2300 simd_arg seq_arg = {.dtype = simd_data_qs16};
2301 simd_arg vec_arg = {.dtype = simd_data_vs16};
2302 if (!PyArg_ParseTuple(
2303 args, "O&O&:store_s16",
2304 simd_arg_converter, &seq_arg,
2305 simd_arg_converter, &vec_arg
2306 )) {
2307 return NULL((void*)0);
2308 }
2309 npyv_store_s16(seq_arg.data.qs16, vec_arg.data.vs16);
2310 // write-back
2311 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs16, simd_data_qs16)) {
2312 simd_arg_free(&seq_arg);
2313 return NULL((void*)0);
2314 }
2315 simd_arg_free(&seq_arg);
2316 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
2317}
2318
2319#line 46
2320// special definition due to the nature of storea
2321static PyObject *
2322simd__intrin_storea_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2323{
2324 simd_arg seq_arg = {.dtype = simd_data_qs16};
2325 simd_arg vec_arg = {.dtype = simd_data_vs16};
2326 if (!PyArg_ParseTuple(
2327 args, "O&O&:storea_s16",
2328 simd_arg_converter, &seq_arg,
2329 simd_arg_converter, &vec_arg
2330 )) {
2331 return NULL((void*)0);
2332 }
2333 npyv_storea_s16(seq_arg.data.qs16, vec_arg.data.vs16);
2334 // write-back
2335 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs16, simd_data_qs16)) {
2336 simd_arg_free(&seq_arg);
2337 return NULL((void*)0);
2338 }
2339 simd_arg_free(&seq_arg);
2340 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
2341}
2342
2343#line 46
2344// special definition due to the nature of stores
2345static PyObject *
2346simd__intrin_stores_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2347{
2348 simd_arg seq_arg = {.dtype = simd_data_qs16};
2349 simd_arg vec_arg = {.dtype = simd_data_vs16};
2350 if (!PyArg_ParseTuple(
2351 args, "O&O&:stores_s16",
2352 simd_arg_converter, &seq_arg,
2353 simd_arg_converter, &vec_arg
2354 )) {
2355 return NULL((void*)0);
2356 }
2357 npyv_stores_s16(seq_arg.data.qs16, vec_arg.data.vs16);
2358 // write-back
2359 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs16, simd_data_qs16)) {
2360 simd_arg_free(&seq_arg);
2361 return NULL((void*)0);
2362 }
2363 simd_arg_free(&seq_arg);
2364 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
2365}
2366
2367#line 46
2368// special definition due to the nature of storel
2369static PyObject *
2370simd__intrin_storel_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2371{
2372 simd_arg seq_arg = {.dtype = simd_data_qs16};
2373 simd_arg vec_arg = {.dtype = simd_data_vs16};
2374 if (!PyArg_ParseTuple(
2375 args, "O&O&:storel_s16",
2376 simd_arg_converter, &seq_arg,
2377 simd_arg_converter, &vec_arg
2378 )) {
2379 return NULL((void*)0);
2380 }
2381 npyv_storel_s16(seq_arg.data.qs16, vec_arg.data.vs16);
2382 // write-back
2383 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs16, simd_data_qs16)) {
2384 simd_arg_free(&seq_arg);
2385 return NULL((void*)0);
2386 }
2387 simd_arg_free(&seq_arg);
2388 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
2389}
2390
2391#line 46
2392// special definition due to the nature of storeh
2393static PyObject *
2394simd__intrin_storeh_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2395{
2396 simd_arg seq_arg = {.dtype = simd_data_qs16};
2397 simd_arg vec_arg = {.dtype = simd_data_vs16};
2398 if (!PyArg_ParseTuple(
2399 args, "O&O&:storeh_s16",
2400 simd_arg_converter, &seq_arg,
2401 simd_arg_converter, &vec_arg
2402 )) {
2403 return NULL((void*)0);
2404 }
2405 npyv_storeh_s16(seq_arg.data.qs16, vec_arg.data.vs16);
2406 // write-back
2407 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs16, simd_data_qs16)) {
2408 simd_arg_free(&seq_arg);
2409 return NULL((void*)0);
2410 }
2411 simd_arg_free(&seq_arg);
2412 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
2413}
2414
2415
2416/****************************************
2417 * Non-contiguous/Partial Memory access
2418 ****************************************/
2419#if 0
2420// Partial Load
2421SIMD_IMPL_INTRIN_3(load_till_s16, vs16, qs16, u32, s16)static PyObject *simd__intrin_load_till_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qs16}; simd_arg arg2 = {.dtype = simd_data_u32
}; simd_arg arg3 = {.dtype = simd_data_s16}; if (!PyArg_ParseTuple
( args, "O&O&O&:""load_till_s16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs16 = npyv_load_till_s16
( arg1.data.qs16, arg2.data.u32, arg3.data.s16 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs16 }; return
simd_arg_to_obj(&ret); }
2422SIMD_IMPL_INTRIN_2(load_tillz_s16, vs16, qs16, u32)static PyObject *simd__intrin_load_tillz_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qs16}; simd_arg arg2 = {.dtype = simd_data_u32
}; if (!PyArg_ParseTuple( args, "O&O&:""load_tillz_s16"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs16 = npyv_load_tillz_s16
( arg1.data.qs16, arg2.data.u32 )}; simd_arg_free(&arg1);
simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs16 }; return simd_arg_to_obj(&ret); }
2423
2424// Partial Store
2425static PyObject *
2426simd__intrin_store_till_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2427{
2428 simd_arg seq_arg = {.dtype = simd_data_qs16};
2429 simd_arg nlane_arg = {.dtype = simd_data_u32};
2430 simd_arg vec_arg = {.dtype = simd_data_vs16};
2431 if (!PyArg_ParseTuple(
2432 args, "O&O&O&:store_till_s16",
2433 simd_arg_converter, &seq_arg,
2434 simd_arg_converter, &nlane_arg,
2435 simd_arg_converter, &vec_arg
2436 )) {
2437 return NULL((void*)0);
2438 }
2439 npyv_store_till_s16(
2440 seq_arg.data.qs16, nlane_arg.data.u32, vec_arg.data.vs16
2441 );
2442 // write-back
2443 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs16, simd_data_qs16)) {
2444 simd_arg_free(&seq_arg);
2445 return NULL((void*)0);
2446 }
2447 simd_arg_free(&seq_arg);
2448 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
2449}
2450
2451// Non-contiguous Load
2452#line 112
2453static PyObject *
2454simd__intrin_loadn_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2455{
2456 simd_arg seq_arg = {.dtype = simd_data_qs16};
2457 simd_arg stride_arg = {.dtype = simd_data_s64};
2458#if 0
2459 simd_arg nlane_arg = {.dtype = simd_data_u32};
2460#endif // till
2461#if 0
2462 simd_arg fill_arg = {.dtype = simd_data_s16};
2463#endif
2464 if (!PyArg_ParseTuple(
2465 args, "O&O&:loadn_s16",
2466 simd_arg_converter, &seq_arg,
2467 simd_arg_converter, &stride_arg
2468#if 0
2469 ,simd_arg_converter, &nlane_arg
2470#endif
2471#if 0
2472 ,simd_arg_converter, &fill_arg
2473#endif
2474 )) {
2475 return NULL((void*)0);
2476 }
2477 npyv_lanetype_s16 *seq_ptr = seq_arg.data.qs16;
2478 npy_intp stride = (npy_intp)stride_arg.data.s64;
2479 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
2480 Py_ssize_t min_seq_len = stride * npyv_nlanes_s168;
2481 if (stride < 0) {
2482 seq_ptr += cur_seq_len -1;
2483 min_seq_len = -min_seq_len;
2484 }
2485 if (cur_seq_len < min_seq_len) {
2486 PyErr_Format(PyExc_ValueError,
2487 "loadn_s16(), according to provided stride %d, the "
2488 "minimum acceptable size of the required sequence is %d, given(%d)",
2489 stride, min_seq_len, cur_seq_len
2490 );
2491 goto err;
2492 }
2493 npyv_s16 rvec = npyv_loadn_s16(
2494 seq_ptr, stride
2495 #if 0
2496 , nlane_arg.data.u32
2497 #endif
2498 #if 0
2499 , fill_arg.data.s16
2500 #endif
2501 );
2502 simd_arg ret = {
2503 .dtype = simd_data_vs16, .data = {.vs16=rvec}
2504 };
2505 simd_arg_free(&seq_arg);
2506 return simd_arg_to_obj(&ret);
2507err:
2508 simd_arg_free(&seq_arg);
2509 return NULL((void*)0);
2510}
2511
2512#line 112
2513static PyObject *
2514simd__intrin_loadn_till_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2515{
2516 simd_arg seq_arg = {.dtype = simd_data_qs16};
2517 simd_arg stride_arg = {.dtype = simd_data_s64};
2518#if 1
2519 simd_arg nlane_arg = {.dtype = simd_data_u32};
2520#endif // till
2521#if 1
2522 simd_arg fill_arg = {.dtype = simd_data_s16};
2523#endif
2524 if (!PyArg_ParseTuple(
2525 args, "O&O&O&O&:loadn_till_s16",
2526 simd_arg_converter, &seq_arg,
2527 simd_arg_converter, &stride_arg
2528#if 1
2529 ,simd_arg_converter, &nlane_arg
2530#endif
2531#if 1
2532 ,simd_arg_converter, &fill_arg
2533#endif
2534 )) {
2535 return NULL((void*)0);
2536 }
2537 npyv_lanetype_s16 *seq_ptr = seq_arg.data.qs16;
2538 npy_intp stride = (npy_intp)stride_arg.data.s64;
2539 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
2540 Py_ssize_t min_seq_len = stride * npyv_nlanes_s168;
2541 if (stride < 0) {
2542 seq_ptr += cur_seq_len -1;
2543 min_seq_len = -min_seq_len;
2544 }
2545 if (cur_seq_len < min_seq_len) {
2546 PyErr_Format(PyExc_ValueError,
2547 "loadn_till_s16(), according to provided stride %d, the "
2548 "minimum acceptable size of the required sequence is %d, given(%d)",
2549 stride, min_seq_len, cur_seq_len
2550 );
2551 goto err;
2552 }
2553 npyv_s16 rvec = npyv_loadn_till_s16(
2554 seq_ptr, stride
2555 #if 1
2556 , nlane_arg.data.u32
2557 #endif
2558 #if 1
2559 , fill_arg.data.s16
2560 #endif
2561 );
2562 simd_arg ret = {
2563 .dtype = simd_data_vs16, .data = {.vs16=rvec}
2564 };
2565 simd_arg_free(&seq_arg);
2566 return simd_arg_to_obj(&ret);
2567err:
2568 simd_arg_free(&seq_arg);
2569 return NULL((void*)0);
2570}
2571
2572#line 112
2573static PyObject *
2574simd__intrin_loadn_tillz_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2575{
2576 simd_arg seq_arg = {.dtype = simd_data_qs16};
2577 simd_arg stride_arg = {.dtype = simd_data_s64};
2578#if 1
2579 simd_arg nlane_arg = {.dtype = simd_data_u32};
2580#endif // till
2581#if 0
2582 simd_arg fill_arg = {.dtype = simd_data_s16};
2583#endif
2584 if (!PyArg_ParseTuple(
2585 args, "O&O&O&:loadn_tillz_s16",
2586 simd_arg_converter, &seq_arg,
2587 simd_arg_converter, &stride_arg
2588#if 1
2589 ,simd_arg_converter, &nlane_arg
2590#endif
2591#if 0
2592 ,simd_arg_converter, &fill_arg
2593#endif
2594 )) {
2595 return NULL((void*)0);
2596 }
2597 npyv_lanetype_s16 *seq_ptr = seq_arg.data.qs16;
2598 npy_intp stride = (npy_intp)stride_arg.data.s64;
2599 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
2600 Py_ssize_t min_seq_len = stride * npyv_nlanes_s168;
2601 if (stride < 0) {
2602 seq_ptr += cur_seq_len -1;
2603 min_seq_len = -min_seq_len;
2604 }
2605 if (cur_seq_len < min_seq_len) {
2606 PyErr_Format(PyExc_ValueError,
2607 "loadn_tillz_s16(), according to provided stride %d, the "
2608 "minimum acceptable size of the required sequence is %d, given(%d)",
2609 stride, min_seq_len, cur_seq_len
2610 );
2611 goto err;
2612 }
2613 npyv_s16 rvec = npyv_loadn_tillz_s16(
2614 seq_ptr, stride
2615 #if 1
2616 , nlane_arg.data.u32
2617 #endif
2618 #if 0
2619 , fill_arg.data.s16
2620 #endif
2621 );
2622 simd_arg ret = {
2623 .dtype = simd_data_vs16, .data = {.vs16=rvec}
2624 };
2625 simd_arg_free(&seq_arg);
2626 return simd_arg_to_obj(&ret);
2627err:
2628 simd_arg_free(&seq_arg);
2629 return NULL((void*)0);
2630}
2631
2632
2633// Non-contiguous Store
2634#line 178
2635static PyObject *
2636simd__intrin_storen_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2637{
2638 simd_arg seq_arg = {.dtype = simd_data_qs16};
2639 simd_arg stride_arg = {.dtype = simd_data_s64};
2640 simd_arg vec_arg = {.dtype = simd_data_vs16};
2641#if 0
2642 simd_arg nlane_arg = {.dtype = simd_data_u32};
2643#endif
2644 if (!PyArg_ParseTuple(
2645 args, "O&O&O&:storen_s16",
2646 simd_arg_converter, &seq_arg,
2647 simd_arg_converter, &stride_arg
2648#if 0
2649 ,simd_arg_converter, &nlane_arg
2650#endif
2651 ,simd_arg_converter, &vec_arg
2652 )) {
2653 return NULL((void*)0);
2654 }
2655 npyv_lanetype_s16 *seq_ptr = seq_arg.data.qs16;
2656 npy_intp stride = (npy_intp)stride_arg.data.s64;
2657 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
2658 Py_ssize_t min_seq_len = stride * npyv_nlanes_s168;
2659 if (stride < 0) {
2660 seq_ptr += cur_seq_len -1;
2661 min_seq_len = -min_seq_len;
2662 }
2663 // overflow guard
2664 if (cur_seq_len < min_seq_len) {
2665 PyErr_Format(PyExc_ValueError,
2666 "storen_s16(), according to provided stride %d, the"
2667 "minimum acceptable size of the required sequence is %d, given(%d)",
2668 stride, min_seq_len, cur_seq_len
2669 );
2670 goto err;
2671 }
2672 npyv_storen_s16(
2673 seq_ptr, stride
2674 #if 0
2675 ,nlane_arg.data.u32
2676 #endif
2677 ,vec_arg.data.vs16
2678 );
2679 // write-back
2680 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs16, simd_data_qs16)) {
2681 goto err;
2682 }
2683 simd_arg_free(&seq_arg);
2684 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
2685err:
2686 simd_arg_free(&seq_arg);
2687 return NULL((void*)0);
2688}
2689
2690#line 178
2691static PyObject *
2692simd__intrin_storen_till_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2693{
2694 simd_arg seq_arg = {.dtype = simd_data_qs16};
2695 simd_arg stride_arg = {.dtype = simd_data_s64};
2696 simd_arg vec_arg = {.dtype = simd_data_vs16};
2697#if 1
2698 simd_arg nlane_arg = {.dtype = simd_data_u32};
2699#endif
2700 if (!PyArg_ParseTuple(
2701 args, "O&O&O&O&:storen_s16",
2702 simd_arg_converter, &seq_arg,
2703 simd_arg_converter, &stride_arg
2704#if 1
2705 ,simd_arg_converter, &nlane_arg
2706#endif
2707 ,simd_arg_converter, &vec_arg
2708 )) {
2709 return NULL((void*)0);
2710 }
2711 npyv_lanetype_s16 *seq_ptr = seq_arg.data.qs16;
2712 npy_intp stride = (npy_intp)stride_arg.data.s64;
2713 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
2714 Py_ssize_t min_seq_len = stride * npyv_nlanes_s168;
2715 if (stride < 0) {
2716 seq_ptr += cur_seq_len -1;
2717 min_seq_len = -min_seq_len;
2718 }
2719 // overflow guard
2720 if (cur_seq_len < min_seq_len) {
2721 PyErr_Format(PyExc_ValueError,
2722 "storen_till_s16(), according to provided stride %d, the"
2723 "minimum acceptable size of the required sequence is %d, given(%d)",
2724 stride, min_seq_len, cur_seq_len
2725 );
2726 goto err;
2727 }
2728 npyv_storen_till_s16(
2729 seq_ptr, stride
2730 #if 1
2731 ,nlane_arg.data.u32
2732 #endif
2733 ,vec_arg.data.vs16
2734 );
2735 // write-back
2736 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs16, simd_data_qs16)) {
2737 goto err;
2738 }
2739 simd_arg_free(&seq_arg);
2740 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
2741err:
2742 simd_arg_free(&seq_arg);
2743 return NULL((void*)0);
2744}
2745
2746#endif // 0
2747
2748/***************************
2749 * Misc
2750 ***************************/
2751SIMD_IMPL_INTRIN_0(zero_s16, vs16)static PyObject *simd__intrin_zero_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "zero_s16") ) return ((void*)0); simd_arg a = { .
dtype = simd_data_vs16, .data = {.vs16 = _mm_setzero_si128()}
, }; return simd_arg_to_obj(&a); }
2752SIMD_IMPL_INTRIN_1(setall_s16, vs16, s16)static PyObject *simd__intrin_setall_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_s16}; if (!PyArg_ParseTuple( args, "O&:"
"setall_s16", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vs16 = _mm_set1_epi16((short)(arg.data
.s16))}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs16 }; return simd_arg_to_obj(&ret)
; }
2753SIMD_IMPL_INTRIN_3(select_s16, vs16, vb16, vs16, vs16)static PyObject *simd__intrin_select_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb16}; simd_arg arg2 = {.dtype = simd_data_vs16
}; simd_arg arg3 = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&O&O&:""select_s16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs16 = npyv_select_u8
( arg1.data.vb16, arg2.data.vs16, arg3.data.vs16 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs16 }; return
simd_arg_to_obj(&ret); }
2754
2755#line 246
2756#if 1
2757SIMD_IMPL_INTRIN_1(reinterpret_u8_s16, vu8, vs16)static PyObject *simd__intrin_reinterpret_u8_s16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u8_s16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu8 = arg.data.vs16
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu8 }; return simd_arg_to_obj(&ret); }
2758#endif // simd_sup2
2759
2760#line 246
2761#if 1
2762SIMD_IMPL_INTRIN_1(reinterpret_s8_s16, vs8, vs16)static PyObject *simd__intrin_reinterpret_s8_s16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s8_s16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs8 = arg.data.vs16
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs8 }; return simd_arg_to_obj(&ret); }
2763#endif // simd_sup2
2764
2765#line 246
2766#if 1
2767SIMD_IMPL_INTRIN_1(reinterpret_u16_s16, vu16, vs16)static PyObject *simd__intrin_reinterpret_u16_s16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u16_s16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu16 = arg.data.
vs16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&ret)
; }
2768#endif // simd_sup2
2769
2770#line 246
2771#if 1
2772SIMD_IMPL_INTRIN_1(reinterpret_s16_s16, vs16, vs16)static PyObject *simd__intrin_reinterpret_s16_s16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s16_s16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs16 = arg.data.
vs16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs16 }; return simd_arg_to_obj(&ret)
; }
2773#endif // simd_sup2
2774
2775#line 246
2776#if 1
2777SIMD_IMPL_INTRIN_1(reinterpret_u32_s16, vu32, vs16)static PyObject *simd__intrin_reinterpret_u32_s16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u32_s16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu32 = arg.data.
vs16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu32 }; return simd_arg_to_obj(&ret)
; }
2778#endif // simd_sup2
2779
2780#line 246
2781#if 1
2782SIMD_IMPL_INTRIN_1(reinterpret_s32_s16, vs32, vs16)static PyObject *simd__intrin_reinterpret_s32_s16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s32_s16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs32 = arg.data.
vs16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret)
; }
2783#endif // simd_sup2
2784
2785#line 246
2786#if 1
2787SIMD_IMPL_INTRIN_1(reinterpret_u64_s16, vu64, vs16)static PyObject *simd__intrin_reinterpret_u64_s16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u64_s16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu64 = arg.data.
vs16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&ret)
; }
2788#endif // simd_sup2
2789
2790#line 246
2791#if 1
2792SIMD_IMPL_INTRIN_1(reinterpret_s64_s16, vs64, vs16)static PyObject *simd__intrin_reinterpret_s64_s16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s64_s16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs64 = arg.data.
vs16}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&ret)
; }
2793#endif // simd_sup2
2794
2795#line 246
2796#if 1
2797SIMD_IMPL_INTRIN_1(reinterpret_f32_s16, vf32, vs16)static PyObject *simd__intrin_reinterpret_f32_s16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f32_s16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf32 = _mm_castsi128_ps
( arg.data.vs16 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf32 }; return simd_arg_to_obj
(&ret); }
2798#endif // simd_sup2
2799
2800#line 246
2801#if NPY_SIMD_F641
2802SIMD_IMPL_INTRIN_1(reinterpret_f64_s16, vf64, vs16)static PyObject *simd__intrin_reinterpret_f64_s16 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f64_s16", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf64 = _mm_castsi128_pd
( arg.data.vs16 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf64 }; return simd_arg_to_obj
(&ret); }
2803#endif // simd_sup2
2804
2805
2806/**
2807 * special definition due to the nature of intrinsics
2808 * npyv_setf_s16 and npy_set_s16.
2809*/
2810#line 258
2811static PyObject *
2812simd__intrin_setf_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2813{
2814 npyv_lanetype_s16 *data = simd_sequence_from_iterable(args, simd_data_qs16, npyv_nlanes_s168);
2815 if (data == NULL((void*)0)) {
2816 return NULL((void*)0);
2817 }
2818 simd_data r = {.vs16 = npyv_setf_s16(npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2819 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2820 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2821 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2822 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2823 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2824 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2825 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2826 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2827 data[64] // for setfnpyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
2828 )npyv__setr_epi16((short)(data[1]), (short)(data[2]), (short)(
data[3]), (short)(data[4]), (short)(data[5]), (short)(data[6]
), (short)(data[7]), (short)(data[8]))
};
2829 simd_sequence_free(data);
2830 return (PyObject*)PySIMDVector_FromData(r, simd_data_vs16);
2831}
2832
2833#line 258
2834static PyObject *
2835simd__intrin_set_s16(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
2836{
2837 npyv_lanetype_s16 *data = simd_sequence_from_iterable(args, simd_data_qs16, npyv_nlanes_s168);
2838 if (data == NULL((void*)0)) {
2839 return NULL((void*)0);
2840 }
2841 simd_data r = {.vs16 = npyv_set_s16(npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2842 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2843 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2844 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2845 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2846 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2847 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2848 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2849 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2850 data[64] // for setfnpyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
2851 )npyv__setr_epi16((short)(data[0]), (short)(data[1]), (short)(
data[2]), (short)(data[3]), (short)(data[4]), (short)(data[5]
), (short)(data[6]), (short)(data[7]))
};
2852 simd_sequence_free(data);
2853 return (PyObject*)PySIMDVector_FromData(r, simd_data_vs16);
2854}
2855
2856
2857/***************************
2858 * Reorder
2859 ***************************/
2860#line 287
2861SIMD_IMPL_INTRIN_2(combinel_s16, vs16, vs16, vs16)static PyObject *simd__intrin_combinel_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs16}; simd_arg arg2 = {.dtype = simd_data_vs16
}; if (!PyArg_ParseTuple( args, "O&O&:""combinel_s16"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs16 = _mm_unpacklo_epi64
( arg1.data.vs16, arg2.data.vs16 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs16 }; return simd_arg_to_obj(&ret); }
2862
2863#line 287
2864SIMD_IMPL_INTRIN_2(combineh_s16, vs16, vs16, vs16)static PyObject *simd__intrin_combineh_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs16}; simd_arg arg2 = {.dtype = simd_data_vs16
}; if (!PyArg_ParseTuple( args, "O&O&:""combineh_s16"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs16 = _mm_unpackhi_epi64
( arg1.data.vs16, arg2.data.vs16 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs16 }; return simd_arg_to_obj(&ret); }
2865
2866
2867#line 293
2868SIMD_IMPL_INTRIN_2(combine_s16, vs16x2, vs16, vs16)static PyObject *simd__intrin_combine_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs16}; simd_arg arg2 = {.dtype = simd_data_vs16
}; if (!PyArg_ParseTuple( args, "O&O&:""combine_s16",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs16x2 = npyv__combine
( arg1.data.vs16, arg2.data.vs16 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs16x2 }; return simd_arg_to_obj(&ret); }
2869
2870#line 293
2871SIMD_IMPL_INTRIN_2(zip_s16, vs16x2, vs16, vs16)static PyObject *simd__intrin_zip_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs16}; simd_arg arg2 = {.dtype = simd_data_vs16
}; if (!PyArg_ParseTuple( args, "O&O&:""zip_s16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs16x2 = npyv_zip_s16( arg1.data.vs16,
arg2.data.vs16 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs16x2
}; return simd_arg_to_obj(&ret); }
2872
2873
2874#if 1
2875SIMD_IMPL_INTRIN_1(rev64_s16, vs16, vs16)static PyObject *simd__intrin_rev64_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs16}; if (!PyArg_ParseTuple( args, "O&:"
"rev64_s16", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs16 = npyv_rev64_u16( arg.data.vs16 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs16 }; return simd_arg_to_obj(&ret); }
2876#endif
2877
2878/***************************
2879 * Operators
2880 ***************************/
2881#if 15 > 0
2882SIMD_IMPL_INTRIN_2(shl_s16, vs16, vs16, u8)static PyObject *simd__intrin_shl_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs16}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shl_s16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs16 = _mm_sll_epi16(arg1.data.vs16, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs16
}; return simd_arg_to_obj(&ret); }
2883SIMD_IMPL_INTRIN_2(shr_s16, vs16, vs16, u8)static PyObject *simd__intrin_shr_s16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs16}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shr_s16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs16 = _mm_sra_epi16(arg1.data.vs16, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs16
}; return simd_arg_to_obj(&ret); }
2884// immediate constant
// shli_s16: left shift by an immediate (expansion of
// SIMD_IMPL_INTRIN_2IMM(shli_s16, vs16, vs16, 15)). Hardware immediates must
// be compile-time constants, so the macro builds a 0..15 ternary ladder that
// selects the matching _mm_slli_epi16 call; any other count yields the
// zero-initialized default vector.
static PyObject *simd__intrin_shli_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_u8};
    if (!PyArg_ParseTuple( args, "O&O&:""shli_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.u64 = 0};
    data.vs16 = 0  == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 0)
              : 1  == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 1)
              : 2  == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 2)
              : 3  == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 3)
              : 4  == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 4)
              : 5  == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 5)
              : 6  == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 6)
              : 7  == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 7)
              : 8  == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 8)
              : 9  == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 9)
              : 10 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 10)
              : 11 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 11)
              : 12 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 12)
              : 13 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 13)
              : 14 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 14)
              : 15 == arg2.data.u8 ? _mm_slli_epi16(arg1.data.vs16, 15)
              : data.vs16;
    // only arg1 is released; arg2 is a plain u8 scalar — presumably
    // simd_arg_free is a no-op for scalars (confirm against its definition)
    simd_arg_free(&arg1);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
// shri_s16: arithmetic right shift by an immediate (expansion of
// SIMD_IMPL_INTRIN_2IMM(shri_s16, vs16, vs16, 16)). Same ternary-ladder
// technique as shli_s16, but the valid range is 1..16 and counts outside it
// fall through to the zero-initialized default vector.
static PyObject *simd__intrin_shri_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_u8};
    if (!PyArg_ParseTuple( args, "O&O&:""shri_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.u64 = 0};
    data.vs16 = 1  == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 1)
              : 2  == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 2)
              : 3  == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 3)
              : 4  == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 4)
              : 5  == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 5)
              : 6  == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 6)
              : 7  == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 7)
              : 8  == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 8)
              : 9  == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 9)
              : 10 == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 10)
              : 11 == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 11)
              : 12 == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 12)
              : 13 == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 13)
              : 14 == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 14)
              : 15 == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 15)
              : 16 == arg2.data.u8 ? _mm_srai_epi16(arg1.data.vs16, 16)
              : data.vs16;
    // arg2 is a scalar u8; only the (possibly sequence-backed) arg1 is freed
    simd_arg_free(&arg1);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2887#endif // shl_imm
2888
2889#line 314
// and_s16: lanewise bitwise AND (expansion of SIMD_IMPL_INTRIN_2(and_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_and_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""and_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = _mm_and_si128( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2891
2892#line 314
// or_s16: lanewise bitwise OR (expansion of SIMD_IMPL_INTRIN_2(or_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_or_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""or_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = _mm_or_si128( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2894
2895#line 314
// xor_s16: lanewise bitwise XOR (expansion of SIMD_IMPL_INTRIN_2(xor_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_xor_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""xor_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = _mm_xor_si128( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2897
2898
// not_s16: bitwise NOT implemented as XOR with all-ones
// (expansion of SIMD_IMPL_INTRIN_1(not_s16, vs16, vs16)).
static PyObject *simd__intrin_not_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&:""not_s16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vs16 = _mm_xor_si128(arg.data.vs16, _mm_set1_epi32(-1))};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2900
2901#line 322
// cmpeq_s16: lanewise equality, returns a vb16 mask
// (expansion of SIMD_IMPL_INTRIN_2(cmpeq_s16, vb16, vs16, vs16)).
static PyObject *simd__intrin_cmpeq_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""cmpeq_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vb16 = _mm_cmpeq_epi16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vb16 };
    return simd_arg_to_obj(&ret);
}
2903
2904#line 322
// cmpneq_s16: lanewise inequality — SSE2 has no "not equal", so the mask is
// cmpeq XOR all-ones (expansion of SIMD_IMPL_INTRIN_2(cmpneq_s16, vb16, vs16, vs16)).
static PyObject *simd__intrin_cmpneq_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""cmpneq_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vb16 = _mm_xor_si128(_mm_cmpeq_epi16(arg1.data.vs16, arg2.data.vs16), _mm_set1_epi32(-1))};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vb16 };
    return simd_arg_to_obj(&ret);
}
2906
2907#line 322
// cmpgt_s16: lanewise signed greater-than mask
// (expansion of SIMD_IMPL_INTRIN_2(cmpgt_s16, vb16, vs16, vs16)).
static PyObject *simd__intrin_cmpgt_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""cmpgt_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vb16 = _mm_cmpgt_epi16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vb16 };
    return simd_arg_to_obj(&ret);
}
2909
2910#line 322
// cmpge_s16: a >= b computed as NOT(b > a), since SSE2 lacks a direct >= compare
// (expansion of SIMD_IMPL_INTRIN_2(cmpge_s16, vb16, vs16, vs16)).
static PyObject *simd__intrin_cmpge_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""cmpge_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vb16 = _mm_xor_si128(_mm_cmpgt_epi16(arg2.data.vs16, arg1.data.vs16), _mm_set1_epi32(-1))};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vb16 };
    return simd_arg_to_obj(&ret);
}
2912
2913#line 322
// cmplt_s16: a < b computed with operands swapped into cmpgt
// (expansion of SIMD_IMPL_INTRIN_2(cmplt_s16, vb16, vs16, vs16)).
static PyObject *simd__intrin_cmplt_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""cmplt_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vb16 = _mm_cmpgt_epi16(arg2.data.vs16, arg1.data.vs16)};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vb16 };
    return simd_arg_to_obj(&ret);
}
2915
2916#line 322
// cmple_s16: a <= b computed as NOT(a > b)
// (expansion of SIMD_IMPL_INTRIN_2(cmple_s16, vb16, vs16, vs16)).
static PyObject *simd__intrin_cmple_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""cmple_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vb16 = _mm_xor_si128(_mm_cmpgt_epi16(arg1.data.vs16, arg2.data.vs16), _mm_set1_epi32(-1))};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vb16 };
    return simd_arg_to_obj(&ret);
}
2918
2919
2920/***************************
2921 * Conversion
2922 ***************************/
// cvt_s16_b16: reinterpret a boolean mask as a signed-16 vector; on SSE both
// share the same register type, so the conversion is a plain union copy
// (expansion of SIMD_IMPL_INTRIN_1(cvt_s16_b16, vs16, vb16)).
static PyObject *simd__intrin_cvt_s16_b16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_vb16};
    if (!PyArg_ParseTuple( args, "O&:""cvt_s16_b16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vs16 = arg.data.vb16};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
// cvt_b16_s16: reinterpret a signed-16 vector as a boolean mask (inverse of
// cvt_s16_b16; expansion of SIMD_IMPL_INTRIN_1(cvt_b16_s16, vb16, vs16)).
static PyObject *simd__intrin_cvt_b16_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&:""cvt_b16_s16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vb16 = arg.data.vs16};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vb16 };
    return simd_arg_to_obj(&ret);
}
2925#if 0
// expand_s16_s16: widen s16 lanes into a pair of vectors. Compiled out by the
// surrounding #if 0 — npyv_expand_s16_s16 is unsupported on this target
// (expansion of SIMD_IMPL_INTRIN_1(expand_s16_s16, vs16x2, vs16)).
static PyObject *simd__intrin_expand_s16_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&:""expand_s16_s16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vs16x2 = npyv_expand_s16_s16( arg.data.vs16 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16x2 };
    return simd_arg_to_obj(&ret);
}
2927#endif // expand_sup
2928/***************************
2929 * Arithmetic
2930 ***************************/
2931#line 339
// add_s16: lanewise wrapping addition (expansion of SIMD_IMPL_INTRIN_2(add_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_add_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""add_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = _mm_add_epi16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2933
2934#line 339
// sub_s16: lanewise wrapping subtraction (expansion of SIMD_IMPL_INTRIN_2(sub_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_sub_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""sub_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = _mm_sub_epi16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2936
2937
2938#if 1
2939#line 346
// adds_s16: lanewise saturating addition (expansion of SIMD_IMPL_INTRIN_2(adds_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_adds_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""adds_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = _mm_adds_epi16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2941
2942#line 346
// subs_s16: lanewise saturating subtraction (expansion of SIMD_IMPL_INTRIN_2(subs_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_subs_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""subs_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = _mm_subs_epi16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2944
2945#endif // sat_sup
2946
2947#if 1
// mul_s16: lanewise multiply keeping the low 16 bits of each product
// (expansion of SIMD_IMPL_INTRIN_2(mul_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_mul_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""mul_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = _mm_mullo_epi16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2949#endif // mul_sup
2950
2951#if 0
// div_s16: lanewise division. Compiled out by the surrounding #if 0 — the
// target has no npyv_div_s16 (expansion of SIMD_IMPL_INTRIN_2(div_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_div_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""div_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = npyv_div_s16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2953#endif // div_sup
2954
2955#if 1
// divisor_s16: precompute the multiplicative-inverse parameters for fast
// integer division by a scalar; returns a 3-vector tuple consumed by divc_s16
// (expansion of SIMD_IMPL_INTRIN_1(divisor_s16, vs16x3, s16)).
static PyObject *simd__intrin_divisor_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_s16};
    if (!PyArg_ParseTuple( args, "O&:""divisor_s16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vs16x3 = npyv_divisor_s16( arg.data.s16 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16x3 };
    return simd_arg_to_obj(&ret);
}
// divc_s16: divide s16 lanes by a precomputed divisor (the vs16x3 produced by
// divisor_s16; expansion of SIMD_IMPL_INTRIN_2(divc_s16, vs16, vs16, vs16x3)).
static PyObject *simd__intrin_divc_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16x3};
    if (!PyArg_ParseTuple( args, "O&O&:""divc_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = npyv_divc_s16( arg1.data.vs16, arg2.data.vs16x3 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2958#endif // intdiv_sup
2959
2960#if 0
2961#line 367
// muladd_s16: fused multiply-add a*b + c. Compiled out by the surrounding
// #if 0 (fused ops unsupported here; expansion of SIMD_IMPL_INTRIN_3).
static PyObject *simd__intrin_muladd_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    simd_arg arg3 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&O&:""muladd_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2, simd_arg_converter, &arg3 )) return ((void*)0);
    simd_data data = {.vs16 = npyv_muladd_s16( arg1.data.vs16, arg2.data.vs16, arg3.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg_free(&arg3);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2963
2964#line 367
// mulsub_s16: fused multiply-subtract a*b - c. Compiled out by the
// surrounding #if 0 (expansion of SIMD_IMPL_INTRIN_3).
static PyObject *simd__intrin_mulsub_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    simd_arg arg3 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&O&:""mulsub_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2, simd_arg_converter, &arg3 )) return ((void*)0);
    simd_data data = {.vs16 = npyv_mulsub_s16( arg1.data.vs16, arg2.data.vs16, arg3.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg_free(&arg3);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2966
2967#line 367
// nmuladd_s16: negated fused multiply-add -(a*b) + c. Compiled out by the
// surrounding #if 0 (expansion of SIMD_IMPL_INTRIN_3).
static PyObject *simd__intrin_nmuladd_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    simd_arg arg3 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&O&:""nmuladd_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2, simd_arg_converter, &arg3 )) return ((void*)0);
    simd_data data = {.vs16 = npyv_nmuladd_s16( arg1.data.vs16, arg2.data.vs16, arg3.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg_free(&arg3);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2969
2970#line 367
// nmulsub_s16: negated fused multiply-subtract -(a*b) - c. Compiled out by
// the surrounding #if 0 (expansion of SIMD_IMPL_INTRIN_3).
static PyObject *simd__intrin_nmulsub_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    simd_arg arg3 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&O&:""nmulsub_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2, simd_arg_converter, &arg3 )) return ((void*)0);
    simd_data data = {.vs16 = npyv_nmulsub_s16( arg1.data.vs16, arg2.data.vs16, arg3.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg_free(&arg3);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2972
2973#endif // fused_sup
2974
2975#if 0
// sum_s16: horizontal sum of all lanes to a scalar. Compiled out by the
// surrounding #if 0 (expansion of SIMD_IMPL_INTRIN_1(sum_s16, s16, vs16)).
static PyObject *simd__intrin_sum_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&:""sum_s16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.s16 = npyv_sum_s16( arg.data.vs16 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_s16 };
    return simd_arg_to_obj(&ret);
}
2977#endif // sum_sup
2978
2979#if 0
// sumup_s16: widening horizontal sum. Compiled out by the surrounding #if 0
// (expansion of SIMD_IMPL_INTRIN_1(sumup_s16, s16, vs16)).
static PyObject *simd__intrin_sumup_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&:""sumup_s16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.s16 = npyv_sumup_s16( arg.data.vs16 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_s16 };
    return simd_arg_to_obj(&ret);
}
2981#endif // sumup_sup
2982
2983/***************************
2984 * Math
2985 ***************************/
2986#if 0
2987#line 386
// sqrt_s16: lanewise square root. Compiled out by the surrounding #if 0 —
// integer sqrt is not provided (expansion of SIMD_IMPL_INTRIN_1(sqrt_s16, vs16, vs16)).
static PyObject *simd__intrin_sqrt_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&:""sqrt_s16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vs16 = npyv_sqrt_s16( arg.data.vs16 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2989
2990#line 386
// recip_s16: lanewise reciprocal. Compiled out by the surrounding #if 0
// (expansion of SIMD_IMPL_INTRIN_1(recip_s16, vs16, vs16)).
static PyObject *simd__intrin_recip_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&:""recip_s16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vs16 = npyv_recip_s16( arg.data.vs16 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2992
2993#line 386
// abs_s16: lanewise absolute value. Compiled out by the surrounding #if 0
// (expansion of SIMD_IMPL_INTRIN_1(abs_s16, vs16, vs16)).
static PyObject *simd__intrin_abs_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&:""abs_s16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vs16 = npyv_abs_s16( arg.data.vs16 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2995
2996#line 386
// square_s16: lanewise square. Compiled out by the surrounding #if 0
// (expansion of SIMD_IMPL_INTRIN_1(square_s16, vs16, vs16)).
static PyObject *simd__intrin_square_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&:""square_s16", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vs16 = npyv_square_s16( arg.data.vs16 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
2998
2999#endif
3000
3001#line 393
// max_s16: lanewise signed maximum (expansion of SIMD_IMPL_INTRIN_2(max_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_max_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""max_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = _mm_max_epi16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
3003
3004#line 393
// min_s16: lanewise signed minimum (expansion of SIMD_IMPL_INTRIN_2(min_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_min_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""min_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = _mm_min_epi16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
3006
3007
3008#if 0
3009#line 400
// maxp_s16: NaN-propagating maximum (float-only concept). Compiled out by the
// surrounding #if 0 (expansion of SIMD_IMPL_INTRIN_2(maxp_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_maxp_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""maxp_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = npyv_maxp_s16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
3011
3012#line 400
// minp_s16: NaN-propagating minimum. Compiled out by the surrounding #if 0
// (expansion of SIMD_IMPL_INTRIN_2(minp_s16, vs16, vs16, vs16)).
static PyObject *simd__intrin_minp_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vs16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&:""minp_s16", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2 )) return ((void*)0);
    simd_data data = {.vs16 = npyv_minp_s16( arg1.data.vs16, arg2.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
3014
3015#endif
3016
3017/***************************
3018 * Mask operations
3019 ***************************/
3020#line 410
// ifadd_s16: masked add — where the vb16 mask is set take a+b, elsewhere the
// fallback vector (expansion of SIMD_IMPL_INTRIN_4(ifadd_s16, vs16, vb16, vs16, vs16, vs16)).
static PyObject *simd__intrin_ifadd_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vb16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    simd_arg arg3 = {.dtype = simd_data_vs16};
    simd_arg arg4 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&O&O&:""ifadd_s16",
        simd_arg_converter, &arg1, simd_arg_converter, &arg2,
        simd_arg_converter, &arg3, simd_arg_converter, &arg4 )) return ((void*)0);
    simd_data data = {.vs16 = npyv_ifadd_s16( arg1.data.vb16, arg2.data.vs16, arg3.data.vs16, arg4.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg_free(&arg3);
    simd_arg_free(&arg4);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
3022
3023#line 410
// ifsub_s16: masked subtract — where the vb16 mask is set take a-b, elsewhere
// the fallback vector (expansion of SIMD_IMPL_INTRIN_4(ifsub_s16, vs16, vb16, vs16, vs16, vs16)).
static PyObject *simd__intrin_ifsub_s16
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_vb16};
    simd_arg arg2 = {.dtype = simd_data_vs16};
    simd_arg arg3 = {.dtype = simd_data_vs16};
    simd_arg arg4 = {.dtype = simd_data_vs16};
    if (!PyArg_ParseTuple( args, "O&O&O&O&:""ifsub_s16",
        simd_arg_converter, &arg1, simd_arg_converter, &arg2,
        simd_arg_converter, &arg3, simd_arg_converter, &arg4 )) return ((void*)0);
    simd_data data = {.vs16 = npyv_ifsub_s16( arg1.data.vb16, arg2.data.vs16, arg3.data.vs16, arg4.data.vs16 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg_free(&arg3);
    simd_arg_free(&arg4);
    simd_arg ret = { .data = data, .dtype = simd_data_vs16 };
    return simd_arg_to_obj(&ret);
}
3025
3026
3027#endif // simd_sup
3028
3029#line 34
3030#if 1
3031/***************************
3032 * Memory
3033 ***************************/
3034#line 41
// load_u32: unaligned vector load from a converted Python sequence buffer
// (expansion of SIMD_IMPL_INTRIN_1(load_u32, vu32, qu32)).
static PyObject *simd__intrin_load_u32
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_qu32};
    if (!PyArg_ParseTuple( args, "O&:""load_u32", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vu32 = npyv_load_u32( arg.data.qu32 )};
    simd_arg_free(&arg);   // releases the temporary sequence buffer
    simd_arg ret = { .data = data, .dtype = simd_data_vu32 };
    return simd_arg_to_obj(&ret);
}
3036
3037#line 41
// loada_u32: aligned vector load (expansion of SIMD_IMPL_INTRIN_1(loada_u32, vu32, qu32)).
static PyObject *simd__intrin_loada_u32
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_qu32};
    if (!PyArg_ParseTuple( args, "O&:""loada_u32", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vu32 = npyv_loada_u32( arg.data.qu32 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vu32 };
    return simd_arg_to_obj(&ret);
}
3039
3040#line 41
// loads_u32: stream (non-temporal) vector load
// (expansion of SIMD_IMPL_INTRIN_1(loads_u32, vu32, qu32)).
static PyObject *simd__intrin_loads_u32
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_qu32};
    if (!PyArg_ParseTuple( args, "O&:""loads_u32", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vu32 = npyv_loads_u32( arg.data.qu32 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vu32 };
    return simd_arg_to_obj(&ret);
}
3042
3043#line 41
// loadl_u32: load only the lower half of the vector
// (expansion of SIMD_IMPL_INTRIN_1(loadl_u32, vu32, qu32)).
static PyObject *simd__intrin_loadl_u32
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg = {.dtype = simd_data_qu32};
    if (!PyArg_ParseTuple( args, "O&:""loadl_u32", simd_arg_converter, &arg )) return ((void*)0);
    simd_data data = {.vu32 = npyv_loadl_u32( arg.data.qu32 )};
    simd_arg_free(&arg);
    simd_arg ret = { .data = data, .dtype = simd_data_vu32 };
    return simd_arg_to_obj(&ret);
}
3045
3046#line 46
3047// special definition due to the nature of store
// store_u32: Python wrapper around npyv_store_u32. Hand-written (not the
// SIMD_IMPL_INTRIN template) because a store mutates the destination buffer,
// which must then be copied back into the caller's Python sequence.
static PyObject *
simd__intrin_store_u32(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg seq_arg = {.dtype = simd_data_qu32};   // destination sequence
    simd_arg vec_arg = {.dtype = simd_data_vu32};   // vector to store
    if (!PyArg_ParseTuple(
        args, "O&O&:store_u32",
        simd_arg_converter, &seq_arg,
        simd_arg_converter, &vec_arg
    )) {
        return ((void*)0);
    }
    npyv_store_u32(seq_arg.data.qu32, vec_arg.data.vu32);
    // write-back: propagate the stored lanes into the original Python object;
    // on failure free the buffer and propagate the (already set) exception
    if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu32, simd_data_qu32)) {
        simd_arg_free(&seq_arg);
        return ((void*)0);
    }
    simd_arg_free(&seq_arg);
    // NOTE(review): vec_arg is never passed to simd_arg_free — presumably
    // vector-typed args own no heap allocation; confirm against simd_arg_free.
    return _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&_Py_NoneStruct);  // Py_RETURN_NONE
}
3069
3070#line 46
3071// special definition due to the nature of storea
// storea_u32: aligned variant of store_u32 — identical flow, different
// underlying intrinsic (npyv_storea_u32).
static PyObject *
simd__intrin_storea_u32(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg seq_arg = {.dtype = simd_data_qu32};   // destination sequence
    simd_arg vec_arg = {.dtype = simd_data_vu32};   // vector to store
    if (!PyArg_ParseTuple(
        args, "O&O&:storea_u32",
        simd_arg_converter, &seq_arg,
        simd_arg_converter, &vec_arg
    )) {
        return ((void*)0);
    }
    npyv_storea_u32(seq_arg.data.qu32, vec_arg.data.vu32);
    // write-back into the caller's Python sequence
    if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu32, simd_data_qu32)) {
        simd_arg_free(&seq_arg);
        return ((void*)0);
    }
    simd_arg_free(&seq_arg);
    return _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&_Py_NoneStruct);  // Py_RETURN_NONE
}
3093
3094#line 46
3095// special definition due to the nature of stores
// stores_u32: non-temporal (streaming) variant of store_u32
// (npyv_stores_u32); same parse / store / write-back / free flow.
static PyObject *
simd__intrin_stores_u32(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg seq_arg = {.dtype = simd_data_qu32};   // destination sequence
    simd_arg vec_arg = {.dtype = simd_data_vu32};   // vector to store
    if (!PyArg_ParseTuple(
        args, "O&O&:stores_u32",
        simd_arg_converter, &seq_arg,
        simd_arg_converter, &vec_arg
    )) {
        return ((void*)0);
    }
    npyv_stores_u32(seq_arg.data.qu32, vec_arg.data.vu32);
    // write-back into the caller's Python sequence
    if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu32, simd_data_qu32)) {
        simd_arg_free(&seq_arg);
        return ((void*)0);
    }
    simd_arg_free(&seq_arg);
    return _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&_Py_NoneStruct);  // Py_RETURN_NONE
}
3117
3118#line 46
3119// special definition due to the nature of storel
// storel_u32: store only the lower half of the vector (npyv_storel_u32);
// same parse / store / write-back / free flow as store_u32.
static PyObject *
simd__intrin_storel_u32(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg seq_arg = {.dtype = simd_data_qu32};   // destination sequence
    simd_arg vec_arg = {.dtype = simd_data_vu32};   // vector to store
    if (!PyArg_ParseTuple(
        args, "O&O&:storel_u32",
        simd_arg_converter, &seq_arg,
        simd_arg_converter, &vec_arg
    )) {
        return ((void*)0);
    }
    npyv_storel_u32(seq_arg.data.qu32, vec_arg.data.vu32);
    // write-back into the caller's Python sequence
    if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu32, simd_data_qu32)) {
        simd_arg_free(&seq_arg);
        return ((void*)0);
    }
    simd_arg_free(&seq_arg);
    return _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&_Py_NoneStruct);  // Py_RETURN_NONE
}
3141
3142#line 46
3143// special definition due to the nature of storeh
// storeh_u32: store only the upper half of the vector (npyv_storeh_u32);
// same parse / store / write-back / free flow as store_u32.
static PyObject *
simd__intrin_storeh_u32(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg seq_arg = {.dtype = simd_data_qu32};   // destination sequence
    simd_arg vec_arg = {.dtype = simd_data_vu32};   // vector to store
    if (!PyArg_ParseTuple(
        args, "O&O&:storeh_u32",
        simd_arg_converter, &seq_arg,
        simd_arg_converter, &vec_arg
    )) {
        return ((void*)0);
    }
    npyv_storeh_u32(seq_arg.data.qu32, vec_arg.data.vu32);
    // write-back into the caller's Python sequence
    if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu32, simd_data_qu32)) {
        simd_arg_free(&seq_arg);
        return ((void*)0);
    }
    simd_arg_free(&seq_arg);
    return _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&_Py_NoneStruct);  // Py_RETURN_NONE
}
3165
3166
3167/****************************************
3168 * Non-contiguous/Partial Memory access
3169 ****************************************/
3170#if 1
3171// Partial Load
// load_till_u32: partial load of the first `nlane` elements, remaining lanes
// filled with the given scalar (expansion of
// SIMD_IMPL_INTRIN_3(load_till_u32, vu32, qu32, u32, u32)).
static PyObject *simd__intrin_load_till_u32
(PyObject* (__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
{
    simd_arg arg1 = {.dtype = simd_data_qu32};  // source sequence
    simd_arg arg2 = {.dtype = simd_data_u32};   // lane count
    simd_arg arg3 = {.dtype = simd_data_u32};   // fill value
    if (!PyArg_ParseTuple( args, "O&O&O&:""load_till_u32", simd_arg_converter
        , &arg1, simd_arg_converter, &arg2, simd_arg_converter, &arg3 )) return ((void*)0);
    simd_data data = {.vu32 = npyv_load_till_u32( arg1.data.qu32, arg2.data.u32, arg3.data.u32 )};
    simd_arg_free(&arg1);
    simd_arg_free(&arg2);
    simd_arg_free(&arg3);
    simd_arg ret = { .data = data, .dtype = simd_data_vu32 };
    return simd_arg_to_obj(&ret);
}
3173SIMD_IMPL_INTRIN_2(load_tillz_u32, vu32, qu32, u32)static PyObject *simd__intrin_load_tillz_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qu32}; simd_arg arg2 = {.dtype = simd_data_u32
}; if (!PyArg_ParseTuple( args, "O&O&:""load_tillz_u32"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu32 = npyv_load_tillz_u32
( arg1.data.qu32, arg2.data.u32 )}; simd_arg_free(&arg1);
simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu32 }; return simd_arg_to_obj(&ret); }
3174
3175// Partial Store
3176static PyObject *
3177simd__intrin_store_till_u32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3178{
3179 simd_arg seq_arg = {.dtype = simd_data_qu32};
3180 simd_arg nlane_arg = {.dtype = simd_data_u32};
3181 simd_arg vec_arg = {.dtype = simd_data_vu32};
3182 if (!PyArg_ParseTuple(
3183 args, "O&O&O&:store_till_u32",
3184 simd_arg_converter, &seq_arg,
3185 simd_arg_converter, &nlane_arg,
3186 simd_arg_converter, &vec_arg
3187 )) {
3188 return NULL((void*)0);
3189 }
3190 npyv_store_till_u32(
3191 seq_arg.data.qu32, nlane_arg.data.u32, vec_arg.data.vu32
3192 );
3193 // write-back
3194 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu32, simd_data_qu32)) {
3195 simd_arg_free(&seq_arg);
3196 return NULL((void*)0);
3197 }
3198 simd_arg_free(&seq_arg);
3199 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
3200}
3201
3202// Non-contiguous Load
3203#line 112
3204static PyObject *
3205simd__intrin_loadn_u32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3206{
3207 simd_arg seq_arg = {.dtype = simd_data_qu32};
3208 simd_arg stride_arg = {.dtype = simd_data_s64};
3209#if 0
3210 simd_arg nlane_arg = {.dtype = simd_data_u32};
3211#endif // till
3212#if 0
3213 simd_arg fill_arg = {.dtype = simd_data_u32};
3214#endif
3215 if (!PyArg_ParseTuple(
3216 args, "O&O&:loadn_u32",
3217 simd_arg_converter, &seq_arg,
3218 simd_arg_converter, &stride_arg
3219#if 0
3220 ,simd_arg_converter, &nlane_arg
3221#endif
3222#if 0
3223 ,simd_arg_converter, &fill_arg
3224#endif
3225 )) {
3226 return NULL((void*)0);
3227 }
3228 npyv_lanetype_u32 *seq_ptr = seq_arg.data.qu32;
3229 npy_intp stride = (npy_intp)stride_arg.data.s64;
3230 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
3231 Py_ssize_t min_seq_len = stride * npyv_nlanes_u324;
3232 if (stride < 0) {
3233 seq_ptr += cur_seq_len -1;
3234 min_seq_len = -min_seq_len;
3235 }
3236 if (cur_seq_len < min_seq_len) {
3237 PyErr_Format(PyExc_ValueError,
3238 "loadn_u32(), according to provided stride %d, the "
3239 "minimum acceptable size of the required sequence is %d, given(%d)",
3240 stride, min_seq_len, cur_seq_len
3241 );
3242 goto err;
3243 }
3244 npyv_u32 rvec = npyv_loadn_u32(
3245 seq_ptr, stride
3246 #if 0
3247 , nlane_arg.data.u32
3248 #endif
3249 #if 0
3250 , fill_arg.data.u32
3251 #endif
3252 );
3253 simd_arg ret = {
3254 .dtype = simd_data_vu32, .data = {.vu32=rvec}
3255 };
3256 simd_arg_free(&seq_arg);
3257 return simd_arg_to_obj(&ret);
3258err:
3259 simd_arg_free(&seq_arg);
3260 return NULL((void*)0);
3261}
3262
3263#line 112
3264static PyObject *
3265simd__intrin_loadn_till_u32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3266{
3267 simd_arg seq_arg = {.dtype = simd_data_qu32};
3268 simd_arg stride_arg = {.dtype = simd_data_s64};
3269#if 1
3270 simd_arg nlane_arg = {.dtype = simd_data_u32};
3271#endif // till
3272#if 1
3273 simd_arg fill_arg = {.dtype = simd_data_u32};
3274#endif
3275 if (!PyArg_ParseTuple(
3276 args, "O&O&O&O&:loadn_till_u32",
3277 simd_arg_converter, &seq_arg,
3278 simd_arg_converter, &stride_arg
3279#if 1
3280 ,simd_arg_converter, &nlane_arg
3281#endif
3282#if 1
3283 ,simd_arg_converter, &fill_arg
3284#endif
3285 )) {
3286 return NULL((void*)0);
3287 }
3288 npyv_lanetype_u32 *seq_ptr = seq_arg.data.qu32;
3289 npy_intp stride = (npy_intp)stride_arg.data.s64;
3290 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
3291 Py_ssize_t min_seq_len = stride * npyv_nlanes_u324;
3292 if (stride < 0) {
3293 seq_ptr += cur_seq_len -1;
3294 min_seq_len = -min_seq_len;
3295 }
3296 if (cur_seq_len < min_seq_len) {
3297 PyErr_Format(PyExc_ValueError,
3298 "loadn_till_u32(), according to provided stride %d, the "
3299 "minimum acceptable size of the required sequence is %d, given(%d)",
3300 stride, min_seq_len, cur_seq_len
3301 );
3302 goto err;
3303 }
3304 npyv_u32 rvec = npyv_loadn_till_u32(
3305 seq_ptr, stride
3306 #if 1
3307 , nlane_arg.data.u32
3308 #endif
3309 #if 1
3310 , fill_arg.data.u32
3311 #endif
3312 );
3313 simd_arg ret = {
3314 .dtype = simd_data_vu32, .data = {.vu32=rvec}
3315 };
3316 simd_arg_free(&seq_arg);
3317 return simd_arg_to_obj(&ret);
3318err:
3319 simd_arg_free(&seq_arg);
3320 return NULL((void*)0);
3321}
3322
3323#line 112
3324static PyObject *
3325simd__intrin_loadn_tillz_u32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3326{
3327 simd_arg seq_arg = {.dtype = simd_data_qu32};
3328 simd_arg stride_arg = {.dtype = simd_data_s64};
3329#if 1
3330 simd_arg nlane_arg = {.dtype = simd_data_u32};
3331#endif // till
3332#if 0
3333 simd_arg fill_arg = {.dtype = simd_data_u32};
3334#endif
3335 if (!PyArg_ParseTuple(
3336 args, "O&O&O&:loadn_tillz_u32",
3337 simd_arg_converter, &seq_arg,
3338 simd_arg_converter, &stride_arg
3339#if 1
3340 ,simd_arg_converter, &nlane_arg
3341#endif
3342#if 0
3343 ,simd_arg_converter, &fill_arg
3344#endif
3345 )) {
3346 return NULL((void*)0);
3347 }
3348 npyv_lanetype_u32 *seq_ptr = seq_arg.data.qu32;
3349 npy_intp stride = (npy_intp)stride_arg.data.s64;
3350 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
3351 Py_ssize_t min_seq_len = stride * npyv_nlanes_u324;
3352 if (stride < 0) {
3353 seq_ptr += cur_seq_len -1;
3354 min_seq_len = -min_seq_len;
3355 }
3356 if (cur_seq_len < min_seq_len) {
3357 PyErr_Format(PyExc_ValueError,
3358 "loadn_tillz_u32(), according to provided stride %d, the "
3359 "minimum acceptable size of the required sequence is %d, given(%d)",
3360 stride, min_seq_len, cur_seq_len
3361 );
3362 goto err;
3363 }
3364 npyv_u32 rvec = npyv_loadn_tillz_u32(
3365 seq_ptr, stride
3366 #if 1
3367 , nlane_arg.data.u32
3368 #endif
3369 #if 0
3370 , fill_arg.data.u32
3371 #endif
3372 );
3373 simd_arg ret = {
3374 .dtype = simd_data_vu32, .data = {.vu32=rvec}
3375 };
3376 simd_arg_free(&seq_arg);
3377 return simd_arg_to_obj(&ret);
3378err:
3379 simd_arg_free(&seq_arg);
3380 return NULL((void*)0);
3381}
3382
3383
3384// Non-contiguous Store
3385#line 178
3386static PyObject *
3387simd__intrin_storen_u32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3388{
3389 simd_arg seq_arg = {.dtype = simd_data_qu32};
3390 simd_arg stride_arg = {.dtype = simd_data_s64};
3391 simd_arg vec_arg = {.dtype = simd_data_vu32};
3392#if 0
3393 simd_arg nlane_arg = {.dtype = simd_data_u32};
3394#endif
3395 if (!PyArg_ParseTuple(
3396 args, "O&O&O&:storen_u32",
3397 simd_arg_converter, &seq_arg,
3398 simd_arg_converter, &stride_arg
3399#if 0
3400 ,simd_arg_converter, &nlane_arg
3401#endif
3402 ,simd_arg_converter, &vec_arg
3403 )) {
3404 return NULL((void*)0);
3405 }
3406 npyv_lanetype_u32 *seq_ptr = seq_arg.data.qu32;
3407 npy_intp stride = (npy_intp)stride_arg.data.s64;
3408 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
3409 Py_ssize_t min_seq_len = stride * npyv_nlanes_u324;
3410 if (stride < 0) {
3411 seq_ptr += cur_seq_len -1;
3412 min_seq_len = -min_seq_len;
3413 }
3414 // overflow guard
3415 if (cur_seq_len < min_seq_len) {
3416 PyErr_Format(PyExc_ValueError,
3417 "storen_u32(), according to provided stride %d, the"
3418 "minimum acceptable size of the required sequence is %d, given(%d)",
3419 stride, min_seq_len, cur_seq_len
3420 );
3421 goto err;
3422 }
3423 npyv_storen_u32(
3424 seq_ptr, stride
3425 #if 0
3426 ,nlane_arg.data.u32
3427 #endif
3428 ,vec_arg.data.vu32
3429 );
3430 // write-back
3431 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu32, simd_data_qu32)) {
3432 goto err;
3433 }
3434 simd_arg_free(&seq_arg);
3435 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
3436err:
3437 simd_arg_free(&seq_arg);
3438 return NULL((void*)0);
3439}
3440
3441#line 178
3442static PyObject *
3443simd__intrin_storen_till_u32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3444{
3445 simd_arg seq_arg = {.dtype = simd_data_qu32};
3446 simd_arg stride_arg = {.dtype = simd_data_s64};
3447 simd_arg vec_arg = {.dtype = simd_data_vu32};
3448#if 1
3449 simd_arg nlane_arg = {.dtype = simd_data_u32};
3450#endif
3451 if (!PyArg_ParseTuple(
3452 args, "O&O&O&O&:storen_u32",
3453 simd_arg_converter, &seq_arg,
3454 simd_arg_converter, &stride_arg
3455#if 1
3456 ,simd_arg_converter, &nlane_arg
3457#endif
3458 ,simd_arg_converter, &vec_arg
3459 )) {
3460 return NULL((void*)0);
3461 }
3462 npyv_lanetype_u32 *seq_ptr = seq_arg.data.qu32;
3463 npy_intp stride = (npy_intp)stride_arg.data.s64;
3464 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
3465 Py_ssize_t min_seq_len = stride * npyv_nlanes_u324;
3466 if (stride < 0) {
3467 seq_ptr += cur_seq_len -1;
3468 min_seq_len = -min_seq_len;
3469 }
3470 // overflow guard
3471 if (cur_seq_len < min_seq_len) {
3472 PyErr_Format(PyExc_ValueError,
3473 "storen_till_u32(), according to provided stride %d, the"
3474 "minimum acceptable size of the required sequence is %d, given(%d)",
3475 stride, min_seq_len, cur_seq_len
3476 );
3477 goto err;
3478 }
3479 npyv_storen_till_u32(
3480 seq_ptr, stride
3481 #if 1
3482 ,nlane_arg.data.u32
3483 #endif
3484 ,vec_arg.data.vu32
3485 );
3486 // write-back
3487 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu32, simd_data_qu32)) {
3488 goto err;
3489 }
3490 simd_arg_free(&seq_arg);
3491 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
3492err:
3493 simd_arg_free(&seq_arg);
3494 return NULL((void*)0);
3495}
3496
3497#endif // 1
3498
3499/***************************
3500 * Misc
3501 ***************************/
3502SIMD_IMPL_INTRIN_0(zero_u32, vu32)static PyObject *simd__intrin_zero_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "zero_u32") ) return ((void*)0); simd_arg a = { .
dtype = simd_data_vu32, .data = {.vu32 = _mm_setzero_si128()}
, }; return simd_arg_to_obj(&a); }
3503SIMD_IMPL_INTRIN_1(setall_u32, vu32, u32)static PyObject *simd__intrin_setall_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_u32}; if (!PyArg_ParseTuple( args, "O&:"
"setall_u32", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vu32 = _mm_set1_epi32((int)(arg.data.u32
))}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vu32 }; return simd_arg_to_obj(&ret); }
3504SIMD_IMPL_INTRIN_3(select_u32, vu32, vb32, vu32, vu32)static PyObject *simd__intrin_select_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; simd_arg arg3 = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""select_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu32 = npyv_select_u8
( arg1.data.vb32, arg2.data.vu32, arg3.data.vu32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3505
3506#line 246
3507#if 1
3508SIMD_IMPL_INTRIN_1(reinterpret_u8_u32, vu8, vu32)static PyObject *simd__intrin_reinterpret_u8_u32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u8_u32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu8 = arg.data.vu32
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu8 }; return simd_arg_to_obj(&ret); }
3509#endif // simd_sup2
3510
3511#line 246
3512#if 1
3513SIMD_IMPL_INTRIN_1(reinterpret_s8_u32, vs8, vu32)static PyObject *simd__intrin_reinterpret_s8_u32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s8_u32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs8 = arg.data.vu32
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs8 }; return simd_arg_to_obj(&ret); }
3514#endif // simd_sup2
3515
3516#line 246
3517#if 1
3518SIMD_IMPL_INTRIN_1(reinterpret_u16_u32, vu16, vu32)static PyObject *simd__intrin_reinterpret_u16_u32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u16_u32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu16 = arg.data.
vu32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&ret)
; }
3519#endif // simd_sup2
3520
3521#line 246
3522#if 1
3523SIMD_IMPL_INTRIN_1(reinterpret_s16_u32, vs16, vu32)static PyObject *simd__intrin_reinterpret_s16_u32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s16_u32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs16 = arg.data.
vu32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs16 }; return simd_arg_to_obj(&ret)
; }
3524#endif // simd_sup2
3525
3526#line 246
3527#if 1
3528SIMD_IMPL_INTRIN_1(reinterpret_u32_u32, vu32, vu32)static PyObject *simd__intrin_reinterpret_u32_u32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u32_u32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu32 = arg.data.
vu32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu32 }; return simd_arg_to_obj(&ret)
; }
3529#endif // simd_sup2
3530
3531#line 246
3532#if 1
3533SIMD_IMPL_INTRIN_1(reinterpret_s32_u32, vs32, vu32)static PyObject *simd__intrin_reinterpret_s32_u32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s32_u32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs32 = arg.data.
vu32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret)
; }
3534#endif // simd_sup2
3535
3536#line 246
3537#if 1
3538SIMD_IMPL_INTRIN_1(reinterpret_u64_u32, vu64, vu32)static PyObject *simd__intrin_reinterpret_u64_u32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u64_u32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu64 = arg.data.
vu32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&ret)
; }
3539#endif // simd_sup2
3540
3541#line 246
3542#if 1
3543SIMD_IMPL_INTRIN_1(reinterpret_s64_u32, vs64, vu32)static PyObject *simd__intrin_reinterpret_s64_u32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s64_u32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs64 = arg.data.
vu32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&ret)
; }
3544#endif // simd_sup2
3545
3546#line 246
3547#if 1
3548SIMD_IMPL_INTRIN_1(reinterpret_f32_u32, vf32, vu32)static PyObject *simd__intrin_reinterpret_f32_u32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f32_u32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf32 = _mm_castsi128_ps
( arg.data.vu32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf32 }; return simd_arg_to_obj
(&ret); }
3549#endif // simd_sup2
3550
3551#line 246
3552#if NPY_SIMD_F641
3553SIMD_IMPL_INTRIN_1(reinterpret_f64_u32, vf64, vu32)static PyObject *simd__intrin_reinterpret_f64_u32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f64_u32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf64 = _mm_castsi128_pd
( arg.data.vu32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf64 }; return simd_arg_to_obj
(&ret); }
3554#endif // simd_sup2
3555
3556
3557/**
3558 * special definition due to the nature of intrinsics
3559 * npyv_setf_u32 and npy_set_u32.
3560*/
3561#line 258
3562static PyObject *
3563simd__intrin_setf_u32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3564{
3565 npyv_lanetype_u32 *data = simd_sequence_from_iterable(args, simd_data_qu32, npyv_nlanes_u324);
3566 if (data == NULL((void*)0)) {
3567 return NULL((void*)0);
3568 }
3569 simd_data r = {.vu32 = npyv_setf_u32(npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
3570 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
3571 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
3572 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
3573 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
3574 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
3575 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
3576 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
3577 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
3578 data[64] // for setfnpyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
3579 )npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
};
3580 simd_sequence_free(data);
3581 return (PyObject*)PySIMDVector_FromData(r, simd_data_vu32);
3582}
3583
3584#line 258
3585static PyObject *
3586simd__intrin_set_u32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3587{
3588 npyv_lanetype_u32 *data = simd_sequence_from_iterable(args, simd_data_qu32, npyv_nlanes_u324);
3589 if (data == NULL((void*)0)) {
3590 return NULL((void*)0);
3591 }
3592 simd_data r = {.vu32 = npyv_set_u32(npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
3593 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
3594 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
3595 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
3596 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
3597 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
3598 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
3599 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
3600 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
3601 data[64] // for setfnpyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
3602 )npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
};
3603 simd_sequence_free(data);
3604 return (PyObject*)PySIMDVector_FromData(r, simd_data_vu32);
3605}
3606
3607
3608/***************************
3609 * Reorder
3610 ***************************/
3611#line 287
3612SIMD_IMPL_INTRIN_2(combinel_u32, vu32, vu32, vu32)static PyObject *simd__intrin_combinel_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""combinel_u32"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu32 = _mm_unpacklo_epi64
( arg1.data.vu32, arg2.data.vu32 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu32 }; return simd_arg_to_obj(&ret); }
3613
3614#line 287
3615SIMD_IMPL_INTRIN_2(combineh_u32, vu32, vu32, vu32)static PyObject *simd__intrin_combineh_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""combineh_u32"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu32 = _mm_unpackhi_epi64
( arg1.data.vu32, arg2.data.vu32 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu32 }; return simd_arg_to_obj(&ret); }
3616
3617
3618#line 293
3619SIMD_IMPL_INTRIN_2(combine_u32, vu32x2, vu32, vu32)static PyObject *simd__intrin_combine_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""combine_u32",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu32x2 = npyv__combine
( arg1.data.vu32, arg2.data.vu32 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu32x2 }; return simd_arg_to_obj(&ret); }
3620
3621#line 293
3622SIMD_IMPL_INTRIN_2(zip_u32, vu32x2, vu32, vu32)static PyObject *simd__intrin_zip_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""zip_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32x2 = npyv_zip_u32( arg1.data.vu32,
arg2.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu32x2
}; return simd_arg_to_obj(&ret); }
3623
3624
3625#if 1
3626SIMD_IMPL_INTRIN_1(rev64_u32, vu32, vu32)static PyObject *simd__intrin_rev64_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&:"
"rev64_u32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu32 = npyv_rev64_u32( arg.data.vu32 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu32 }; return simd_arg_to_obj(&ret); }
3627#endif
3628
3629/***************************
3630 * Operators
3631 ***************************/
3632#if 31 > 0
3633SIMD_IMPL_INTRIN_2(shl_u32, vu32, vu32, u8)static PyObject *simd__intrin_shl_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shl_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = _mm_sll_epi32(arg1.data.vu32, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu32
}; return simd_arg_to_obj(&ret); }
3634SIMD_IMPL_INTRIN_2(shr_u32, vu32, vu32, u8)static PyObject *simd__intrin_shr_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shr_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = _mm_srl_epi32(arg1.data.vu32, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu32
}; return simd_arg_to_obj(&ret); }
3635// immediate constant
3636SIMD_IMPL_INTRIN_2IMM(shli_u32, vu32, vu32, 31)static PyObject *simd__intrin_shli_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shli_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vu32 = 0 == arg2.data.u8
? _mm_slli_epi32(arg1.data.vu32, 0) : 1 == arg2.data.u8 ? _mm_slli_epi32
(arg1.data.vu32, 1) : 2 == arg2.data.u8 ? _mm_slli_epi32(arg1
.data.vu32, 2) : 3 == arg2.data.u8 ? _mm_slli_epi32(arg1.data
.vu32, 3) : 4 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32
, 4) : 5 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 5) :
6 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 6) : 7 ==
arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 7) : 8 == arg2
.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 8) : 9 == arg2.data
.u8 ? _mm_slli_epi32(arg1.data.vu32, 9) : 10 == arg2.data.u8 ?
_mm_slli_epi32(arg1.data.vu32, 10) : 11 == arg2.data.u8 ? _mm_slli_epi32
(arg1.data.vu32, 11) : 12 == arg2.data.u8 ? _mm_slli_epi32(arg1
.data.vu32, 12) : 13 == arg2.data.u8 ? _mm_slli_epi32(arg1.data
.vu32, 13) : 14 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32
, 14) : 15 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 15
) : 16 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 16) :
17 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 17) : 18
== arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 18) : 19 ==
arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 19) : 20 == arg2
.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 20) : 21 == arg2.data
.u8 ? _mm_slli_epi32(arg1.data.vu32, 21) : 22 == arg2.data.u8
? _mm_slli_epi32(arg1.data.vu32, 22) : 23 == arg2.data.u8 ? _mm_slli_epi32
(arg1.data.vu32, 23) : 24 == arg2.data.u8 ? _mm_slli_epi32(arg1
.data.vu32, 24) : 25 == arg2.data.u8 ? _mm_slli_epi32(arg1.data
.vu32, 25) : 26 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32
, 26) : 27 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 27
) : 28 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 28) :
29 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 29) : 30
== arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 30) : 31 ==
arg2.data.u8 ? _mm_slli_epi32(arg1.data.vu32, 31) : data.vu32
; simd_arg_free(&arg1); simd_arg ret = { .data = data, .dtype
= simd_data_vu32 }; return simd_arg_to_obj(&ret); }
3637SIMD_IMPL_INTRIN_2IMM(shri_u32, vu32, vu32, 32)static PyObject *simd__intrin_shri_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shri_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vu32 = 1 == arg2.data.u8
? _mm_srli_epi32(arg1.data.vu32, 1) : 2 == arg2.data.u8 ? _mm_srli_epi32
(arg1.data.vu32, 2) : 3 == arg2.data.u8 ? _mm_srli_epi32(arg1
.data.vu32, 3) : 4 == arg2.data.u8 ? _mm_srli_epi32(arg1.data
.vu32, 4) : 5 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32
, 5) : 6 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 6) :
7 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 7) : 8 ==
arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 8) : 9 == arg2
.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 9) : 10 == arg2.data
.u8 ? _mm_srli_epi32(arg1.data.vu32, 10) : 11 == arg2.data.u8
? _mm_srli_epi32(arg1.data.vu32, 11) : 12 == arg2.data.u8 ? _mm_srli_epi32
(arg1.data.vu32, 12) : 13 == arg2.data.u8 ? _mm_srli_epi32(arg1
.data.vu32, 13) : 14 == arg2.data.u8 ? _mm_srli_epi32(arg1.data
.vu32, 14) : 15 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32
, 15) : 16 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 16
) : 17 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 17) :
18 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 18) : 19
== arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 19) : 20 ==
arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 20) : 21 == arg2
.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 21) : 22 == arg2.data
.u8 ? _mm_srli_epi32(arg1.data.vu32, 22) : 23 == arg2.data.u8
? _mm_srli_epi32(arg1.data.vu32, 23) : 24 == arg2.data.u8 ? _mm_srli_epi32
(arg1.data.vu32, 24) : 25 == arg2.data.u8 ? _mm_srli_epi32(arg1
.data.vu32, 25) : 26 == arg2.data.u8 ? _mm_srli_epi32(arg1.data
.vu32, 26) : 27 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32
, 27) : 28 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 28
) : 29 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 29) :
30 == arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 30) : 31
== arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 31) : 32 ==
arg2.data.u8 ? _mm_srli_epi32(arg1.data.vu32, 32) : data.vu32
; simd_arg_free(&arg1); simd_arg ret = { .data = data, .dtype
= simd_data_vu32 }; return simd_arg_to_obj(&ret); }
3638#endif // shl_imm
3639
3640#line 314
3641SIMD_IMPL_INTRIN_2(and_u32, vu32, vu32, vu32)static PyObject *simd__intrin_and_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""and_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = _mm_and_si128( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3642
3643#line 314
3644SIMD_IMPL_INTRIN_2(or_u32, vu32, vu32, vu32)static PyObject *simd__intrin_or_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""or_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = _mm_or_si128( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3645
3646#line 314
3647SIMD_IMPL_INTRIN_2(xor_u32, vu32, vu32, vu32)static PyObject *simd__intrin_xor_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = _mm_xor_si128( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3648
3649
3650SIMD_IMPL_INTRIN_1(not_u32, vu32, vu32)static PyObject *simd__intrin_not_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&:"
"not_u32", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vu32 = _mm_xor_si128(arg.data.vu32, _mm_set1_epi32
(-1))}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu32 }; return simd_arg_to_obj(&ret)
; }
3651
3652#line 322
3653SIMD_IMPL_INTRIN_2(cmpeq_u32, vb32, vu32, vu32)static PyObject *simd__intrin_cmpeq_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpeq_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_cmpeq_epi32( arg1.data.vu32
, arg2.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(
&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
3654
3655#line 322
3656SIMD_IMPL_INTRIN_2(cmpneq_u32, vb32, vu32, vu32)static PyObject *simd__intrin_cmpneq_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpneq_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_xor_si128(_mm_cmpeq_epi32(arg1
.data.vu32, arg2.data.vu32), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb32 }; return simd_arg_to_obj(&
ret); }
3657
3658#line 322
3659SIMD_IMPL_INTRIN_2(cmpgt_u32, vb32, vu32, vu32)static PyObject *simd__intrin_cmpgt_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpgt_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = npyv_cmpgt_u32( arg1.data.vu32,
arg2.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
3660
3661#line 322
3662SIMD_IMPL_INTRIN_2(cmpge_u32, vb32, vu32, vu32)static PyObject *simd__intrin_cmpge_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpge_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_xor_si128(npyv_cmpgt_u32(arg2
.data.vu32, arg1.data.vu32), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb32 }; return simd_arg_to_obj(&
ret); }
3663
3664#line 322
3665SIMD_IMPL_INTRIN_2(cmplt_u32, vb32, vu32, vu32)static PyObject *simd__intrin_cmplt_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmplt_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = npyv_cmpgt_u32(arg2.data.vu32, arg1
.data.vu32)}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb32 }; return
simd_arg_to_obj(&ret); }
3666
3667#line 322
3668SIMD_IMPL_INTRIN_2(cmple_u32, vb32, vu32, vu32)static PyObject *simd__intrin_cmple_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmple_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_xor_si128(npyv_cmpgt_u32(arg1
.data.vu32, arg2.data.vu32), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb32 }; return simd_arg_to_obj(&
ret); }
3669
3670
3671/***************************
3672 * Conversion
3673 ***************************/
3674SIMD_IMPL_INTRIN_1(cvt_u32_b32, vu32, vb32)static PyObject *simd__intrin_cvt_u32_b32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb32}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_u32_b32", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vu32 = arg.data.vb32}; simd_arg_free(
&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu32
}; return simd_arg_to_obj(&ret); }
3675SIMD_IMPL_INTRIN_1(cvt_b32_u32, vb32, vu32)static PyObject *simd__intrin_cvt_b32_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_b32_u32", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vb32 = arg.data.vu32}; simd_arg_free(
&arg); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
3676#if 0
3677SIMD_IMPL_INTRIN_1(expand_u32_u32, vu32x2, vu32)static PyObject *simd__intrin_expand_u32_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&:"
"expand_u32_u32", simd_arg_converter, &arg )) return ((void
*)0); simd_data data = {.vu32x2 = npyv_expand_u32_u32( arg.data
.vu32 )}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu32x2 }; return simd_arg_to_obj(&ret
); }
3678#endif // expand_sup
3679/***************************
3680 * Arithmetic
3681 ***************************/
3682#line 339
3683SIMD_IMPL_INTRIN_2(add_u32, vu32, vu32, vu32)static PyObject *simd__intrin_add_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""add_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = _mm_add_epi32( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3684
3685#line 339
3686SIMD_IMPL_INTRIN_2(sub_u32, vu32, vu32, vu32)static PyObject *simd__intrin_sub_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""sub_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = _mm_sub_epi32( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3687
3688
3689#if 0
3690#line 346
3691SIMD_IMPL_INTRIN_2(adds_u32, vu32, vu32, vu32)static PyObject *simd__intrin_adds_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""adds_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = npyv_adds_u32( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3692
3693#line 346
3694SIMD_IMPL_INTRIN_2(subs_u32, vu32, vu32, vu32)static PyObject *simd__intrin_subs_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""subs_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = npyv_subs_u32( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3695
3696#endif // sat_sup
3697
3698#if 1
3699SIMD_IMPL_INTRIN_2(mul_u32, vu32, vu32, vu32)static PyObject *simd__intrin_mul_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""mul_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = npyv_mul_u32( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3700#endif // mul_sup
3701
3702#if 0
3703SIMD_IMPL_INTRIN_2(div_u32, vu32, vu32, vu32)static PyObject *simd__intrin_div_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""div_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = npyv_div_u32( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3704#endif // div_sup
3705
3706#if 1
3707SIMD_IMPL_INTRIN_1(divisor_u32, vu32x3, u32)static PyObject *simd__intrin_divisor_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_u32}; if (!PyArg_ParseTuple( args, "O&:"
"divisor_u32", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vu32x3 = npyv_divisor_u32( arg.data.u32
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vu32x3 }; return simd_arg_to_obj(&ret);
}
3708SIMD_IMPL_INTRIN_2(divc_u32, vu32, vu32, vu32x3)static PyObject *simd__intrin_divc_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32x3
}; if (!PyArg_ParseTuple( args, "O&O&:""divc_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = npyv_divc_u32( arg1.data.vu32, arg2
.data.vu32x3 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu32
}; return simd_arg_to_obj(&ret); }
3709#endif // intdiv_sup
3710
3711#if 0
3712#line 367
3713SIMD_IMPL_INTRIN_3(muladd_u32, vu32, vu32, vu32, vu32)static PyObject *simd__intrin_muladd_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; simd_arg arg3 = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""muladd_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu32 = npyv_muladd_u32
( arg1.data.vu32, arg2.data.vu32, arg3.data.vu32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3714
3715#line 367
3716SIMD_IMPL_INTRIN_3(mulsub_u32, vu32, vu32, vu32, vu32)static PyObject *simd__intrin_mulsub_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; simd_arg arg3 = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""mulsub_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu32 = npyv_mulsub_u32
( arg1.data.vu32, arg2.data.vu32, arg3.data.vu32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3717
3718#line 367
3719SIMD_IMPL_INTRIN_3(nmuladd_u32, vu32, vu32, vu32, vu32)static PyObject *simd__intrin_nmuladd_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; simd_arg arg3 = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmuladd_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu32 = npyv_nmuladd_u32
( arg1.data.vu32, arg2.data.vu32, arg3.data.vu32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3720
3721#line 367
3722SIMD_IMPL_INTRIN_3(nmulsub_u32, vu32, vu32, vu32, vu32)static PyObject *simd__intrin_nmulsub_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; simd_arg arg3 = {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmulsub_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu32 = npyv_nmulsub_u32
( arg1.data.vu32, arg2.data.vu32, arg3.data.vu32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3723
3724#endif // fused_sup
3725
3726#if 1
3727SIMD_IMPL_INTRIN_1(sum_u32, u32, vu32)static PyObject *simd__intrin_sum_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&:"
"sum_u32", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.u32 = npyv_sum_u32( arg.data.vu32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_u32
}; return simd_arg_to_obj(&ret); }
3728#endif // sum_sup
3729
3730#if 0
3731SIMD_IMPL_INTRIN_1(sumup_u32, u32, vu32)static PyObject *simd__intrin_sumup_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&:"
"sumup_u32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.u32 = npyv_sumup_u32( arg.data.vu32 )};
simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_u32 }; return simd_arg_to_obj(&ret); }
3732#endif // sumup_sup
3733
3734/***************************
3735 * Math
3736 ***************************/
3737#if 0
3738#line 386
3739SIMD_IMPL_INTRIN_1(sqrt_u32, vu32, vu32)static PyObject *simd__intrin_sqrt_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&:"
"sqrt_u32", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vu32 = npyv_sqrt_u32( arg.data.vu32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu32
}; return simd_arg_to_obj(&ret); }
3740
3741#line 386
3742SIMD_IMPL_INTRIN_1(recip_u32, vu32, vu32)static PyObject *simd__intrin_recip_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&:"
"recip_u32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu32 = npyv_recip_u32( arg.data.vu32 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu32 }; return simd_arg_to_obj(&ret); }
3743
3744#line 386
3745SIMD_IMPL_INTRIN_1(abs_u32, vu32, vu32)static PyObject *simd__intrin_abs_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&:"
"abs_u32", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vu32 = npyv_abs_u32( arg.data.vu32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu32
}; return simd_arg_to_obj(&ret); }
3746
3747#line 386
3748SIMD_IMPL_INTRIN_1(square_u32, vu32, vu32)static PyObject *simd__intrin_square_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&:"
"square_u32", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vu32 = npyv_square_u32( arg.data.vu32 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu32 }; return simd_arg_to_obj(&ret); }
3749
3750#endif
3751
3752#line 393
3753SIMD_IMPL_INTRIN_2(max_u32, vu32, vu32, vu32)static PyObject *simd__intrin_max_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""max_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = npyv_max_u32( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3754
3755#line 393
3756SIMD_IMPL_INTRIN_2(min_u32, vu32, vu32, vu32)static PyObject *simd__intrin_min_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""min_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = npyv_min_u32( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3757
3758
3759#if 0
3760#line 400
3761SIMD_IMPL_INTRIN_2(maxp_u32, vu32, vu32, vu32)static PyObject *simd__intrin_maxp_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""maxp_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = npyv_maxp_u32( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3762
3763#line 400
3764SIMD_IMPL_INTRIN_2(minp_u32, vu32, vu32, vu32)static PyObject *simd__intrin_minp_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; if (!PyArg_ParseTuple( args, "O&O&:""minp_u32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu32 = npyv_minp_u32( arg1.data.vu32, arg2
.data.vu32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu32 }; return
simd_arg_to_obj(&ret); }
3765
3766#endif
3767
3768/***************************
3769 * Mask operations
3770 ***************************/
3771#line 410
3772 SIMD_IMPL_INTRIN_4(ifadd_u32, vu32, vb32, vu32, vu32, vu32)static PyObject *simd__intrin_ifadd_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; simd_arg arg3 = {.dtype = simd_data_vu32}; simd_arg arg4 =
{.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifadd_u32", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vu32 = npyv_ifadd_u32
( arg1.data.vb32, arg2.data.vu32, arg3.data.vu32, arg4.data.vu32
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vu32 }; return simd_arg_to_obj(&
ret); }
3773
3774#line 410
3775 SIMD_IMPL_INTRIN_4(ifsub_u32, vu32, vb32, vu32, vu32, vu32)static PyObject *simd__intrin_ifsub_u32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vu32
}; simd_arg arg3 = {.dtype = simd_data_vu32}; simd_arg arg4 =
{.dtype = simd_data_vu32}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifsub_u32", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vu32 = npyv_ifsub_u32
( arg1.data.vb32, arg2.data.vu32, arg3.data.vu32, arg4.data.vu32
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vu32 }; return simd_arg_to_obj(&
ret); }
3776
3777
3778#endif // simd_sup
3779
3780#line 34
3781#if 1
3782/***************************
3783 * Memory
3784 ***************************/
3785#line 41
3786SIMD_IMPL_INTRIN_1(load_s32, vs32, qs32)static PyObject *simd__intrin_load_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs32}; if (!PyArg_ParseTuple( args, "O&:"
"load_s32", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vs32 = npyv_load_s32( arg.data.qs32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs32
}; return simd_arg_to_obj(&ret); }
3787
3788#line 41
3789SIMD_IMPL_INTRIN_1(loada_s32, vs32, qs32)static PyObject *simd__intrin_loada_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs32}; if (!PyArg_ParseTuple( args, "O&:"
"loada_s32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs32 = npyv_loada_s32( arg.data.qs32 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
3790
3791#line 41
3792SIMD_IMPL_INTRIN_1(loads_s32, vs32, qs32)static PyObject *simd__intrin_loads_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs32}; if (!PyArg_ParseTuple( args, "O&:"
"loads_s32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs32 = npyv_loads_s32( arg.data.qs32 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
3793
3794#line 41
3795SIMD_IMPL_INTRIN_1(loadl_s32, vs32, qs32)static PyObject *simd__intrin_loadl_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs32}; if (!PyArg_ParseTuple( args, "O&:"
"loadl_s32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs32 = npyv_loadl_s32( arg.data.qs32 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
3796
3797#line 46
3798// special definition due to the nature of store
3799static PyObject *
3800simd__intrin_store_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3801{
3802 simd_arg seq_arg = {.dtype = simd_data_qs32};
3803 simd_arg vec_arg = {.dtype = simd_data_vs32};
3804 if (!PyArg_ParseTuple(
3805 args, "O&O&:store_s32",
3806 simd_arg_converter, &seq_arg,
3807 simd_arg_converter, &vec_arg
3808 )) {
3809 return NULL((void*)0);
3810 }
3811 npyv_store_s32(seq_arg.data.qs32, vec_arg.data.vs32);
3812 // write-back
3813 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs32, simd_data_qs32)) {
3814 simd_arg_free(&seq_arg);
3815 return NULL((void*)0);
3816 }
3817 simd_arg_free(&seq_arg);
3818 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
3819}
3820
3821#line 46
3822// special definition due to the nature of storea
3823static PyObject *
3824simd__intrin_storea_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3825{
3826 simd_arg seq_arg = {.dtype = simd_data_qs32};
3827 simd_arg vec_arg = {.dtype = simd_data_vs32};
3828 if (!PyArg_ParseTuple(
3829 args, "O&O&:storea_s32",
3830 simd_arg_converter, &seq_arg,
3831 simd_arg_converter, &vec_arg
3832 )) {
3833 return NULL((void*)0);
3834 }
3835 npyv_storea_s32(seq_arg.data.qs32, vec_arg.data.vs32);
3836 // write-back
3837 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs32, simd_data_qs32)) {
3838 simd_arg_free(&seq_arg);
3839 return NULL((void*)0);
3840 }
3841 simd_arg_free(&seq_arg);
3842 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
3843}
3844
3845#line 46
3846// special definition due to the nature of stores
3847static PyObject *
3848simd__intrin_stores_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3849{
3850 simd_arg seq_arg = {.dtype = simd_data_qs32};
3851 simd_arg vec_arg = {.dtype = simd_data_vs32};
3852 if (!PyArg_ParseTuple(
3853 args, "O&O&:stores_s32",
3854 simd_arg_converter, &seq_arg,
3855 simd_arg_converter, &vec_arg
3856 )) {
3857 return NULL((void*)0);
3858 }
3859 npyv_stores_s32(seq_arg.data.qs32, vec_arg.data.vs32);
3860 // write-back
3861 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs32, simd_data_qs32)) {
3862 simd_arg_free(&seq_arg);
3863 return NULL((void*)0);
3864 }
3865 simd_arg_free(&seq_arg);
3866 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
3867}
3868
3869#line 46
3870// special definition due to the nature of storel
3871static PyObject *
3872simd__intrin_storel_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3873{
3874 simd_arg seq_arg = {.dtype = simd_data_qs32};
3875 simd_arg vec_arg = {.dtype = simd_data_vs32};
3876 if (!PyArg_ParseTuple(
3877 args, "O&O&:storel_s32",
3878 simd_arg_converter, &seq_arg,
3879 simd_arg_converter, &vec_arg
3880 )) {
3881 return NULL((void*)0);
3882 }
3883 npyv_storel_s32(seq_arg.data.qs32, vec_arg.data.vs32);
3884 // write-back
3885 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs32, simd_data_qs32)) {
3886 simd_arg_free(&seq_arg);
3887 return NULL((void*)0);
3888 }
3889 simd_arg_free(&seq_arg);
3890 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
3891}
3892
3893#line 46
3894// special definition due to the nature of storeh
3895static PyObject *
3896simd__intrin_storeh_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3897{
3898 simd_arg seq_arg = {.dtype = simd_data_qs32};
3899 simd_arg vec_arg = {.dtype = simd_data_vs32};
3900 if (!PyArg_ParseTuple(
3901 args, "O&O&:storeh_s32",
3902 simd_arg_converter, &seq_arg,
3903 simd_arg_converter, &vec_arg
3904 )) {
3905 return NULL((void*)0);
3906 }
3907 npyv_storeh_s32(seq_arg.data.qs32, vec_arg.data.vs32);
3908 // write-back
3909 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs32, simd_data_qs32)) {
3910 simd_arg_free(&seq_arg);
3911 return NULL((void*)0);
3912 }
3913 simd_arg_free(&seq_arg);
3914 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
3915}
3916
3917
3918/****************************************
3919 * Non-contiguous/Partial Memory access
3920 ****************************************/
3921#if 1
3922// Partial Load
3923SIMD_IMPL_INTRIN_3(load_till_s32, vs32, qs32, u32, s32)static PyObject *simd__intrin_load_till_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qs32}; simd_arg arg2 = {.dtype = simd_data_u32
}; simd_arg arg3 = {.dtype = simd_data_s32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""load_till_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs32 = npyv_load_till_s32
( arg1.data.qs32, arg2.data.u32, arg3.data.s32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
3924SIMD_IMPL_INTRIN_2(load_tillz_s32, vs32, qs32, u32)static PyObject *simd__intrin_load_tillz_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qs32}; simd_arg arg2 = {.dtype = simd_data_u32
}; if (!PyArg_ParseTuple( args, "O&O&:""load_tillz_s32"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs32 = npyv_load_tillz_s32
( arg1.data.qs32, arg2.data.u32 )}; simd_arg_free(&arg1);
simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
3925
3926// Partial Store
3927static PyObject *
3928simd__intrin_store_till_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3929{
3930 simd_arg seq_arg = {.dtype = simd_data_qs32};
3931 simd_arg nlane_arg = {.dtype = simd_data_u32};
3932 simd_arg vec_arg = {.dtype = simd_data_vs32};
3933 if (!PyArg_ParseTuple(
3934 args, "O&O&O&:store_till_s32",
3935 simd_arg_converter, &seq_arg,
3936 simd_arg_converter, &nlane_arg,
3937 simd_arg_converter, &vec_arg
3938 )) {
3939 return NULL((void*)0);
3940 }
3941 npyv_store_till_s32(
3942 seq_arg.data.qs32, nlane_arg.data.u32, vec_arg.data.vs32
3943 );
3944 // write-back
3945 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs32, simd_data_qs32)) {
3946 simd_arg_free(&seq_arg);
3947 return NULL((void*)0);
3948 }
3949 simd_arg_free(&seq_arg);
3950 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
3951}
3952
3953// Non-contiguous Load
3954#line 112
3955static PyObject *
3956simd__intrin_loadn_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
3957{
3958 simd_arg seq_arg = {.dtype = simd_data_qs32};
3959 simd_arg stride_arg = {.dtype = simd_data_s64};
3960#if 0
3961 simd_arg nlane_arg = {.dtype = simd_data_u32};
3962#endif // till
3963#if 0
3964 simd_arg fill_arg = {.dtype = simd_data_s32};
3965#endif
3966 if (!PyArg_ParseTuple(
3967 args, "O&O&:loadn_s32",
3968 simd_arg_converter, &seq_arg,
3969 simd_arg_converter, &stride_arg
3970#if 0
3971 ,simd_arg_converter, &nlane_arg
3972#endif
3973#if 0
3974 ,simd_arg_converter, &fill_arg
3975#endif
3976 )) {
3977 return NULL((void*)0);
3978 }
3979 npyv_lanetype_s32 *seq_ptr = seq_arg.data.qs32;
3980 npy_intp stride = (npy_intp)stride_arg.data.s64;
3981 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
3982 Py_ssize_t min_seq_len = stride * npyv_nlanes_s324;
3983 if (stride < 0) {
3984 seq_ptr += cur_seq_len -1;
3985 min_seq_len = -min_seq_len;
3986 }
3987 if (cur_seq_len < min_seq_len) {
3988 PyErr_Format(PyExc_ValueError,
3989 "loadn_s32(), according to provided stride %d, the "
3990 "minimum acceptable size of the required sequence is %d, given(%d)",
3991 stride, min_seq_len, cur_seq_len
3992 );
3993 goto err;
3994 }
3995 npyv_s32 rvec = npyv_loadn_s32(
3996 seq_ptr, stride
3997 #if 0
3998 , nlane_arg.data.u32
3999 #endif
4000 #if 0
4001 , fill_arg.data.s32
4002 #endif
4003 );
4004 simd_arg ret = {
4005 .dtype = simd_data_vs32, .data = {.vs32=rvec}
4006 };
4007 simd_arg_free(&seq_arg);
4008 return simd_arg_to_obj(&ret);
4009err:
4010 simd_arg_free(&seq_arg);
4011 return NULL((void*)0);
4012}
4013
4014#line 112
4015static PyObject *
4016simd__intrin_loadn_till_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4017{
4018 simd_arg seq_arg = {.dtype = simd_data_qs32};
4019 simd_arg stride_arg = {.dtype = simd_data_s64};
4020#if 1
4021 simd_arg nlane_arg = {.dtype = simd_data_u32};
4022#endif // till
4023#if 1
4024 simd_arg fill_arg = {.dtype = simd_data_s32};
4025#endif
4026 if (!PyArg_ParseTuple(
4027 args, "O&O&O&O&:loadn_till_s32",
4028 simd_arg_converter, &seq_arg,
4029 simd_arg_converter, &stride_arg
4030#if 1
4031 ,simd_arg_converter, &nlane_arg
4032#endif
4033#if 1
4034 ,simd_arg_converter, &fill_arg
4035#endif
4036 )) {
4037 return NULL((void*)0);
4038 }
4039 npyv_lanetype_s32 *seq_ptr = seq_arg.data.qs32;
4040 npy_intp stride = (npy_intp)stride_arg.data.s64;
4041 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
4042 Py_ssize_t min_seq_len = stride * npyv_nlanes_s324;
4043 if (stride < 0) {
4044 seq_ptr += cur_seq_len -1;
4045 min_seq_len = -min_seq_len;
4046 }
4047 if (cur_seq_len < min_seq_len) {
4048 PyErr_Format(PyExc_ValueError,
4049 "loadn_till_s32(), according to provided stride %d, the "
4050 "minimum acceptable size of the required sequence is %d, given(%d)",
4051 stride, min_seq_len, cur_seq_len
4052 );
4053 goto err;
4054 }
4055 npyv_s32 rvec = npyv_loadn_till_s32(
4056 seq_ptr, stride
4057 #if 1
4058 , nlane_arg.data.u32
4059 #endif
4060 #if 1
4061 , fill_arg.data.s32
4062 #endif
4063 );
4064 simd_arg ret = {
4065 .dtype = simd_data_vs32, .data = {.vs32=rvec}
4066 };
4067 simd_arg_free(&seq_arg);
4068 return simd_arg_to_obj(&ret);
4069err:
4070 simd_arg_free(&seq_arg);
4071 return NULL((void*)0);
4072}
4073
4074#line 112
4075static PyObject *
4076simd__intrin_loadn_tillz_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4077{
4078 simd_arg seq_arg = {.dtype = simd_data_qs32};
4079 simd_arg stride_arg = {.dtype = simd_data_s64};
4080#if 1
4081 simd_arg nlane_arg = {.dtype = simd_data_u32};
4082#endif // till
4083#if 0
4084 simd_arg fill_arg = {.dtype = simd_data_s32};
4085#endif
4086 if (!PyArg_ParseTuple(
4087 args, "O&O&O&:loadn_tillz_s32",
4088 simd_arg_converter, &seq_arg,
4089 simd_arg_converter, &stride_arg
4090#if 1
4091 ,simd_arg_converter, &nlane_arg
4092#endif
4093#if 0
4094 ,simd_arg_converter, &fill_arg
4095#endif
4096 )) {
4097 return NULL((void*)0);
4098 }
4099 npyv_lanetype_s32 *seq_ptr = seq_arg.data.qs32;
4100 npy_intp stride = (npy_intp)stride_arg.data.s64;
4101 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
4102 Py_ssize_t min_seq_len = stride * npyv_nlanes_s324;
4103 if (stride < 0) {
4104 seq_ptr += cur_seq_len -1;
4105 min_seq_len = -min_seq_len;
4106 }
4107 if (cur_seq_len < min_seq_len) {
4108 PyErr_Format(PyExc_ValueError,
4109 "loadn_tillz_s32(), according to provided stride %d, the "
4110 "minimum acceptable size of the required sequence is %d, given(%d)",
4111 stride, min_seq_len, cur_seq_len
4112 );
4113 goto err;
4114 }
4115 npyv_s32 rvec = npyv_loadn_tillz_s32(
4116 seq_ptr, stride
4117 #if 1
4118 , nlane_arg.data.u32
4119 #endif
4120 #if 0
4121 , fill_arg.data.s32
4122 #endif
4123 );
4124 simd_arg ret = {
4125 .dtype = simd_data_vs32, .data = {.vs32=rvec}
4126 };
4127 simd_arg_free(&seq_arg);
4128 return simd_arg_to_obj(&ret);
4129err:
4130 simd_arg_free(&seq_arg);
4131 return NULL((void*)0);
4132}
4133
4134
4135// Non-contiguous Store
4136#line 178
4137static PyObject *
4138simd__intrin_storen_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4139{
4140 simd_arg seq_arg = {.dtype = simd_data_qs32};
4141 simd_arg stride_arg = {.dtype = simd_data_s64};
4142 simd_arg vec_arg = {.dtype = simd_data_vs32};
4143#if 0
4144 simd_arg nlane_arg = {.dtype = simd_data_u32};
4145#endif
4146 if (!PyArg_ParseTuple(
4147 args, "O&O&O&:storen_s32",
4148 simd_arg_converter, &seq_arg,
4149 simd_arg_converter, &stride_arg
4150#if 0
4151 ,simd_arg_converter, &nlane_arg
4152#endif
4153 ,simd_arg_converter, &vec_arg
4154 )) {
4155 return NULL((void*)0);
4156 }
4157 npyv_lanetype_s32 *seq_ptr = seq_arg.data.qs32;
4158 npy_intp stride = (npy_intp)stride_arg.data.s64;
4159 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
4160 Py_ssize_t min_seq_len = stride * npyv_nlanes_s324;
4161 if (stride < 0) {
4162 seq_ptr += cur_seq_len -1;
4163 min_seq_len = -min_seq_len;
4164 }
4165 // overflow guard
4166 if (cur_seq_len < min_seq_len) {
4167 PyErr_Format(PyExc_ValueError,
4168 "storen_s32(), according to provided stride %d, the"
4169 "minimum acceptable size of the required sequence is %d, given(%d)",
4170 stride, min_seq_len, cur_seq_len
4171 );
4172 goto err;
4173 }
4174 npyv_storen_s32(
4175 seq_ptr, stride
4176 #if 0
4177 ,nlane_arg.data.u32
4178 #endif
4179 ,vec_arg.data.vs32
4180 );
4181 // write-back
4182 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs32, simd_data_qs32)) {
4183 goto err;
4184 }
4185 simd_arg_free(&seq_arg);
4186 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
4187err:
4188 simd_arg_free(&seq_arg);
4189 return NULL((void*)0);
4190}
4191
4192#line 178
4193static PyObject *
4194simd__intrin_storen_till_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4195{
4196 simd_arg seq_arg = {.dtype = simd_data_qs32};
4197 simd_arg stride_arg = {.dtype = simd_data_s64};
4198 simd_arg vec_arg = {.dtype = simd_data_vs32};
4199#if 1
4200 simd_arg nlane_arg = {.dtype = simd_data_u32};
4201#endif
4202 if (!PyArg_ParseTuple(
4203 args, "O&O&O&O&:storen_s32",
4204 simd_arg_converter, &seq_arg,
4205 simd_arg_converter, &stride_arg
4206#if 1
4207 ,simd_arg_converter, &nlane_arg
4208#endif
4209 ,simd_arg_converter, &vec_arg
4210 )) {
4211 return NULL((void*)0);
4212 }
4213 npyv_lanetype_s32 *seq_ptr = seq_arg.data.qs32;
4214 npy_intp stride = (npy_intp)stride_arg.data.s64;
4215 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
4216 Py_ssize_t min_seq_len = stride * npyv_nlanes_s324;
4217 if (stride < 0) {
4218 seq_ptr += cur_seq_len -1;
4219 min_seq_len = -min_seq_len;
4220 }
4221 // overflow guard
4222 if (cur_seq_len < min_seq_len) {
4223 PyErr_Format(PyExc_ValueError,
4224 "storen_till_s32(), according to provided stride %d, the"
4225 "minimum acceptable size of the required sequence is %d, given(%d)",
4226 stride, min_seq_len, cur_seq_len
4227 );
4228 goto err;
4229 }
4230 npyv_storen_till_s32(
4231 seq_ptr, stride
4232 #if 1
4233 ,nlane_arg.data.u32
4234 #endif
4235 ,vec_arg.data.vs32
4236 );
4237 // write-back
4238 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs32, simd_data_qs32)) {
4239 goto err;
4240 }
4241 simd_arg_free(&seq_arg);
4242 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
4243err:
4244 simd_arg_free(&seq_arg);
4245 return NULL((void*)0);
4246}
4247
4248#endif // 1
4249
4250/***************************
4251 * Misc
4252 ***************************/
4253SIMD_IMPL_INTRIN_0(zero_s32, vs32)static PyObject *simd__intrin_zero_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "zero_s32") ) return ((void*)0); simd_arg a = { .
dtype = simd_data_vs32, .data = {.vs32 = _mm_setzero_si128()}
, }; return simd_arg_to_obj(&a); }
4254SIMD_IMPL_INTRIN_1(setall_s32, vs32, s32)static PyObject *simd__intrin_setall_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_s32}; if (!PyArg_ParseTuple( args, "O&:"
"setall_s32", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vs32 = _mm_set1_epi32((int)(arg.data.s32
))}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret); }
4255SIMD_IMPL_INTRIN_3(select_s32, vs32, vb32, vs32, vs32)static PyObject *simd__intrin_select_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; simd_arg arg3 = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""select_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs32 = npyv_select_u8
( arg1.data.vb32, arg2.data.vs32, arg3.data.vs32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4256
4257#line 246
4258#if 1
4259SIMD_IMPL_INTRIN_1(reinterpret_u8_s32, vu8, vs32)static PyObject *simd__intrin_reinterpret_u8_s32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u8_s32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu8 = arg.data.vs32
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu8 }; return simd_arg_to_obj(&ret); }
4260#endif // simd_sup2
4261
4262#line 246
4263#if 1
4264SIMD_IMPL_INTRIN_1(reinterpret_s8_s32, vs8, vs32)static PyObject *simd__intrin_reinterpret_s8_s32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s8_s32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs8 = arg.data.vs32
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs8 }; return simd_arg_to_obj(&ret); }
4265#endif // simd_sup2
4266
4267#line 246
4268#if 1
4269SIMD_IMPL_INTRIN_1(reinterpret_u16_s32, vu16, vs32)static PyObject *simd__intrin_reinterpret_u16_s32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u16_s32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu16 = arg.data.
vs32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&ret)
; }
4270#endif // simd_sup2
4271
4272#line 246
4273#if 1
4274SIMD_IMPL_INTRIN_1(reinterpret_s16_s32, vs16, vs32)static PyObject *simd__intrin_reinterpret_s16_s32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s16_s32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs16 = arg.data.
vs32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs16 }; return simd_arg_to_obj(&ret)
; }
4275#endif // simd_sup2
4276
4277#line 246
4278#if 1
4279SIMD_IMPL_INTRIN_1(reinterpret_u32_s32, vu32, vs32)static PyObject *simd__intrin_reinterpret_u32_s32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u32_s32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu32 = arg.data.
vs32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu32 }; return simd_arg_to_obj(&ret)
; }
4280#endif // simd_sup2
4281
4282#line 246
4283#if 1
4284SIMD_IMPL_INTRIN_1(reinterpret_s32_s32, vs32, vs32)static PyObject *simd__intrin_reinterpret_s32_s32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s32_s32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs32 = arg.data.
vs32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret)
; }
4285#endif // simd_sup2
4286
4287#line 246
4288#if 1
4289SIMD_IMPL_INTRIN_1(reinterpret_u64_s32, vu64, vs32)static PyObject *simd__intrin_reinterpret_u64_s32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u64_s32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu64 = arg.data.
vs32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&ret)
; }
4290#endif // simd_sup2
4291
4292#line 246
4293#if 1
4294SIMD_IMPL_INTRIN_1(reinterpret_s64_s32, vs64, vs32)static PyObject *simd__intrin_reinterpret_s64_s32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s64_s32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs64 = arg.data.
vs32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&ret)
; }
4295#endif // simd_sup2
4296
4297#line 246
4298#if 1
4299SIMD_IMPL_INTRIN_1(reinterpret_f32_s32, vf32, vs32)static PyObject *simd__intrin_reinterpret_f32_s32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f32_s32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf32 = _mm_castsi128_ps
( arg.data.vs32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf32 }; return simd_arg_to_obj
(&ret); }
4300#endif // simd_sup2
4301
4302#line 246
4303#if NPY_SIMD_F641
4304SIMD_IMPL_INTRIN_1(reinterpret_f64_s32, vf64, vs32)static PyObject *simd__intrin_reinterpret_f64_s32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f64_s32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf64 = _mm_castsi128_pd
( arg.data.vs32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf64 }; return simd_arg_to_obj
(&ret); }
4305#endif // simd_sup2
4306
4307
4308/**
4309 * special definition due to the nature of intrinsics
4310 * npyv_setf_s32 and npy_set_s32.
4311*/
4312#line 258
4313static PyObject *
4314simd__intrin_setf_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4315{
4316 npyv_lanetype_s32 *data = simd_sequence_from_iterable(args, simd_data_qs32, npyv_nlanes_s324);
4317 if (data == NULL((void*)0)) {
4318 return NULL((void*)0);
4319 }
4320 simd_data r = {.vs32 = npyv_setf_s32(npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
4321 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
4322 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
4323 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
4324 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
4325 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
4326 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
4327 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
4328 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
4329 data[64] // for setfnpyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
4330 )npyv__setr_epi32((int)(data[1]), (int)(data[2]), (int)(data[3
]), (int)(data[4]))
};
4331 simd_sequence_free(data);
4332 return (PyObject*)PySIMDVector_FromData(r, simd_data_vs32);
4333}
4334
4335#line 258
4336static PyObject *
4337simd__intrin_set_s32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4338{
4339 npyv_lanetype_s32 *data = simd_sequence_from_iterable(args, simd_data_qs32, npyv_nlanes_s324);
4340 if (data == NULL((void*)0)) {
4341 return NULL((void*)0);
4342 }
4343 simd_data r = {.vs32 = npyv_set_s32(npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
4344 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
4345 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
4346 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
4347 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
4348 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
4349 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
4350 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
4351 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
4352 data[64] // for setfnpyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
4353 )npyv__setr_epi32((int)(data[0]), (int)(data[1]), (int)(data[2
]), (int)(data[3]))
};
4354 simd_sequence_free(data);
4355 return (PyObject*)PySIMDVector_FromData(r, simd_data_vs32);
4356}
4357
4358
4359/***************************
4360 * Reorder
4361 ***************************/
4362#line 287
4363SIMD_IMPL_INTRIN_2(combinel_s32, vs32, vs32, vs32)static PyObject *simd__intrin_combinel_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""combinel_s32"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs32 = _mm_unpacklo_epi64
( arg1.data.vs32, arg2.data.vs32 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
4364
4365#line 287
4366SIMD_IMPL_INTRIN_2(combineh_s32, vs32, vs32, vs32)static PyObject *simd__intrin_combineh_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""combineh_s32"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs32 = _mm_unpackhi_epi64
( arg1.data.vs32, arg2.data.vs32 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
4367
4368
4369#line 293
4370SIMD_IMPL_INTRIN_2(combine_s32, vs32x2, vs32, vs32)static PyObject *simd__intrin_combine_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""combine_s32",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs32x2 = npyv__combine
( arg1.data.vs32, arg2.data.vs32 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs32x2 }; return simd_arg_to_obj(&ret); }
4371
4372#line 293
4373SIMD_IMPL_INTRIN_2(zip_s32, vs32x2, vs32, vs32)static PyObject *simd__intrin_zip_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""zip_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32x2 = npyv_zip_s32( arg1.data.vs32,
arg2.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs32x2
}; return simd_arg_to_obj(&ret); }
4374
4375
4376#if 1
4377SIMD_IMPL_INTRIN_1(rev64_s32, vs32, vs32)static PyObject *simd__intrin_rev64_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&:"
"rev64_s32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs32 = npyv_rev64_u32( arg.data.vs32 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
4378#endif
4379
4380/***************************
4381 * Operators
4382 ***************************/
4383#if 31 > 0
4384SIMD_IMPL_INTRIN_2(shl_s32, vs32, vs32, u8)static PyObject *simd__intrin_shl_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shl_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = _mm_sll_epi32(arg1.data.vs32, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs32
}; return simd_arg_to_obj(&ret); }
4385SIMD_IMPL_INTRIN_2(shr_s32, vs32, vs32, u8)static PyObject *simd__intrin_shr_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shr_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = _mm_sra_epi32(arg1.data.vs32, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs32
}; return simd_arg_to_obj(&ret); }
4386// immediate constant
4387SIMD_IMPL_INTRIN_2IMM(shli_s32, vs32, vs32, 31)static PyObject *simd__intrin_shli_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shli_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vs32 = 0 == arg2.data.u8
? _mm_slli_epi32(arg1.data.vs32, 0) : 1 == arg2.data.u8 ? _mm_slli_epi32
(arg1.data.vs32, 1) : 2 == arg2.data.u8 ? _mm_slli_epi32(arg1
.data.vs32, 2) : 3 == arg2.data.u8 ? _mm_slli_epi32(arg1.data
.vs32, 3) : 4 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32
, 4) : 5 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 5) :
6 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 6) : 7 ==
arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 7) : 8 == arg2
.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 8) : 9 == arg2.data
.u8 ? _mm_slli_epi32(arg1.data.vs32, 9) : 10 == arg2.data.u8 ?
_mm_slli_epi32(arg1.data.vs32, 10) : 11 == arg2.data.u8 ? _mm_slli_epi32
(arg1.data.vs32, 11) : 12 == arg2.data.u8 ? _mm_slli_epi32(arg1
.data.vs32, 12) : 13 == arg2.data.u8 ? _mm_slli_epi32(arg1.data
.vs32, 13) : 14 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32
, 14) : 15 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 15
) : 16 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 16) :
17 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 17) : 18
== arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 18) : 19 ==
arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 19) : 20 == arg2
.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 20) : 21 == arg2.data
.u8 ? _mm_slli_epi32(arg1.data.vs32, 21) : 22 == arg2.data.u8
? _mm_slli_epi32(arg1.data.vs32, 22) : 23 == arg2.data.u8 ? _mm_slli_epi32
(arg1.data.vs32, 23) : 24 == arg2.data.u8 ? _mm_slli_epi32(arg1
.data.vs32, 24) : 25 == arg2.data.u8 ? _mm_slli_epi32(arg1.data
.vs32, 25) : 26 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32
, 26) : 27 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 27
) : 28 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 28) :
29 == arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 29) : 30
== arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 30) : 31 ==
arg2.data.u8 ? _mm_slli_epi32(arg1.data.vs32, 31) : data.vs32
; simd_arg_free(&arg1); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
4388SIMD_IMPL_INTRIN_2IMM(shri_s32, vs32, vs32, 32)static PyObject *simd__intrin_shri_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shri_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vs32 = 1 == arg2.data.u8
? _mm_srai_epi32(arg1.data.vs32, 1) : 2 == arg2.data.u8 ? _mm_srai_epi32
(arg1.data.vs32, 2) : 3 == arg2.data.u8 ? _mm_srai_epi32(arg1
.data.vs32, 3) : 4 == arg2.data.u8 ? _mm_srai_epi32(arg1.data
.vs32, 4) : 5 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32
, 5) : 6 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 6) :
7 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 7) : 8 ==
arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 8) : 9 == arg2
.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 9) : 10 == arg2.data
.u8 ? _mm_srai_epi32(arg1.data.vs32, 10) : 11 == arg2.data.u8
? _mm_srai_epi32(arg1.data.vs32, 11) : 12 == arg2.data.u8 ? _mm_srai_epi32
(arg1.data.vs32, 12) : 13 == arg2.data.u8 ? _mm_srai_epi32(arg1
.data.vs32, 13) : 14 == arg2.data.u8 ? _mm_srai_epi32(arg1.data
.vs32, 14) : 15 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32
, 15) : 16 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 16
) : 17 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 17) :
18 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 18) : 19
== arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 19) : 20 ==
arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 20) : 21 == arg2
.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 21) : 22 == arg2.data
.u8 ? _mm_srai_epi32(arg1.data.vs32, 22) : 23 == arg2.data.u8
? _mm_srai_epi32(arg1.data.vs32, 23) : 24 == arg2.data.u8 ? _mm_srai_epi32
(arg1.data.vs32, 24) : 25 == arg2.data.u8 ? _mm_srai_epi32(arg1
.data.vs32, 25) : 26 == arg2.data.u8 ? _mm_srai_epi32(arg1.data
.vs32, 26) : 27 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32
, 27) : 28 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 28
) : 29 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 29) :
30 == arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 30) : 31
== arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 31) : 32 ==
arg2.data.u8 ? _mm_srai_epi32(arg1.data.vs32, 32) : data.vs32
; simd_arg_free(&arg1); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
4389#endif // shl_imm
4390
4391#line 314
4392SIMD_IMPL_INTRIN_2(and_s32, vs32, vs32, vs32)static PyObject *simd__intrin_and_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""and_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = _mm_and_si128( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4393
4394#line 314
4395SIMD_IMPL_INTRIN_2(or_s32, vs32, vs32, vs32)static PyObject *simd__intrin_or_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""or_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = _mm_or_si128( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4396
4397#line 314
4398SIMD_IMPL_INTRIN_2(xor_s32, vs32, vs32, vs32)static PyObject *simd__intrin_xor_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = _mm_xor_si128( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4399
4400
4401SIMD_IMPL_INTRIN_1(not_s32, vs32, vs32)static PyObject *simd__intrin_not_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&:"
"not_s32", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vs32 = _mm_xor_si128(arg.data.vs32, _mm_set1_epi32
(-1))}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret)
; }
4402
4403#line 322
4404SIMD_IMPL_INTRIN_2(cmpeq_s32, vb32, vs32, vs32)static PyObject *simd__intrin_cmpeq_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpeq_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_cmpeq_epi32( arg1.data.vs32
, arg2.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(
&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
4405
4406#line 322
4407SIMD_IMPL_INTRIN_2(cmpneq_s32, vb32, vs32, vs32)static PyObject *simd__intrin_cmpneq_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpneq_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_xor_si128(_mm_cmpeq_epi32(arg1
.data.vs32, arg2.data.vs32), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb32 }; return simd_arg_to_obj(&
ret); }
4408
4409#line 322
4410SIMD_IMPL_INTRIN_2(cmpgt_s32, vb32, vs32, vs32)static PyObject *simd__intrin_cmpgt_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpgt_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_cmpgt_epi32( arg1.data.vs32
, arg2.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(
&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
4411
4412#line 322
4413SIMD_IMPL_INTRIN_2(cmpge_s32, vb32, vs32, vs32)static PyObject *simd__intrin_cmpge_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpge_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_xor_si128(_mm_cmpgt_epi32(arg2
.data.vs32, arg1.data.vs32), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb32 }; return simd_arg_to_obj(&
ret); }
4414
4415#line 322
4416SIMD_IMPL_INTRIN_2(cmplt_s32, vb32, vs32, vs32)static PyObject *simd__intrin_cmplt_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmplt_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_cmpgt_epi32(arg2.data.vs32,
arg1.data.vs32)}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
4417
4418#line 322
4419SIMD_IMPL_INTRIN_2(cmple_s32, vb32, vs32, vs32)static PyObject *simd__intrin_cmple_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmple_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_xor_si128(_mm_cmpgt_epi32(arg1
.data.vs32, arg2.data.vs32), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb32 }; return simd_arg_to_obj(&
ret); }
4420
4421
4422/***************************
4423 * Conversion
4424 ***************************/
4425SIMD_IMPL_INTRIN_1(cvt_s32_b32, vs32, vb32)static PyObject *simd__intrin_cvt_s32_b32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb32}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_s32_b32", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vs32 = arg.data.vb32}; simd_arg_free(
&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs32
}; return simd_arg_to_obj(&ret); }
4426SIMD_IMPL_INTRIN_1(cvt_b32_s32, vb32, vs32)static PyObject *simd__intrin_cvt_b32_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_b32_s32", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vb32 = arg.data.vs32}; simd_arg_free(
&arg); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
4427#if 0
4428SIMD_IMPL_INTRIN_1(expand_s32_s32, vs32x2, vs32)static PyObject *simd__intrin_expand_s32_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&:"
"expand_s32_s32", simd_arg_converter, &arg )) return ((void
*)0); simd_data data = {.vs32x2 = npyv_expand_s32_s32( arg.data
.vs32 )}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs32x2 }; return simd_arg_to_obj(&ret
); }
4429#endif // expand_sup
4430/***************************
4431 * Arithmetic
4432 ***************************/
4433#line 339
4434SIMD_IMPL_INTRIN_2(add_s32, vs32, vs32, vs32)static PyObject *simd__intrin_add_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""add_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = _mm_add_epi32( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4435
4436#line 339
4437SIMD_IMPL_INTRIN_2(sub_s32, vs32, vs32, vs32)static PyObject *simd__intrin_sub_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""sub_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = _mm_sub_epi32( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4438
4439
4440#if 0
4441#line 346
4442SIMD_IMPL_INTRIN_2(adds_s32, vs32, vs32, vs32)static PyObject *simd__intrin_adds_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""adds_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = npyv_adds_s32( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4443
4444#line 346
4445SIMD_IMPL_INTRIN_2(subs_s32, vs32, vs32, vs32)static PyObject *simd__intrin_subs_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""subs_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = npyv_subs_s32( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4446
4447#endif // sat_sup
4448
4449#if 1
4450SIMD_IMPL_INTRIN_2(mul_s32, vs32, vs32, vs32)static PyObject *simd__intrin_mul_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""mul_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = npyv_mul_u32( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4451#endif // mul_sup
4452
4453#if 0
4454SIMD_IMPL_INTRIN_2(div_s32, vs32, vs32, vs32)static PyObject *simd__intrin_div_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""div_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = npyv_div_s32( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4455#endif // div_sup
4456
4457#if 1
4458SIMD_IMPL_INTRIN_1(divisor_s32, vs32x3, s32)static PyObject *simd__intrin_divisor_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_s32}; if (!PyArg_ParseTuple( args, "O&:"
"divisor_s32", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vs32x3 = npyv_divisor_s32( arg.data.s32
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vs32x3 }; return simd_arg_to_obj(&ret);
}
4459SIMD_IMPL_INTRIN_2(divc_s32, vs32, vs32, vs32x3)static PyObject *simd__intrin_divc_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32x3
}; if (!PyArg_ParseTuple( args, "O&O&:""divc_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = npyv_divc_s32( arg1.data.vs32, arg2
.data.vs32x3 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs32
}; return simd_arg_to_obj(&ret); }
4460#endif // intdiv_sup
4461
4462#if 0
4463#line 367
4464SIMD_IMPL_INTRIN_3(muladd_s32, vs32, vs32, vs32, vs32)static PyObject *simd__intrin_muladd_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; simd_arg arg3 = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""muladd_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs32 = npyv_muladd_s32
( arg1.data.vs32, arg2.data.vs32, arg3.data.vs32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4465
4466#line 367
4467SIMD_IMPL_INTRIN_3(mulsub_s32, vs32, vs32, vs32, vs32)static PyObject *simd__intrin_mulsub_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; simd_arg arg3 = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""mulsub_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs32 = npyv_mulsub_s32
( arg1.data.vs32, arg2.data.vs32, arg3.data.vs32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4468
4469#line 367
4470SIMD_IMPL_INTRIN_3(nmuladd_s32, vs32, vs32, vs32, vs32)static PyObject *simd__intrin_nmuladd_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; simd_arg arg3 = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmuladd_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs32 = npyv_nmuladd_s32
( arg1.data.vs32, arg2.data.vs32, arg3.data.vs32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4471
4472#line 367
4473SIMD_IMPL_INTRIN_3(nmulsub_s32, vs32, vs32, vs32, vs32)static PyObject *simd__intrin_nmulsub_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; simd_arg arg3 = {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmulsub_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs32 = npyv_nmulsub_s32
( arg1.data.vs32, arg2.data.vs32, arg3.data.vs32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4474
4475#endif // fused_sup
4476
4477#if 0
4478SIMD_IMPL_INTRIN_1(sum_s32, s32, vs32)static PyObject *simd__intrin_sum_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&:"
"sum_s32", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.s32 = npyv_sum_s32( arg.data.vs32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_s32
}; return simd_arg_to_obj(&ret); }
4479#endif // sum_sup
4480
4481#if 0
4482SIMD_IMPL_INTRIN_1(sumup_s32, s32, vs32)static PyObject *simd__intrin_sumup_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&:"
"sumup_s32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.s32 = npyv_sumup_s32( arg.data.vs32 )};
simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_s32 }; return simd_arg_to_obj(&ret); }
4483#endif // sumup_sup
4484
4485/***************************
4486 * Math
4487 ***************************/
4488#if 0
4489#line 386
4490SIMD_IMPL_INTRIN_1(sqrt_s32, vs32, vs32)static PyObject *simd__intrin_sqrt_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&:"
"sqrt_s32", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vs32 = npyv_sqrt_s32( arg.data.vs32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs32
}; return simd_arg_to_obj(&ret); }
4491
4492#line 386
4493SIMD_IMPL_INTRIN_1(recip_s32, vs32, vs32)static PyObject *simd__intrin_recip_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&:"
"recip_s32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs32 = npyv_recip_s32( arg.data.vs32 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
4494
4495#line 386
4496SIMD_IMPL_INTRIN_1(abs_s32, vs32, vs32)static PyObject *simd__intrin_abs_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&:"
"abs_s32", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vs32 = npyv_abs_s32( arg.data.vs32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs32
}; return simd_arg_to_obj(&ret); }
4497
4498#line 386
4499SIMD_IMPL_INTRIN_1(square_s32, vs32, vs32)static PyObject *simd__intrin_square_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&:"
"square_s32", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vs32 = npyv_square_s32( arg.data.vs32 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs32 }; return simd_arg_to_obj(&ret); }
4500
4501#endif
4502
4503#line 393
4504SIMD_IMPL_INTRIN_2(max_s32, vs32, vs32, vs32)static PyObject *simd__intrin_max_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""max_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = npyv_max_s32( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4505
4506#line 393
4507SIMD_IMPL_INTRIN_2(min_s32, vs32, vs32, vs32)static PyObject *simd__intrin_min_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""min_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = npyv_min_s32( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4508
4509
4510#if 0
4511#line 400
4512SIMD_IMPL_INTRIN_2(maxp_s32, vs32, vs32, vs32)static PyObject *simd__intrin_maxp_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""maxp_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = npyv_maxp_s32( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4513
4514#line 400
4515SIMD_IMPL_INTRIN_2(minp_s32, vs32, vs32, vs32)static PyObject *simd__intrin_minp_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; if (!PyArg_ParseTuple( args, "O&O&:""minp_s32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs32 = npyv_minp_s32( arg1.data.vs32, arg2
.data.vs32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs32 }; return
simd_arg_to_obj(&ret); }
4516
4517#endif
4518
4519/***************************
4520 * Mask operations
4521 ***************************/
4522#line 410
4523 SIMD_IMPL_INTRIN_4(ifadd_s32, vs32, vb32, vs32, vs32, vs32)static PyObject *simd__intrin_ifadd_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; simd_arg arg3 = {.dtype = simd_data_vs32}; simd_arg arg4 =
{.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifadd_s32", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vs32 = npyv_ifadd_s32
( arg1.data.vb32, arg2.data.vs32, arg3.data.vs32, arg4.data.vs32
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vs32 }; return simd_arg_to_obj(&
ret); }
4524
4525#line 410
4526 SIMD_IMPL_INTRIN_4(ifsub_s32, vs32, vb32, vs32, vs32, vs32)static PyObject *simd__intrin_ifsub_s32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vs32
}; simd_arg arg3 = {.dtype = simd_data_vs32}; simd_arg arg4 =
{.dtype = simd_data_vs32}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifsub_s32", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vs32 = npyv_ifsub_s32
( arg1.data.vb32, arg2.data.vs32, arg3.data.vs32, arg4.data.vs32
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vs32 }; return simd_arg_to_obj(&
ret); }
4527
4528
4529#endif // simd_sup
4530
4531#line 34
4532#if 1
4533/***************************
4534 * Memory
4535 ***************************/
4536#line 41
4537SIMD_IMPL_INTRIN_1(load_u64, vu64, qu64)static PyObject *simd__intrin_load_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu64}; if (!PyArg_ParseTuple( args, "O&:"
"load_u64", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vu64 = npyv_load_u64( arg.data.qu64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu64
}; return simd_arg_to_obj(&ret); }
4538
4539#line 41
4540SIMD_IMPL_INTRIN_1(loada_u64, vu64, qu64)static PyObject *simd__intrin_loada_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu64}; if (!PyArg_ParseTuple( args, "O&:"
"loada_u64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu64 = npyv_loada_u64( arg.data.qu64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu64 }; return simd_arg_to_obj(&ret); }
4541
4542#line 41
4543SIMD_IMPL_INTRIN_1(loads_u64, vu64, qu64)static PyObject *simd__intrin_loads_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu64}; if (!PyArg_ParseTuple( args, "O&:"
"loads_u64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu64 = npyv_loads_u64( arg.data.qu64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu64 }; return simd_arg_to_obj(&ret); }
4544
4545#line 41
4546SIMD_IMPL_INTRIN_1(loadl_u64, vu64, qu64)static PyObject *simd__intrin_loadl_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qu64}; if (!PyArg_ParseTuple( args, "O&:"
"loadl_u64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu64 = npyv_loadl_u64( arg.data.qu64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu64 }; return simd_arg_to_obj(&ret); }
4547
4548#line 46
4549// special definition due to the nature of store
4550static PyObject *
4551simd__intrin_store_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4552{
4553 simd_arg seq_arg = {.dtype = simd_data_qu64};
4554 simd_arg vec_arg = {.dtype = simd_data_vu64};
4555 if (!PyArg_ParseTuple(
4556 args, "O&O&:store_u64",
4557 simd_arg_converter, &seq_arg,
4558 simd_arg_converter, &vec_arg
4559 )) {
4560 return NULL((void*)0);
4561 }
4562 npyv_store_u64(seq_arg.data.qu64, vec_arg.data.vu64);
4563 // write-back
4564 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu64, simd_data_qu64)) {
4565 simd_arg_free(&seq_arg);
4566 return NULL((void*)0);
4567 }
4568 simd_arg_free(&seq_arg);
4569 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
4570}
4571
4572#line 46
4573// special definition due to the nature of storea
4574static PyObject *
4575simd__intrin_storea_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4576{
4577 simd_arg seq_arg = {.dtype = simd_data_qu64};
4578 simd_arg vec_arg = {.dtype = simd_data_vu64};
4579 if (!PyArg_ParseTuple(
4580 args, "O&O&:storea_u64",
4581 simd_arg_converter, &seq_arg,
4582 simd_arg_converter, &vec_arg
4583 )) {
4584 return NULL((void*)0);
4585 }
4586 npyv_storea_u64(seq_arg.data.qu64, vec_arg.data.vu64);
4587 // write-back
4588 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu64, simd_data_qu64)) {
4589 simd_arg_free(&seq_arg);
4590 return NULL((void*)0);
4591 }
4592 simd_arg_free(&seq_arg);
4593 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
4594}
4595
4596#line 46
4597// special definition due to the nature of stores
4598static PyObject *
4599simd__intrin_stores_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4600{
4601 simd_arg seq_arg = {.dtype = simd_data_qu64};
4602 simd_arg vec_arg = {.dtype = simd_data_vu64};
4603 if (!PyArg_ParseTuple(
4604 args, "O&O&:stores_u64",
4605 simd_arg_converter, &seq_arg,
4606 simd_arg_converter, &vec_arg
4607 )) {
4608 return NULL((void*)0);
4609 }
4610 npyv_stores_u64(seq_arg.data.qu64, vec_arg.data.vu64);
4611 // write-back
4612 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu64, simd_data_qu64)) {
4613 simd_arg_free(&seq_arg);
4614 return NULL((void*)0);
4615 }
4616 simd_arg_free(&seq_arg);
4617 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
4618}
4619
4620#line 46
4621// special definition due to the nature of storel
4622static PyObject *
4623simd__intrin_storel_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4624{
4625 simd_arg seq_arg = {.dtype = simd_data_qu64};
4626 simd_arg vec_arg = {.dtype = simd_data_vu64};
4627 if (!PyArg_ParseTuple(
4628 args, "O&O&:storel_u64",
4629 simd_arg_converter, &seq_arg,
4630 simd_arg_converter, &vec_arg
4631 )) {
4632 return NULL((void*)0);
4633 }
4634 npyv_storel_u64(seq_arg.data.qu64, vec_arg.data.vu64);
4635 // write-back
4636 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu64, simd_data_qu64)) {
4637 simd_arg_free(&seq_arg);
4638 return NULL((void*)0);
4639 }
4640 simd_arg_free(&seq_arg);
4641 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
4642}
4643
4644#line 46
4645// special definition due to the nature of storeh
4646static PyObject *
4647simd__intrin_storeh_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4648{
4649 simd_arg seq_arg = {.dtype = simd_data_qu64};
4650 simd_arg vec_arg = {.dtype = simd_data_vu64};
4651 if (!PyArg_ParseTuple(
4652 args, "O&O&:storeh_u64",
4653 simd_arg_converter, &seq_arg,
4654 simd_arg_converter, &vec_arg
4655 )) {
4656 return NULL((void*)0);
4657 }
4658 npyv_storeh_u64(seq_arg.data.qu64, vec_arg.data.vu64);
4659 // write-back
4660 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu64, simd_data_qu64)) {
4661 simd_arg_free(&seq_arg);
4662 return NULL((void*)0);
4663 }
4664 simd_arg_free(&seq_arg);
4665 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
4666}
4667
4668
4669/****************************************
4670 * Non-contiguous/Partial Memory access
4671 ****************************************/
4672#if 1
4673// Partial Load
4674SIMD_IMPL_INTRIN_3(load_till_u64, vu64, qu64, u32, u64)static PyObject *simd__intrin_load_till_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qu64}; simd_arg arg2 = {.dtype = simd_data_u32
}; simd_arg arg3 = {.dtype = simd_data_u64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""load_till_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu64 = npyv_load_till_u64
( arg1.data.qu64, arg2.data.u32, arg3.data.u64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
4675SIMD_IMPL_INTRIN_2(load_tillz_u64, vu64, qu64, u32)static PyObject *simd__intrin_load_tillz_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qu64}; simd_arg arg2 = {.dtype = simd_data_u32
}; if (!PyArg_ParseTuple( args, "O&O&:""load_tillz_u64"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu64 = npyv_load_tillz_u64
( arg1.data.qu64, arg2.data.u32 )}; simd_arg_free(&arg1);
simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu64 }; return simd_arg_to_obj(&ret); }
4676
4677// Partial Store
4678static PyObject *
4679simd__intrin_store_till_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4680{
4681 simd_arg seq_arg = {.dtype = simd_data_qu64};
4682 simd_arg nlane_arg = {.dtype = simd_data_u32};
4683 simd_arg vec_arg = {.dtype = simd_data_vu64};
4684 if (!PyArg_ParseTuple(
4685 args, "O&O&O&:store_till_u64",
4686 simd_arg_converter, &seq_arg,
4687 simd_arg_converter, &nlane_arg,
4688 simd_arg_converter, &vec_arg
4689 )) {
4690 return NULL((void*)0);
4691 }
4692 npyv_store_till_u64(
4693 seq_arg.data.qu64, nlane_arg.data.u32, vec_arg.data.vu64
4694 );
4695 // write-back
4696 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu64, simd_data_qu64)) {
4697 simd_arg_free(&seq_arg);
4698 return NULL((void*)0);
4699 }
4700 simd_arg_free(&seq_arg);
4701 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
4702}
4703
4704// Non-contiguous Load
4705#line 112
4706static PyObject *
4707simd__intrin_loadn_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4708{
4709 simd_arg seq_arg = {.dtype = simd_data_qu64};
4710 simd_arg stride_arg = {.dtype = simd_data_s64};
4711#if 0
4712 simd_arg nlane_arg = {.dtype = simd_data_u32};
4713#endif // till
4714#if 0
4715 simd_arg fill_arg = {.dtype = simd_data_u64};
4716#endif
4717 if (!PyArg_ParseTuple(
4718 args, "O&O&:loadn_u64",
4719 simd_arg_converter, &seq_arg,
4720 simd_arg_converter, &stride_arg
4721#if 0
4722 ,simd_arg_converter, &nlane_arg
4723#endif
4724#if 0
4725 ,simd_arg_converter, &fill_arg
4726#endif
4727 )) {
4728 return NULL((void*)0);
4729 }
4730 npyv_lanetype_u64 *seq_ptr = seq_arg.data.qu64;
4731 npy_intp stride = (npy_intp)stride_arg.data.s64;
4732 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
4733 Py_ssize_t min_seq_len = stride * npyv_nlanes_u642;
4734 if (stride < 0) {
4735 seq_ptr += cur_seq_len -1;
4736 min_seq_len = -min_seq_len;
4737 }
4738 if (cur_seq_len < min_seq_len) {
4739 PyErr_Format(PyExc_ValueError,
4740 "loadn_u64(), according to provided stride %d, the "
4741 "minimum acceptable size of the required sequence is %d, given(%d)",
4742 stride, min_seq_len, cur_seq_len
4743 );
4744 goto err;
4745 }
4746 npyv_u64 rvec = npyv_loadn_u64(
4747 seq_ptr, stride
4748 #if 0
4749 , nlane_arg.data.u32
4750 #endif
4751 #if 0
4752 , fill_arg.data.u64
4753 #endif
4754 );
4755 simd_arg ret = {
4756 .dtype = simd_data_vu64, .data = {.vu64=rvec}
4757 };
4758 simd_arg_free(&seq_arg);
4759 return simd_arg_to_obj(&ret);
4760err:
4761 simd_arg_free(&seq_arg);
4762 return NULL((void*)0);
4763}
4764
4765#line 112
4766static PyObject *
4767simd__intrin_loadn_till_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4768{
4769 simd_arg seq_arg = {.dtype = simd_data_qu64};
4770 simd_arg stride_arg = {.dtype = simd_data_s64};
4771#if 1
4772 simd_arg nlane_arg = {.dtype = simd_data_u32};
4773#endif // till
4774#if 1
4775 simd_arg fill_arg = {.dtype = simd_data_u64};
4776#endif
4777 if (!PyArg_ParseTuple(
4778 args, "O&O&O&O&:loadn_till_u64",
4779 simd_arg_converter, &seq_arg,
4780 simd_arg_converter, &stride_arg
4781#if 1
4782 ,simd_arg_converter, &nlane_arg
4783#endif
4784#if 1
4785 ,simd_arg_converter, &fill_arg
4786#endif
4787 )) {
4788 return NULL((void*)0);
4789 }
4790 npyv_lanetype_u64 *seq_ptr = seq_arg.data.qu64;
4791 npy_intp stride = (npy_intp)stride_arg.data.s64;
4792 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
4793 Py_ssize_t min_seq_len = stride * npyv_nlanes_u642;
4794 if (stride < 0) {
4795 seq_ptr += cur_seq_len -1;
4796 min_seq_len = -min_seq_len;
4797 }
4798 if (cur_seq_len < min_seq_len) {
4799 PyErr_Format(PyExc_ValueError,
4800 "loadn_till_u64(), according to provided stride %d, the "
4801 "minimum acceptable size of the required sequence is %d, given(%d)",
4802 stride, min_seq_len, cur_seq_len
4803 );
4804 goto err;
4805 }
4806 npyv_u64 rvec = npyv_loadn_till_u64(
4807 seq_ptr, stride
4808 #if 1
4809 , nlane_arg.data.u32
4810 #endif
4811 #if 1
4812 , fill_arg.data.u64
4813 #endif
4814 );
4815 simd_arg ret = {
4816 .dtype = simd_data_vu64, .data = {.vu64=rvec}
4817 };
4818 simd_arg_free(&seq_arg);
4819 return simd_arg_to_obj(&ret);
4820err:
4821 simd_arg_free(&seq_arg);
4822 return NULL((void*)0);
4823}
4824
4825#line 112
4826static PyObject *
4827simd__intrin_loadn_tillz_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4828{
4829 simd_arg seq_arg = {.dtype = simd_data_qu64};
4830 simd_arg stride_arg = {.dtype = simd_data_s64};
4831#if 1
4832 simd_arg nlane_arg = {.dtype = simd_data_u32};
4833#endif // till
4834#if 0
4835 simd_arg fill_arg = {.dtype = simd_data_u64};
4836#endif
4837 if (!PyArg_ParseTuple(
4838 args, "O&O&O&:loadn_tillz_u64",
4839 simd_arg_converter, &seq_arg,
4840 simd_arg_converter, &stride_arg
4841#if 1
4842 ,simd_arg_converter, &nlane_arg
4843#endif
4844#if 0
4845 ,simd_arg_converter, &fill_arg
4846#endif
4847 )) {
4848 return NULL((void*)0);
4849 }
4850 npyv_lanetype_u64 *seq_ptr = seq_arg.data.qu64;
4851 npy_intp stride = (npy_intp)stride_arg.data.s64;
4852 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
4853 Py_ssize_t min_seq_len = stride * npyv_nlanes_u642;
4854 if (stride < 0) {
4855 seq_ptr += cur_seq_len -1;
4856 min_seq_len = -min_seq_len;
4857 }
4858 if (cur_seq_len < min_seq_len) {
4859 PyErr_Format(PyExc_ValueError,
4860 "loadn_tillz_u64(), according to provided stride %d, the "
4861 "minimum acceptable size of the required sequence is %d, given(%d)",
4862 stride, min_seq_len, cur_seq_len
4863 );
4864 goto err;
4865 }
4866 npyv_u64 rvec = npyv_loadn_tillz_u64(
4867 seq_ptr, stride
4868 #if 1
4869 , nlane_arg.data.u32
4870 #endif
4871 #if 0
4872 , fill_arg.data.u64
4873 #endif
4874 );
4875 simd_arg ret = {
4876 .dtype = simd_data_vu64, .data = {.vu64=rvec}
4877 };
4878 simd_arg_free(&seq_arg);
4879 return simd_arg_to_obj(&ret);
4880err:
4881 simd_arg_free(&seq_arg);
4882 return NULL((void*)0);
4883}
4884
4885
4886// Non-contiguous Store
4887#line 178
4888static PyObject *
4889simd__intrin_storen_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4890{
4891 simd_arg seq_arg = {.dtype = simd_data_qu64};
4892 simd_arg stride_arg = {.dtype = simd_data_s64};
4893 simd_arg vec_arg = {.dtype = simd_data_vu64};
4894#if 0
4895 simd_arg nlane_arg = {.dtype = simd_data_u32};
4896#endif
4897 if (!PyArg_ParseTuple(
4898 args, "O&O&O&:storen_u64",
4899 simd_arg_converter, &seq_arg,
4900 simd_arg_converter, &stride_arg
4901#if 0
4902 ,simd_arg_converter, &nlane_arg
4903#endif
4904 ,simd_arg_converter, &vec_arg
4905 )) {
4906 return NULL((void*)0);
4907 }
4908 npyv_lanetype_u64 *seq_ptr = seq_arg.data.qu64;
4909 npy_intp stride = (npy_intp)stride_arg.data.s64;
4910 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
4911 Py_ssize_t min_seq_len = stride * npyv_nlanes_u642;
4912 if (stride < 0) {
4913 seq_ptr += cur_seq_len -1;
4914 min_seq_len = -min_seq_len;
4915 }
4916 // overflow guard
4917 if (cur_seq_len < min_seq_len) {
4918 PyErr_Format(PyExc_ValueError,
4919 "storen_u64(), according to provided stride %d, the"
4920 "minimum acceptable size of the required sequence is %d, given(%d)",
4921 stride, min_seq_len, cur_seq_len
4922 );
4923 goto err;
4924 }
4925 npyv_storen_u64(
4926 seq_ptr, stride
4927 #if 0
4928 ,nlane_arg.data.u32
4929 #endif
4930 ,vec_arg.data.vu64
4931 );
4932 // write-back
4933 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu64, simd_data_qu64)) {
4934 goto err;
4935 }
4936 simd_arg_free(&seq_arg);
4937 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
4938err:
4939 simd_arg_free(&seq_arg);
4940 return NULL((void*)0);
4941}
4942
4943#line 178
4944static PyObject *
4945simd__intrin_storen_till_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
4946{
4947 simd_arg seq_arg = {.dtype = simd_data_qu64};
4948 simd_arg stride_arg = {.dtype = simd_data_s64};
4949 simd_arg vec_arg = {.dtype = simd_data_vu64};
4950#if 1
4951 simd_arg nlane_arg = {.dtype = simd_data_u32};
4952#endif
4953 if (!PyArg_ParseTuple(
4954 args, "O&O&O&O&:storen_u64",
4955 simd_arg_converter, &seq_arg,
4956 simd_arg_converter, &stride_arg
4957#if 1
4958 ,simd_arg_converter, &nlane_arg
4959#endif
4960 ,simd_arg_converter, &vec_arg
4961 )) {
4962 return NULL((void*)0);
4963 }
4964 npyv_lanetype_u64 *seq_ptr = seq_arg.data.qu64;
4965 npy_intp stride = (npy_intp)stride_arg.data.s64;
4966 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
4967 Py_ssize_t min_seq_len = stride * npyv_nlanes_u642;
4968 if (stride < 0) {
4969 seq_ptr += cur_seq_len -1;
4970 min_seq_len = -min_seq_len;
4971 }
4972 // overflow guard
4973 if (cur_seq_len < min_seq_len) {
4974 PyErr_Format(PyExc_ValueError,
4975 "storen_till_u64(), according to provided stride %d, the"
4976 "minimum acceptable size of the required sequence is %d, given(%d)",
4977 stride, min_seq_len, cur_seq_len
4978 );
4979 goto err;
4980 }
4981 npyv_storen_till_u64(
4982 seq_ptr, stride
4983 #if 1
4984 ,nlane_arg.data.u32
4985 #endif
4986 ,vec_arg.data.vu64
4987 );
4988 // write-back
4989 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qu64, simd_data_qu64)) {
4990 goto err;
4991 }
4992 simd_arg_free(&seq_arg);
4993 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
4994err:
4995 simd_arg_free(&seq_arg);
4996 return NULL((void*)0);
4997}
4998
4999#endif // 1
5000
5001/***************************
5002 * Misc
5003 ***************************/
5004SIMD_IMPL_INTRIN_0(zero_u64, vu64)static PyObject *simd__intrin_zero_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "zero_u64") ) return ((void*)0); simd_arg a = { .
dtype = simd_data_vu64, .data = {.vu64 = _mm_setzero_si128()}
, }; return simd_arg_to_obj(&a); }
5005SIMD_IMPL_INTRIN_1(setall_u64, vu64, u64)static PyObject *simd__intrin_setall_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_u64}; if (!PyArg_ParseTuple( args, "O&:"
"setall_u64", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vu64 = _mm_set1_epi64x((npy_int64)(arg
.data.u64))}; simd_arg_free(&arg); simd_arg ret = { .data
= data, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&
ret); }
5006SIMD_IMPL_INTRIN_3(select_u64, vu64, vb64, vu64, vu64)static PyObject *simd__intrin_select_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; simd_arg arg3 = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""select_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu64 = npyv_select_u8
( arg1.data.vb64, arg2.data.vu64, arg3.data.vu64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5007
5008#line 246
5009#if 1
5010SIMD_IMPL_INTRIN_1(reinterpret_u8_u64, vu8, vu64)static PyObject *simd__intrin_reinterpret_u8_u64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u8_u64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu8 = arg.data.vu64
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu8 }; return simd_arg_to_obj(&ret); }
5011#endif // simd_sup2
5012
5013#line 246
5014#if 1
5015SIMD_IMPL_INTRIN_1(reinterpret_s8_u64, vs8, vu64)static PyObject *simd__intrin_reinterpret_s8_u64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s8_u64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs8 = arg.data.vu64
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs8 }; return simd_arg_to_obj(&ret); }
5016#endif // simd_sup2
5017
5018#line 246
5019#if 1
5020SIMD_IMPL_INTRIN_1(reinterpret_u16_u64, vu16, vu64)static PyObject *simd__intrin_reinterpret_u16_u64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u16_u64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu16 = arg.data.
vu64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&ret)
; }
5021#endif // simd_sup2
5022
5023#line 246
5024#if 1
5025SIMD_IMPL_INTRIN_1(reinterpret_s16_u64, vs16, vu64)static PyObject *simd__intrin_reinterpret_s16_u64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s16_u64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs16 = arg.data.
vu64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs16 }; return simd_arg_to_obj(&ret)
; }
5026#endif // simd_sup2
5027
5028#line 246
5029#if 1
5030SIMD_IMPL_INTRIN_1(reinterpret_u32_u64, vu32, vu64)static PyObject *simd__intrin_reinterpret_u32_u64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u32_u64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu32 = arg.data.
vu64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu32 }; return simd_arg_to_obj(&ret)
; }
5031#endif // simd_sup2
5032
5033#line 246
5034#if 1
5035SIMD_IMPL_INTRIN_1(reinterpret_s32_u64, vs32, vu64)static PyObject *simd__intrin_reinterpret_s32_u64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s32_u64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs32 = arg.data.
vu64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret)
; }
5036#endif // simd_sup2
5037
5038#line 246
5039#if 1
5040SIMD_IMPL_INTRIN_1(reinterpret_u64_u64, vu64, vu64)static PyObject *simd__intrin_reinterpret_u64_u64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u64_u64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu64 = arg.data.
vu64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&ret)
; }
5041#endif // simd_sup2
5042
5043#line 246
5044#if 1
5045SIMD_IMPL_INTRIN_1(reinterpret_s64_u64, vs64, vu64)static PyObject *simd__intrin_reinterpret_s64_u64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s64_u64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs64 = arg.data.
vu64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&ret)
; }
5046#endif // simd_sup2
5047
5048#line 246
5049#if 1
5050SIMD_IMPL_INTRIN_1(reinterpret_f32_u64, vf32, vu64)static PyObject *simd__intrin_reinterpret_f32_u64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f32_u64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf32 = _mm_castsi128_ps
( arg.data.vu64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf32 }; return simd_arg_to_obj
(&ret); }
5051#endif // simd_sup2
5052
#line 246
#if NPY_SIMD_F64
// Bitwise reinterpretation of the u64 vector as f64 lanes
// (SSE: _mm_castsi128_pd — a cast, not a value conversion).
SIMD_IMPL_INTRIN_1(reinterpret_f64_u64, vf64, vu64)
#endif // simd_sup2
5057
5058
/**
 * Special definitions are required due to the nature of the
 * npyv_setf_u64 and npyv_set_u64 intrinsics (variadic macros).
 */
5063#line 258
5064static PyObject *
5065simd__intrin_setf_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5066{
5067 npyv_lanetype_u64 *data = simd_sequence_from_iterable(args, simd_data_qu64, npyv_nlanes_u642);
5068 if (data == NULL((void*)0)) {
5069 return NULL((void*)0);
5070 }
5071 simd_data r = {.vu64 = npyv_setf_u64(npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5072 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5073 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5074 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5075 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5076 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5077 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5078 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5079 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5080 data[64] // for setfnpyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5081 )npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))};
5082 simd_sequence_free(data);
5083 return (PyObject*)PySIMDVector_FromData(r, simd_data_vu64);
5084}
5085
5086#line 258
5087static PyObject *
5088simd__intrin_set_u64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5089{
5090 npyv_lanetype_u64 *data = simd_sequence_from_iterable(args, simd_data_qu64, npyv_nlanes_u642);
5091 if (data == NULL((void*)0)) {
5092 return NULL((void*)0);
5093 }
5094 simd_data r = {.vu64 = npyv_set_u64(npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5095 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5096 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5097 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5098 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5099 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5100 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5101 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5102 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5103 data[64] // for setfnpyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5104 )npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))};
5105 simd_sequence_free(data);
5106 return (PyObject*)PySIMDVector_FromData(r, simd_data_vu64);
5107}
5108
5109
5110/***************************
5111 * Reorder
5112 ***************************/
5113#line 287
5114SIMD_IMPL_INTRIN_2(combinel_u64, vu64, vu64, vu64)static PyObject *simd__intrin_combinel_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""combinel_u64"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu64 = _mm_unpacklo_epi64
( arg1.data.vu64, arg2.data.vu64 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu64 }; return simd_arg_to_obj(&ret); }
5115
5116#line 287
5117SIMD_IMPL_INTRIN_2(combineh_u64, vu64, vu64, vu64)static PyObject *simd__intrin_combineh_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""combineh_u64"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu64 = _mm_unpackhi_epi64
( arg1.data.vu64, arg2.data.vu64 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu64 }; return simd_arg_to_obj(&ret); }
5118
5119
5120#line 293
5121SIMD_IMPL_INTRIN_2(combine_u64, vu64x2, vu64, vu64)static PyObject *simd__intrin_combine_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""combine_u64",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vu64x2 = npyv__combine
( arg1.data.vu64, arg2.data.vu64 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vu64x2 }; return simd_arg_to_obj(&ret); }
5122
5123#line 293
5124SIMD_IMPL_INTRIN_2(zip_u64, vu64x2, vu64, vu64)static PyObject *simd__intrin_zip_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""zip_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64x2 = npyv_zip_u64( arg1.data.vu64,
arg2.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu64x2
}; return simd_arg_to_obj(&ret); }
5125
5126
#if 0
// rev64_u64 is disabled for this lane type (64-bit elements cannot be
// reversed within 64-bit groups).
SIMD_IMPL_INTRIN_1(rev64_u64, vu64, vu64)
#endif
5130
5131/***************************
5132 * Operators
5133 ***************************/
5134#if 63 > 0
5135SIMD_IMPL_INTRIN_2(shl_u64, vu64, vu64, u8)static PyObject *simd__intrin_shl_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shl_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = _mm_sll_epi64(arg1.data.vu64, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu64
}; return simd_arg_to_obj(&ret); }
5136SIMD_IMPL_INTRIN_2(shr_u64, vu64, vu64, u8)static PyObject *simd__intrin_shr_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shr_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = _mm_srl_epi64(arg1.data.vu64, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu64
}; return simd_arg_to_obj(&ret); }
5137// immediate constant
5138SIMD_IMPL_INTRIN_2IMM(shli_u64, vu64, vu64, 63)static PyObject *simd__intrin_shli_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shli_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vu64 = 0 == arg2.data.u8
? _mm_slli_epi64(arg1.data.vu64, 0) : 1 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vu64, 1) : 2 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vu64, 2) : 3 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vu64, 3) : 4 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64
, 4) : 5 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 5) :
6 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 6) : 7 ==
arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 7) : 8 == arg2
.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 8) : 9 == arg2.data
.u8 ? _mm_slli_epi64(arg1.data.vu64, 9) : 10 == arg2.data.u8 ?
_mm_slli_epi64(arg1.data.vu64, 10) : 11 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vu64, 11) : 12 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vu64, 12) : 13 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vu64, 13) : 14 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64
, 14) : 15 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 15
) : 16 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 16) :
17 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 17) : 18
== arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 18) : 19 ==
arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 19) : 20 == arg2
.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 20) : 21 == arg2.data
.u8 ? _mm_slli_epi64(arg1.data.vu64, 21) : 22 == arg2.data.u8
? _mm_slli_epi64(arg1.data.vu64, 22) : 23 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vu64, 23) : 24 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vu64, 24) : 25 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vu64, 25) : 26 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64
, 26) : 27 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 27
) : 28 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 28) :
29 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 29) : 30
== arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 30) : 31 ==
arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 31) : 32 == arg2
.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 32) : 33 == arg2.data
.u8 ? _mm_slli_epi64(arg1.data.vu64, 33) : 34 == arg2.data.u8
? _mm_slli_epi64(arg1.data.vu64, 34) : 35 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vu64, 35) : 36 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vu64, 36) : 37 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vu64, 37) : 38 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64
, 38) : 39 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 39
) : 40 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 40) :
41 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 41) : 42
== arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 42) : 43 ==
arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 43) : 44 == arg2
.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 44) : 45 == arg2.data
.u8 ? _mm_slli_epi64(arg1.data.vu64, 45) : 46 == arg2.data.u8
? _mm_slli_epi64(arg1.data.vu64, 46) : 47 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vu64, 47) : 48 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vu64, 48) : 49 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vu64, 49) : 50 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64
, 50) : 51 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 51
) : 52 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 52) :
53 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 53) : 54
== arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 54) : 55 ==
arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 55) : 56 == arg2
.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 56) : 57 == arg2.data
.u8 ? _mm_slli_epi64(arg1.data.vu64, 57) : 58 == arg2.data.u8
? _mm_slli_epi64(arg1.data.vu64, 58) : 59 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vu64, 59) : 60 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vu64, 60) : 61 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vu64, 61) : 62 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64
, 62) : 63 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vu64, 63
) : data.vu64; simd_arg_free(&arg1); simd_arg ret = { .data
= data, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&
ret); }
5139SIMD_IMPL_INTRIN_2IMM(shri_u64, vu64, vu64, 64)static PyObject *simd__intrin_shri_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shri_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vu64 = 1 == arg2.data.u8
? _mm_srli_epi64(arg1.data.vu64, 1) : 2 == arg2.data.u8 ? _mm_srli_epi64
(arg1.data.vu64, 2) : 3 == arg2.data.u8 ? _mm_srli_epi64(arg1
.data.vu64, 3) : 4 == arg2.data.u8 ? _mm_srli_epi64(arg1.data
.vu64, 4) : 5 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64
, 5) : 6 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 6) :
7 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 7) : 8 ==
arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 8) : 9 == arg2
.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 9) : 10 == arg2.data
.u8 ? _mm_srli_epi64(arg1.data.vu64, 10) : 11 == arg2.data.u8
? _mm_srli_epi64(arg1.data.vu64, 11) : 12 == arg2.data.u8 ? _mm_srli_epi64
(arg1.data.vu64, 12) : 13 == arg2.data.u8 ? _mm_srli_epi64(arg1
.data.vu64, 13) : 14 == arg2.data.u8 ? _mm_srli_epi64(arg1.data
.vu64, 14) : 15 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64
, 15) : 16 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 16
) : 17 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 17) :
18 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 18) : 19
== arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 19) : 20 ==
arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 20) : 21 == arg2
.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 21) : 22 == arg2.data
.u8 ? _mm_srli_epi64(arg1.data.vu64, 22) : 23 == arg2.data.u8
? _mm_srli_epi64(arg1.data.vu64, 23) : 24 == arg2.data.u8 ? _mm_srli_epi64
(arg1.data.vu64, 24) : 25 == arg2.data.u8 ? _mm_srli_epi64(arg1
.data.vu64, 25) : 26 == arg2.data.u8 ? _mm_srli_epi64(arg1.data
.vu64, 26) : 27 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64
, 27) : 28 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 28
) : 29 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 29) :
30 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 30) : 31
== arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 31) : 32 ==
arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 32) : 33 == arg2
.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 33) : 34 == arg2.data
.u8 ? _mm_srli_epi64(arg1.data.vu64, 34) : 35 == arg2.data.u8
? _mm_srli_epi64(arg1.data.vu64, 35) : 36 == arg2.data.u8 ? _mm_srli_epi64
(arg1.data.vu64, 36) : 37 == arg2.data.u8 ? _mm_srli_epi64(arg1
.data.vu64, 37) : 38 == arg2.data.u8 ? _mm_srli_epi64(arg1.data
.vu64, 38) : 39 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64
, 39) : 40 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 40
) : 41 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 41) :
42 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 42) : 43
== arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 43) : 44 ==
arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 44) : 45 == arg2
.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 45) : 46 == arg2.data
.u8 ? _mm_srli_epi64(arg1.data.vu64, 46) : 47 == arg2.data.u8
? _mm_srli_epi64(arg1.data.vu64, 47) : 48 == arg2.data.u8 ? _mm_srli_epi64
(arg1.data.vu64, 48) : 49 == arg2.data.u8 ? _mm_srli_epi64(arg1
.data.vu64, 49) : 50 == arg2.data.u8 ? _mm_srli_epi64(arg1.data
.vu64, 50) : 51 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64
, 51) : 52 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 52
) : 53 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 53) :
54 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 54) : 55
== arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 55) : 56 ==
arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 56) : 57 == arg2
.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 57) : 58 == arg2.data
.u8 ? _mm_srli_epi64(arg1.data.vu64, 58) : 59 == arg2.data.u8
? _mm_srli_epi64(arg1.data.vu64, 59) : 60 == arg2.data.u8 ? _mm_srli_epi64
(arg1.data.vu64, 60) : 61 == arg2.data.u8 ? _mm_srli_epi64(arg1
.data.vu64, 61) : 62 == arg2.data.u8 ? _mm_srli_epi64(arg1.data
.vu64, 62) : 63 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64
, 63) : 64 == arg2.data.u8 ? _mm_srli_epi64(arg1.data.vu64, 64
) : data.vu64; simd_arg_free(&arg1); simd_arg ret = { .data
= data, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&
ret); }
5140#endif // shl_imm
5141
5142#line 314
5143SIMD_IMPL_INTRIN_2(and_u64, vu64, vu64, vu64)static PyObject *simd__intrin_and_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""and_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = _mm_and_si128( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5144
5145#line 314
5146SIMD_IMPL_INTRIN_2(or_u64, vu64, vu64, vu64)static PyObject *simd__intrin_or_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""or_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = _mm_or_si128( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5147
5148#line 314
5149SIMD_IMPL_INTRIN_2(xor_u64, vu64, vu64, vu64)static PyObject *simd__intrin_xor_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = _mm_xor_si128( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5150
5151
5152SIMD_IMPL_INTRIN_1(not_u64, vu64, vu64)static PyObject *simd__intrin_not_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&:"
"not_u64", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vu64 = _mm_xor_si128(arg.data.vu64, _mm_set1_epi32
(-1))}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&ret)
; }
5153
5154#line 322
5155SIMD_IMPL_INTRIN_2(cmpeq_u64, vb64, vu64, vu64)static PyObject *simd__intrin_cmpeq_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpeq_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = npyv_cmpeq_u64( arg1.data.vu64,
arg2.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
5156
5157#line 322
5158SIMD_IMPL_INTRIN_2(cmpneq_u64, vb64, vu64, vu64)static PyObject *simd__intrin_cmpneq_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpneq_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_xor_si128(npyv_cmpeq_u64(arg1
.data.vu64, arg2.data.vu64), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb64 }; return simd_arg_to_obj(&
ret); }
5159
5160#line 322
5161SIMD_IMPL_INTRIN_2(cmpgt_u64, vb64, vu64, vu64)static PyObject *simd__intrin_cmpgt_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpgt_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = npyv_cmpgt_u64( arg1.data.vu64,
arg2.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
5162
5163#line 322
5164SIMD_IMPL_INTRIN_2(cmpge_u64, vb64, vu64, vu64)static PyObject *simd__intrin_cmpge_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpge_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_xor_si128(npyv_cmpgt_u64(arg2
.data.vu64, arg1.data.vu64), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb64 }; return simd_arg_to_obj(&
ret); }
5165
5166#line 322
5167SIMD_IMPL_INTRIN_2(cmplt_u64, vb64, vu64, vu64)static PyObject *simd__intrin_cmplt_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmplt_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = npyv_cmpgt_u64(arg2.data.vu64, arg1
.data.vu64)}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb64 }; return
simd_arg_to_obj(&ret); }
5168
5169#line 322
5170SIMD_IMPL_INTRIN_2(cmple_u64, vb64, vu64, vu64)static PyObject *simd__intrin_cmple_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmple_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_xor_si128(npyv_cmpgt_u64(arg1
.data.vu64, arg2.data.vu64), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb64 }; return simd_arg_to_obj(&
ret); }
5171
5172
5173/***************************
5174 * Conversion
5175 ***************************/
5176SIMD_IMPL_INTRIN_1(cvt_u64_b64, vu64, vb64)static PyObject *simd__intrin_cvt_u64_b64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb64}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_u64_b64", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vu64 = arg.data.vb64}; simd_arg_free(
&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu64
}; return simd_arg_to_obj(&ret); }
5177SIMD_IMPL_INTRIN_1(cvt_b64_u64, vb64, vu64)static PyObject *simd__intrin_cvt_b64_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_b64_u64", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vb64 = arg.data.vu64}; simd_arg_free(
&arg); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
5178#if 0
5179SIMD_IMPL_INTRIN_1(expand_u64_u64, vu64x2, vu64)static PyObject *simd__intrin_expand_u64_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&:"
"expand_u64_u64", simd_arg_converter, &arg )) return ((void
*)0); simd_data data = {.vu64x2 = npyv_expand_u64_u64( arg.data
.vu64 )}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu64x2 }; return simd_arg_to_obj(&ret
); }
5180#endif // expand_sup
5181/***************************
5182 * Arithmetic
5183 ***************************/
5184#line 339
5185SIMD_IMPL_INTRIN_2(add_u64, vu64, vu64, vu64)static PyObject *simd__intrin_add_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""add_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = _mm_add_epi64( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5186
5187#line 339
5188SIMD_IMPL_INTRIN_2(sub_u64, vu64, vu64, vu64)static PyObject *simd__intrin_sub_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""sub_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = _mm_sub_epi64( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5189
5190
5191#if 0
5192#line 346
5193SIMD_IMPL_INTRIN_2(adds_u64, vu64, vu64, vu64)static PyObject *simd__intrin_adds_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""adds_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = npyv_adds_u64( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5194
5195#line 346
5196SIMD_IMPL_INTRIN_2(subs_u64, vu64, vu64, vu64)static PyObject *simd__intrin_subs_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""subs_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = npyv_subs_u64( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5197
5198#endif // sat_sup
5199
5200#if 0
5201SIMD_IMPL_INTRIN_2(mul_u64, vu64, vu64, vu64)static PyObject *simd__intrin_mul_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""mul_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = npyv_mul_u64( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5202#endif // mul_sup
5203
5204#if 0
5205SIMD_IMPL_INTRIN_2(div_u64, vu64, vu64, vu64)static PyObject *simd__intrin_div_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""div_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = npyv_div_u64( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5206#endif // div_sup
5207
5208#if 1
5209SIMD_IMPL_INTRIN_1(divisor_u64, vu64x3, u64)static PyObject *simd__intrin_divisor_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_u64}; if (!PyArg_ParseTuple( args, "O&:"
"divisor_u64", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vu64x3 = npyv_divisor_u64( arg.data.u64
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vu64x3 }; return simd_arg_to_obj(&ret);
}
5210SIMD_IMPL_INTRIN_2(divc_u64, vu64, vu64, vu64x3)static PyObject *simd__intrin_divc_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64x3
}; if (!PyArg_ParseTuple( args, "O&O&:""divc_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = npyv_divc_u64( arg1.data.vu64, arg2
.data.vu64x3 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vu64
}; return simd_arg_to_obj(&ret); }
5211#endif // intdiv_sup
5212
5213#if 0
5214#line 367
5215SIMD_IMPL_INTRIN_3(muladd_u64, vu64, vu64, vu64, vu64)static PyObject *simd__intrin_muladd_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; simd_arg arg3 = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""muladd_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu64 = npyv_muladd_u64
( arg1.data.vu64, arg2.data.vu64, arg3.data.vu64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5216
5217#line 367
5218SIMD_IMPL_INTRIN_3(mulsub_u64, vu64, vu64, vu64, vu64)static PyObject *simd__intrin_mulsub_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; simd_arg arg3 = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""mulsub_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu64 = npyv_mulsub_u64
( arg1.data.vu64, arg2.data.vu64, arg3.data.vu64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5219
5220#line 367
5221SIMD_IMPL_INTRIN_3(nmuladd_u64, vu64, vu64, vu64, vu64)static PyObject *simd__intrin_nmuladd_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; simd_arg arg3 = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmuladd_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu64 = npyv_nmuladd_u64
( arg1.data.vu64, arg2.data.vu64, arg3.data.vu64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5222
5223#line 367
5224SIMD_IMPL_INTRIN_3(nmulsub_u64, vu64, vu64, vu64, vu64)static PyObject *simd__intrin_nmulsub_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; simd_arg arg3 = {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmulsub_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vu64 = npyv_nmulsub_u64
( arg1.data.vu64, arg2.data.vu64, arg3.data.vu64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5225
5226#endif // fused_sup
5227
5228#if 1
5229SIMD_IMPL_INTRIN_1(sum_u64, u64, vu64)static PyObject *simd__intrin_sum_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&:"
"sum_u64", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.u64 = npyv_sum_u64( arg.data.vu64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_u64
}; return simd_arg_to_obj(&ret); }
5230#endif // sum_sup
5231
5232#if 0
5233SIMD_IMPL_INTRIN_1(sumup_u64, u64, vu64)static PyObject *simd__intrin_sumup_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&:"
"sumup_u64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.u64 = npyv_sumup_u64( arg.data.vu64 )};
simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_u64 }; return simd_arg_to_obj(&ret); }
5234#endif // sumup_sup
5235
5236/***************************
5237 * Math
5238 ***************************/
5239#if 0
5240#line 386
5241SIMD_IMPL_INTRIN_1(sqrt_u64, vu64, vu64)static PyObject *simd__intrin_sqrt_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&:"
"sqrt_u64", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vu64 = npyv_sqrt_u64( arg.data.vu64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu64
}; return simd_arg_to_obj(&ret); }
5242
5243#line 386
5244SIMD_IMPL_INTRIN_1(recip_u64, vu64, vu64)static PyObject *simd__intrin_recip_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&:"
"recip_u64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vu64 = npyv_recip_u64( arg.data.vu64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu64 }; return simd_arg_to_obj(&ret); }
5245
5246#line 386
5247SIMD_IMPL_INTRIN_1(abs_u64, vu64, vu64)static PyObject *simd__intrin_abs_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&:"
"abs_u64", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vu64 = npyv_abs_u64( arg.data.vu64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vu64
}; return simd_arg_to_obj(&ret); }
5248
5249#line 386
5250SIMD_IMPL_INTRIN_1(square_u64, vu64, vu64)static PyObject *simd__intrin_square_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&:"
"square_u64", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vu64 = npyv_square_u64( arg.data.vu64 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu64 }; return simd_arg_to_obj(&ret); }
5251
5252#endif
5253
5254#line 393
5255SIMD_IMPL_INTRIN_2(max_u64, vu64, vu64, vu64)static PyObject *simd__intrin_max_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""max_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = npyv_max_u64( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5256
5257#line 393
5258SIMD_IMPL_INTRIN_2(min_u64, vu64, vu64, vu64)static PyObject *simd__intrin_min_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""min_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = npyv_min_u64( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5259
5260
5261#if 0
5262#line 400
5263SIMD_IMPL_INTRIN_2(maxp_u64, vu64, vu64, vu64)static PyObject *simd__intrin_maxp_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""maxp_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = npyv_maxp_u64( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5264
5265#line 400
5266SIMD_IMPL_INTRIN_2(minp_u64, vu64, vu64, vu64)static PyObject *simd__intrin_minp_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vu64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; if (!PyArg_ParseTuple( args, "O&O&:""minp_u64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vu64 = npyv_minp_u64( arg1.data.vu64, arg2
.data.vu64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vu64 }; return
simd_arg_to_obj(&ret); }
5267
5268#endif
5269
5270/***************************
5271 * Mask operations
5272 ***************************/
5273#line 410
5274 SIMD_IMPL_INTRIN_4(ifadd_u64, vu64, vb64, vu64, vu64, vu64)static PyObject *simd__intrin_ifadd_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; simd_arg arg3 = {.dtype = simd_data_vu64}; simd_arg arg4 =
{.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifadd_u64", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vu64 = npyv_ifadd_u64
( arg1.data.vb64, arg2.data.vu64, arg3.data.vu64, arg4.data.vu64
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&
ret); }
5275
5276#line 410
5277 SIMD_IMPL_INTRIN_4(ifsub_u64, vu64, vb64, vu64, vu64, vu64)static PyObject *simd__intrin_ifsub_u64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vu64
}; simd_arg arg3 = {.dtype = simd_data_vu64}; simd_arg arg4 =
{.dtype = simd_data_vu64}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifsub_u64", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vu64 = npyv_ifsub_u64
( arg1.data.vb64, arg2.data.vu64, arg3.data.vu64, arg4.data.vu64
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&
ret); }
5278
5279
5280#endif // simd_sup
5281
5282#line 34
5283#if 1
5284/***************************
5285 * Memory
5286 ***************************/
5287#line 41
5288SIMD_IMPL_INTRIN_1(load_s64, vs64, qs64)static PyObject *simd__intrin_load_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs64}; if (!PyArg_ParseTuple( args, "O&:"
"load_s64", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vs64 = npyv_load_s64( arg.data.qs64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs64
}; return simd_arg_to_obj(&ret); }
5289
5290#line 41
5291SIMD_IMPL_INTRIN_1(loada_s64, vs64, qs64)static PyObject *simd__intrin_loada_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs64}; if (!PyArg_ParseTuple( args, "O&:"
"loada_s64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs64 = npyv_loada_s64( arg.data.qs64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs64 }; return simd_arg_to_obj(&ret); }
5292
5293#line 41
5294SIMD_IMPL_INTRIN_1(loads_s64, vs64, qs64)static PyObject *simd__intrin_loads_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs64}; if (!PyArg_ParseTuple( args, "O&:"
"loads_s64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs64 = npyv_loads_s64( arg.data.qs64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs64 }; return simd_arg_to_obj(&ret); }
5295
5296#line 41
5297SIMD_IMPL_INTRIN_1(loadl_s64, vs64, qs64)static PyObject *simd__intrin_loadl_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qs64}; if (!PyArg_ParseTuple( args, "O&:"
"loadl_s64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs64 = npyv_loadl_s64( arg.data.qs64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs64 }; return simd_arg_to_obj(&ret); }
5298
5299#line 46
5300// special definition due to the nature of store
5301static PyObject *
5302simd__intrin_store_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5303{
5304 simd_arg seq_arg = {.dtype = simd_data_qs64};
5305 simd_arg vec_arg = {.dtype = simd_data_vs64};
5306 if (!PyArg_ParseTuple(
5307 args, "O&O&:store_s64",
5308 simd_arg_converter, &seq_arg,
5309 simd_arg_converter, &vec_arg
5310 )) {
5311 return NULL((void*)0);
5312 }
5313 npyv_store_s64(seq_arg.data.qs64, vec_arg.data.vs64);
5314 // write-back
5315 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs64, simd_data_qs64)) {
5316 simd_arg_free(&seq_arg);
5317 return NULL((void*)0);
5318 }
5319 simd_arg_free(&seq_arg);
5320 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
5321}
5322
5323#line 46
5324// special definition due to the nature of storea
5325static PyObject *
5326simd__intrin_storea_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5327{
5328 simd_arg seq_arg = {.dtype = simd_data_qs64};
5329 simd_arg vec_arg = {.dtype = simd_data_vs64};
5330 if (!PyArg_ParseTuple(
5331 args, "O&O&:storea_s64",
5332 simd_arg_converter, &seq_arg,
5333 simd_arg_converter, &vec_arg
5334 )) {
5335 return NULL((void*)0);
5336 }
5337 npyv_storea_s64(seq_arg.data.qs64, vec_arg.data.vs64);
5338 // write-back
5339 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs64, simd_data_qs64)) {
5340 simd_arg_free(&seq_arg);
5341 return NULL((void*)0);
5342 }
5343 simd_arg_free(&seq_arg);
5344 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
5345}
5346
5347#line 46
5348// special definition due to the nature of stores
5349static PyObject *
5350simd__intrin_stores_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5351{
5352 simd_arg seq_arg = {.dtype = simd_data_qs64};
5353 simd_arg vec_arg = {.dtype = simd_data_vs64};
5354 if (!PyArg_ParseTuple(
5355 args, "O&O&:stores_s64",
5356 simd_arg_converter, &seq_arg,
5357 simd_arg_converter, &vec_arg
5358 )) {
5359 return NULL((void*)0);
5360 }
5361 npyv_stores_s64(seq_arg.data.qs64, vec_arg.data.vs64);
5362 // write-back
5363 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs64, simd_data_qs64)) {
5364 simd_arg_free(&seq_arg);
5365 return NULL((void*)0);
5366 }
5367 simd_arg_free(&seq_arg);
5368 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
5369}
5370
5371#line 46
5372// special definition due to the nature of storel
5373static PyObject *
5374simd__intrin_storel_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5375{
5376 simd_arg seq_arg = {.dtype = simd_data_qs64};
5377 simd_arg vec_arg = {.dtype = simd_data_vs64};
5378 if (!PyArg_ParseTuple(
5379 args, "O&O&:storel_s64",
5380 simd_arg_converter, &seq_arg,
5381 simd_arg_converter, &vec_arg
5382 )) {
5383 return NULL((void*)0);
5384 }
5385 npyv_storel_s64(seq_arg.data.qs64, vec_arg.data.vs64);
5386 // write-back
5387 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs64, simd_data_qs64)) {
5388 simd_arg_free(&seq_arg);
5389 return NULL((void*)0);
5390 }
5391 simd_arg_free(&seq_arg);
5392 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
5393}
5394
5395#line 46
5396// special definition due to the nature of storeh
5397static PyObject *
5398simd__intrin_storeh_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5399{
5400 simd_arg seq_arg = {.dtype = simd_data_qs64};
5401 simd_arg vec_arg = {.dtype = simd_data_vs64};
5402 if (!PyArg_ParseTuple(
5403 args, "O&O&:storeh_s64",
5404 simd_arg_converter, &seq_arg,
5405 simd_arg_converter, &vec_arg
5406 )) {
5407 return NULL((void*)0);
5408 }
5409 npyv_storeh_s64(seq_arg.data.qs64, vec_arg.data.vs64);
5410 // write-back
5411 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs64, simd_data_qs64)) {
5412 simd_arg_free(&seq_arg);
5413 return NULL((void*)0);
5414 }
5415 simd_arg_free(&seq_arg);
5416 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
5417}
5418
5419
5420/****************************************
5421 * Non-contiguous/Partial Memory access
5422 ****************************************/
5423#if 1
5424// Partial Load
5425SIMD_IMPL_INTRIN_3(load_till_s64, vs64, qs64, u32, s64)static PyObject *simd__intrin_load_till_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qs64}; simd_arg arg2 = {.dtype = simd_data_u32
}; simd_arg arg3 = {.dtype = simd_data_s64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""load_till_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs64 = npyv_load_till_s64
( arg1.data.qs64, arg2.data.u32, arg3.data.s64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5426SIMD_IMPL_INTRIN_2(load_tillz_s64, vs64, qs64, u32)static PyObject *simd__intrin_load_tillz_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qs64}; simd_arg arg2 = {.dtype = simd_data_u32
}; if (!PyArg_ParseTuple( args, "O&O&:""load_tillz_s64"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs64 = npyv_load_tillz_s64
( arg1.data.qs64, arg2.data.u32 )}; simd_arg_free(&arg1);
simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs64 }; return simd_arg_to_obj(&ret); }
5427
5428// Partial Store
5429static PyObject *
5430simd__intrin_store_till_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5431{
5432 simd_arg seq_arg = {.dtype = simd_data_qs64};
5433 simd_arg nlane_arg = {.dtype = simd_data_u32};
5434 simd_arg vec_arg = {.dtype = simd_data_vs64};
5435 if (!PyArg_ParseTuple(
5436 args, "O&O&O&:store_till_s64",
5437 simd_arg_converter, &seq_arg,
5438 simd_arg_converter, &nlane_arg,
5439 simd_arg_converter, &vec_arg
5440 )) {
5441 return NULL((void*)0);
5442 }
5443 npyv_store_till_s64(
5444 seq_arg.data.qs64, nlane_arg.data.u32, vec_arg.data.vs64
5445 );
5446 // write-back
5447 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs64, simd_data_qs64)) {
5448 simd_arg_free(&seq_arg);
5449 return NULL((void*)0);
5450 }
5451 simd_arg_free(&seq_arg);
5452 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
5453}
5454
5455// Non-contiguous Load
5456#line 112
5457static PyObject *
5458simd__intrin_loadn_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5459{
5460 simd_arg seq_arg = {.dtype = simd_data_qs64};
5461 simd_arg stride_arg = {.dtype = simd_data_s64};
5462#if 0
5463 simd_arg nlane_arg = {.dtype = simd_data_u32};
5464#endif // till
5465#if 0
5466 simd_arg fill_arg = {.dtype = simd_data_s64};
5467#endif
5468 if (!PyArg_ParseTuple(
5469 args, "O&O&:loadn_s64",
5470 simd_arg_converter, &seq_arg,
5471 simd_arg_converter, &stride_arg
5472#if 0
5473 ,simd_arg_converter, &nlane_arg
5474#endif
5475#if 0
5476 ,simd_arg_converter, &fill_arg
5477#endif
5478 )) {
5479 return NULL((void*)0);
5480 }
5481 npyv_lanetype_s64 *seq_ptr = seq_arg.data.qs64;
5482 npy_intp stride = (npy_intp)stride_arg.data.s64;
5483 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
5484 Py_ssize_t min_seq_len = stride * npyv_nlanes_s642;
5485 if (stride < 0) {
5486 seq_ptr += cur_seq_len -1;
5487 min_seq_len = -min_seq_len;
5488 }
5489 if (cur_seq_len < min_seq_len) {
5490 PyErr_Format(PyExc_ValueError,
5491 "loadn_s64(), according to provided stride %d, the "
5492 "minimum acceptable size of the required sequence is %d, given(%d)",
5493 stride, min_seq_len, cur_seq_len
5494 );
5495 goto err;
5496 }
5497 npyv_s64 rvec = npyv_loadn_s64(
5498 seq_ptr, stride
5499 #if 0
5500 , nlane_arg.data.u32
5501 #endif
5502 #if 0
5503 , fill_arg.data.s64
5504 #endif
5505 );
5506 simd_arg ret = {
5507 .dtype = simd_data_vs64, .data = {.vs64=rvec}
5508 };
5509 simd_arg_free(&seq_arg);
5510 return simd_arg_to_obj(&ret);
5511err:
5512 simd_arg_free(&seq_arg);
5513 return NULL((void*)0);
5514}
5515
5516#line 112
5517static PyObject *
5518simd__intrin_loadn_till_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5519{
5520 simd_arg seq_arg = {.dtype = simd_data_qs64};
5521 simd_arg stride_arg = {.dtype = simd_data_s64};
5522#if 1
5523 simd_arg nlane_arg = {.dtype = simd_data_u32};
5524#endif // till
5525#if 1
5526 simd_arg fill_arg = {.dtype = simd_data_s64};
5527#endif
5528 if (!PyArg_ParseTuple(
5529 args, "O&O&O&O&:loadn_till_s64",
5530 simd_arg_converter, &seq_arg,
5531 simd_arg_converter, &stride_arg
5532#if 1
5533 ,simd_arg_converter, &nlane_arg
5534#endif
5535#if 1
5536 ,simd_arg_converter, &fill_arg
5537#endif
5538 )) {
5539 return NULL((void*)0);
5540 }
5541 npyv_lanetype_s64 *seq_ptr = seq_arg.data.qs64;
5542 npy_intp stride = (npy_intp)stride_arg.data.s64;
5543 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
5544 Py_ssize_t min_seq_len = stride * npyv_nlanes_s642;
5545 if (stride < 0) {
5546 seq_ptr += cur_seq_len -1;
5547 min_seq_len = -min_seq_len;
5548 }
5549 if (cur_seq_len < min_seq_len) {
5550 PyErr_Format(PyExc_ValueError,
5551 "loadn_till_s64(), according to provided stride %d, the "
5552 "minimum acceptable size of the required sequence is %d, given(%d)",
5553 stride, min_seq_len, cur_seq_len
5554 );
5555 goto err;
5556 }
5557 npyv_s64 rvec = npyv_loadn_till_s64(
5558 seq_ptr, stride
5559 #if 1
5560 , nlane_arg.data.u32
5561 #endif
5562 #if 1
5563 , fill_arg.data.s64
5564 #endif
5565 );
5566 simd_arg ret = {
5567 .dtype = simd_data_vs64, .data = {.vs64=rvec}
5568 };
5569 simd_arg_free(&seq_arg);
5570 return simd_arg_to_obj(&ret);
5571err:
5572 simd_arg_free(&seq_arg);
5573 return NULL((void*)0);
5574}
5575
5576#line 112
5577static PyObject *
5578simd__intrin_loadn_tillz_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5579{
5580 simd_arg seq_arg = {.dtype = simd_data_qs64};
5581 simd_arg stride_arg = {.dtype = simd_data_s64};
5582#if 1
5583 simd_arg nlane_arg = {.dtype = simd_data_u32};
5584#endif // till
5585#if 0
5586 simd_arg fill_arg = {.dtype = simd_data_s64};
5587#endif
5588 if (!PyArg_ParseTuple(
5589 args, "O&O&O&:loadn_tillz_s64",
5590 simd_arg_converter, &seq_arg,
5591 simd_arg_converter, &stride_arg
5592#if 1
5593 ,simd_arg_converter, &nlane_arg
5594#endif
5595#if 0
5596 ,simd_arg_converter, &fill_arg
5597#endif
5598 )) {
5599 return NULL((void*)0);
5600 }
5601 npyv_lanetype_s64 *seq_ptr = seq_arg.data.qs64;
5602 npy_intp stride = (npy_intp)stride_arg.data.s64;
5603 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
5604 Py_ssize_t min_seq_len = stride * npyv_nlanes_s642;
5605 if (stride < 0) {
5606 seq_ptr += cur_seq_len -1;
5607 min_seq_len = -min_seq_len;
5608 }
5609 if (cur_seq_len < min_seq_len) {
5610 PyErr_Format(PyExc_ValueError,
5611 "loadn_tillz_s64(), according to provided stride %d, the "
5612 "minimum acceptable size of the required sequence is %d, given(%d)",
5613 stride, min_seq_len, cur_seq_len
5614 );
5615 goto err;
5616 }
5617 npyv_s64 rvec = npyv_loadn_tillz_s64(
5618 seq_ptr, stride
5619 #if 1
5620 , nlane_arg.data.u32
5621 #endif
5622 #if 0
5623 , fill_arg.data.s64
5624 #endif
5625 );
5626 simd_arg ret = {
5627 .dtype = simd_data_vs64, .data = {.vs64=rvec}
5628 };
5629 simd_arg_free(&seq_arg);
5630 return simd_arg_to_obj(&ret);
5631err:
5632 simd_arg_free(&seq_arg);
5633 return NULL((void*)0);
5634}
5635
5636
5637// Non-contiguous Store
5638#line 178
5639static PyObject *
5640simd__intrin_storen_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5641{
5642 simd_arg seq_arg = {.dtype = simd_data_qs64};
5643 simd_arg stride_arg = {.dtype = simd_data_s64};
5644 simd_arg vec_arg = {.dtype = simd_data_vs64};
5645#if 0
5646 simd_arg nlane_arg = {.dtype = simd_data_u32};
5647#endif
5648 if (!PyArg_ParseTuple(
5649 args, "O&O&O&:storen_s64",
5650 simd_arg_converter, &seq_arg,
5651 simd_arg_converter, &stride_arg
5652#if 0
5653 ,simd_arg_converter, &nlane_arg
5654#endif
5655 ,simd_arg_converter, &vec_arg
5656 )) {
5657 return NULL((void*)0);
5658 }
5659 npyv_lanetype_s64 *seq_ptr = seq_arg.data.qs64;
5660 npy_intp stride = (npy_intp)stride_arg.data.s64;
5661 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
5662 Py_ssize_t min_seq_len = stride * npyv_nlanes_s642;
5663 if (stride < 0) {
5664 seq_ptr += cur_seq_len -1;
5665 min_seq_len = -min_seq_len;
5666 }
5667 // overflow guard
5668 if (cur_seq_len < min_seq_len) {
5669 PyErr_Format(PyExc_ValueError,
5670 "storen_s64(), according to provided stride %d, the"
5671 "minimum acceptable size of the required sequence is %d, given(%d)",
5672 stride, min_seq_len, cur_seq_len
5673 );
5674 goto err;
5675 }
5676 npyv_storen_s64(
5677 seq_ptr, stride
5678 #if 0
5679 ,nlane_arg.data.u32
5680 #endif
5681 ,vec_arg.data.vs64
5682 );
5683 // write-back
5684 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs64, simd_data_qs64)) {
5685 goto err;
5686 }
5687 simd_arg_free(&seq_arg);
5688 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
5689err:
5690 simd_arg_free(&seq_arg);
5691 return NULL((void*)0);
5692}
5693
5694#line 178
5695static PyObject *
5696simd__intrin_storen_till_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5697{
5698 simd_arg seq_arg = {.dtype = simd_data_qs64};
5699 simd_arg stride_arg = {.dtype = simd_data_s64};
5700 simd_arg vec_arg = {.dtype = simd_data_vs64};
5701#if 1
5702 simd_arg nlane_arg = {.dtype = simd_data_u32};
5703#endif
5704 if (!PyArg_ParseTuple(
5705 args, "O&O&O&O&:storen_s64",
5706 simd_arg_converter, &seq_arg,
5707 simd_arg_converter, &stride_arg
5708#if 1
5709 ,simd_arg_converter, &nlane_arg
5710#endif
5711 ,simd_arg_converter, &vec_arg
5712 )) {
5713 return NULL((void*)0);
5714 }
5715 npyv_lanetype_s64 *seq_ptr = seq_arg.data.qs64;
5716 npy_intp stride = (npy_intp)stride_arg.data.s64;
5717 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
5718 Py_ssize_t min_seq_len = stride * npyv_nlanes_s642;
5719 if (stride < 0) {
5720 seq_ptr += cur_seq_len -1;
5721 min_seq_len = -min_seq_len;
5722 }
5723 // overflow guard
5724 if (cur_seq_len < min_seq_len) {
5725 PyErr_Format(PyExc_ValueError,
5726 "storen_till_s64(), according to provided stride %d, the"
5727 "minimum acceptable size of the required sequence is %d, given(%d)",
5728 stride, min_seq_len, cur_seq_len
5729 );
5730 goto err;
5731 }
5732 npyv_storen_till_s64(
5733 seq_ptr, stride
5734 #if 1
5735 ,nlane_arg.data.u32
5736 #endif
5737 ,vec_arg.data.vs64
5738 );
5739 // write-back
5740 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qs64, simd_data_qs64)) {
5741 goto err;
5742 }
5743 simd_arg_free(&seq_arg);
5744 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
5745err:
5746 simd_arg_free(&seq_arg);
5747 return NULL((void*)0);
5748}
5749
5750#endif // 1
5751
5752/***************************
5753 * Misc
5754 ***************************/
5755SIMD_IMPL_INTRIN_0(zero_s64, vs64)static PyObject *simd__intrin_zero_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "zero_s64") ) return ((void*)0); simd_arg a = { .
dtype = simd_data_vs64, .data = {.vs64 = _mm_setzero_si128()}
, }; return simd_arg_to_obj(&a); }
5756SIMD_IMPL_INTRIN_1(setall_s64, vs64, s64)static PyObject *simd__intrin_setall_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_s64}; if (!PyArg_ParseTuple( args, "O&:"
"setall_s64", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vs64 = _mm_set1_epi64x((npy_int64)(arg
.data.s64))}; simd_arg_free(&arg); simd_arg ret = { .data
= data, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&
ret); }
5757SIMD_IMPL_INTRIN_3(select_s64, vs64, vb64, vs64, vs64)static PyObject *simd__intrin_select_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; simd_arg arg3 = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""select_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs64 = npyv_select_u8
( arg1.data.vb64, arg2.data.vs64, arg3.data.vs64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5758
5759#line 246
5760#if 1
5761SIMD_IMPL_INTRIN_1(reinterpret_u8_s64, vu8, vs64)static PyObject *simd__intrin_reinterpret_u8_s64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u8_s64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu8 = arg.data.vs64
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vu8 }; return simd_arg_to_obj(&ret); }
5762#endif // simd_sup2
5763
5764#line 246
5765#if 1
5766SIMD_IMPL_INTRIN_1(reinterpret_s8_s64, vs8, vs64)static PyObject *simd__intrin_reinterpret_s8_s64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s8_s64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs8 = arg.data.vs64
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs8 }; return simd_arg_to_obj(&ret); }
5767#endif // simd_sup2
5768
5769#line 246
5770#if 1
5771SIMD_IMPL_INTRIN_1(reinterpret_u16_s64, vu16, vs64)static PyObject *simd__intrin_reinterpret_u16_s64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u16_s64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu16 = arg.data.
vs64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu16 }; return simd_arg_to_obj(&ret)
; }
5772#endif // simd_sup2
5773
5774#line 246
5775#if 1
5776SIMD_IMPL_INTRIN_1(reinterpret_s16_s64, vs16, vs64)static PyObject *simd__intrin_reinterpret_s16_s64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s16_s64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs16 = arg.data.
vs64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs16 }; return simd_arg_to_obj(&ret)
; }
5777#endif // simd_sup2
5778
5779#line 246
5780#if 1
5781SIMD_IMPL_INTRIN_1(reinterpret_u32_s64, vu32, vs64)static PyObject *simd__intrin_reinterpret_u32_s64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u32_s64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu32 = arg.data.
vs64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu32 }; return simd_arg_to_obj(&ret)
; }
5782#endif // simd_sup2
5783
5784#line 246
5785#if 1
5786SIMD_IMPL_INTRIN_1(reinterpret_s32_s64, vs32, vs64)static PyObject *simd__intrin_reinterpret_s32_s64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s32_s64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs32 = arg.data.
vs64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret)
; }
5787#endif // simd_sup2
5788
5789#line 246
5790#if 1
5791SIMD_IMPL_INTRIN_1(reinterpret_u64_s64, vu64, vs64)static PyObject *simd__intrin_reinterpret_u64_s64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u64_s64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu64 = arg.data.
vs64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vu64 }; return simd_arg_to_obj(&ret)
; }
5792#endif // simd_sup2
5793
5794#line 246
5795#if 1
5796SIMD_IMPL_INTRIN_1(reinterpret_s64_s64, vs64, vs64)static PyObject *simd__intrin_reinterpret_s64_s64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s64_s64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs64 = arg.data.
vs64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&ret)
; }
5797#endif // simd_sup2
5798
5799#line 246
5800#if 1
5801SIMD_IMPL_INTRIN_1(reinterpret_f32_s64, vf32, vs64)static PyObject *simd__intrin_reinterpret_f32_s64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f32_s64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf32 = _mm_castsi128_ps
( arg.data.vs64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf32 }; return simd_arg_to_obj
(&ret); }
5802#endif // simd_sup2
5803
5804#line 246
5805#if NPY_SIMD_F641
5806SIMD_IMPL_INTRIN_1(reinterpret_f64_s64, vf64, vs64)static PyObject *simd__intrin_reinterpret_f64_s64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f64_s64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf64 = _mm_castsi128_pd
( arg.data.vs64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf64 }; return simd_arg_to_obj
(&ret); }
5807#endif // simd_sup2
5808
5809
5810/**
5811 * special definition due to the nature of intrinsics
5812 * npyv_setf_s64 and npy_set_s64.
5813*/
5814#line 258
5815static PyObject *
5816simd__intrin_setf_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5817{
5818 npyv_lanetype_s64 *data = simd_sequence_from_iterable(args, simd_data_qs64, npyv_nlanes_s642);
5819 if (data == NULL((void*)0)) {
5820 return NULL((void*)0);
5821 }
5822 simd_data r = {.vs64 = npyv_setf_s64(npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5823 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5824 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5825 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5826 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5827 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5828 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5829 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5830 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5831 data[64] // for setfnpyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))
5832 )npyv__setr_epi64((npy_int64)(data[1]), (npy_int64)(data[2]))};
5833 simd_sequence_free(data);
5834 return (PyObject*)PySIMDVector_FromData(r, simd_data_vs64);
5835}
5836
5837#line 258
5838static PyObject *
5839simd__intrin_set_s64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
5840{
5841 npyv_lanetype_s64 *data = simd_sequence_from_iterable(args, simd_data_qs64, npyv_nlanes_s642);
5842 if (data == NULL((void*)0)) {
5843 return NULL((void*)0);
5844 }
5845 simd_data r = {.vs64 = npyv_set_s64(npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5846 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5847 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5848 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5849 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5850 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5851 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5852 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5853 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5854 data[64] // for setfnpyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))
5855 )npyv__setr_epi64((npy_int64)(data[0]), (npy_int64)(data[1]))};
5856 simd_sequence_free(data);
5857 return (PyObject*)PySIMDVector_FromData(r, simd_data_vs64);
5858}
5859
5860
5861/***************************
5862 * Reorder
5863 ***************************/
5864#line 287
5865SIMD_IMPL_INTRIN_2(combinel_s64, vs64, vs64, vs64)static PyObject *simd__intrin_combinel_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""combinel_s64"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs64 = _mm_unpacklo_epi64
( arg1.data.vs64, arg2.data.vs64 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs64 }; return simd_arg_to_obj(&ret); }
5866
5867#line 287
5868SIMD_IMPL_INTRIN_2(combineh_s64, vs64, vs64, vs64)static PyObject *simd__intrin_combineh_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""combineh_s64"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs64 = _mm_unpackhi_epi64
( arg1.data.vs64, arg2.data.vs64 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs64 }; return simd_arg_to_obj(&ret); }
5869
5870
5871#line 293
5872SIMD_IMPL_INTRIN_2(combine_s64, vs64x2, vs64, vs64)static PyObject *simd__intrin_combine_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""combine_s64",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vs64x2 = npyv__combine
( arg1.data.vs64, arg2.data.vs64 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vs64x2 }; return simd_arg_to_obj(&ret); }
5873
5874#line 293
5875SIMD_IMPL_INTRIN_2(zip_s64, vs64x2, vs64, vs64)static PyObject *simd__intrin_zip_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""zip_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64x2 = npyv_zip_s64( arg1.data.vs64,
arg2.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs64x2
}; return simd_arg_to_obj(&ret); }
5876
5877
5878#if 0
5879SIMD_IMPL_INTRIN_1(rev64_s64, vs64, vs64)static PyObject *simd__intrin_rev64_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&:"
"rev64_s64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs64 = npyv_rev64_s64( arg.data.vs64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs64 }; return simd_arg_to_obj(&ret); }
5880#endif
5881
5882/***************************
5883 * Operators
5884 ***************************/
5885#if 63 > 0
5886SIMD_IMPL_INTRIN_2(shl_s64, vs64, vs64, u8)static PyObject *simd__intrin_shl_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shl_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = _mm_sll_epi64(arg1.data.vs64, _mm_cvtsi32_si128
(arg2.data.u8))}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs64
}; return simd_arg_to_obj(&ret); }
5887SIMD_IMPL_INTRIN_2(shr_s64, vs64, vs64, u8)static PyObject *simd__intrin_shr_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shr_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = npyv_shr_s64( arg1.data.vs64, arg2
.data.u8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5888// immediate constant
5889SIMD_IMPL_INTRIN_2IMM(shli_s64, vs64, vs64, 63)static PyObject *simd__intrin_shli_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shli_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vs64 = 0 == arg2.data.u8
? _mm_slli_epi64(arg1.data.vs64, 0) : 1 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vs64, 1) : 2 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vs64, 2) : 3 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vs64, 3) : 4 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64
, 4) : 5 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 5) :
6 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 6) : 7 ==
arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 7) : 8 == arg2
.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 8) : 9 == arg2.data
.u8 ? _mm_slli_epi64(arg1.data.vs64, 9) : 10 == arg2.data.u8 ?
_mm_slli_epi64(arg1.data.vs64, 10) : 11 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vs64, 11) : 12 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vs64, 12) : 13 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vs64, 13) : 14 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64
, 14) : 15 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 15
) : 16 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 16) :
17 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 17) : 18
== arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 18) : 19 ==
arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 19) : 20 == arg2
.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 20) : 21 == arg2.data
.u8 ? _mm_slli_epi64(arg1.data.vs64, 21) : 22 == arg2.data.u8
? _mm_slli_epi64(arg1.data.vs64, 22) : 23 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vs64, 23) : 24 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vs64, 24) : 25 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vs64, 25) : 26 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64
, 26) : 27 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 27
) : 28 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 28) :
29 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 29) : 30
== arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 30) : 31 ==
arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 31) : 32 == arg2
.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 32) : 33 == arg2.data
.u8 ? _mm_slli_epi64(arg1.data.vs64, 33) : 34 == arg2.data.u8
? _mm_slli_epi64(arg1.data.vs64, 34) : 35 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vs64, 35) : 36 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vs64, 36) : 37 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vs64, 37) : 38 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64
, 38) : 39 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 39
) : 40 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 40) :
41 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 41) : 42
== arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 42) : 43 ==
arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 43) : 44 == arg2
.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 44) : 45 == arg2.data
.u8 ? _mm_slli_epi64(arg1.data.vs64, 45) : 46 == arg2.data.u8
? _mm_slli_epi64(arg1.data.vs64, 46) : 47 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vs64, 47) : 48 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vs64, 48) : 49 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vs64, 49) : 50 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64
, 50) : 51 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 51
) : 52 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 52) :
53 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 53) : 54
== arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 54) : 55 ==
arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 55) : 56 == arg2
.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 56) : 57 == arg2.data
.u8 ? _mm_slli_epi64(arg1.data.vs64, 57) : 58 == arg2.data.u8
? _mm_slli_epi64(arg1.data.vs64, 58) : 59 == arg2.data.u8 ? _mm_slli_epi64
(arg1.data.vs64, 59) : 60 == arg2.data.u8 ? _mm_slli_epi64(arg1
.data.vs64, 60) : 61 == arg2.data.u8 ? _mm_slli_epi64(arg1.data
.vs64, 61) : 62 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64
, 62) : 63 == arg2.data.u8 ? _mm_slli_epi64(arg1.data.vs64, 63
) : data.vs64; simd_arg_free(&arg1); simd_arg ret = { .data
= data, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&
ret); }
5890SIMD_IMPL_INTRIN_2IMM(shri_s64, vs64, vs64, 64)static PyObject *simd__intrin_shri_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shri_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vs64 = 1 == arg2.data.u8
? npyv_shr_s64(arg1.data.vs64, 1) : 2 == arg2.data.u8 ? npyv_shr_s64
(arg1.data.vs64, 2) : 3 == arg2.data.u8 ? npyv_shr_s64(arg1.data
.vs64, 3) : 4 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 4
) : 5 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 5) : 6 ==
arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 6) : 7 == arg2.data
.u8 ? npyv_shr_s64(arg1.data.vs64, 7) : 8 == arg2.data.u8 ? npyv_shr_s64
(arg1.data.vs64, 8) : 9 == arg2.data.u8 ? npyv_shr_s64(arg1.data
.vs64, 9) : 10 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64,
10) : 11 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 11) :
12 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 12) : 13 ==
arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 13) : 14 == arg2
.data.u8 ? npyv_shr_s64(arg1.data.vs64, 14) : 15 == arg2.data
.u8 ? npyv_shr_s64(arg1.data.vs64, 15) : 16 == arg2.data.u8 ?
npyv_shr_s64(arg1.data.vs64, 16) : 17 == arg2.data.u8 ? npyv_shr_s64
(arg1.data.vs64, 17) : 18 == arg2.data.u8 ? npyv_shr_s64(arg1
.data.vs64, 18) : 19 == arg2.data.u8 ? npyv_shr_s64(arg1.data
.vs64, 19) : 20 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64
, 20) : 21 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 21)
: 22 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 22) : 23
== arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 23) : 24 == arg2
.data.u8 ? npyv_shr_s64(arg1.data.vs64, 24) : 25 == arg2.data
.u8 ? npyv_shr_s64(arg1.data.vs64, 25) : 26 == arg2.data.u8 ?
npyv_shr_s64(arg1.data.vs64, 26) : 27 == arg2.data.u8 ? npyv_shr_s64
(arg1.data.vs64, 27) : 28 == arg2.data.u8 ? npyv_shr_s64(arg1
.data.vs64, 28) : 29 == arg2.data.u8 ? npyv_shr_s64(arg1.data
.vs64, 29) : 30 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64
, 30) : 31 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 31)
: 32 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 32) : 33
== arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 33) : 34 == arg2
.data.u8 ? npyv_shr_s64(arg1.data.vs64, 34) : 35 == arg2.data
.u8 ? npyv_shr_s64(arg1.data.vs64, 35) : 36 == arg2.data.u8 ?
npyv_shr_s64(arg1.data.vs64, 36) : 37 == arg2.data.u8 ? npyv_shr_s64
(arg1.data.vs64, 37) : 38 == arg2.data.u8 ? npyv_shr_s64(arg1
.data.vs64, 38) : 39 == arg2.data.u8 ? npyv_shr_s64(arg1.data
.vs64, 39) : 40 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64
, 40) : 41 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 41)
: 42 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 42) : 43
== arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 43) : 44 == arg2
.data.u8 ? npyv_shr_s64(arg1.data.vs64, 44) : 45 == arg2.data
.u8 ? npyv_shr_s64(arg1.data.vs64, 45) : 46 == arg2.data.u8 ?
npyv_shr_s64(arg1.data.vs64, 46) : 47 == arg2.data.u8 ? npyv_shr_s64
(arg1.data.vs64, 47) : 48 == arg2.data.u8 ? npyv_shr_s64(arg1
.data.vs64, 48) : 49 == arg2.data.u8 ? npyv_shr_s64(arg1.data
.vs64, 49) : 50 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64
, 50) : 51 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 51)
: 52 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 52) : 53
== arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 53) : 54 == arg2
.data.u8 ? npyv_shr_s64(arg1.data.vs64, 54) : 55 == arg2.data
.u8 ? npyv_shr_s64(arg1.data.vs64, 55) : 56 == arg2.data.u8 ?
npyv_shr_s64(arg1.data.vs64, 56) : 57 == arg2.data.u8 ? npyv_shr_s64
(arg1.data.vs64, 57) : 58 == arg2.data.u8 ? npyv_shr_s64(arg1
.data.vs64, 58) : 59 == arg2.data.u8 ? npyv_shr_s64(arg1.data
.vs64, 59) : 60 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64
, 60) : 61 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 61)
: 62 == arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 62) : 63
== arg2.data.u8 ? npyv_shr_s64(arg1.data.vs64, 63) : 64 == arg2
.data.u8 ? npyv_shr_s64(arg1.data.vs64, 64) : data.vs64; simd_arg_free
(&arg1); simd_arg ret = { .data = data, .dtype = simd_data_vs64
}; return simd_arg_to_obj(&ret); }
5891#endif // shl_imm
5892
5893#line 314
5894SIMD_IMPL_INTRIN_2(and_s64, vs64, vs64, vs64)static PyObject *simd__intrin_and_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""and_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = _mm_and_si128( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5895
5896#line 314
5897SIMD_IMPL_INTRIN_2(or_s64, vs64, vs64, vs64)static PyObject *simd__intrin_or_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""or_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = _mm_or_si128( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5898
5899#line 314
5900SIMD_IMPL_INTRIN_2(xor_s64, vs64, vs64, vs64)static PyObject *simd__intrin_xor_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = _mm_xor_si128( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5901
5902
5903SIMD_IMPL_INTRIN_1(not_s64, vs64, vs64)static PyObject *simd__intrin_not_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&:"
"not_s64", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vs64 = _mm_xor_si128(arg.data.vs64, _mm_set1_epi32
(-1))}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&ret)
; }
5904
5905#line 322
5906SIMD_IMPL_INTRIN_2(cmpeq_s64, vb64, vs64, vs64)static PyObject *simd__intrin_cmpeq_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpeq_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = npyv_cmpeq_u64( arg1.data.vs64,
arg2.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
5907
5908#line 322
5909SIMD_IMPL_INTRIN_2(cmpneq_s64, vb64, vs64, vs64)static PyObject *simd__intrin_cmpneq_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpneq_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_xor_si128(npyv_cmpeq_u64(arg1
.data.vs64, arg2.data.vs64), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb64 }; return simd_arg_to_obj(&
ret); }
5910
5911#line 322
5912SIMD_IMPL_INTRIN_2(cmpgt_s64, vb64, vs64, vs64)static PyObject *simd__intrin_cmpgt_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpgt_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = npyv_cmpgt_s64( arg1.data.vs64,
arg2.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
5913
5914#line 322
5915SIMD_IMPL_INTRIN_2(cmpge_s64, vb64, vs64, vs64)static PyObject *simd__intrin_cmpge_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpge_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_xor_si128(npyv_cmpgt_s64(arg2
.data.vs64, arg1.data.vs64), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb64 }; return simd_arg_to_obj(&
ret); }
5916
5917#line 322
5918SIMD_IMPL_INTRIN_2(cmplt_s64, vb64, vs64, vs64)static PyObject *simd__intrin_cmplt_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmplt_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = npyv_cmpgt_s64(arg2.data.vs64, arg1
.data.vs64)}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb64 }; return
simd_arg_to_obj(&ret); }
5919
5920#line 322
5921SIMD_IMPL_INTRIN_2(cmple_s64, vb64, vs64, vs64)static PyObject *simd__intrin_cmple_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmple_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_xor_si128(npyv_cmpgt_s64(arg1
.data.vs64, arg2.data.vs64), _mm_set1_epi32(-1))}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg ret = { .data
= data, .dtype = simd_data_vb64 }; return simd_arg_to_obj(&
ret); }
5922
5923
5924/***************************
5925 * Conversion
5926 ***************************/
5927SIMD_IMPL_INTRIN_1(cvt_s64_b64, vs64, vb64)static PyObject *simd__intrin_cvt_s64_b64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb64}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_s64_b64", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vs64 = arg.data.vb64}; simd_arg_free(
&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs64
}; return simd_arg_to_obj(&ret); }
5928SIMD_IMPL_INTRIN_1(cvt_b64_s64, vb64, vs64)static PyObject *simd__intrin_cvt_b64_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_b64_s64", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vb64 = arg.data.vs64}; simd_arg_free(
&arg); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
5929#if 0
5930SIMD_IMPL_INTRIN_1(expand_s64_s64, vs64x2, vs64)static PyObject *simd__intrin_expand_s64_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&:"
"expand_s64_s64", simd_arg_converter, &arg )) return ((void
*)0); simd_data data = {.vs64x2 = npyv_expand_s64_s64( arg.data
.vs64 )}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vs64x2 }; return simd_arg_to_obj(&ret
); }
5931#endif // expand_sup
5932/***************************
5933 * Arithmetic
5934 ***************************/
5935#line 339
5936SIMD_IMPL_INTRIN_2(add_s64, vs64, vs64, vs64)static PyObject *simd__intrin_add_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""add_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = _mm_add_epi64( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5937
5938#line 339
5939SIMD_IMPL_INTRIN_2(sub_s64, vs64, vs64, vs64)static PyObject *simd__intrin_sub_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""sub_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = _mm_sub_epi64( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5940
5941
5942#if 0
5943#line 346
5944SIMD_IMPL_INTRIN_2(adds_s64, vs64, vs64, vs64)static PyObject *simd__intrin_adds_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""adds_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = npyv_adds_s64( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5945
5946#line 346
5947SIMD_IMPL_INTRIN_2(subs_s64, vs64, vs64, vs64)static PyObject *simd__intrin_subs_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""subs_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = npyv_subs_s64( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5948
5949#endif // sat_sup
5950
5951#if 0
5952SIMD_IMPL_INTRIN_2(mul_s64, vs64, vs64, vs64)static PyObject *simd__intrin_mul_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""mul_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = npyv_mul_s64( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5953#endif // mul_sup
5954
5955#if 0
5956SIMD_IMPL_INTRIN_2(div_s64, vs64, vs64, vs64)static PyObject *simd__intrin_div_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""div_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = npyv_div_s64( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5957#endif // div_sup
5958
5959#if 1
5960SIMD_IMPL_INTRIN_1(divisor_s64, vs64x3, s64)static PyObject *simd__intrin_divisor_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_s64}; if (!PyArg_ParseTuple( args, "O&:"
"divisor_s64", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vs64x3 = npyv_divisor_s64( arg.data.s64
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vs64x3 }; return simd_arg_to_obj(&ret);
}
5961SIMD_IMPL_INTRIN_2(divc_s64, vs64, vs64, vs64x3)static PyObject *simd__intrin_divc_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64x3
}; if (!PyArg_ParseTuple( args, "O&O&:""divc_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = npyv_divc_s64( arg1.data.vs64, arg2
.data.vs64x3 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vs64
}; return simd_arg_to_obj(&ret); }
5962#endif // intdiv_sup
5963
5964#if 0
5965#line 367
5966SIMD_IMPL_INTRIN_3(muladd_s64, vs64, vs64, vs64, vs64)static PyObject *simd__intrin_muladd_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; simd_arg arg3 = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""muladd_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs64 = npyv_muladd_s64
( arg1.data.vs64, arg2.data.vs64, arg3.data.vs64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5967
5968#line 367
5969SIMD_IMPL_INTRIN_3(mulsub_s64, vs64, vs64, vs64, vs64)static PyObject *simd__intrin_mulsub_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; simd_arg arg3 = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""mulsub_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs64 = npyv_mulsub_s64
( arg1.data.vs64, arg2.data.vs64, arg3.data.vs64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5970
5971#line 367
5972SIMD_IMPL_INTRIN_3(nmuladd_s64, vs64, vs64, vs64, vs64)static PyObject *simd__intrin_nmuladd_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; simd_arg arg3 = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmuladd_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs64 = npyv_nmuladd_s64
( arg1.data.vs64, arg2.data.vs64, arg3.data.vs64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5973
5974#line 367
5975SIMD_IMPL_INTRIN_3(nmulsub_s64, vs64, vs64, vs64, vs64)static PyObject *simd__intrin_nmulsub_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; simd_arg arg3 = {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmulsub_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vs64 = npyv_nmulsub_s64
( arg1.data.vs64, arg2.data.vs64, arg3.data.vs64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
5976
5977#endif // fused_sup
5978
5979#if 0
5980SIMD_IMPL_INTRIN_1(sum_s64, s64, vs64)static PyObject *simd__intrin_sum_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&:"
"sum_s64", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.s64 = npyv_sum_s64( arg.data.vs64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_s64
}; return simd_arg_to_obj(&ret); }
5981#endif // sum_sup
5982
5983#if 0
5984SIMD_IMPL_INTRIN_1(sumup_s64, s64, vs64)static PyObject *simd__intrin_sumup_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&:"
"sumup_s64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.s64 = npyv_sumup_s64( arg.data.vs64 )};
simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_s64 }; return simd_arg_to_obj(&ret); }
5985#endif // sumup_sup
5986
5987/***************************
5988 * Math
5989 ***************************/
5990#if 0
5991#line 386
5992SIMD_IMPL_INTRIN_1(sqrt_s64, vs64, vs64)static PyObject *simd__intrin_sqrt_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&:"
"sqrt_s64", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vs64 = npyv_sqrt_s64( arg.data.vs64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs64
}; return simd_arg_to_obj(&ret); }
5993
5994#line 386
5995SIMD_IMPL_INTRIN_1(recip_s64, vs64, vs64)static PyObject *simd__intrin_recip_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&:"
"recip_s64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vs64 = npyv_recip_s64( arg.data.vs64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs64 }; return simd_arg_to_obj(&ret); }
5996
5997#line 386
5998SIMD_IMPL_INTRIN_1(abs_s64, vs64, vs64)static PyObject *simd__intrin_abs_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&:"
"abs_s64", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vs64 = npyv_abs_s64( arg.data.vs64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vs64
}; return simd_arg_to_obj(&ret); }
5999
6000#line 386
6001SIMD_IMPL_INTRIN_1(square_s64, vs64, vs64)static PyObject *simd__intrin_square_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&:"
"square_s64", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vs64 = npyv_square_s64( arg.data.vs64 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vs64 }; return simd_arg_to_obj(&ret); }
6002
6003#endif
6004
6005#line 393
6006SIMD_IMPL_INTRIN_2(max_s64, vs64, vs64, vs64)static PyObject *simd__intrin_max_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""max_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = npyv_max_s64( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
6007
6008#line 393
6009SIMD_IMPL_INTRIN_2(min_s64, vs64, vs64, vs64)static PyObject *simd__intrin_min_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""min_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = npyv_min_s64( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
6010
6011
6012#if 0
6013#line 400
6014SIMD_IMPL_INTRIN_2(maxp_s64, vs64, vs64, vs64)static PyObject *simd__intrin_maxp_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""maxp_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = npyv_maxp_s64( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
6015
6016#line 400
6017SIMD_IMPL_INTRIN_2(minp_s64, vs64, vs64, vs64)static PyObject *simd__intrin_minp_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vs64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; if (!PyArg_ParseTuple( args, "O&O&:""minp_s64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vs64 = npyv_minp_s64( arg1.data.vs64, arg2
.data.vs64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vs64 }; return
simd_arg_to_obj(&ret); }
6018
6019#endif
6020
6021/***************************
6022 * Mask operations
6023 ***************************/
6024#line 410
6025 SIMD_IMPL_INTRIN_4(ifadd_s64, vs64, vb64, vs64, vs64, vs64)static PyObject *simd__intrin_ifadd_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; simd_arg arg3 = {.dtype = simd_data_vs64}; simd_arg arg4 =
{.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifadd_s64", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vs64 = npyv_ifadd_s64
( arg1.data.vb64, arg2.data.vs64, arg3.data.vs64, arg4.data.vs64
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&
ret); }
6026
6027#line 410
6028 SIMD_IMPL_INTRIN_4(ifsub_s64, vs64, vb64, vs64, vs64, vs64)static PyObject *simd__intrin_ifsub_s64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vs64
}; simd_arg arg3 = {.dtype = simd_data_vs64}; simd_arg arg4 =
{.dtype = simd_data_vs64}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifsub_s64", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vs64 = npyv_ifsub_s64
( arg1.data.vb64, arg2.data.vs64, arg3.data.vs64, arg4.data.vs64
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vs64 }; return simd_arg_to_obj(&
ret); }
6029
6030
6031#endif // simd_sup
6032
6033#line 34
6034#if 1
6035/***************************
6036 * Memory
6037 ***************************/
6038#line 41
6039SIMD_IMPL_INTRIN_1(load_f32, vf32, qf32)static PyObject *simd__intrin_load_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qf32}; if (!PyArg_ParseTuple( args, "O&:"
"load_f32", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vf32 = _mm_loadu_ps( arg.data.qf32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vf32
}; return simd_arg_to_obj(&ret); }
6040
6041#line 41
6042SIMD_IMPL_INTRIN_1(loada_f32, vf32, qf32)static PyObject *simd__intrin_loada_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qf32}; if (!PyArg_ParseTuple( args, "O&:"
"loada_f32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vf32 = _mm_load_ps( arg.data.qf32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vf32
}; return simd_arg_to_obj(&ret); }
6043
6044#line 41
6045SIMD_IMPL_INTRIN_1(loads_f32, vf32, qf32)static PyObject *simd__intrin_loads_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qf32}; if (!PyArg_ParseTuple( args, "O&:"
"loads_f32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vf32 = _mm_castsi128_ps(_mm_load_si128(
(const __m128i *)(arg.data.qf32)))}; simd_arg_free(&arg);
simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6046
6047#line 41
6048SIMD_IMPL_INTRIN_1(loadl_f32, vf32, qf32)static PyObject *simd__intrin_loadl_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qf32}; if (!PyArg_ParseTuple( args, "O&:"
"loadl_f32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vf32 = _mm_castsi128_ps(npyv_loadl_u32(
(const npy_uint32*)(arg.data.qf32)))}; simd_arg_free(&arg
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6049
6050#line 46
6051// special definition due to the nature of store
6052static PyObject *
6053simd__intrin_store_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6054{
6055 simd_arg seq_arg = {.dtype = simd_data_qf32};
6056 simd_arg vec_arg = {.dtype = simd_data_vf32};
6057 if (!PyArg_ParseTuple(
6058 args, "O&O&:store_f32",
6059 simd_arg_converter, &seq_arg,
6060 simd_arg_converter, &vec_arg
6061 )) {
6062 return NULL((void*)0);
6063 }
6064 npyv_store_f32_mm_storeu_ps(seq_arg.data.qf32, vec_arg.data.vf32);
6065 // write-back
6066 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf32, simd_data_qf32)) {
6067 simd_arg_free(&seq_arg);
6068 return NULL((void*)0);
6069 }
6070 simd_arg_free(&seq_arg);
6071 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6072}
6073
6074#line 46
6075// special definition due to the nature of storea
6076static PyObject *
6077simd__intrin_storea_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6078{
6079 simd_arg seq_arg = {.dtype = simd_data_qf32};
6080 simd_arg vec_arg = {.dtype = simd_data_vf32};
6081 if (!PyArg_ParseTuple(
6082 args, "O&O&:storea_f32",
6083 simd_arg_converter, &seq_arg,
6084 simd_arg_converter, &vec_arg
6085 )) {
6086 return NULL((void*)0);
6087 }
6088 npyv_storea_f32_mm_store_ps(seq_arg.data.qf32, vec_arg.data.vf32);
6089 // write-back
6090 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf32, simd_data_qf32)) {
6091 simd_arg_free(&seq_arg);
6092 return NULL((void*)0);
6093 }
6094 simd_arg_free(&seq_arg);
6095 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6096}
6097
6098#line 46
6099// special definition due to the nature of stores
6100static PyObject *
6101simd__intrin_stores_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6102{
6103 simd_arg seq_arg = {.dtype = simd_data_qf32};
6104 simd_arg vec_arg = {.dtype = simd_data_vf32};
6105 if (!PyArg_ParseTuple(
6106 args, "O&O&:stores_f32",
6107 simd_arg_converter, &seq_arg,
6108 simd_arg_converter, &vec_arg
6109 )) {
6110 return NULL((void*)0);
6111 }
6112 npyv_stores_f32_mm_stream_ps(seq_arg.data.qf32, vec_arg.data.vf32);
6113 // write-back
6114 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf32, simd_data_qf32)) {
6115 simd_arg_free(&seq_arg);
6116 return NULL((void*)0);
6117 }
6118 simd_arg_free(&seq_arg);
6119 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6120}
6121
6122#line 46
6123// special definition due to the nature of storel
6124static PyObject *
6125simd__intrin_storel_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6126{
6127 simd_arg seq_arg = {.dtype = simd_data_qf32};
6128 simd_arg vec_arg = {.dtype = simd_data_vf32};
6129 if (!PyArg_ParseTuple(
6130 args, "O&O&:storel_f32",
6131 simd_arg_converter, &seq_arg,
6132 simd_arg_converter, &vec_arg
6133 )) {
6134 return NULL((void*)0);
6135 }
6136 npyv_storel_f32(seq_arg.data.qf32, vec_arg.data.vf32)_mm_storel_epi64((__m128i*)(seq_arg.data.qf32), _mm_castps_si128
(vec_arg.data.vf32));
;
6137 // write-back
6138 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf32, simd_data_qf32)) {
6139 simd_arg_free(&seq_arg);
6140 return NULL((void*)0);
6141 }
6142 simd_arg_free(&seq_arg);
6143 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6144}
6145
6146#line 46
6147// special definition due to the nature of storeh
6148static PyObject *
6149simd__intrin_storeh_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6150{
6151 simd_arg seq_arg = {.dtype = simd_data_qf32};
6152 simd_arg vec_arg = {.dtype = simd_data_vf32};
6153 if (!PyArg_ParseTuple(
6154 args, "O&O&:storeh_f32",
6155 simd_arg_converter, &seq_arg,
6156 simd_arg_converter, &vec_arg
6157 )) {
6158 return NULL((void*)0);
6159 }
6160 npyv_storeh_f32(seq_arg.data.qf32, vec_arg.data.vf32)npyv_storeh_u32((npy_uint32*)(seq_arg.data.qf32), _mm_castps_si128
(vec_arg.data.vf32))
;
6161 // write-back
6162 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf32, simd_data_qf32)) {
6163 simd_arg_free(&seq_arg);
6164 return NULL((void*)0);
6165 }
6166 simd_arg_free(&seq_arg);
6167 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6168}
6169
6170
6171/****************************************
6172 * Non-contiguous/Partial Memory access
6173 ****************************************/
6174#if 1
6175// Partial Load
6176SIMD_IMPL_INTRIN_3(load_till_f32, vf32, qf32, u32, f32)static PyObject *simd__intrin_load_till_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qf32}; simd_arg arg2 = {.dtype = simd_data_u32
}; simd_arg arg3 = {.dtype = simd_data_f32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""load_till_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf32 = npyv_load_till_f32
( arg1.data.qf32, arg2.data.u32, arg3.data.f32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6177SIMD_IMPL_INTRIN_2(load_tillz_f32, vf32, qf32, u32)static PyObject *simd__intrin_load_tillz_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qf32}; simd_arg arg2 = {.dtype = simd_data_u32
}; if (!PyArg_ParseTuple( args, "O&O&:""load_tillz_f32"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vf32 = npyv_load_tillz_f32
( arg1.data.qf32, arg2.data.u32 )}; simd_arg_free(&arg1);
simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vf32 }; return simd_arg_to_obj(&ret); }
6178
6179// Partial Store
6180static PyObject *
6181simd__intrin_store_till_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6182{
6183 simd_arg seq_arg = {.dtype = simd_data_qf32};
6184 simd_arg nlane_arg = {.dtype = simd_data_u32};
6185 simd_arg vec_arg = {.dtype = simd_data_vf32};
6186 if (!PyArg_ParseTuple(
6187 args, "O&O&O&:store_till_f32",
6188 simd_arg_converter, &seq_arg,
6189 simd_arg_converter, &nlane_arg,
6190 simd_arg_converter, &vec_arg
6191 )) {
6192 return NULL((void*)0);
6193 }
6194 npyv_store_till_f32(
6195 seq_arg.data.qf32, nlane_arg.data.u32, vec_arg.data.vf32
6196 );
6197 // write-back
6198 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf32, simd_data_qf32)) {
6199 simd_arg_free(&seq_arg);
6200 return NULL((void*)0);
6201 }
6202 simd_arg_free(&seq_arg);
6203 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6204}
6205
6206// Non-contiguous Load
6207#line 112
6208static PyObject *
6209simd__intrin_loadn_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6210{
6211 simd_arg seq_arg = {.dtype = simd_data_qf32};
6212 simd_arg stride_arg = {.dtype = simd_data_s64};
6213#if 0
6214 simd_arg nlane_arg = {.dtype = simd_data_u32};
6215#endif // till
6216#if 0
6217 simd_arg fill_arg = {.dtype = simd_data_f32};
6218#endif
6219 if (!PyArg_ParseTuple(
6220 args, "O&O&:loadn_f32",
6221 simd_arg_converter, &seq_arg,
6222 simd_arg_converter, &stride_arg
6223#if 0
6224 ,simd_arg_converter, &nlane_arg
6225#endif
6226#if 0
6227 ,simd_arg_converter, &fill_arg
6228#endif
6229 )) {
6230 return NULL((void*)0);
6231 }
6232 npyv_lanetype_f32 *seq_ptr = seq_arg.data.qf32;
6233 npy_intp stride = (npy_intp)stride_arg.data.s64;
6234 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
6235 Py_ssize_t min_seq_len = stride * npyv_nlanes_f324;
6236 if (stride < 0) {
6237 seq_ptr += cur_seq_len -1;
6238 min_seq_len = -min_seq_len;
6239 }
6240 if (cur_seq_len < min_seq_len) {
6241 PyErr_Format(PyExc_ValueError,
6242 "loadn_f32(), according to provided stride %d, the "
6243 "minimum acceptable size of the required sequence is %d, given(%d)",
6244 stride, min_seq_len, cur_seq_len
6245 );
6246 goto err;
6247 }
6248 npyv_f32 rvec = npyv_loadn_f32(
6249 seq_ptr, stride
6250 #if 0
6251 , nlane_arg.data.u32
6252 #endif
6253 #if 0
6254 , fill_arg.data.f32
6255 #endif
6256 );
6257 simd_arg ret = {
6258 .dtype = simd_data_vf32, .data = {.vf32=rvec}
6259 };
6260 simd_arg_free(&seq_arg);
6261 return simd_arg_to_obj(&ret);
6262err:
6263 simd_arg_free(&seq_arg);
6264 return NULL((void*)0);
6265}
6266
6267#line 112
6268static PyObject *
6269simd__intrin_loadn_till_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6270{
6271 simd_arg seq_arg = {.dtype = simd_data_qf32};
6272 simd_arg stride_arg = {.dtype = simd_data_s64};
6273#if 1
6274 simd_arg nlane_arg = {.dtype = simd_data_u32};
6275#endif // till
6276#if 1
6277 simd_arg fill_arg = {.dtype = simd_data_f32};
6278#endif
6279 if (!PyArg_ParseTuple(
6280 args, "O&O&O&O&:loadn_till_f32",
6281 simd_arg_converter, &seq_arg,
6282 simd_arg_converter, &stride_arg
6283#if 1
6284 ,simd_arg_converter, &nlane_arg
6285#endif
6286#if 1
6287 ,simd_arg_converter, &fill_arg
6288#endif
6289 )) {
6290 return NULL((void*)0);
6291 }
6292 npyv_lanetype_f32 *seq_ptr = seq_arg.data.qf32;
6293 npy_intp stride = (npy_intp)stride_arg.data.s64;
6294 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
6295 Py_ssize_t min_seq_len = stride * npyv_nlanes_f324;
6296 if (stride < 0) {
6297 seq_ptr += cur_seq_len -1;
6298 min_seq_len = -min_seq_len;
6299 }
6300 if (cur_seq_len < min_seq_len) {
6301 PyErr_Format(PyExc_ValueError,
6302 "loadn_till_f32(), according to provided stride %d, the "
6303 "minimum acceptable size of the required sequence is %d, given(%d)",
6304 stride, min_seq_len, cur_seq_len
6305 );
6306 goto err;
6307 }
6308 npyv_f32 rvec = npyv_loadn_till_f32(
6309 seq_ptr, stride
6310 #if 1
6311 , nlane_arg.data.u32
6312 #endif
6313 #if 1
6314 , fill_arg.data.f32
6315 #endif
6316 );
6317 simd_arg ret = {
6318 .dtype = simd_data_vf32, .data = {.vf32=rvec}
6319 };
6320 simd_arg_free(&seq_arg);
6321 return simd_arg_to_obj(&ret);
6322err:
6323 simd_arg_free(&seq_arg);
6324 return NULL((void*)0);
6325}
6326
6327#line 112
6328static PyObject *
6329simd__intrin_loadn_tillz_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6330{
6331 simd_arg seq_arg = {.dtype = simd_data_qf32};
6332 simd_arg stride_arg = {.dtype = simd_data_s64};
6333#if 1
6334 simd_arg nlane_arg = {.dtype = simd_data_u32};
6335#endif // till
6336#if 0
6337 simd_arg fill_arg = {.dtype = simd_data_f32};
6338#endif
6339 if (!PyArg_ParseTuple(
6340 args, "O&O&O&:loadn_tillz_f32",
6341 simd_arg_converter, &seq_arg,
6342 simd_arg_converter, &stride_arg
6343#if 1
6344 ,simd_arg_converter, &nlane_arg
6345#endif
6346#if 0
6347 ,simd_arg_converter, &fill_arg
6348#endif
6349 )) {
6350 return NULL((void*)0);
6351 }
6352 npyv_lanetype_f32 *seq_ptr = seq_arg.data.qf32;
6353 npy_intp stride = (npy_intp)stride_arg.data.s64;
6354 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
6355 Py_ssize_t min_seq_len = stride * npyv_nlanes_f324;
6356 if (stride < 0) {
6357 seq_ptr += cur_seq_len -1;
6358 min_seq_len = -min_seq_len;
6359 }
6360 if (cur_seq_len < min_seq_len) {
6361 PyErr_Format(PyExc_ValueError,
6362 "loadn_tillz_f32(), according to provided stride %d, the "
6363 "minimum acceptable size of the required sequence is %d, given(%d)",
6364 stride, min_seq_len, cur_seq_len
6365 );
6366 goto err;
6367 }
6368 npyv_f32 rvec = npyv_loadn_tillz_f32(
6369 seq_ptr, stride
6370 #if 1
6371 , nlane_arg.data.u32
6372 #endif
6373 #if 0
6374 , fill_arg.data.f32
6375 #endif
6376 );
6377 simd_arg ret = {
6378 .dtype = simd_data_vf32, .data = {.vf32=rvec}
6379 };
6380 simd_arg_free(&seq_arg);
6381 return simd_arg_to_obj(&ret);
6382err:
6383 simd_arg_free(&seq_arg);
6384 return NULL((void*)0);
6385}
6386
6387
6388// Non-contiguous Store
6389#line 178
6390static PyObject *
6391simd__intrin_storen_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6392{
6393 simd_arg seq_arg = {.dtype = simd_data_qf32};
6394 simd_arg stride_arg = {.dtype = simd_data_s64};
6395 simd_arg vec_arg = {.dtype = simd_data_vf32};
6396#if 0
6397 simd_arg nlane_arg = {.dtype = simd_data_u32};
6398#endif
6399 if (!PyArg_ParseTuple(
6400 args, "O&O&O&:storen_f32",
6401 simd_arg_converter, &seq_arg,
6402 simd_arg_converter, &stride_arg
6403#if 0
6404 ,simd_arg_converter, &nlane_arg
6405#endif
6406 ,simd_arg_converter, &vec_arg
6407 )) {
6408 return NULL((void*)0);
6409 }
6410 npyv_lanetype_f32 *seq_ptr = seq_arg.data.qf32;
6411 npy_intp stride = (npy_intp)stride_arg.data.s64;
6412 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
6413 Py_ssize_t min_seq_len = stride * npyv_nlanes_f324;
6414 if (stride < 0) {
6415 seq_ptr += cur_seq_len -1;
6416 min_seq_len = -min_seq_len;
6417 }
6418 // overflow guard
6419 if (cur_seq_len < min_seq_len) {
6420 PyErr_Format(PyExc_ValueError,
6421 "storen_f32(), according to provided stride %d, the"
6422 "minimum acceptable size of the required sequence is %d, given(%d)",
6423 stride, min_seq_len, cur_seq_len
6424 );
6425 goto err;
6426 }
6427 npyv_storen_f32(
6428 seq_ptr, stride
6429 #if 0
6430 ,nlane_arg.data.u32
6431 #endif
6432 ,vec_arg.data.vf32
6433 );
6434 // write-back
6435 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf32, simd_data_qf32)) {
6436 goto err;
6437 }
6438 simd_arg_free(&seq_arg);
6439 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6440err:
6441 simd_arg_free(&seq_arg);
6442 return NULL((void*)0);
6443}
6444
6445#line 178
6446static PyObject *
6447simd__intrin_storen_till_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6448{
6449 simd_arg seq_arg = {.dtype = simd_data_qf32};
6450 simd_arg stride_arg = {.dtype = simd_data_s64};
6451 simd_arg vec_arg = {.dtype = simd_data_vf32};
6452#if 1
6453 simd_arg nlane_arg = {.dtype = simd_data_u32};
6454#endif
6455 if (!PyArg_ParseTuple(
6456 args, "O&O&O&O&:storen_f32",
6457 simd_arg_converter, &seq_arg,
6458 simd_arg_converter, &stride_arg
6459#if 1
6460 ,simd_arg_converter, &nlane_arg
6461#endif
6462 ,simd_arg_converter, &vec_arg
6463 )) {
6464 return NULL((void*)0);
6465 }
6466 npyv_lanetype_f32 *seq_ptr = seq_arg.data.qf32;
6467 npy_intp stride = (npy_intp)stride_arg.data.s64;
6468 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
6469 Py_ssize_t min_seq_len = stride * npyv_nlanes_f324;
6470 if (stride < 0) {
6471 seq_ptr += cur_seq_len -1;
6472 min_seq_len = -min_seq_len;
6473 }
6474 // overflow guard
6475 if (cur_seq_len < min_seq_len) {
6476 PyErr_Format(PyExc_ValueError,
6477 "storen_till_f32(), according to provided stride %d, the"
6478 "minimum acceptable size of the required sequence is %d, given(%d)",
6479 stride, min_seq_len, cur_seq_len
6480 );
6481 goto err;
6482 }
6483 npyv_storen_till_f32(
6484 seq_ptr, stride
6485 #if 1
6486 ,nlane_arg.data.u32
6487 #endif
6488 ,vec_arg.data.vf32
6489 );
6490 // write-back
6491 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf32, simd_data_qf32)) {
6492 goto err;
6493 }
6494 simd_arg_free(&seq_arg);
6495 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6496err:
6497 simd_arg_free(&seq_arg);
6498 return NULL((void*)0);
6499}
6500
6501#endif // 1
6502
6503/***************************
6504 * Misc
6505 ***************************/
6506SIMD_IMPL_INTRIN_0(zero_f32, vf32)static PyObject *simd__intrin_zero_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "zero_f32") ) return ((void*)0); simd_arg a = { .
dtype = simd_data_vf32, .data = {.vf32 = _mm_setzero_ps()}, }
; return simd_arg_to_obj(&a); }
6507SIMD_IMPL_INTRIN_1(setall_f32, vf32, f32)static PyObject *simd__intrin_setall_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_f32}; if (!PyArg_ParseTuple( args, "O&:"
"setall_f32", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vf32 = _mm_set1_ps( arg.data.f32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vf32
}; return simd_arg_to_obj(&ret); }
6508SIMD_IMPL_INTRIN_3(select_f32, vf32, vb32, vf32, vf32)static PyObject *simd__intrin_select_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; simd_arg arg3 = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""select_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf32 = npyv_select_f32
( arg1.data.vb32, arg2.data.vf32, arg3.data.vf32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6509
6510#line 246
6511#if 1
6512SIMD_IMPL_INTRIN_1(reinterpret_u8_f32, vu8, vf32)static PyObject *simd__intrin_reinterpret_u8_f32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u8_f32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu8 = _mm_castps_si128
( arg.data.vf32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vu8 }; return simd_arg_to_obj
(&ret); }
6513#endif // simd_sup2
6514
6515#line 246
6516#if 1
6517SIMD_IMPL_INTRIN_1(reinterpret_s8_f32, vs8, vf32)static PyObject *simd__intrin_reinterpret_s8_f32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s8_f32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs8 = _mm_castps_si128
( arg.data.vf32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vs8 }; return simd_arg_to_obj
(&ret); }
6518#endif // simd_sup2
6519
6520#line 246
6521#if 1
6522SIMD_IMPL_INTRIN_1(reinterpret_u16_f32, vu16, vf32)static PyObject *simd__intrin_reinterpret_u16_f32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u16_f32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu16 = _mm_castps_si128
( arg.data.vf32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vu16 }; return simd_arg_to_obj
(&ret); }
6523#endif // simd_sup2
6524
6525#line 246
6526#if 1
6527SIMD_IMPL_INTRIN_1(reinterpret_s16_f32, vs16, vf32)static PyObject *simd__intrin_reinterpret_s16_f32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s16_f32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs16 = _mm_castps_si128
( arg.data.vf32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vs16 }; return simd_arg_to_obj
(&ret); }
6528#endif // simd_sup2
6529
6530#line 246
6531#if 1
6532SIMD_IMPL_INTRIN_1(reinterpret_u32_f32, vu32, vf32)static PyObject *simd__intrin_reinterpret_u32_f32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u32_f32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu32 = _mm_castps_si128
( arg.data.vf32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vu32 }; return simd_arg_to_obj
(&ret); }
6533#endif // simd_sup2
6534
6535#line 246
6536#if 1
6537SIMD_IMPL_INTRIN_1(reinterpret_s32_f32, vs32, vf32)static PyObject *simd__intrin_reinterpret_s32_f32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s32_f32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs32 = _mm_castps_si128
( arg.data.vf32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vs32 }; return simd_arg_to_obj
(&ret); }
6538#endif // simd_sup2
6539
6540#line 246
6541#if 1
6542SIMD_IMPL_INTRIN_1(reinterpret_u64_f32, vu64, vf32)static PyObject *simd__intrin_reinterpret_u64_f32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u64_f32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu64 = _mm_castps_si128
( arg.data.vf32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vu64 }; return simd_arg_to_obj
(&ret); }
6543#endif // simd_sup2
6544
6545#line 246
6546#if 1
6547SIMD_IMPL_INTRIN_1(reinterpret_s64_f32, vs64, vf32)static PyObject *simd__intrin_reinterpret_s64_f32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s64_f32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs64 = _mm_castps_si128
( arg.data.vf32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vs64 }; return simd_arg_to_obj
(&ret); }
6548#endif // simd_sup2
6549
6550#line 246
6551#if 1
6552SIMD_IMPL_INTRIN_1(reinterpret_f32_f32, vf32, vf32)static PyObject *simd__intrin_reinterpret_f32_f32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f32_f32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf32 = arg.data.
vf32}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vf32 }; return simd_arg_to_obj(&ret)
; }
6553#endif // simd_sup2
6554
6555#line 246
6556#if NPY_SIMD_F641
6557SIMD_IMPL_INTRIN_1(reinterpret_f64_f32, vf64, vf32)static PyObject *simd__intrin_reinterpret_f64_f32 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f64_f32", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf64 = _mm_castps_pd
( arg.data.vf32 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf64 }; return simd_arg_to_obj
(&ret); }
6558#endif // simd_sup2
6559
6560
6561/**
6562 * special definition due to the nature of intrinsics
6563 * npyv_setf_f32 and npy_set_f32.
6564*/
6565#line 258
6566static PyObject *
6567simd__intrin_setf_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6568{
6569 npyv_lanetype_f32 *data = simd_sequence_from_iterable(args, simd_data_qf32, npyv_nlanes_f324);
6570 if (data == NULL((void*)0)) {
6571 return NULL((void*)0);
6572 }
6573 simd_data r = {.vf32 = npyv_setf_f32(npyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
6574 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
6575 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
6576 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
6577 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
6578 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
6579 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
6580 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
6581 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
6582 data[64] // for setfnpyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
6583 )npyv__setr_ps((float)(data[1]), (float)(data[2]), (float)(data
[3]), (float)(data[4]))
};
6584 simd_sequence_free(data);
6585 return (PyObject*)PySIMDVector_FromData(r, simd_data_vf32);
6586}
6587
6588#line 258
6589static PyObject *
6590simd__intrin_set_f32(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6591{
6592 npyv_lanetype_f32 *data = simd_sequence_from_iterable(args, simd_data_qf32, npyv_nlanes_f324);
6593 if (data == NULL((void*)0)) {
6594 return NULL((void*)0);
6595 }
6596 simd_data r = {.vf32 = npyv_set_f32(npyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
6597 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
6598 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
6599 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
6600 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
6601 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
6602 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
6603 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
6604 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
6605 data[64] // for setfnpyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
6606 )npyv__setr_ps((float)(data[0]), (float)(data[1]), (float)(data
[2]), (float)(data[3]))
};
6607 simd_sequence_free(data);
6608 return (PyObject*)PySIMDVector_FromData(r, simd_data_vf32);
6609}
6610
6611
6612/***************************
6613 * Reorder
6614 ***************************/
6615#line 287
6616SIMD_IMPL_INTRIN_2(combinel_f32, vf32, vf32, vf32)static PyObject *simd__intrin_combinel_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""combinel_f32"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vf32 = _mm_castsi128_ps
(_mm_unpacklo_epi64(_mm_castps_si128(arg1.data.vf32), _mm_castps_si128
(arg2.data.vf32)))}; simd_arg_free(&arg1); simd_arg_free(
&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vf32
}; return simd_arg_to_obj(&ret); }
6617
6618#line 287
6619SIMD_IMPL_INTRIN_2(combineh_f32, vf32, vf32, vf32)static PyObject *simd__intrin_combineh_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""combineh_f32"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vf32 = _mm_castsi128_ps
(_mm_unpackhi_epi64(_mm_castps_si128(arg1.data.vf32), _mm_castps_si128
(arg2.data.vf32)))}; simd_arg_free(&arg1); simd_arg_free(
&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vf32
}; return simd_arg_to_obj(&ret); }
6620
6621
6622#line 293
6623SIMD_IMPL_INTRIN_2(combine_f32, vf32x2, vf32, vf32)static PyObject *simd__intrin_combine_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""combine_f32",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vf32x2 = npyv_combine_f32
( arg1.data.vf32, arg2.data.vf32 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vf32x2 }; return simd_arg_to_obj(&ret); }
6624
6625#line 293
6626SIMD_IMPL_INTRIN_2(zip_f32, vf32x2, vf32, vf32)static PyObject *simd__intrin_zip_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""zip_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32x2 = npyv_zip_f32( arg1.data.vf32,
arg2.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vf32x2
}; return simd_arg_to_obj(&ret); }
6627
6628
6629#if 1
6630SIMD_IMPL_INTRIN_1(rev64_f32, vf32, vf32)static PyObject *simd__intrin_rev64_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&:"
"rev64_f32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vf32 = npyv_rev64_f32( arg.data.vf32 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vf32 }; return simd_arg_to_obj(&ret); }
6631#endif
6632
6633/***************************
6634 * Operators
6635 ***************************/
6636#if 0 > 0
6637SIMD_IMPL_INTRIN_2(shl_f32, vf32, vf32, u8)static PyObject *simd__intrin_shl_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shl_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = npyv_shl_f32( arg1.data.vf32, arg2
.data.u8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6638SIMD_IMPL_INTRIN_2(shr_f32, vf32, vf32, u8)static PyObject *simd__intrin_shr_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shr_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = npyv_shr_f32( arg1.data.vf32, arg2
.data.u8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6639// immediate constant
6640SIMD_IMPL_INTRIN_2IMM(shli_f32, vf32, vf32, 0)static PyObject *simd__intrin_shli_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shli_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vf32 = SIMD__IMPL_COUNT_0
( SIMD__REPEAT_2IMM, shli_f32, vf32 ) data.vf32; simd_arg_free
(&arg1); simd_arg ret = { .data = data, .dtype = simd_data_vf32
}; return simd_arg_to_obj(&ret); }
6641SIMD_IMPL_INTRIN_2IMM(shri_f32, vf32, vf32, 0)static PyObject *simd__intrin_shri_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_u8
}; if (!PyArg_ParseTuple( args, "O&O&:""shri_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.u64 = 0}; data.vf32 = SIMD__IMPL_COUNT_0
( SIMD__REPEAT_2IMM, shri_f32, vf32 ) data.vf32; simd_arg_free
(&arg1); simd_arg ret = { .data = data, .dtype = simd_data_vf32
}; return simd_arg_to_obj(&ret); }
6642#endif // shl_imm
6643
6644#line 314
6645SIMD_IMPL_INTRIN_2(and_f32, vf32, vf32, vf32)static PyObject *simd__intrin_and_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""and_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = _mm_and_ps( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6646
6647#line 314
6648SIMD_IMPL_INTRIN_2(or_f32, vf32, vf32, vf32)static PyObject *simd__intrin_or_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""or_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = _mm_or_ps( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6649
6650#line 314
6651SIMD_IMPL_INTRIN_2(xor_f32, vf32, vf32, vf32)static PyObject *simd__intrin_xor_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = _mm_xor_ps( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6652
6653
6654SIMD_IMPL_INTRIN_1(not_f32, vf32, vf32)static PyObject *simd__intrin_not_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&:"
"not_f32", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vf32 = _mm_xor_ps(arg.data.vf32, _mm_castsi128_ps
(_mm_set1_epi32(-1)))}; simd_arg_free(&arg); simd_arg ret
= { .data = data, .dtype = simd_data_vf32 }; return simd_arg_to_obj
(&ret); }
6655
6656#line 322
6657SIMD_IMPL_INTRIN_2(cmpeq_f32, vb32, vf32, vf32)static PyObject *simd__intrin_cmpeq_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpeq_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_castps_si128(_mm_cmpeq_ps(arg1
.data.vf32, arg2.data.vf32))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
6658
6659#line 322
6660SIMD_IMPL_INTRIN_2(cmpneq_f32, vb32, vf32, vf32)static PyObject *simd__intrin_cmpneq_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpneq_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_castps_si128(_mm_cmpneq_ps(
arg1.data.vf32, arg2.data.vf32))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
6661
6662#line 322
6663SIMD_IMPL_INTRIN_2(cmpgt_f32, vb32, vf32, vf32)static PyObject *simd__intrin_cmpgt_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpgt_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_castps_si128(_mm_cmpgt_ps(arg1
.data.vf32, arg2.data.vf32))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
6664
6665#line 322
6666SIMD_IMPL_INTRIN_2(cmpge_f32, vb32, vf32, vf32)static PyObject *simd__intrin_cmpge_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpge_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_castps_si128(_mm_cmpge_ps(arg1
.data.vf32, arg2.data.vf32))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
6667
6668#line 322
6669SIMD_IMPL_INTRIN_2(cmplt_f32, vb32, vf32, vf32)static PyObject *simd__intrin_cmplt_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmplt_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_castps_si128(_mm_cmplt_ps(arg1
.data.vf32, arg2.data.vf32))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
6670
6671#line 322
6672SIMD_IMPL_INTRIN_2(cmple_f32, vb32, vf32, vf32)static PyObject *simd__intrin_cmple_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""cmple_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_castps_si128(_mm_cmple_ps(arg1
.data.vf32, arg2.data.vf32))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb32
}; return simd_arg_to_obj(&ret); }
6673
6674
6675/***************************
6676 * Conversion
6677 ***************************/
6678SIMD_IMPL_INTRIN_1(cvt_f32_b32, vf32, vb32)static PyObject *simd__intrin_cvt_f32_b32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb32}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_f32_b32", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vf32 = _mm_castsi128_ps( arg.data.vb32
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vf32 }; return simd_arg_to_obj(&ret); }
6679SIMD_IMPL_INTRIN_1(cvt_b32_f32, vb32, vf32)static PyObject *simd__intrin_cvt_b32_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_b32_f32", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vb32 = _mm_castps_si128( arg.data.vf32
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vb32 }; return simd_arg_to_obj(&ret); }
#if 0
// expand_f32_f32 is not supported for this configuration; compiled out.
SIMD_IMPL_INTRIN_1(expand_f32_f32, vf32x2, vf32)
#endif // expand_sup
6683/***************************
6684 * Arithmetic
6685 ***************************/
6686#line 339
6687SIMD_IMPL_INTRIN_2(add_f32, vf32, vf32, vf32)static PyObject *simd__intrin_add_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""add_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = _mm_add_ps( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6688
6689#line 339
6690SIMD_IMPL_INTRIN_2(sub_f32, vf32, vf32, vf32)static PyObject *simd__intrin_sub_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""sub_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = _mm_sub_ps( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6691
6692
#if 0
// Saturated add/sub are not defined for floating point; compiled out.
#line 346
SIMD_IMPL_INTRIN_2(adds_f32, vf32, vf32, vf32)

#line 346
SIMD_IMPL_INTRIN_2(subs_f32, vf32, vf32, vf32)

#endif // sat_sup
6701
6702#if 1
6703SIMD_IMPL_INTRIN_2(mul_f32, vf32, vf32, vf32)static PyObject *simd__intrin_mul_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""mul_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = _mm_mul_ps( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6704#endif // mul_sup
6705
6706#if 1
6707SIMD_IMPL_INTRIN_2(div_f32, vf32, vf32, vf32)static PyObject *simd__intrin_div_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""div_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = _mm_div_ps( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6708#endif // div_sup
6709
#if 0
// Precomputed-divisor integer division helpers do not apply to f32 on
// this configuration; compiled out.
SIMD_IMPL_INTRIN_1(divisor_f32, vf32x3, f32)
SIMD_IMPL_INTRIN_2(divc_f32, vf32, vf32, vf32x3)
#endif // intdiv_sup
6714
6715#if 1
6716#line 367
6717SIMD_IMPL_INTRIN_3(muladd_f32, vf32, vf32, vf32, vf32)static PyObject *simd__intrin_muladd_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; simd_arg arg3 = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""muladd_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf32 = npyv_muladd_f32
( arg1.data.vf32, arg2.data.vf32, arg3.data.vf32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6718
6719#line 367
6720SIMD_IMPL_INTRIN_3(mulsub_f32, vf32, vf32, vf32, vf32)static PyObject *simd__intrin_mulsub_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; simd_arg arg3 = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""mulsub_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf32 = npyv_mulsub_f32
( arg1.data.vf32, arg2.data.vf32, arg3.data.vf32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6721
6722#line 367
6723SIMD_IMPL_INTRIN_3(nmuladd_f32, vf32, vf32, vf32, vf32)static PyObject *simd__intrin_nmuladd_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; simd_arg arg3 = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmuladd_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf32 = npyv_nmuladd_f32
( arg1.data.vf32, arg2.data.vf32, arg3.data.vf32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6724
6725#line 367
6726SIMD_IMPL_INTRIN_3(nmulsub_f32, vf32, vf32, vf32, vf32)static PyObject *simd__intrin_nmulsub_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; simd_arg arg3 = {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmulsub_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf32 = npyv_nmulsub_f32
( arg1.data.vf32, arg2.data.vf32, arg3.data.vf32 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6727
6728#endif // fused_sup
6729
6730#if 1
6731SIMD_IMPL_INTRIN_1(sum_f32, f32, vf32)static PyObject *simd__intrin_sum_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&:"
"sum_f32", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.f32 = npyv_sum_f32( arg.data.vf32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_f32
}; return simd_arg_to_obj(&ret); }
6732#endif // sum_sup
6733
#if 0
// Widening horizontal sum is not applicable for f32 here; compiled out.
SIMD_IMPL_INTRIN_1(sumup_f32, f32, vf32)
#endif // sumup_sup
6737
6738/***************************
6739 * Math
6740 ***************************/
6741#if 1
6742#line 386
6743SIMD_IMPL_INTRIN_1(sqrt_f32, vf32, vf32)static PyObject *simd__intrin_sqrt_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&:"
"sqrt_f32", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vf32 = _mm_sqrt_ps( arg.data.vf32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vf32
}; return simd_arg_to_obj(&ret); }
6744
6745#line 386
6746SIMD_IMPL_INTRIN_1(recip_f32, vf32, vf32)static PyObject *simd__intrin_recip_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&:"
"recip_f32", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vf32 = npyv_recip_f32( arg.data.vf32 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vf32 }; return simd_arg_to_obj(&ret); }
6747
6748#line 386
6749SIMD_IMPL_INTRIN_1(abs_f32, vf32, vf32)static PyObject *simd__intrin_abs_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&:"
"abs_f32", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vf32 = npyv_abs_f32( arg.data.vf32 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vf32
}; return simd_arg_to_obj(&ret); }
6750
6751#line 386
6752SIMD_IMPL_INTRIN_1(square_f32, vf32, vf32)static PyObject *simd__intrin_square_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&:"
"square_f32", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vf32 = npyv_square_f32( arg.data.vf32 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vf32 }; return simd_arg_to_obj(&ret); }
6753
6754#endif
6755
6756#line 393
6757SIMD_IMPL_INTRIN_2(max_f32, vf32, vf32, vf32)static PyObject *simd__intrin_max_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""max_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = _mm_max_ps( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6758
6759#line 393
6760SIMD_IMPL_INTRIN_2(min_f32, vf32, vf32, vf32)static PyObject *simd__intrin_min_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""min_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = _mm_min_ps( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6761
6762
6763#if 1
6764#line 400
6765SIMD_IMPL_INTRIN_2(maxp_f32, vf32, vf32, vf32)static PyObject *simd__intrin_maxp_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""maxp_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = npyv_maxp_f32( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6766
6767#line 400
6768SIMD_IMPL_INTRIN_2(minp_f32, vf32, vf32, vf32)static PyObject *simd__intrin_minp_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; if (!PyArg_ParseTuple( args, "O&O&:""minp_f32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf32 = npyv_minp_f32( arg1.data.vf32, arg2
.data.vf32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf32 }; return
simd_arg_to_obj(&ret); }
6769
6770#endif
6771
6772/***************************
6773 * Mask operations
6774 ***************************/
6775#line 410
6776 SIMD_IMPL_INTRIN_4(ifadd_f32, vf32, vb32, vf32, vf32, vf32)static PyObject *simd__intrin_ifadd_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; simd_arg arg3 = {.dtype = simd_data_vf32}; simd_arg arg4 =
{.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifadd_f32", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vf32 = npyv_ifadd_f32
( arg1.data.vb32, arg2.data.vf32, arg3.data.vf32, arg4.data.vf32
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vf32 }; return simd_arg_to_obj(&
ret); }
6777
6778#line 410
6779 SIMD_IMPL_INTRIN_4(ifsub_f32, vf32, vb32, vf32, vf32, vf32)static PyObject *simd__intrin_ifsub_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vf32
}; simd_arg arg3 = {.dtype = simd_data_vf32}; simd_arg arg4 =
{.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifsub_f32", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vf32 = npyv_ifsub_f32
( arg1.data.vb32, arg2.data.vf32, arg3.data.vf32, arg4.data.vf32
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vf32 }; return simd_arg_to_obj(&
ret); }
6780
6781
6782#endif // simd_sup
6783
6784#line 34
6785#if NPY_SIMD_F641
6786/***************************
6787 * Memory
6788 ***************************/
6789#line 41
6790SIMD_IMPL_INTRIN_1(load_f64, vf64, qf64)static PyObject *simd__intrin_load_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qf64}; if (!PyArg_ParseTuple( args, "O&:"
"load_f64", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vf64 = _mm_loadu_pd( arg.data.qf64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vf64
}; return simd_arg_to_obj(&ret); }
6791
6792#line 41
6793SIMD_IMPL_INTRIN_1(loada_f64, vf64, qf64)static PyObject *simd__intrin_loada_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qf64}; if (!PyArg_ParseTuple( args, "O&:"
"loada_f64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vf64 = _mm_load_pd( arg.data.qf64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vf64
}; return simd_arg_to_obj(&ret); }
6794
6795#line 41
6796SIMD_IMPL_INTRIN_1(loads_f64, vf64, qf64)static PyObject *simd__intrin_loads_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qf64}; if (!PyArg_ParseTuple( args, "O&:"
"loads_f64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vf64 = _mm_castsi128_pd(_mm_load_si128(
(const __m128i *)(arg.data.qf64)))}; simd_arg_free(&arg);
simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
6797
6798#line 41
6799SIMD_IMPL_INTRIN_1(loadl_f64, vf64, qf64)static PyObject *simd__intrin_loadl_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_qf64}; if (!PyArg_ParseTuple( args, "O&:"
"loadl_f64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vf64 = _mm_castsi128_pd(npyv_loadl_u32(
(const npy_uint32*)(arg.data.qf64)))}; simd_arg_free(&arg
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
6800
6801#line 46
6802// special definition due to the nature of store
6803static PyObject *
6804simd__intrin_store_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6805{
6806 simd_arg seq_arg = {.dtype = simd_data_qf64};
6807 simd_arg vec_arg = {.dtype = simd_data_vf64};
6808 if (!PyArg_ParseTuple(
6809 args, "O&O&:store_f64",
6810 simd_arg_converter, &seq_arg,
6811 simd_arg_converter, &vec_arg
6812 )) {
6813 return NULL((void*)0);
6814 }
6815 npyv_store_f64_mm_storeu_pd(seq_arg.data.qf64, vec_arg.data.vf64);
6816 // write-back
6817 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf64, simd_data_qf64)) {
6818 simd_arg_free(&seq_arg);
6819 return NULL((void*)0);
6820 }
6821 simd_arg_free(&seq_arg);
6822 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6823}
6824
6825#line 46
6826// special definition due to the nature of storea
6827static PyObject *
6828simd__intrin_storea_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6829{
6830 simd_arg seq_arg = {.dtype = simd_data_qf64};
6831 simd_arg vec_arg = {.dtype = simd_data_vf64};
6832 if (!PyArg_ParseTuple(
6833 args, "O&O&:storea_f64",
6834 simd_arg_converter, &seq_arg,
6835 simd_arg_converter, &vec_arg
6836 )) {
6837 return NULL((void*)0);
6838 }
6839 npyv_storea_f64_mm_store_pd(seq_arg.data.qf64, vec_arg.data.vf64);
6840 // write-back
6841 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf64, simd_data_qf64)) {
6842 simd_arg_free(&seq_arg);
6843 return NULL((void*)0);
6844 }
6845 simd_arg_free(&seq_arg);
6846 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6847}
6848
6849#line 46
6850// special definition due to the nature of stores
6851static PyObject *
6852simd__intrin_stores_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6853{
6854 simd_arg seq_arg = {.dtype = simd_data_qf64};
6855 simd_arg vec_arg = {.dtype = simd_data_vf64};
6856 if (!PyArg_ParseTuple(
6857 args, "O&O&:stores_f64",
6858 simd_arg_converter, &seq_arg,
6859 simd_arg_converter, &vec_arg
6860 )) {
6861 return NULL((void*)0);
6862 }
6863 npyv_stores_f64_mm_stream_pd(seq_arg.data.qf64, vec_arg.data.vf64);
6864 // write-back
6865 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf64, simd_data_qf64)) {
6866 simd_arg_free(&seq_arg);
6867 return NULL((void*)0);
6868 }
6869 simd_arg_free(&seq_arg);
6870 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6871}
6872
6873#line 46
6874// special definition due to the nature of storel
6875static PyObject *
6876simd__intrin_storel_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6877{
6878 simd_arg seq_arg = {.dtype = simd_data_qf64};
6879 simd_arg vec_arg = {.dtype = simd_data_vf64};
6880 if (!PyArg_ParseTuple(
6881 args, "O&O&:storel_f64",
6882 simd_arg_converter, &seq_arg,
6883 simd_arg_converter, &vec_arg
6884 )) {
6885 return NULL((void*)0);
6886 }
6887 npyv_storel_f64(seq_arg.data.qf64, vec_arg.data.vf64)_mm_storel_epi64((__m128i*)(seq_arg.data.qf64), _mm_castpd_si128
(vec_arg.data.vf64));
;
6888 // write-back
6889 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf64, simd_data_qf64)) {
6890 simd_arg_free(&seq_arg);
6891 return NULL((void*)0);
6892 }
6893 simd_arg_free(&seq_arg);
6894 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6895}
6896
6897#line 46
6898// special definition due to the nature of storeh
6899static PyObject *
6900simd__intrin_storeh_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6901{
6902 simd_arg seq_arg = {.dtype = simd_data_qf64};
6903 simd_arg vec_arg = {.dtype = simd_data_vf64};
6904 if (!PyArg_ParseTuple(
6905 args, "O&O&:storeh_f64",
6906 simd_arg_converter, &seq_arg,
6907 simd_arg_converter, &vec_arg
6908 )) {
6909 return NULL((void*)0);
6910 }
6911 npyv_storeh_f64(seq_arg.data.qf64, vec_arg.data.vf64)npyv_storeh_u32((npy_uint32*)(seq_arg.data.qf64), _mm_castpd_si128
(vec_arg.data.vf64))
;
6912 // write-back
6913 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf64, simd_data_qf64)) {
6914 simd_arg_free(&seq_arg);
6915 return NULL((void*)0);
6916 }
6917 simd_arg_free(&seq_arg);
6918 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6919}
6920
6921
6922/****************************************
6923 * Non-contiguous/Partial Memory access
6924 ****************************************/
6925#if 1
6926// Partial Load
6927SIMD_IMPL_INTRIN_3(load_till_f64, vf64, qf64, u32, f64)static PyObject *simd__intrin_load_till_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qf64}; simd_arg arg2 = {.dtype = simd_data_u32
}; simd_arg arg3 = {.dtype = simd_data_f64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""load_till_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf64 = npyv_load_till_f64
( arg1.data.qf64, arg2.data.u32, arg3.data.f64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
6928SIMD_IMPL_INTRIN_2(load_tillz_f64, vf64, qf64, u32)static PyObject *simd__intrin_load_tillz_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_qf64}; simd_arg arg2 = {.dtype = simd_data_u32
}; if (!PyArg_ParseTuple( args, "O&O&:""load_tillz_f64"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vf64 = npyv_load_tillz_f64
( arg1.data.qf64, arg2.data.u32 )}; simd_arg_free(&arg1);
simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vf64 }; return simd_arg_to_obj(&ret); }
6929
6930// Partial Store
6931static PyObject *
6932simd__intrin_store_till_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6933{
6934 simd_arg seq_arg = {.dtype = simd_data_qf64};
6935 simd_arg nlane_arg = {.dtype = simd_data_u32};
6936 simd_arg vec_arg = {.dtype = simd_data_vf64};
6937 if (!PyArg_ParseTuple(
6938 args, "O&O&O&:store_till_f64",
6939 simd_arg_converter, &seq_arg,
6940 simd_arg_converter, &nlane_arg,
6941 simd_arg_converter, &vec_arg
6942 )) {
6943 return NULL((void*)0);
6944 }
6945 npyv_store_till_f64(
6946 seq_arg.data.qf64, nlane_arg.data.u32, vec_arg.data.vf64
6947 );
6948 // write-back
6949 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf64, simd_data_qf64)) {
6950 simd_arg_free(&seq_arg);
6951 return NULL((void*)0);
6952 }
6953 simd_arg_free(&seq_arg);
6954 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
6955}
6956
6957// Non-contiguous Load
6958#line 112
6959static PyObject *
6960simd__intrin_loadn_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
6961{
6962 simd_arg seq_arg = {.dtype = simd_data_qf64};
6963 simd_arg stride_arg = {.dtype = simd_data_s64};
6964#if 0
6965 simd_arg nlane_arg = {.dtype = simd_data_u32};
6966#endif // till
6967#if 0
6968 simd_arg fill_arg = {.dtype = simd_data_f64};
6969#endif
6970 if (!PyArg_ParseTuple(
6971 args, "O&O&:loadn_f64",
6972 simd_arg_converter, &seq_arg,
6973 simd_arg_converter, &stride_arg
6974#if 0
6975 ,simd_arg_converter, &nlane_arg
6976#endif
6977#if 0
6978 ,simd_arg_converter, &fill_arg
6979#endif
6980 )) {
6981 return NULL((void*)0);
6982 }
6983 npyv_lanetype_f64 *seq_ptr = seq_arg.data.qf64;
6984 npy_intp stride = (npy_intp)stride_arg.data.s64;
6985 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
6986 Py_ssize_t min_seq_len = stride * npyv_nlanes_f642;
6987 if (stride < 0) {
6988 seq_ptr += cur_seq_len -1;
6989 min_seq_len = -min_seq_len;
6990 }
6991 if (cur_seq_len < min_seq_len) {
6992 PyErr_Format(PyExc_ValueError,
6993 "loadn_f64(), according to provided stride %d, the "
6994 "minimum acceptable size of the required sequence is %d, given(%d)",
6995 stride, min_seq_len, cur_seq_len
6996 );
6997 goto err;
6998 }
6999 npyv_f64 rvec = npyv_loadn_f64(
7000 seq_ptr, stride
7001 #if 0
7002 , nlane_arg.data.u32
7003 #endif
7004 #if 0
7005 , fill_arg.data.f64
7006 #endif
7007 );
7008 simd_arg ret = {
7009 .dtype = simd_data_vf64, .data = {.vf64=rvec}
7010 };
7011 simd_arg_free(&seq_arg);
7012 return simd_arg_to_obj(&ret);
7013err:
7014 simd_arg_free(&seq_arg);
7015 return NULL((void*)0);
7016}
7017
7018#line 112
7019static PyObject *
7020simd__intrin_loadn_till_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
7021{
7022 simd_arg seq_arg = {.dtype = simd_data_qf64};
7023 simd_arg stride_arg = {.dtype = simd_data_s64};
7024#if 1
7025 simd_arg nlane_arg = {.dtype = simd_data_u32};
7026#endif // till
7027#if 1
7028 simd_arg fill_arg = {.dtype = simd_data_f64};
7029#endif
7030 if (!PyArg_ParseTuple(
7031 args, "O&O&O&O&:loadn_till_f64",
7032 simd_arg_converter, &seq_arg,
7033 simd_arg_converter, &stride_arg
7034#if 1
7035 ,simd_arg_converter, &nlane_arg
7036#endif
7037#if 1
7038 ,simd_arg_converter, &fill_arg
7039#endif
7040 )) {
7041 return NULL((void*)0);
7042 }
7043 npyv_lanetype_f64 *seq_ptr = seq_arg.data.qf64;
7044 npy_intp stride = (npy_intp)stride_arg.data.s64;
7045 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
7046 Py_ssize_t min_seq_len = stride * npyv_nlanes_f642;
7047 if (stride < 0) {
7048 seq_ptr += cur_seq_len -1;
7049 min_seq_len = -min_seq_len;
7050 }
7051 if (cur_seq_len < min_seq_len) {
7052 PyErr_Format(PyExc_ValueError,
7053 "loadn_till_f64(), according to provided stride %d, the "
7054 "minimum acceptable size of the required sequence is %d, given(%d)",
7055 stride, min_seq_len, cur_seq_len
7056 );
7057 goto err;
7058 }
7059 npyv_f64 rvec = npyv_loadn_till_f64(
7060 seq_ptr, stride
7061 #if 1
7062 , nlane_arg.data.u32
7063 #endif
7064 #if 1
7065 , fill_arg.data.f64
7066 #endif
7067 );
7068 simd_arg ret = {
7069 .dtype = simd_data_vf64, .data = {.vf64=rvec}
7070 };
7071 simd_arg_free(&seq_arg);
7072 return simd_arg_to_obj(&ret);
7073err:
7074 simd_arg_free(&seq_arg);
7075 return NULL((void*)0);
7076}
7077
7078#line 112
7079static PyObject *
7080simd__intrin_loadn_tillz_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
7081{
7082 simd_arg seq_arg = {.dtype = simd_data_qf64};
7083 simd_arg stride_arg = {.dtype = simd_data_s64};
7084#if 1
7085 simd_arg nlane_arg = {.dtype = simd_data_u32};
7086#endif // till
7087#if 0
7088 simd_arg fill_arg = {.dtype = simd_data_f64};
7089#endif
7090 if (!PyArg_ParseTuple(
7091 args, "O&O&O&:loadn_tillz_f64",
7092 simd_arg_converter, &seq_arg,
7093 simd_arg_converter, &stride_arg
7094#if 1
7095 ,simd_arg_converter, &nlane_arg
7096#endif
7097#if 0
7098 ,simd_arg_converter, &fill_arg
7099#endif
7100 )) {
7101 return NULL((void*)0);
7102 }
7103 npyv_lanetype_f64 *seq_ptr = seq_arg.data.qf64;
7104 npy_intp stride = (npy_intp)stride_arg.data.s64;
7105 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
7106 Py_ssize_t min_seq_len = stride * npyv_nlanes_f642;
7107 if (stride < 0) {
7108 seq_ptr += cur_seq_len -1;
7109 min_seq_len = -min_seq_len;
7110 }
7111 if (cur_seq_len < min_seq_len) {
7112 PyErr_Format(PyExc_ValueError,
7113 "loadn_tillz_f64(), according to provided stride %d, the "
7114 "minimum acceptable size of the required sequence is %d, given(%d)",
7115 stride, min_seq_len, cur_seq_len
7116 );
7117 goto err;
7118 }
7119 npyv_f64 rvec = npyv_loadn_tillz_f64(
7120 seq_ptr, stride
7121 #if 1
7122 , nlane_arg.data.u32
7123 #endif
7124 #if 0
7125 , fill_arg.data.f64
7126 #endif
7127 );
7128 simd_arg ret = {
7129 .dtype = simd_data_vf64, .data = {.vf64=rvec}
7130 };
7131 simd_arg_free(&seq_arg);
7132 return simd_arg_to_obj(&ret);
7133err:
7134 simd_arg_free(&seq_arg);
7135 return NULL((void*)0);
7136}
7137
7138
7139// Non-contiguous Store
7140#line 178
7141static PyObject *
7142simd__intrin_storen_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
7143{
7144 simd_arg seq_arg = {.dtype = simd_data_qf64};
7145 simd_arg stride_arg = {.dtype = simd_data_s64};
7146 simd_arg vec_arg = {.dtype = simd_data_vf64};
7147#if 0
7148 simd_arg nlane_arg = {.dtype = simd_data_u32};
7149#endif
7150 if (!PyArg_ParseTuple(
7151 args, "O&O&O&:storen_f64",
7152 simd_arg_converter, &seq_arg,
7153 simd_arg_converter, &stride_arg
7154#if 0
7155 ,simd_arg_converter, &nlane_arg
7156#endif
7157 ,simd_arg_converter, &vec_arg
7158 )) {
7159 return NULL((void*)0);
7160 }
7161 npyv_lanetype_f64 *seq_ptr = seq_arg.data.qf64;
7162 npy_intp stride = (npy_intp)stride_arg.data.s64;
7163 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
7164 Py_ssize_t min_seq_len = stride * npyv_nlanes_f642;
7165 if (stride < 0) {
7166 seq_ptr += cur_seq_len -1;
7167 min_seq_len = -min_seq_len;
7168 }
7169 // overflow guard
7170 if (cur_seq_len < min_seq_len) {
7171 PyErr_Format(PyExc_ValueError,
7172 "storen_f64(), according to provided stride %d, the"
7173 "minimum acceptable size of the required sequence is %d, given(%d)",
7174 stride, min_seq_len, cur_seq_len
7175 );
7176 goto err;
7177 }
7178 npyv_storen_f64(
7179 seq_ptr, stride
7180 #if 0
7181 ,nlane_arg.data.u32
7182 #endif
7183 ,vec_arg.data.vf64
7184 );
7185 // write-back
7186 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf64, simd_data_qf64)) {
7187 goto err;
7188 }
7189 simd_arg_free(&seq_arg);
7190 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
7191err:
7192 simd_arg_free(&seq_arg);
7193 return NULL((void*)0);
7194}
7195
7196#line 178
7197static PyObject *
7198simd__intrin_storen_till_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
7199{
7200 simd_arg seq_arg = {.dtype = simd_data_qf64};
7201 simd_arg stride_arg = {.dtype = simd_data_s64};
7202 simd_arg vec_arg = {.dtype = simd_data_vf64};
7203#if 1
7204 simd_arg nlane_arg = {.dtype = simd_data_u32};
7205#endif
7206 if (!PyArg_ParseTuple(
7207 args, "O&O&O&O&:storen_f64",
7208 simd_arg_converter, &seq_arg,
7209 simd_arg_converter, &stride_arg
7210#if 1
7211 ,simd_arg_converter, &nlane_arg
7212#endif
7213 ,simd_arg_converter, &vec_arg
7214 )) {
7215 return NULL((void*)0);
7216 }
7217 npyv_lanetype_f64 *seq_ptr = seq_arg.data.qf64;
7218 npy_intp stride = (npy_intp)stride_arg.data.s64;
7219 Py_ssize_t cur_seq_len = simd_sequence_len(seq_ptr);
7220 Py_ssize_t min_seq_len = stride * npyv_nlanes_f642;
7221 if (stride < 0) {
7222 seq_ptr += cur_seq_len -1;
7223 min_seq_len = -min_seq_len;
7224 }
7225 // overflow guard
7226 if (cur_seq_len < min_seq_len) {
7227 PyErr_Format(PyExc_ValueError,
7228 "storen_till_f64(), according to provided stride %d, the"
7229 "minimum acceptable size of the required sequence is %d, given(%d)",
7230 stride, min_seq_len, cur_seq_len
7231 );
7232 goto err;
7233 }
7234 npyv_storen_till_f64(
7235 seq_ptr, stride
7236 #if 1
7237 ,nlane_arg.data.u32
7238 #endif
7239 ,vec_arg.data.vf64
7240 );
7241 // write-back
7242 if (simd_sequence_fill_iterable(seq_arg.obj, seq_arg.data.qf64, simd_data_qf64)) {
7243 goto err;
7244 }
7245 simd_arg_free(&seq_arg);
7246 Py_RETURN_NONEreturn _Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&
_Py_NoneStruct)
;
7247err:
7248 simd_arg_free(&seq_arg);
7249 return NULL((void*)0);
7250}
7251
7252#endif // 1
7253
7254/***************************
7255 * Misc
7256 ***************************/
7257SIMD_IMPL_INTRIN_0(zero_f64, vf64)static PyObject *simd__intrin_zero_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "zero_f64") ) return ((void*)0); simd_arg a = { .
dtype = simd_data_vf64, .data = {.vf64 = _mm_setzero_pd()}, }
; return simd_arg_to_obj(&a); }
7258SIMD_IMPL_INTRIN_1(setall_f64, vf64, f64)static PyObject *simd__intrin_setall_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_f64}; if (!PyArg_ParseTuple( args, "O&:"
"setall_f64", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vf64 = _mm_set1_pd( arg.data.f64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vf64
}; return simd_arg_to_obj(&ret); }
7259SIMD_IMPL_INTRIN_3(select_f64, vf64, vb64, vf64, vf64)static PyObject *simd__intrin_select_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; simd_arg arg3 = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""select_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf64 = npyv_select_f64
( arg1.data.vb64, arg2.data.vf64, arg3.data.vf64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7260
7261#line 246
7262#if 1
7263SIMD_IMPL_INTRIN_1(reinterpret_u8_f64, vu8, vf64)static PyObject *simd__intrin_reinterpret_u8_f64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u8_f64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu8 = _mm_castpd_si128
( arg.data.vf64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vu8 }; return simd_arg_to_obj
(&ret); }
7264#endif // simd_sup2
7265
7266#line 246
7267#if 1
7268SIMD_IMPL_INTRIN_1(reinterpret_s8_f64, vs8, vf64)static PyObject *simd__intrin_reinterpret_s8_f64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s8_f64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs8 = _mm_castpd_si128
( arg.data.vf64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vs8 }; return simd_arg_to_obj
(&ret); }
7269#endif // simd_sup2
7270
7271#line 246
7272#if 1
7273SIMD_IMPL_INTRIN_1(reinterpret_u16_f64, vu16, vf64)static PyObject *simd__intrin_reinterpret_u16_f64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u16_f64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu16 = _mm_castpd_si128
( arg.data.vf64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vu16 }; return simd_arg_to_obj
(&ret); }
7274#endif // simd_sup2
7275
7276#line 246
7277#if 1
7278SIMD_IMPL_INTRIN_1(reinterpret_s16_f64, vs16, vf64)static PyObject *simd__intrin_reinterpret_s16_f64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s16_f64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs16 = _mm_castpd_si128
( arg.data.vf64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vs16 }; return simd_arg_to_obj
(&ret); }
7279#endif // simd_sup2
7280
7281#line 246
7282#if 1
7283SIMD_IMPL_INTRIN_1(reinterpret_u32_f64, vu32, vf64)static PyObject *simd__intrin_reinterpret_u32_f64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u32_f64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu32 = _mm_castpd_si128
( arg.data.vf64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vu32 }; return simd_arg_to_obj
(&ret); }
7284#endif // simd_sup2
7285
7286#line 246
7287#if 1
7288SIMD_IMPL_INTRIN_1(reinterpret_s32_f64, vs32, vf64)static PyObject *simd__intrin_reinterpret_s32_f64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s32_f64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs32 = _mm_castpd_si128
( arg.data.vf64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vs32 }; return simd_arg_to_obj
(&ret); }
7289#endif // simd_sup2
7290
7291#line 246
7292#if 1
7293SIMD_IMPL_INTRIN_1(reinterpret_u64_f64, vu64, vf64)static PyObject *simd__intrin_reinterpret_u64_f64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_u64_f64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vu64 = _mm_castpd_si128
( arg.data.vf64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vu64 }; return simd_arg_to_obj
(&ret); }
7294#endif // simd_sup2
7295
7296#line 246
7297#if 1
7298SIMD_IMPL_INTRIN_1(reinterpret_s64_f64, vs64, vf64)static PyObject *simd__intrin_reinterpret_s64_f64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_s64_f64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vs64 = _mm_castpd_si128
( arg.data.vf64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vs64 }; return simd_arg_to_obj
(&ret); }
7299#endif // simd_sup2
7300
7301#line 246
7302#if 1
7303SIMD_IMPL_INTRIN_1(reinterpret_f32_f64, vf32, vf64)static PyObject *simd__intrin_reinterpret_f32_f64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f32_f64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf32 = _mm_castpd_ps
( arg.data.vf64 )}; simd_arg_free(&arg); simd_arg ret = {
.data = data, .dtype = simd_data_vf32 }; return simd_arg_to_obj
(&ret); }
7304#endif // simd_sup2
7305
7306#line 246
7307#if NPY_SIMD_F641
7308SIMD_IMPL_INTRIN_1(reinterpret_f64_f64, vf64, vf64)static PyObject *simd__intrin_reinterpret_f64_f64 (PyObject* (
__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject
*args) { simd_arg arg = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&:""reinterpret_f64_f64", simd_arg_converter, &
arg )) return ((void*)0); simd_data data = {.vf64 = arg.data.
vf64}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vf64 }; return simd_arg_to_obj(&ret)
; }
7309#endif // simd_sup2
7310
7311
7312/**
7313 * special definition due to the nature of intrinsics
7314 * npyv_setf_f64 and npy_set_f64.
7315*/
7316#line 258
7317static PyObject *
7318simd__intrin_setf_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
7319{
7320 npyv_lanetype_f64 *data = simd_sequence_from_iterable(args, simd_data_qf64, npyv_nlanes_f642);
7321 if (data == NULL((void*)0)) {
7322 return NULL((void*)0);
7323 }
7324 simd_data r = {.vf64 = npyv_setf_f64(npyv__setr_pd((double)(data[1]), (double)(data[2]))
7325 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_pd((double)(data[1]), (double)(data[2]))
7326 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_pd((double)(data[1]), (double)(data[2]))
7327 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_pd((double)(data[1]), (double)(data[2]))
7328 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_pd((double)(data[1]), (double)(data[2]))
7329 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_pd((double)(data[1]), (double)(data[2]))
7330 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_pd((double)(data[1]), (double)(data[2]))
7331 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_pd((double)(data[1]), (double)(data[2]))
7332 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_pd((double)(data[1]), (double)(data[2]))
7333 data[64] // for setfnpyv__setr_pd((double)(data[1]), (double)(data[2]))
7334 )npyv__setr_pd((double)(data[1]), (double)(data[2]))};
7335 simd_sequence_free(data);
7336 return (PyObject*)PySIMDVector_FromData(r, simd_data_vf64);
7337}
7338
7339#line 258
7340static PyObject *
7341simd__intrin_set_f64(PyObject* NPY_UNUSED(self)(__NPY_UNUSED_TAGGEDself) __attribute__ ((__unused__)), PyObject *args)
7342{
7343 npyv_lanetype_f64 *data = simd_sequence_from_iterable(args, simd_data_qf64, npyv_nlanes_f642);
1
Calling 'simd_sequence_from_iterable'
7344 if (data == NULL((void*)0)) {
7345 return NULL((void*)0);
7346 }
7347 simd_data r = {.vf64 = npyv_set_f64(npyv__setr_pd((double)(data[0]), (double)(data[1]))
7348 data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7],npyv__setr_pd((double)(data[0]), (double)(data[1]))
7349 data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15],npyv__setr_pd((double)(data[0]), (double)(data[1]))
7350 data[16], data[17], data[18], data[19], data[20], data[21], data[22], data[23],npyv__setr_pd((double)(data[0]), (double)(data[1]))
7351 data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31],npyv__setr_pd((double)(data[0]), (double)(data[1]))
7352 data[32], data[33], data[34], data[35], data[36], data[37], data[38], data[39],npyv__setr_pd((double)(data[0]), (double)(data[1]))
7353 data[40], data[41], data[42], data[43], data[44], data[45], data[46], data[47],npyv__setr_pd((double)(data[0]), (double)(data[1]))
7354 data[48], data[49], data[50], data[51], data[52], data[53], data[54], data[55],npyv__setr_pd((double)(data[0]), (double)(data[1]))
7355 data[56], data[57], data[58], data[59], data[60], data[61], data[62], data[63],npyv__setr_pd((double)(data[0]), (double)(data[1]))
7356 data[64] // for setfnpyv__setr_pd((double)(data[0]), (double)(data[1]))
7357 )npyv__setr_pd((double)(data[0]), (double)(data[1]))};
7358 simd_sequence_free(data);
7359 return (PyObject*)PySIMDVector_FromData(r, simd_data_vf64);
7360}
7361
7362
7363/***************************
7364 * Reorder
7365 ***************************/
7366#line 287
7367SIMD_IMPL_INTRIN_2(combinel_f64, vf64, vf64, vf64)static PyObject *simd__intrin_combinel_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""combinel_f64"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vf64 = _mm_unpacklo_pd
( arg1.data.vf64, arg2.data.vf64 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vf64 }; return simd_arg_to_obj(&ret); }
7368
7369#line 287
7370SIMD_IMPL_INTRIN_2(combineh_f64, vf64, vf64, vf64)static PyObject *simd__intrin_combineh_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""combineh_f64"
, simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vf64 = _mm_unpackhi_pd
( arg1.data.vf64, arg2.data.vf64 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vf64 }; return simd_arg_to_obj(&ret); }
7371
7372
7373#line 293
7374SIMD_IMPL_INTRIN_2(combine_f64, vf64x2, vf64, vf64)static PyObject *simd__intrin_combine_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""combine_f64",
simd_arg_converter, &arg1, simd_arg_converter, &arg2
)) return ((void*)0); simd_data data = {.vf64x2 = npyv_combine_f64
( arg1.data.vf64, arg2.data.vf64 )}; simd_arg_free(&arg1)
; simd_arg_free(&arg2); simd_arg ret = { .data = data, .dtype
= simd_data_vf64x2 }; return simd_arg_to_obj(&ret); }
7375
7376#line 293
7377SIMD_IMPL_INTRIN_2(zip_f64, vf64x2, vf64, vf64)static PyObject *simd__intrin_zip_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""zip_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64x2 = npyv_zip_f64( arg1.data.vf64,
arg2.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vf64x2
}; return simd_arg_to_obj(&ret); }
7378
7379
7380#if 0
7381SIMD_IMPL_INTRIN_1(rev64_f64, vf64, vf64)static PyObject *simd__intrin_rev64_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&:"
"rev64_f64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vf64 = npyv_rev64_f64( arg.data.vf64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vf64 }; return simd_arg_to_obj(&ret); }
7382#endif
7383
/***************************
 * Operators
 ***************************/
// Bit shifts are meaningless for floating-point lanes, so the template's
// shl_imm count is 0 here and the whole group is compiled out.
#if 0 > 0
SIMD_IMPL_INTRIN_2(shl_f64, vf64, vf64, u8)
SIMD_IMPL_INTRIN_2(shr_f64, vf64, vf64, u8)
// immediate constant
SIMD_IMPL_INTRIN_2IMM(shli_f64, vf64, vf64, 0)
SIMD_IMPL_INTRIN_2IMM(shri_f64, vf64, vf64, 0)
#endif // shl_imm
7394
7395#line 314
7396SIMD_IMPL_INTRIN_2(and_f64, vf64, vf64, vf64)static PyObject *simd__intrin_and_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""and_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = _mm_and_pd( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7397
7398#line 314
7399SIMD_IMPL_INTRIN_2(or_f64, vf64, vf64, vf64)static PyObject *simd__intrin_or_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""or_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = _mm_or_pd( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7400
7401#line 314
7402SIMD_IMPL_INTRIN_2(xor_f64, vf64, vf64, vf64)static PyObject *simd__intrin_xor_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = _mm_xor_pd( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7403
7404
7405SIMD_IMPL_INTRIN_1(not_f64, vf64, vf64)static PyObject *simd__intrin_not_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&:"
"not_f64", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vf64 = _mm_xor_pd(arg.data.vf64, _mm_castsi128_pd
(_mm_set1_epi32(-1)))}; simd_arg_free(&arg); simd_arg ret
= { .data = data, .dtype = simd_data_vf64 }; return simd_arg_to_obj
(&ret); }
7406
7407#line 322
7408SIMD_IMPL_INTRIN_2(cmpeq_f64, vb64, vf64, vf64)static PyObject *simd__intrin_cmpeq_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpeq_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_castpd_si128(_mm_cmpeq_pd(arg1
.data.vf64, arg2.data.vf64))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
7409
7410#line 322
7411SIMD_IMPL_INTRIN_2(cmpneq_f64, vb64, vf64, vf64)static PyObject *simd__intrin_cmpneq_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpneq_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_castpd_si128(_mm_cmpneq_pd(
arg1.data.vf64, arg2.data.vf64))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
7412
7413#line 322
7414SIMD_IMPL_INTRIN_2(cmpgt_f64, vb64, vf64, vf64)static PyObject *simd__intrin_cmpgt_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpgt_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_castpd_si128(_mm_cmpgt_pd(arg1
.data.vf64, arg2.data.vf64))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
7415
7416#line 322
7417SIMD_IMPL_INTRIN_2(cmpge_f64, vb64, vf64, vf64)static PyObject *simd__intrin_cmpge_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmpge_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_castpd_si128(_mm_cmpge_pd(arg1
.data.vf64, arg2.data.vf64))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
7418
7419#line 322
7420SIMD_IMPL_INTRIN_2(cmplt_f64, vb64, vf64, vf64)static PyObject *simd__intrin_cmplt_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmplt_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_castpd_si128(_mm_cmplt_pd(arg1
.data.vf64, arg2.data.vf64))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
7421
7422#line 322
7423SIMD_IMPL_INTRIN_2(cmple_f64, vb64, vf64, vf64)static PyObject *simd__intrin_cmple_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""cmple_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_castpd_si128(_mm_cmple_pd(arg1
.data.vf64, arg2.data.vf64))}; simd_arg_free(&arg1); simd_arg_free
(&arg2); simd_arg ret = { .data = data, .dtype = simd_data_vb64
}; return simd_arg_to_obj(&ret); }
7424
7425
7426/***************************
7427 * Conversion
7428 ***************************/
7429SIMD_IMPL_INTRIN_1(cvt_f64_b64, vf64, vb64)static PyObject *simd__intrin_cvt_f64_b64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb64}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_f64_b64", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vf64 = _mm_castsi128_pd( arg.data.vb64
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vf64 }; return simd_arg_to_obj(&ret); }
7430SIMD_IMPL_INTRIN_1(cvt_b64_f64, vb64, vf64)static PyObject *simd__intrin_cvt_b64_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&:"
"cvt_b64_f64", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vb64 = _mm_castpd_si128( arg.data.vf64
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vb64 }; return simd_arg_to_obj(&ret); }
7431#if 0
7432SIMD_IMPL_INTRIN_1(expand_f64_f64, vf64x2, vf64)static PyObject *simd__intrin_expand_f64_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&:"
"expand_f64_f64", simd_arg_converter, &arg )) return ((void
*)0); simd_data data = {.vf64x2 = npyv_expand_f64_f64( arg.data
.vf64 )}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vf64x2 }; return simd_arg_to_obj(&ret
); }
7433#endif // expand_sup
7434/***************************
7435 * Arithmetic
7436 ***************************/
7437#line 339
7438SIMD_IMPL_INTRIN_2(add_f64, vf64, vf64, vf64)static PyObject *simd__intrin_add_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""add_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = _mm_add_pd( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7439
7440#line 339
7441SIMD_IMPL_INTRIN_2(sub_f64, vf64, vf64, vf64)static PyObject *simd__intrin_sub_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""sub_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = _mm_sub_pd( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7442
7443
7444#if 0
7445#line 346
7446SIMD_IMPL_INTRIN_2(adds_f64, vf64, vf64, vf64)static PyObject *simd__intrin_adds_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""adds_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = npyv_adds_f64( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7447
7448#line 346
7449SIMD_IMPL_INTRIN_2(subs_f64, vf64, vf64, vf64)static PyObject *simd__intrin_subs_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""subs_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = npyv_subs_f64( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7450
7451#endif // sat_sup
7452
7453#if 1
7454SIMD_IMPL_INTRIN_2(mul_f64, vf64, vf64, vf64)static PyObject *simd__intrin_mul_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""mul_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = _mm_mul_pd( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7455#endif // mul_sup
7456
7457#if 1
7458SIMD_IMPL_INTRIN_2(div_f64, vf64, vf64, vf64)static PyObject *simd__intrin_div_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""div_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = _mm_div_pd( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7459#endif // div_sup
7460
7461#if 0
7462SIMD_IMPL_INTRIN_1(divisor_f64, vf64x3, f64)static PyObject *simd__intrin_divisor_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_f64}; if (!PyArg_ParseTuple( args, "O&:"
"divisor_f64", simd_arg_converter, &arg )) return ((void*
)0); simd_data data = {.vf64x3 = npyv_divisor_f64( arg.data.f64
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vf64x3 }; return simd_arg_to_obj(&ret);
}
7463SIMD_IMPL_INTRIN_2(divc_f64, vf64, vf64, vf64x3)static PyObject *simd__intrin_divc_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64x3
}; if (!PyArg_ParseTuple( args, "O&O&:""divc_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = npyv_divc_f64( arg1.data.vf64, arg2
.data.vf64x3 )}; simd_arg_free(&arg1); simd_arg_free(&
arg2); simd_arg ret = { .data = data, .dtype = simd_data_vf64
}; return simd_arg_to_obj(&ret); }
7464#endif // intdiv_sup
7465
7466#if 1
7467#line 367
7468SIMD_IMPL_INTRIN_3(muladd_f64, vf64, vf64, vf64, vf64)static PyObject *simd__intrin_muladd_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; simd_arg arg3 = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""muladd_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf64 = npyv_muladd_f64
( arg1.data.vf64, arg2.data.vf64, arg3.data.vf64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7469
7470#line 367
7471SIMD_IMPL_INTRIN_3(mulsub_f64, vf64, vf64, vf64, vf64)static PyObject *simd__intrin_mulsub_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; simd_arg arg3 = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""mulsub_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf64 = npyv_mulsub_f64
( arg1.data.vf64, arg2.data.vf64, arg3.data.vf64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7472
7473#line 367
7474SIMD_IMPL_INTRIN_3(nmuladd_f64, vf64, vf64, vf64, vf64)static PyObject *simd__intrin_nmuladd_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; simd_arg arg3 = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmuladd_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf64 = npyv_nmuladd_f64
( arg1.data.vf64, arg2.data.vf64, arg3.data.vf64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7475
7476#line 367
7477SIMD_IMPL_INTRIN_3(nmulsub_f64, vf64, vf64, vf64, vf64)static PyObject *simd__intrin_nmulsub_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; simd_arg arg3 = {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple
( args, "O&O&O&:""nmulsub_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2, simd_arg_converter
, &arg3 )) return ((void*)0); simd_data data = {.vf64 = npyv_nmulsub_f64
( arg1.data.vf64, arg2.data.vf64, arg3.data.vf64 )}; simd_arg_free
(&arg1); simd_arg_free(&arg2); simd_arg_free(&arg3
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7478
7479#endif // fused_sup
7480
7481#if 1
7482SIMD_IMPL_INTRIN_1(sum_f64, f64, vf64)static PyObject *simd__intrin_sum_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&:"
"sum_f64", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.f64 = npyv_sum_f64( arg.data.vf64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_f64
}; return simd_arg_to_obj(&ret); }
7483#endif // sum_sup
7484
7485#if 0
7486SIMD_IMPL_INTRIN_1(sumup_f64, f64, vf64)static PyObject *simd__intrin_sumup_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&:"
"sumup_f64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.f64 = npyv_sumup_f64( arg.data.vf64 )};
simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_f64 }; return simd_arg_to_obj(&ret); }
7487#endif // sumup_sup
7488
7489/***************************
7490 * Math
7491 ***************************/
7492#if 1
7493#line 386
7494SIMD_IMPL_INTRIN_1(sqrt_f64, vf64, vf64)static PyObject *simd__intrin_sqrt_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&:"
"sqrt_f64", simd_arg_converter, &arg )) return ((void*)0)
; simd_data data = {.vf64 = _mm_sqrt_pd( arg.data.vf64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vf64
}; return simd_arg_to_obj(&ret); }
7495
7496#line 386
7497SIMD_IMPL_INTRIN_1(recip_f64, vf64, vf64)static PyObject *simd__intrin_recip_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&:"
"recip_f64", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.vf64 = npyv_recip_f64( arg.data.vf64 )}
; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vf64 }; return simd_arg_to_obj(&ret); }
7498
7499#line 386
7500SIMD_IMPL_INTRIN_1(abs_f64, vf64, vf64)static PyObject *simd__intrin_abs_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&:"
"abs_f64", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vf64 = npyv_abs_f64( arg.data.vf64 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_vf64
}; return simd_arg_to_obj(&ret); }
7501
7502#line 386
7503SIMD_IMPL_INTRIN_1(square_f64, vf64, vf64)static PyObject *simd__intrin_square_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&:"
"square_f64", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vf64 = npyv_square_f64( arg.data.vf64 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vf64 }; return simd_arg_to_obj(&ret); }
7504
7505#endif
7506
7507#line 393
7508SIMD_IMPL_INTRIN_2(max_f64, vf64, vf64, vf64)static PyObject *simd__intrin_max_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""max_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = _mm_max_pd( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7509
7510#line 393
7511SIMD_IMPL_INTRIN_2(min_f64, vf64, vf64, vf64)static PyObject *simd__intrin_min_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""min_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = _mm_min_pd( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7512
7513
7514#if 1
7515#line 400
7516SIMD_IMPL_INTRIN_2(maxp_f64, vf64, vf64, vf64)static PyObject *simd__intrin_maxp_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""maxp_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = npyv_maxp_f64( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7517
7518#line 400
7519SIMD_IMPL_INTRIN_2(minp_f64, vf64, vf64, vf64)static PyObject *simd__intrin_minp_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vf64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; if (!PyArg_ParseTuple( args, "O&O&:""minp_f64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vf64 = npyv_minp_f64( arg1.data.vf64, arg2
.data.vf64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vf64 }; return
simd_arg_to_obj(&ret); }
7520
7521#endif
7522
7523/***************************
7524 * Mask operations
7525 ***************************/
7526#line 410
7527 SIMD_IMPL_INTRIN_4(ifadd_f64, vf64, vb64, vf64, vf64, vf64)static PyObject *simd__intrin_ifadd_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; simd_arg arg3 = {.dtype = simd_data_vf64}; simd_arg arg4 =
{.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifadd_f64", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vf64 = npyv_ifadd_f64
( arg1.data.vb64, arg2.data.vf64, arg3.data.vf64, arg4.data.vf64
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vf64 }; return simd_arg_to_obj(&
ret); }
7528
7529#line 410
7530 SIMD_IMPL_INTRIN_4(ifsub_f64, vf64, vb64, vf64, vf64, vf64)static PyObject *simd__intrin_ifsub_f64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vf64
}; simd_arg arg3 = {.dtype = simd_data_vf64}; simd_arg arg4 =
{.dtype = simd_data_vf64}; if (!PyArg_ParseTuple( args, "O&O&O&O&:"
"ifsub_f64", simd_arg_converter, &arg1, simd_arg_converter
, &arg2, simd_arg_converter, &arg3, simd_arg_converter
, &arg4 )) return ((void*)0); simd_data data = {.vf64 = npyv_ifsub_f64
( arg1.data.vb64, arg2.data.vf64, arg3.data.vf64, arg4.data.vf64
)}; simd_arg_free(&arg1); simd_arg_free(&arg2); simd_arg_free
(&arg3); simd_arg_free(&arg4); simd_arg ret = { .data
= data, .dtype = simd_data_vf64 }; return simd_arg_to_obj(&
ret); }
7531
7532
7533#endif // simd_sup
7534
7535/*************************************************************************
7536 * Variant
7537 ************************************************************************/
7538SIMD_IMPL_INTRIN_0N(cleanup)static PyObject *simd__intrin_cleanup (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { if (!PyArg_ParseTuple
( args, ":" "cleanup") ) return ((void*)0); ((void)0); return
_Py_INCREF(((PyObject*)((&_Py_NoneStruct)))), (&_Py_NoneStruct
); }
7539
7540/*************************************************************************
7541 * A special section for f32/f64 intrinsics outside the main repeater
7542 ************************************************************************/
7543/***************************
7544 * Operators
7545 ***************************/
7546// check special cases
7547SIMD_IMPL_INTRIN_1(notnan_f32, vb32, vf32)static PyObject *simd__intrin_notnan_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&:"
"notnan_f32", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.vb32 = npyv_notnan_f32( arg.data.vf32 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_vb32 }; return simd_arg_to_obj(&ret); }
#if NPY_SIMD_F64
/*
 * Python wrapper for npyv_notnan_f64 (SIMD_IMPL_INTRIN_1 expansion):
 * vf64 in, vb64 mask out. Only built when double-precision SIMD is
 * supported on the target.
 */
static PyObject *
simd__intrin_notnan_f64(PyObject *self __attribute__((__unused__)), PyObject *args)
{
    simd_arg in = {.dtype = simd_data_vf64};
    if (!PyArg_ParseTuple(args, "O&:notnan_f64", simd_arg_converter, &in)) {
        return NULL;
    }
    simd_data out = {.vb64 = npyv_notnan_f64(in.data.vf64)};
    simd_arg_free(&in);
    simd_arg res = {.data = out, .dtype = simd_data_vb64};
    return simd_arg_to_obj(&res);
}
#endif
7551/***************************
7552 * Conversions
7553 ***************************/
7554// round to nearest integer (assume even)
7555SIMD_IMPL_INTRIN_1(round_s32_f32, vs32, vf32)static PyObject *simd__intrin_round_s32_f32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vf32}; if (!PyArg_ParseTuple( args, "O&:"
"round_s32_f32", simd_arg_converter, &arg )) return ((void
*)0); simd_data data = {.vs32 = _mm_cvtps_epi32( arg.data.vf32
)}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vs32 }; return simd_arg_to_obj(&ret); }
#if NPY_SIMD_F64
/*
 * Python wrapper for npyv_round_s32_f64 (SIMD_IMPL_INTRIN_2 expansion):
 * rounds two vf64 vectors into one vs32 vector. Only built when
 * double-precision SIMD is supported.
 */
static PyObject *
simd__intrin_round_s32_f64(PyObject *self __attribute__((__unused__)), PyObject *args)
{
    simd_arg lo = {.dtype = simd_data_vf64};
    simd_arg hi = {.dtype = simd_data_vf64};
    if (!PyArg_ParseTuple(args, "O&O&:round_s32_f64",
                          simd_arg_converter, &lo,
                          simd_arg_converter, &hi)) {
        return NULL;
    }
    simd_data out = {.vs32 = npyv_round_s32_f64(lo.data.vf64, hi.data.vf64)};
    simd_arg_free(&lo);
    simd_arg_free(&hi);
    simd_arg res = {.data = out, .dtype = simd_data_vs32};
    return simd_arg_to_obj(&res);
}
#endif
7559
7560/*************************************************************************
7561 * A special section for boolean intrinsics outside the main repeater
7562 ************************************************************************/
7563/***************************
7564 * Operators
7565 ***************************/
7566// Logical
7567#line 450
7568SIMD_IMPL_INTRIN_2(and_b8, vb8, vb8, vb8)static PyObject *simd__intrin_and_b8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb8}; simd_arg arg2 = {.dtype = simd_data_vb8
}; if (!PyArg_ParseTuple( args, "O&O&:""and_b8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_and_si128( arg1.data.vb8, arg2
.data.vb8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
7569SIMD_IMPL_INTRIN_2(or_b8, vb8, vb8, vb8)static PyObject *simd__intrin_or_b8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb8}; simd_arg arg2 = {.dtype = simd_data_vb8
}; if (!PyArg_ParseTuple( args, "O&O&:""or_b8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_or_si128( arg1.data.vb8, arg2
.data.vb8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
7570SIMD_IMPL_INTRIN_2(xor_b8, vb8, vb8, vb8)static PyObject *simd__intrin_xor_b8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb8}; simd_arg arg2 = {.dtype = simd_data_vb8
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_b8", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb8 = _mm_xor_si128( arg1.data.vb8, arg2
.data.vb8 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb8 }; return
simd_arg_to_obj(&ret); }
7571SIMD_IMPL_INTRIN_1(not_b8, vb8, vb8)static PyObject *simd__intrin_not_b8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb8}; if (!PyArg_ParseTuple( args, "O&:"
"not_b8", simd_arg_converter, &arg )) return ((void*)0); simd_data
data = {.vb8 = _mm_xor_si128(arg.data.vb8, _mm_set1_epi32(-1
))}; simd_arg_free(&arg); simd_arg ret = { .data = data, .
dtype = simd_data_vb8 }; return simd_arg_to_obj(&ret); }
7572
7573#line 450
7574SIMD_IMPL_INTRIN_2(and_b16, vb16, vb16, vb16)static PyObject *simd__intrin_and_b16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb16}; simd_arg arg2 = {.dtype = simd_data_vb16
}; if (!PyArg_ParseTuple( args, "O&O&:""and_b16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb16 = _mm_and_si128( arg1.data.vb16, arg2
.data.vb16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb16 }; return
simd_arg_to_obj(&ret); }
7575SIMD_IMPL_INTRIN_2(or_b16, vb16, vb16, vb16)static PyObject *simd__intrin_or_b16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb16}; simd_arg arg2 = {.dtype = simd_data_vb16
}; if (!PyArg_ParseTuple( args, "O&O&:""or_b16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb16 = _mm_or_si128( arg1.data.vb16, arg2
.data.vb16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb16 }; return
simd_arg_to_obj(&ret); }
7576SIMD_IMPL_INTRIN_2(xor_b16, vb16, vb16, vb16)static PyObject *simd__intrin_xor_b16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb16}; simd_arg arg2 = {.dtype = simd_data_vb16
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_b16", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb16 = _mm_xor_si128( arg1.data.vb16, arg2
.data.vb16 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb16 }; return
simd_arg_to_obj(&ret); }
7577SIMD_IMPL_INTRIN_1(not_b16, vb16, vb16)static PyObject *simd__intrin_not_b16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb16}; if (!PyArg_ParseTuple( args, "O&:"
"not_b16", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vb16 = _mm_xor_si128(arg.data.vb16, _mm_set1_epi32
(-1))}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vb16 }; return simd_arg_to_obj(&ret)
; }
7578
7579#line 450
7580SIMD_IMPL_INTRIN_2(and_b32, vb32, vb32, vb32)static PyObject *simd__intrin_and_b32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vb32
}; if (!PyArg_ParseTuple( args, "O&O&:""and_b32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_and_si128( arg1.data.vb32, arg2
.data.vb32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb32 }; return
simd_arg_to_obj(&ret); }
7581SIMD_IMPL_INTRIN_2(or_b32, vb32, vb32, vb32)static PyObject *simd__intrin_or_b32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vb32
}; if (!PyArg_ParseTuple( args, "O&O&:""or_b32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_or_si128( arg1.data.vb32, arg2
.data.vb32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb32 }; return
simd_arg_to_obj(&ret); }
7582SIMD_IMPL_INTRIN_2(xor_b32, vb32, vb32, vb32)static PyObject *simd__intrin_xor_b32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb32}; simd_arg arg2 = {.dtype = simd_data_vb32
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_b32", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb32 = _mm_xor_si128( arg1.data.vb32, arg2
.data.vb32 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb32 }; return
simd_arg_to_obj(&ret); }
7583SIMD_IMPL_INTRIN_1(not_b32, vb32, vb32)static PyObject *simd__intrin_not_b32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb32}; if (!PyArg_ParseTuple( args, "O&:"
"not_b32", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vb32 = _mm_xor_si128(arg.data.vb32, _mm_set1_epi32
(-1))}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vb32 }; return simd_arg_to_obj(&ret)
; }
7584
7585#line 450
7586SIMD_IMPL_INTRIN_2(and_b64, vb64, vb64, vb64)static PyObject *simd__intrin_and_b64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vb64
}; if (!PyArg_ParseTuple( args, "O&O&:""and_b64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_and_si128( arg1.data.vb64, arg2
.data.vb64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb64 }; return
simd_arg_to_obj(&ret); }
7587SIMD_IMPL_INTRIN_2(or_b64, vb64, vb64, vb64)static PyObject *simd__intrin_or_b64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vb64
}; if (!PyArg_ParseTuple( args, "O&O&:""or_b64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_or_si128( arg1.data.vb64, arg2
.data.vb64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb64 }; return
simd_arg_to_obj(&ret); }
7588SIMD_IMPL_INTRIN_2(xor_b64, vb64, vb64, vb64)static PyObject *simd__intrin_xor_b64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg1
= {.dtype = simd_data_vb64}; simd_arg arg2 = {.dtype = simd_data_vb64
}; if (!PyArg_ParseTuple( args, "O&O&:""xor_b64", simd_arg_converter
, &arg1, simd_arg_converter, &arg2 )) return ((void*)
0); simd_data data = {.vb64 = _mm_xor_si128( arg1.data.vb64, arg2
.data.vb64 )}; simd_arg_free(&arg1); simd_arg_free(&arg2
); simd_arg ret = { .data = data, .dtype = simd_data_vb64 }; return
simd_arg_to_obj(&ret); }
7589SIMD_IMPL_INTRIN_1(not_b64, vb64, vb64)static PyObject *simd__intrin_not_b64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb64}; if (!PyArg_ParseTuple( args, "O&:"
"not_b64", simd_arg_converter, &arg )) return ((void*)0);
simd_data data = {.vb64 = _mm_xor_si128(arg.data.vb64, _mm_set1_epi32
(-1))}; simd_arg_free(&arg); simd_arg ret = { .data = data
, .dtype = simd_data_vb64 }; return simd_arg_to_obj(&ret)
; }
7590
7591/***************************
7592 * Conversions
7593 ***************************/
7594// Convert mask vector to integer bitfield
7595#line 462
7596SIMD_IMPL_INTRIN_1(tobits_b8, u64, vb8)static PyObject *simd__intrin_tobits_b8 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb8}; if (!PyArg_ParseTuple( args, "O&:"
"tobits_b8", simd_arg_converter, &arg )) return ((void*)0
); simd_data data = {.u64 = npyv_tobits_b8( arg.data.vb8 )}; simd_arg_free
(&arg); simd_arg ret = { .data = data, .dtype = simd_data_u64
}; return simd_arg_to_obj(&ret); }
7597
7598#line 462
7599SIMD_IMPL_INTRIN_1(tobits_b16, u64, vb16)static PyObject *simd__intrin_tobits_b16 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb16}; if (!PyArg_ParseTuple( args, "O&:"
"tobits_b16", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.u64 = npyv_tobits_b16( arg.data.vb16 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_u64 }; return simd_arg_to_obj(&ret); }
7600
7601#line 462
7602SIMD_IMPL_INTRIN_1(tobits_b32, u64, vb32)static PyObject *simd__intrin_tobits_b32 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb32}; if (!PyArg_ParseTuple( args, "O&:"
"tobits_b32", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.u64 = npyv_tobits_b32( arg.data.vb32 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_u64 }; return simd_arg_to_obj(&ret); }
7603
7604#line 462
7605SIMD_IMPL_INTRIN_1(tobits_b64, u64, vb64)static PyObject *simd__intrin_tobits_b64 (PyObject* (__NPY_UNUSED_TAGGEDself
) __attribute__ ((__unused__)), PyObject *args) { simd_arg arg
= {.dtype = simd_data_vb64}; if (!PyArg_ParseTuple( args, "O&:"
"tobits_b64", simd_arg_converter, &arg )) return ((void*)
0); simd_data data = {.u64 = npyv_tobits_b64( arg.data.vb64 )
}; simd_arg_free(&arg); simd_arg ret = { .data = data, .dtype
= simd_data_u64 }; return simd_arg_to_obj(&ret); }
7606
7607
7608
7609//#########################################################################
7610//## Attach module functions
7611//#########################################################################
7612static PyMethodDef simd__intrinsics_methods[] = {
7613#line 489
7614#if 1
7615
7616/***************************
7617 * Memory
7618 ***************************/
7619#line 497
7620SIMD_INTRIN_DEF(load_u8){ "load_u8", simd__intrin_load_u8, 0x0001, ((void*)0) } ,
7621
7622#line 497
7623SIMD_INTRIN_DEF(loada_u8){ "loada_u8", simd__intrin_loada_u8, 0x0001, ((void*)0) } ,
7624
7625#line 497
7626SIMD_INTRIN_DEF(loads_u8){ "loads_u8", simd__intrin_loads_u8, 0x0001, ((void*)0) } ,
7627
7628#line 497
7629SIMD_INTRIN_DEF(loadl_u8){ "loadl_u8", simd__intrin_loadl_u8, 0x0001, ((void*)0) } ,
7630
7631#line 497
7632SIMD_INTRIN_DEF(store_u8){ "store_u8", simd__intrin_store_u8, 0x0001, ((void*)0) } ,
7633
7634#line 497
7635SIMD_INTRIN_DEF(storea_u8){ "storea_u8", simd__intrin_storea_u8, 0x0001, ((void*)0) } ,
7636
7637#line 497
7638SIMD_INTRIN_DEF(stores_u8){ "stores_u8", simd__intrin_stores_u8, 0x0001, ((void*)0) } ,
7639
7640#line 497
7641SIMD_INTRIN_DEF(storel_u8){ "storel_u8", simd__intrin_storel_u8, 0x0001, ((void*)0) } ,
7642
7643#line 497
7644SIMD_INTRIN_DEF(storeh_u8){ "storeh_u8", simd__intrin_storeh_u8, 0x0001, ((void*)0) } ,
7645
7646
7647/****************************************
7648 * Non-contiguous/Partial Memory access
7649 ****************************************/
7650#if 0
7651#line 508
7652SIMD_INTRIN_DEF(load_till_u8){ "load_till_u8", simd__intrin_load_till_u8, 0x0001, ((void*)
0) } ,
7653
7654#line 508
7655SIMD_INTRIN_DEF(load_tillz_u8){ "load_tillz_u8", simd__intrin_load_tillz_u8, 0x0001, ((void
*)0) } ,
7656
7657#line 508
7658SIMD_INTRIN_DEF(loadn_u8){ "loadn_u8", simd__intrin_loadn_u8, 0x0001, ((void*)0) } ,
7659
7660#line 508
7661SIMD_INTRIN_DEF(loadn_till_u8){ "loadn_till_u8", simd__intrin_loadn_till_u8, 0x0001, ((void
*)0) } ,
7662
7663#line 508
7664SIMD_INTRIN_DEF(loadn_tillz_u8){ "loadn_tillz_u8", simd__intrin_loadn_tillz_u8, 0x0001, ((void
*)0) } ,
7665
7666#line 508
7667SIMD_INTRIN_DEF(store_till_u8){ "store_till_u8", simd__intrin_store_till_u8, 0x0001, ((void
*)0) } ,
7668
7669#line 508
7670SIMD_INTRIN_DEF(storen_u8){ "storen_u8", simd__intrin_storen_u8, 0x0001, ((void*)0) } ,
7671
7672#line 508
7673SIMD_INTRIN_DEF(storen_till_u8){ "storen_till_u8", simd__intrin_storen_till_u8, 0x0001, ((void
*)0) } ,
7674
7675#endif // ncont_sup
7676
7677/***************************
7678 * Misc
7679 ***************************/
7680#line 519
7681#if 1
7682SIMD_INTRIN_DEF(reinterpret_u8_u8){ "reinterpret_u8_u8", simd__intrin_reinterpret_u8_u8, 0x0001
, ((void*)0) } ,
7683#endif // simd_sup2
7684
7685#line 519
7686#if 1
7687SIMD_INTRIN_DEF(reinterpret_s8_u8){ "reinterpret_s8_u8", simd__intrin_reinterpret_s8_u8, 0x0001
, ((void*)0) } ,
7688#endif // simd_sup2
7689
7690#line 519
7691#if 1
7692SIMD_INTRIN_DEF(reinterpret_u16_u8){ "reinterpret_u16_u8", simd__intrin_reinterpret_u16_u8, 0x0001
, ((void*)0) } ,
7693#endif // simd_sup2
7694
7695#line 519
7696#if 1
7697SIMD_INTRIN_DEF(reinterpret_s16_u8){ "reinterpret_s16_u8", simd__intrin_reinterpret_s16_u8, 0x0001
, ((void*)0) } ,
7698#endif // simd_sup2
7699
7700#line 519
7701#if 1
7702SIMD_INTRIN_DEF(reinterpret_u32_u8){ "reinterpret_u32_u8", simd__intrin_reinterpret_u32_u8, 0x0001
, ((void*)0) } ,
7703#endif // simd_sup2
7704
7705#line 519
7706#if 1
7707SIMD_INTRIN_DEF(reinterpret_s32_u8){ "reinterpret_s32_u8", simd__intrin_reinterpret_s32_u8, 0x0001
, ((void*)0) } ,
7708#endif // simd_sup2
7709
7710#line 519
7711#if 1
7712SIMD_INTRIN_DEF(reinterpret_u64_u8){ "reinterpret_u64_u8", simd__intrin_reinterpret_u64_u8, 0x0001
, ((void*)0) } ,
7713#endif // simd_sup2
7714
7715#line 519
7716#if 1
7717SIMD_INTRIN_DEF(reinterpret_s64_u8){ "reinterpret_s64_u8", simd__intrin_reinterpret_s64_u8, 0x0001
, ((void*)0) } ,
7718#endif // simd_sup2
7719
7720#line 519
7721#if 1
7722SIMD_INTRIN_DEF(reinterpret_f32_u8){ "reinterpret_f32_u8", simd__intrin_reinterpret_f32_u8, 0x0001
, ((void*)0) } ,
7723#endif // simd_sup2
7724
7725#line 519
7726#if NPY_SIMD_F641
7727SIMD_INTRIN_DEF(reinterpret_f64_u8){ "reinterpret_f64_u8", simd__intrin_reinterpret_f64_u8, 0x0001
, ((void*)0) } ,
7728#endif // simd_sup2
7729
7730
7731#line 527
7732SIMD_INTRIN_DEF(set_u8){ "set_u8", simd__intrin_set_u8, 0x0001, ((void*)0) } ,
7733
7734#line 527
7735SIMD_INTRIN_DEF(setf_u8){ "setf_u8", simd__intrin_setf_u8, 0x0001, ((void*)0) } ,
7736
7737#line 527
7738SIMD_INTRIN_DEF(setall_u8){ "setall_u8", simd__intrin_setall_u8, 0x0001, ((void*)0) } ,
7739
7740#line 527
7741SIMD_INTRIN_DEF(zero_u8){ "zero_u8", simd__intrin_zero_u8, 0x0001, ((void*)0) } ,
7742
7743#line 527
7744SIMD_INTRIN_DEF(select_u8){ "select_u8", simd__intrin_select_u8, 0x0001, ((void*)0) } ,
7745
7746
7747/***************************
7748 * Reorder
7749 ***************************/
7750#line 536
7751SIMD_INTRIN_DEF(combinel_u8){ "combinel_u8", simd__intrin_combinel_u8, 0x0001, ((void*)0)
} ,
7752
7753#line 536
7754SIMD_INTRIN_DEF(combineh_u8){ "combineh_u8", simd__intrin_combineh_u8, 0x0001, ((void*)0)
} ,
7755
7756#line 536
7757SIMD_INTRIN_DEF(combine_u8){ "combine_u8", simd__intrin_combine_u8, 0x0001, ((void*)0) }
,
7758
7759#line 536
7760SIMD_INTRIN_DEF(zip_u8){ "zip_u8", simd__intrin_zip_u8, 0x0001, ((void*)0) } ,
7761
7762
7763#if 1
7764SIMD_INTRIN_DEF(rev64_u8){ "rev64_u8", simd__intrin_rev64_u8, 0x0001, ((void*)0) } ,
7765#endif
7766
7767/***************************
7768 * Operators
7769 ***************************/
7770#if 0 > 0
7771#line 550
7772SIMD_INTRIN_DEF(shl_u8){ "shl_u8", simd__intrin_shl_u8, 0x0001, ((void*)0) } ,
7773
7774#line 550
7775SIMD_INTRIN_DEF(shr_u8){ "shr_u8", simd__intrin_shr_u8, 0x0001, ((void*)0) } ,
7776
7777#line 550
7778SIMD_INTRIN_DEF(shli_u8){ "shli_u8", simd__intrin_shli_u8, 0x0001, ((void*)0) } ,
7779
7780#line 550
7781SIMD_INTRIN_DEF(shri_u8){ "shri_u8", simd__intrin_shri_u8, 0x0001, ((void*)0) } ,
7782
7783#endif // shl_imm
7784
7785#line 557
7786SIMD_INTRIN_DEF(and_u8){ "and_u8", simd__intrin_and_u8, 0x0001, ((void*)0) } ,
7787
7788#line 557
7789SIMD_INTRIN_DEF(or_u8){ "or_u8", simd__intrin_or_u8, 0x0001, ((void*)0) } ,
7790
7791#line 557
7792SIMD_INTRIN_DEF(xor_u8){ "xor_u8", simd__intrin_xor_u8, 0x0001, ((void*)0) } ,
7793
7794#line 557
7795SIMD_INTRIN_DEF(not_u8){ "not_u8", simd__intrin_not_u8, 0x0001, ((void*)0) } ,
7796
7797#line 557
7798SIMD_INTRIN_DEF(cmpeq_u8){ "cmpeq_u8", simd__intrin_cmpeq_u8, 0x0001, ((void*)0) } ,
7799
7800#line 557
7801SIMD_INTRIN_DEF(cmpneq_u8){ "cmpneq_u8", simd__intrin_cmpneq_u8, 0x0001, ((void*)0) } ,
7802
7803#line 557
7804SIMD_INTRIN_DEF(cmpgt_u8){ "cmpgt_u8", simd__intrin_cmpgt_u8, 0x0001, ((void*)0) } ,
7805
7806#line 557
7807SIMD_INTRIN_DEF(cmpge_u8){ "cmpge_u8", simd__intrin_cmpge_u8, 0x0001, ((void*)0) } ,
7808
7809#line 557
7810SIMD_INTRIN_DEF(cmplt_u8){ "cmplt_u8", simd__intrin_cmplt_u8, 0x0001, ((void*)0) } ,
7811
7812#line 557
7813SIMD_INTRIN_DEF(cmple_u8){ "cmple_u8", simd__intrin_cmple_u8, 0x0001, ((void*)0) } ,
7814
7815
7816/***************************
7817 * Conversion
7818 ***************************/
7819SIMD_INTRIN_DEF(cvt_u8_b8){ "cvt_u8_b8", simd__intrin_cvt_u8_b8, 0x0001, ((void*)0) } ,
7820SIMD_INTRIN_DEF(cvt_b8_u8){ "cvt_b8_u8", simd__intrin_cvt_b8_u8, 0x0001, ((void*)0) } ,
7821#if 1
7822SIMD_INTRIN_DEF(expand_u16_u8){ "expand_u16_u8", simd__intrin_expand_u16_u8, 0x0001, ((void
*)0) } ,
7823#endif // expand_sup
7824/***************************
7825 * Arithmetic
7826 ***************************/
7827#line 574
7828SIMD_INTRIN_DEF(add_u8){ "add_u8", simd__intrin_add_u8, 0x0001, ((void*)0) } ,
7829
7830#line 574
7831SIMD_INTRIN_DEF(sub_u8){ "sub_u8", simd__intrin_sub_u8, 0x0001, ((void*)0) } ,
7832
7833
7834#if 1
7835#line 581
7836SIMD_INTRIN_DEF(adds_u8){ "adds_u8", simd__intrin_adds_u8, 0x0001, ((void*)0) } ,
7837
7838#line 581
7839SIMD_INTRIN_DEF(subs_u8){ "subs_u8", simd__intrin_subs_u8, 0x0001, ((void*)0) } ,
7840
7841#endif // sat_sup
7842
7843#if 1
7844SIMD_INTRIN_DEF(mul_u8){ "mul_u8", simd__intrin_mul_u8, 0x0001, ((void*)0) } ,
7845#endif // mul_sup
7846
7847#if 0
7848SIMD_INTRIN_DEF(div_u8){ "div_u8", simd__intrin_div_u8, 0x0001, ((void*)0) } ,
7849#endif // div_sup
7850
7851#if 1
7852SIMD_INTRIN_DEF(divisor_u8){ "divisor_u8", simd__intrin_divisor_u8, 0x0001, ((void*)0) }
,
7853SIMD_INTRIN_DEF(divc_u8){ "divc_u8", simd__intrin_divc_u8, 0x0001, ((void*)0) } ,
7854#endif // intdiv_sup
7855
7856#if 0
7857#line 602
7858SIMD_INTRIN_DEF(muladd_u8){ "muladd_u8", simd__intrin_muladd_u8, 0x0001, ((void*)0) } ,
7859
7860#line 602
7861SIMD_INTRIN_DEF(mulsub_u8){ "mulsub_u8", simd__intrin_mulsub_u8, 0x0001, ((void*)0) } ,
7862
7863#line 602
7864SIMD_INTRIN_DEF(nmuladd_u8){ "nmuladd_u8", simd__intrin_nmuladd_u8, 0x0001, ((void*)0) }
,
7865
7866#line 602
7867SIMD_INTRIN_DEF(nmulsub_u8){ "nmulsub_u8", simd__intrin_nmulsub_u8, 0x0001, ((void*)0) }
,
7868
7869#endif // fused_sup
7870
7871#if 0
7872SIMD_INTRIN_DEF(sum_u8){ "sum_u8", simd__intrin_sum_u8, 0x0001, ((void*)0) } ,
7873#endif // sum_sup
7874
7875#if 1
7876SIMD_INTRIN_DEF(sumup_u8){ "sumup_u8", simd__intrin_sumup_u8, 0x0001, ((void*)0) } ,
7877#endif // sumup_sup
7878/***************************
7879 * Math
7880 ***************************/
7881#if 0
7882#line 620
7883SIMD_INTRIN_DEF(sqrt_u8){ "sqrt_u8", simd__intrin_sqrt_u8, 0x0001, ((void*)0) } ,
7884
7885#line 620
7886SIMD_INTRIN_DEF(recip_u8){ "recip_u8", simd__intrin_recip_u8, 0x0001, ((void*)0) } ,
7887
7888#line 620
7889SIMD_INTRIN_DEF(abs_u8){ "abs_u8", simd__intrin_abs_u8, 0x0001, ((void*)0) } ,
7890
7891#line 620
7892SIMD_INTRIN_DEF(square_u8){ "square_u8", simd__intrin_square_u8, 0x0001, ((void*)0) } ,
7893
7894#endif
7895
7896#line 627
7897SIMD_INTRIN_DEF(max_u8){ "max_u8", simd__intrin_max_u8, 0x0001, ((void*)0) } ,
7898
7899#line 627
7900SIMD_INTRIN_DEF(min_u8){ "min_u8", simd__intrin_min_u8, 0x0001, ((void*)0) } ,
7901
7902
7903#if 0
7904#line 634
7905SIMD_INTRIN_DEF(maxp_u8){ "maxp_u8", simd__intrin_maxp_u8, 0x0001, ((void*)0) } ,
7906
7907#line 634
7908SIMD_INTRIN_DEF(minp_u8){ "minp_u8", simd__intrin_minp_u8, 0x0001, ((void*)0) } ,
7909
7910#endif
7911
7912/***************************
7913 * Mask operations
7914 ***************************/
7915#line 644
7916 SIMD_INTRIN_DEF(ifadd_u8){ "ifadd_u8", simd__intrin_ifadd_u8, 0x0001, ((void*)0) } ,
7917
7918#line 644
7919 SIMD_INTRIN_DEF(ifsub_u8){ "ifsub_u8", simd__intrin_ifsub_u8, 0x0001, ((void*)0) } ,
7920
7921
7922#endif // simd_sup
7923
7924#line 489
7925#if 1
7926
7927/***************************
7928 * Memory
7929 ***************************/
7930#line 497
7931SIMD_INTRIN_DEF(load_s8){ "load_s8", simd__intrin_load_s8, 0x0001, ((void*)0) } ,
7932
7933#line 497
7934SIMD_INTRIN_DEF(loada_s8){ "loada_s8", simd__intrin_loada_s8, 0x0001, ((void*)0) } ,
7935
7936#line 497
7937SIMD_INTRIN_DEF(loads_s8){ "loads_s8", simd__intrin_loads_s8, 0x0001, ((void*)0) } ,
7938
7939#line 497
7940SIMD_INTRIN_DEF(loadl_s8){ "loadl_s8", simd__intrin_loadl_s8, 0x0001, ((void*)0) } ,
7941
7942#line 497
7943SIMD_INTRIN_DEF(store_s8){ "store_s8", simd__intrin_store_s8, 0x0001, ((void*)0) } ,
7944
7945#line 497
7946SIMD_INTRIN_DEF(storea_s8){ "storea_s8", simd__intrin_storea_s8, 0x0001, ((void*)0) } ,
7947
7948#line 497
7949SIMD_INTRIN_DEF(stores_s8){ "stores_s8", simd__intrin_stores_s8, 0x0001, ((void*)0) } ,
7950
7951#line 497
7952SIMD_INTRIN_DEF(storel_s8){ "storel_s8", simd__intrin_storel_s8, 0x0001, ((void*)0) } ,
7953
7954#line 497
7955SIMD_INTRIN_DEF(storeh_s8){ "storeh_s8", simd__intrin_storeh_s8, 0x0001, ((void*)0) } ,
7956
7957
7958/****************************************
7959 * Non-contiguous/Partial Memory access
7960 ****************************************/
7961#if 0
7962#line 508
7963SIMD_INTRIN_DEF(load_till_s8){ "load_till_s8", simd__intrin_load_till_s8, 0x0001, ((void*)
0) } ,
7964
7965#line 508
7966SIMD_INTRIN_DEF(load_tillz_s8){ "load_tillz_s8", simd__intrin_load_tillz_s8, 0x0001, ((void
*)0) } ,
7967
7968#line 508
7969SIMD_INTRIN_DEF(loadn_s8){ "loadn_s8", simd__intrin_loadn_s8, 0x0001, ((void*)0) } ,
7970
7971#line 508
7972SIMD_INTRIN_DEF(loadn_till_s8){ "loadn_till_s8", simd__intrin_loadn_till_s8, 0x0001, ((void
*)0) } ,
7973
7974#line 508
7975SIMD_INTRIN_DEF(loadn_tillz_s8){ "loadn_tillz_s8", simd__intrin_loadn_tillz_s8, 0x0001, ((void
*)0) } ,
7976
7977#line 508
7978SIMD_INTRIN_DEF(store_till_s8){ "store_till_s8", simd__intrin_store_till_s8, 0x0001, ((void
*)0) } ,
7979
7980#line 508
7981SIMD_INTRIN_DEF(storen_s8){ "storen_s8", simd__intrin_storen_s8, 0x0001, ((void*)0) } ,
7982
7983#line 508
7984SIMD_INTRIN_DEF(storen_till_s8){ "storen_till_s8", simd__intrin_storen_till_s8, 0x0001, ((void
*)0) } ,
7985
7986#endif // ncont_sup
7987
7988/***************************
7989 * Misc
7990 ***************************/
7991#line 519
7992#if 1
7993SIMD_INTRIN_DEF(reinterpret_u8_s8){ "reinterpret_u8_s8", simd__intrin_reinterpret_u8_s8, 0x0001
, ((void*)0) } ,
7994#endif // simd_sup2
7995
7996#line 519
7997#if 1
7998SIMD_INTRIN_DEF(reinterpret_s8_s8){ "reinterpret_s8_s8", simd__intrin_reinterpret_s8_s8, 0x0001
, ((void*)0) } ,
7999#endif // simd_sup2
8000
8001#line 519
8002#if 1
8003SIMD_INTRIN_DEF(reinterpret_u16_s8){ "reinterpret_u16_s8", simd__intrin_reinterpret_u16_s8, 0x0001
, ((void*)0) } ,
8004#endif // simd_sup2
8005
8006#line 519
8007#if 1
8008SIMD_INTRIN_DEF(reinterpret_s16_s8){ "reinterpret_s16_s8", simd__intrin_reinterpret_s16_s8, 0x0001
, ((void*)0) } ,
8009#endif // simd_sup2
8010
8011#line 519
8012#if 1
8013SIMD_INTRIN_DEF(reinterpret_u32_s8){ "reinterpret_u32_s8", simd__intrin_reinterpret_u32_s8, 0x0001
, ((void*)0) } ,
8014#endif // simd_sup2
8015
8016#line 519
8017#if 1
8018SIMD_INTRIN_DEF(reinterpret_s32_s8){ "reinterpret_s32_s8", simd__intrin_reinterpret_s32_s8, 0x0001
, ((void*)0) } ,
8019#endif // simd_sup2
8020
8021#line 519
8022#if 1
8023SIMD_INTRIN_DEF(reinterpret_u64_s8){ "reinterpret_u64_s8", simd__intrin_reinterpret_u64_s8, 0x0001
, ((void*)0) } ,
8024#endif // simd_sup2
8025
8026#line 519
8027#if 1
8028SIMD_INTRIN_DEF(reinterpret_s64_s8){ "reinterpret_s64_s8", simd__intrin_reinterpret_s64_s8, 0x0001
, ((void*)0) } ,
8029#endif // simd_sup2
8030
8031#line 519
8032#if 1
8033SIMD_INTRIN_DEF(reinterpret_f32_s8){ "reinterpret_f32_s8", simd__intrin_reinterpret_f32_s8, 0x0001
, ((void*)0) } ,
8034#endif // simd_sup2
8035
8036#line 519
8037#if NPY_SIMD_F641
8038SIMD_INTRIN_DEF(reinterpret_f64_s8){ "reinterpret_f64_s8", simd__intrin_reinterpret_f64_s8, 0x0001
, ((void*)0) } ,
8039#endif // simd_sup2
8040
8041
8042#line 527
8043SIMD_INTRIN_DEF(set_s8){ "set_s8", simd__intrin_set_s8, 0x0001, ((void*)0) } ,
8044
8045#line 527
8046SIMD_INTRIN_DEF(setf_s8){ "setf_s8", simd__intrin_setf_s8, 0x0001, ((void*)0) } ,
8047
8048#line 527
8049SIMD_INTRIN_DEF(setall_s8){ "setall_s8", simd__intrin_setall_s8, 0x0001, ((void*)0) } ,
8050
8051#line 527
8052SIMD_INTRIN_DEF(zero_s8){ "zero_s8", simd__intrin_zero_s8, 0x0001, ((void*)0) } ,
8053
8054#line 527
8055SIMD_INTRIN_DEF(select_s8){ "select_s8", simd__intrin_select_s8, 0x0001, ((void*)0) } ,
8056
8057
8058/***************************
8059 * Reorder
8060 ***************************/
8061#line 536
8062SIMD_INTRIN_DEF(combinel_s8){ "combinel_s8", simd__intrin_combinel_s8, 0x0001, ((void*)0)
} ,
8063
8064#line 536
8065SIMD_INTRIN_DEF(combineh_s8){ "combineh_s8", simd__intrin_combineh_s8, 0x0001, ((void*)0)
} ,
8066
8067#line 536
8068SIMD_INTRIN_DEF(combine_s8){ "combine_s8", simd__intrin_combine_s8, 0x0001, ((void*)0) }
,
8069
8070#line 536
8071SIMD_INTRIN_DEF(zip_s8){ "zip_s8", simd__intrin_zip_s8, 0x0001, ((void*)0) } ,
8072
8073
8074#if 1
8075SIMD_INTRIN_DEF(rev64_s8){ "rev64_s8", simd__intrin_rev64_s8, 0x0001, ((void*)0) } ,
8076#endif
8077
8078/***************************
8079 * Operators
8080 ***************************/
8081#if 0 > 0
8082#line 550
8083SIMD_INTRIN_DEF(shl_s8){ "shl_s8", simd__intrin_shl_s8, 0x0001, ((void*)0) } ,
8084
8085#line 550
8086SIMD_INTRIN_DEF(shr_s8){ "shr_s8", simd__intrin_shr_s8, 0x0001, ((void*)0) } ,
8087
8088#line 550
8089SIMD_INTRIN_DEF(shli_s8){ "shli_s8", simd__intrin_shli_s8, 0x0001, ((void*)0) } ,
8090
8091#line 550
8092SIMD_INTRIN_DEF(shri_s8){ "shri_s8", simd__intrin_shri_s8, 0x0001, ((void*)0) } ,
8093
8094#endif // shl_imm
8095
8096#line 557
8097SIMD_INTRIN_DEF(and_s8){ "and_s8", simd__intrin_and_s8, 0x0001, ((void*)0) } ,
8098
8099#line 557
8100SIMD_INTRIN_DEF(or_s8){ "or_s8", simd__intrin_or_s8, 0x0001, ((void*)0) } ,
8101
8102#line 557
8103SIMD_INTRIN_DEF(xor_s8){ "xor_s8", simd__intrin_xor_s8, 0x0001, ((void*)0) } ,
8104
8105#line 557
8106SIMD_INTRIN_DEF(not_s8){ "not_s8", simd__intrin_not_s8, 0x0001, ((void*)0) } ,
8107
8108#line 557
8109SIMD_INTRIN_DEF(cmpeq_s8){ "cmpeq_s8", simd__intrin_cmpeq_s8, 0x0001, ((void*)0) } ,
8110
8111#line 557
8112SIMD_INTRIN_DEF(cmpneq_s8){ "cmpneq_s8", simd__intrin_cmpneq_s8, 0x0001, ((void*)0) } ,
8113
8114#line 557
8115SIMD_INTRIN_DEF(cmpgt_s8){ "cmpgt_s8", simd__intrin_cmpgt_s8, 0x0001, ((void*)0) } ,
8116
8117#line 557
8118SIMD_INTRIN_DEF(cmpge_s8){ "cmpge_s8", simd__intrin_cmpge_s8, 0x0001, ((void*)0) } ,
8119
8120#line 557
8121SIMD_INTRIN_DEF(cmplt_s8){ "cmplt_s8", simd__intrin_cmplt_s8, 0x0001, ((void*)0) } ,
8122
8123#line 557
8124SIMD_INTRIN_DEF(cmple_s8){ "cmple_s8", simd__intrin_cmple_s8, 0x0001, ((void*)0) } ,
8125
8126
8127/***************************
8128 * Conversion
8129 ***************************/
8130SIMD_INTRIN_DEF(cvt_s8_b8){ "cvt_s8_b8", simd__intrin_cvt_s8_b8, 0x0001, ((void*)0) } ,
8131SIMD_INTRIN_DEF(cvt_b8_s8){ "cvt_b8_s8", simd__intrin_cvt_b8_s8, 0x0001, ((void*)0) } ,
8132#if 0
8133SIMD_INTRIN_DEF(expand_s8_s8){ "expand_s8_s8", simd__intrin_expand_s8_s8, 0x0001, ((void*)
0) } ,
8134#endif // expand_sup
8135/***************************
8136 * Arithmetic
8137 ***************************/
8138#line 574
8139SIMD_INTRIN_DEF(add_s8){ "add_s8", simd__intrin_add_s8, 0x0001, ((void*)0) } ,
8140
8141#line 574
8142SIMD_INTRIN_DEF(sub_s8){ "sub_s8", simd__intrin_sub_s8, 0x0001, ((void*)0) } ,
8143
8144
8145#if 1
8146#line 581
8147SIMD_INTRIN_DEF(adds_s8){ "adds_s8", simd__intrin_adds_s8, 0x0001, ((void*)0) } ,
8148
8149#line 581
8150SIMD_INTRIN_DEF(subs_s8){ "subs_s8", simd__intrin_subs_s8, 0x0001, ((void*)0) } ,
8151
8152#endif // sat_sup
8153
8154#if 1
8155SIMD_INTRIN_DEF(mul_s8){ "mul_s8", simd__intrin_mul_s8, 0x0001, ((void*)0) } ,
8156#endif // mul_sup
8157
8158#if 0
8159SIMD_INTRIN_DEF(div_s8){ "div_s8", simd__intrin_div_s8, 0x0001, ((void*)0) } ,
8160#endif // div_sup
8161
8162#if 1
8163SIMD_INTRIN_DEF(divisor_s8){ "divisor_s8", simd__intrin_divisor_s8, 0x0001, ((void*)0) }
,
8164SIMD_INTRIN_DEF(divc_s8){ "divc_s8", simd__intrin_divc_s8, 0x0001, ((void*)0) } ,
8165#endif // intdiv_sup
8166
8167#if 0
8168#line 602
8169SIMD_INTRIN_DEF(muladd_s8){ "muladd_s8", simd__intrin_muladd_s8, 0x0001, ((void*)0) } ,
8170
8171#line 602
8172SIMD_INTRIN_DEF(mulsub_s8){ "mulsub_s8", simd__intrin_mulsub_s8, 0x0001, ((void*)0) } ,
8173
8174#line 602
8175SIMD_INTRIN_DEF(nmuladd_s8){ "nmuladd_s8", simd__intrin_nmuladd_s8, 0x0001, ((void*)0) }
,
8176
8177#line 602
8178SIMD_INTRIN_DEF(nmulsub_s8){ "nmulsub_s8", simd__intrin_nmulsub_s8, 0x0001, ((void*)0) }
,
8179
8180#endif // fused_sup
8181
8182#if 0
8183SIMD_INTRIN_DEF(sum_s8){ "sum_s8", simd__intrin_sum_s8, 0x0001, ((void*)0) } ,
8184#endif // sum_sup
8185
8186#if 0
8187SIMD_INTRIN_DEF(sumup_s8){ "sumup_s8", simd__intrin_sumup_s8, 0x0001, ((void*)0) } ,
8188#endif // sumup_sup
8189/***************************
8190 * Math
8191 ***************************/
8192#if 0
8193#line 620
8194SIMD_INTRIN_DEF(sqrt_s8){ "sqrt_s8", simd__intrin_sqrt_s8, 0x0001, ((void*)0) } ,
8195
8196#line 620
8197SIMD_INTRIN_DEF(recip_s8){ "recip_s8", simd__intrin_recip_s8, 0x0001, ((void*)0) } ,
8198
8199#line 620
8200SIMD_INTRIN_DEF(abs_s8){ "abs_s8", simd__intrin_abs_s8, 0x0001, ((void*)0) } ,
8201
8202#line 620
8203SIMD_INTRIN_DEF(square_s8){ "square_s8", simd__intrin_square_s8, 0x0001, ((void*)0) } ,
8204
8205#endif
8206
8207#line 627
8208SIMD_INTRIN_DEF(max_s8){ "max_s8", simd__intrin_max_s8, 0x0001, ((void*)0) } ,
8209
8210#line 627
8211SIMD_INTRIN_DEF(min_s8){ "min_s8", simd__intrin_min_s8, 0x0001, ((void*)0) } ,
8212
8213
8214#if 0
8215#line 634
8216SIMD_INTRIN_DEF(maxp_s8){ "maxp_s8", simd__intrin_maxp_s8, 0x0001, ((void*)0) } ,
8217
8218#line 634
8219SIMD_INTRIN_DEF(minp_s8){ "minp_s8", simd__intrin_minp_s8, 0x0001, ((void*)0) } ,
8220
8221#endif
8222
8223/***************************
8224 * Mask operations
8225 ***************************/
8226#line 644
8227 SIMD_INTRIN_DEF(ifadd_s8){ "ifadd_s8", simd__intrin_ifadd_s8, 0x0001, ((void*)0) } ,
8228
8229#line 644
8230 SIMD_INTRIN_DEF(ifsub_s8){ "ifsub_s8", simd__intrin_ifsub_s8, 0x0001, ((void*)0) } ,
8231
8232
8233#endif // simd_sup
8234
8235#line 489
8236#if 1
8237
8238/***************************
8239 * Memory
8240 ***************************/
8241#line 497
8242SIMD_INTRIN_DEF(load_u16){ "load_u16", simd__intrin_load_u16, 0x0001, ((void*)0) } ,
8243
8244#line 497
8245SIMD_INTRIN_DEF(loada_u16){ "loada_u16", simd__intrin_loada_u16, 0x0001, ((void*)0) } ,
8246
8247#line 497
8248SIMD_INTRIN_DEF(loads_u16){ "loads_u16", simd__intrin_loads_u16, 0x0001, ((void*)0) } ,
8249
8250#line 497
8251SIMD_INTRIN_DEF(loadl_u16){ "loadl_u16", simd__intrin_loadl_u16, 0x0001, ((void*)0) } ,
8252
8253#line 497
8254SIMD_INTRIN_DEF(store_u16){ "store_u16", simd__intrin_store_u16, 0x0001, ((void*)0) } ,
8255
8256#line 497
8257SIMD_INTRIN_DEF(storea_u16){ "storea_u16", simd__intrin_storea_u16, 0x0001, ((void*)0) }
,
8258
8259#line 497
8260SIMD_INTRIN_DEF(stores_u16){ "stores_u16", simd__intrin_stores_u16, 0x0001, ((void*)0) }
,
8261
8262#line 497
8263SIMD_INTRIN_DEF(storel_u16){ "storel_u16", simd__intrin_storel_u16, 0x0001, ((void*)0) }
,
8264
8265#line 497
8266SIMD_INTRIN_DEF(storeh_u16){ "storeh_u16", simd__intrin_storeh_u16, 0x0001, ((void*)0) }
,
8267
8268
8269/****************************************
8270 * Non-contiguous/Partial Memory access
8271 ****************************************/
8272#if 0
8273#line 508
8274SIMD_INTRIN_DEF(load_till_u16){ "load_till_u16", simd__intrin_load_till_u16, 0x0001, ((void
*)0) } ,
8275
8276#line 508
8277SIMD_INTRIN_DEF(load_tillz_u16){ "load_tillz_u16", simd__intrin_load_tillz_u16, 0x0001, ((void
*)0) } ,
8278
8279#line 508
8280SIMD_INTRIN_DEF(loadn_u16){ "loadn_u16", simd__intrin_loadn_u16, 0x0001, ((void*)0) } ,
8281
8282#line 508
8283SIMD_INTRIN_DEF(loadn_till_u16){ "loadn_till_u16", simd__intrin_loadn_till_u16, 0x0001, ((void
*)0) } ,
8284
8285#line 508
8286SIMD_INTRIN_DEF(loadn_tillz_u16){ "loadn_tillz_u16", simd__intrin_loadn_tillz_u16, 0x0001, ((
void*)0) } ,
8287
8288#line 508
8289SIMD_INTRIN_DEF(store_till_u16){ "store_till_u16", simd__intrin_store_till_u16, 0x0001, ((void
*)0) } ,
8290
8291#line 508
8292SIMD_INTRIN_DEF(storen_u16){ "storen_u16", simd__intrin_storen_u16, 0x0001, ((void*)0) }
,
8293
8294#line 508
8295SIMD_INTRIN_DEF(storen_till_u16){ "storen_till_u16", simd__intrin_storen_till_u16, 0x0001, ((
void*)0) } ,
8296
8297#endif // ncont_sup
8298
8299/***************************
8300 * Misc
8301 ***************************/
8302#line 519
8303#if 1
8304SIMD_INTRIN_DEF(reinterpret_u8_u16){ "reinterpret_u8_u16", simd__intrin_reinterpret_u8_u16, 0x0001
, ((void*)0) } ,
8305#endif // simd_sup2
8306
8307#line 519
8308#if 1
8309SIMD_INTRIN_DEF(reinterpret_s8_u16){ "reinterpret_s8_u16", simd__intrin_reinterpret_s8_u16, 0x0001
, ((void*)0) } ,
8310#endif // simd_sup2
8311
8312#line 519
8313#if 1
8314SIMD_INTRIN_DEF(reinterpret_u16_u16){ "reinterpret_u16_u16", simd__intrin_reinterpret_u16_u16, 0x0001
, ((void*)0) } ,
8315#endif // simd_sup2
8316
8317#line 519
8318#if 1
8319SIMD_INTRIN_DEF(reinterpret_s16_u16){ "reinterpret_s16_u16", simd__intrin_reinterpret_s16_u16, 0x0001
, ((void*)0) } ,
8320#endif // simd_sup2
8321
8322#line 519
8323#if 1
8324SIMD_INTRIN_DEF(reinterpret_u32_u16){ "reinterpret_u32_u16", simd__intrin_reinterpret_u32_u16, 0x0001
, ((void*)0) } ,
8325#endif // simd_sup2
8326
8327#line 519
8328#if 1
8329SIMD_INTRIN_DEF(reinterpret_s32_u16){ "reinterpret_s32_u16", simd__intrin_reinterpret_s32_u16, 0x0001
, ((void*)0) } ,
8330#endif // simd_sup2
8331
8332#line 519
8333#if 1
8334SIMD_INTRIN_DEF(reinterpret_u64_u16){ "reinterpret_u64_u16", simd__intrin_reinterpret_u64_u16, 0x0001
, ((void*)0) } ,
8335#endif // simd_sup2
8336
8337#line 519
8338#if 1
8339SIMD_INTRIN_DEF(reinterpret_s64_u16){ "reinterpret_s64_u16", simd__intrin_reinterpret_s64_u16, 0x0001
, ((void*)0) } ,
8340#endif // simd_sup2
8341
8342#line 519
8343#if 1
8344SIMD_INTRIN_DEF(reinterpret_f32_u16){ "reinterpret_f32_u16", simd__intrin_reinterpret_f32_u16, 0x0001
, ((void*)0) } ,
8345#endif // simd_sup2
8346
8347#line 519
8348#if NPY_SIMD_F641
8349SIMD_INTRIN_DEF(reinterpret_f64_u16){ "reinterpret_f64_u16", simd__intrin_reinterpret_f64_u16, 0x0001
, ((void*)0) } ,
8350#endif // simd_sup2
8351
8352
8353#line 527
8354SIMD_INTRIN_DEF(set_u16){ "set_u16", simd__intrin_set_u16, 0x0001, ((void*)0) } ,
8355
8356#line 527
8357SIMD_INTRIN_DEF(setf_u16){ "setf_u16", simd__intrin_setf_u16, 0x0001, ((void*)0) } ,
8358
8359#line 527
8360SIMD_INTRIN_DEF(setall_u16){ "setall_u16", simd__intrin_setall_u16, 0x0001, ((void*)0) }
,
8361
8362#line 527
8363SIMD_INTRIN_DEF(zero_u16){ "zero_u16", simd__intrin_zero_u16, 0x0001, ((void*)0) } ,
8364
8365#line 527
8366SIMD_INTRIN_DEF(select_u16){ "select_u16", simd__intrin_select_u16, 0x0001, ((void*)0) }
,
8367
8368
8369/***************************
8370 * Reorder
8371 ***************************/
8372#line 536
8373SIMD_INTRIN_DEF(combinel_u16){ "combinel_u16", simd__intrin_combinel_u16, 0x0001, ((void*)
0) } ,
8374
8375#line 536
8376SIMD_INTRIN_DEF(combineh_u16){ "combineh_u16", simd__intrin_combineh_u16, 0x0001, ((void*)
0) } ,
8377
8378#line 536
8379SIMD_INTRIN_DEF(combine_u16){ "combine_u16", simd__intrin_combine_u16, 0x0001, ((void*)0)
} ,
8380
8381#line 536
8382SIMD_INTRIN_DEF(zip_u16){ "zip_u16", simd__intrin_zip_u16, 0x0001, ((void*)0) } ,
8383
8384
8385#if 1
8386SIMD_INTRIN_DEF(rev64_u16){ "rev64_u16", simd__intrin_rev64_u16, 0x0001, ((void*)0) } ,
8387#endif
8388
8389/***************************
8390 * Operators
8391 ***************************/
8392#if 15 > 0
8393#line 550
8394SIMD_INTRIN_DEF(shl_u16){ "shl_u16", simd__intrin_shl_u16, 0x0001, ((void*)0) } ,
8395
8396#line 550
8397SIMD_INTRIN_DEF(shr_u16){ "shr_u16", simd__intrin_shr_u16, 0x0001, ((void*)0) } ,
8398
8399#line 550
8400SIMD_INTRIN_DEF(shli_u16){ "shli_u16", simd__intrin_shli_u16, 0x0001, ((void*)0) } ,
8401
8402#line 550
8403SIMD_INTRIN_DEF(shri_u16){ "shri_u16", simd__intrin_shri_u16, 0x0001, ((void*)0) } ,
8404
8405#endif // shl_imm
8406
8407#line 557
8408SIMD_INTRIN_DEF(and_u16){ "and_u16", simd__intrin_and_u16, 0x0001, ((void*)0) } ,
8409
8410#line 557
8411SIMD_INTRIN_DEF(or_u16){ "or_u16", simd__intrin_or_u16, 0x0001, ((void*)0) } ,
8412
8413#line 557
8414SIMD_INTRIN_DEF(xor_u16){ "xor_u16", simd__intrin_xor_u16, 0x0001, ((void*)0) } ,
8415
8416#line 557
8417SIMD_INTRIN_DEF(not_u16){ "not_u16", simd__intrin_not_u16, 0x0001, ((void*)0) } ,
8418
8419#line 557
8420SIMD_INTRIN_DEF(cmpeq_u16){ "cmpeq_u16", simd__intrin_cmpeq_u16, 0x0001, ((void*)0) } ,
8421
8422#line 557
8423SIMD_INTRIN_DEF(cmpneq_u16){ "cmpneq_u16", simd__intrin_cmpneq_u16, 0x0001, ((void*)0) }
,
8424
8425#line 557
8426SIMD_INTRIN_DEF(cmpgt_u16){ "cmpgt_u16", simd__intrin_cmpgt_u16, 0x0001, ((void*)0) } ,
8427
8428#line 557
8429SIMD_INTRIN_DEF(cmpge_u16){ "cmpge_u16", simd__intrin_cmpge_u16, 0x0001, ((void*)0) } ,
8430
8431#line 557
8432SIMD_INTRIN_DEF(cmplt_u16){ "cmplt_u16", simd__intrin_cmplt_u16, 0x0001, ((void*)0) } ,
8433
8434#line 557
8435SIMD_INTRIN_DEF(cmple_u16){ "cmple_u16", simd__intrin_cmple_u16, 0x0001, ((void*)0) } ,
8436
8437
8438/***************************
8439 * Conversion
8440 ***************************/
8441SIMD_INTRIN_DEF(cvt_u16_b16){ "cvt_u16_b16", simd__intrin_cvt_u16_b16, 0x0001, ((void*)0)
} ,
8442SIMD_INTRIN_DEF(cvt_b16_u16){ "cvt_b16_u16", simd__intrin_cvt_b16_u16, 0x0001, ((void*)0)
} ,
8443#if 1
8444SIMD_INTRIN_DEF(expand_u32_u16){ "expand_u32_u16", simd__intrin_expand_u32_u16, 0x0001, ((void
*)0) } ,
8445#endif // expand_sup
8446/***************************
8447 * Arithmetic
8448 ***************************/
8449#line 574
8450SIMD_INTRIN_DEF(add_u16){ "add_u16", simd__intrin_add_u16, 0x0001, ((void*)0) } ,
8451
8452#line 574
8453SIMD_INTRIN_DEF(sub_u16){ "sub_u16", simd__intrin_sub_u16, 0x0001, ((void*)0) } ,
8454
8455
8456#if 1
8457#line 581
8458SIMD_INTRIN_DEF(adds_u16){ "adds_u16", simd__intrin_adds_u16, 0x0001, ((void*)0) } ,
8459
8460#line 581
8461SIMD_INTRIN_DEF(subs_u16){ "subs_u16", simd__intrin_subs_u16, 0x0001, ((void*)0) } ,
8462
8463#endif // sat_sup
8464
8465#if 1
8466SIMD_INTRIN_DEF(mul_u16){ "mul_u16", simd__intrin_mul_u16, 0x0001, ((void*)0) } ,
8467#endif // mul_sup
8468
8469#if 0
8470SIMD_INTRIN_DEF(div_u16){ "div_u16", simd__intrin_div_u16, 0x0001, ((void*)0) } ,
8471#endif // div_sup
8472
8473#if 1
8474SIMD_INTRIN_DEF(divisor_u16){ "divisor_u16", simd__intrin_divisor_u16, 0x0001, ((void*)0)
} ,
8475SIMD_INTRIN_DEF(divc_u16){ "divc_u16", simd__intrin_divc_u16, 0x0001, ((void*)0) } ,
8476#endif // intdiv_sup
8477
8478#if 0
8479#line 602
8480SIMD_INTRIN_DEF(muladd_u16){ "muladd_u16", simd__intrin_muladd_u16, 0x0001, ((void*)0) }
,
8481
8482#line 602
8483SIMD_INTRIN_DEF(mulsub_u16){ "mulsub_u16", simd__intrin_mulsub_u16, 0x0001, ((void*)0) }
,
8484
8485#line 602
8486SIMD_INTRIN_DEF(nmuladd_u16){ "nmuladd_u16", simd__intrin_nmuladd_u16, 0x0001, ((void*)0)
} ,
8487
8488#line 602
8489SIMD_INTRIN_DEF(nmulsub_u16){ "nmulsub_u16", simd__intrin_nmulsub_u16, 0x0001, ((void*)0)
} ,
8490
8491#endif // fused_sup
8492
8493#if 0
8494SIMD_INTRIN_DEF(sum_u16){ "sum_u16", simd__intrin_sum_u16, 0x0001, ((void*)0) } ,
8495#endif // sum_sup
8496
8497#if 1
8498SIMD_INTRIN_DEF(sumup_u16){ "sumup_u16", simd__intrin_sumup_u16, 0x0001, ((void*)0) } ,
8499#endif // sumup_sup
8500/***************************
8501 * Math
8502 ***************************/
8503#if 0
8504#line 620
8505SIMD_INTRIN_DEF(sqrt_u16){ "sqrt_u16", simd__intrin_sqrt_u16, 0x0001, ((void*)0) } ,
8506
8507#line 620
8508SIMD_INTRIN_DEF(recip_u16){ "recip_u16", simd__intrin_recip_u16, 0x0001, ((void*)0) } ,
8509
8510#line 620
8511SIMD_INTRIN_DEF(abs_u16){ "abs_u16", simd__intrin_abs_u16, 0x0001, ((void*)0) } ,
8512
8513#line 620
8514SIMD_INTRIN_DEF(square_u16){ "square_u16", simd__intrin_square_u16, 0x0001, ((void*)0) }
,
8515
8516#endif
8517
8518#line 627
8519SIMD_INTRIN_DEF(max_u16){ "max_u16", simd__intrin_max_u16, 0x0001, ((void*)0) } ,
8520
8521#line 627
8522SIMD_INTRIN_DEF(min_u16){ "min_u16", simd__intrin_min_u16, 0x0001, ((void*)0) } ,
8523
8524
8525#if 0
8526#line 634
8527SIMD_INTRIN_DEF(maxp_u16){ "maxp_u16", simd__intrin_maxp_u16, 0x0001, ((void*)0) } ,
8528
8529#line 634
8530SIMD_INTRIN_DEF(minp_u16){ "minp_u16", simd__intrin_minp_u16, 0x0001, ((void*)0) } ,
8531
8532#endif
8533
8534/***************************
8535 * Mask operations
8536 ***************************/
8537#line 644
8538 SIMD_INTRIN_DEF(ifadd_u16){ "ifadd_u16", simd__intrin_ifadd_u16, 0x0001, ((void*)0) } ,
8539
8540#line 644
8541 SIMD_INTRIN_DEF(ifsub_u16){ "ifsub_u16", simd__intrin_ifsub_u16, 0x0001, ((void*)0) } ,
8542
8543
8544#endif // simd_sup
8545
8546#line 489
8547#if 1
8548
8549/***************************
8550 * Memory
8551 ***************************/
8552#line 497
8553SIMD_INTRIN_DEF(load_s16){ "load_s16", simd__intrin_load_s16, 0x0001, ((void*)0) } ,
8554
8555#line 497
8556SIMD_INTRIN_DEF(loada_s16){ "loada_s16", simd__intrin_loada_s16, 0x0001, ((void*)0) } ,
8557
8558#line 497
8559SIMD_INTRIN_DEF(loads_s16){ "loads_s16", simd__intrin_loads_s16, 0x0001, ((void*)0) } ,
8560
8561#line 497
8562SIMD_INTRIN_DEF(loadl_s16){ "loadl_s16", simd__intrin_loadl_s16, 0x0001, ((void*)0) } ,
8563
8564#line 497
8565SIMD_INTRIN_DEF(store_s16){ "store_s16", simd__intrin_store_s16, 0x0001, ((void*)0) } ,
8566
8567#line 497
8568SIMD_INTRIN_DEF(storea_s16){ "storea_s16", simd__intrin_storea_s16, 0x0001, ((void*)0) }
,
8569
8570#line 497
8571SIMD_INTRIN_DEF(stores_s16){ "stores_s16", simd__intrin_stores_s16, 0x0001, ((void*)0) }
,
8572
8573#line 497
8574SIMD_INTRIN_DEF(storel_s16){ "storel_s16", simd__intrin_storel_s16, 0x0001, ((void*)0) }
,
8575
8576#line 497
8577SIMD_INTRIN_DEF(storeh_s16){ "storeh_s16", simd__intrin_storeh_s16, 0x0001, ((void*)0) }
,
8578
8579
8580/****************************************
8581 * Non-contiguous/Partial Memory access
8582 ****************************************/
8583#if 0
8584#line 508
8585SIMD_INTRIN_DEF(load_till_s16){ "load_till_s16", simd__intrin_load_till_s16, 0x0001, ((void
*)0) } ,
8586
8587#line 508
8588SIMD_INTRIN_DEF(load_tillz_s16){ "load_tillz_s16", simd__intrin_load_tillz_s16, 0x0001, ((void
*)0) } ,
8589
8590#line 508
8591SIMD_INTRIN_DEF(loadn_s16){ "loadn_s16", simd__intrin_loadn_s16, 0x0001, ((void*)0) } ,
8592
8593#line 508
8594SIMD_INTRIN_DEF(loadn_till_s16){ "loadn_till_s16", simd__intrin_loadn_till_s16, 0x0001, ((void
*)0) } ,
8595
8596#line 508
8597SIMD_INTRIN_DEF(loadn_tillz_s16){ "loadn_tillz_s16", simd__intrin_loadn_tillz_s16, 0x0001, ((
void*)0) } ,
8598
8599#line 508
8600SIMD_INTRIN_DEF(store_till_s16){ "store_till_s16", simd__intrin_store_till_s16, 0x0001, ((void
*)0) } ,
8601
8602#line 508
8603SIMD_INTRIN_DEF(storen_s16){ "storen_s16", simd__intrin_storen_s16, 0x0001, ((void*)0) }
,
8604
8605#line 508
8606SIMD_INTRIN_DEF(storen_till_s16){ "storen_till_s16", simd__intrin_storen_till_s16, 0x0001, ((
void*)0) } ,
8607
8608#endif // ncont_sup
8609
8610/***************************
8611 * Misc
8612 ***************************/
8613#line 519
8614#if 1
8615SIMD_INTRIN_DEF(reinterpret_u8_s16){ "reinterpret_u8_s16", simd__intrin_reinterpret_u8_s16, 0x0001
, ((void*)0) } ,
8616#endif // simd_sup2
8617
8618#line 519
8619#if 1
8620SIMD_INTRIN_DEF(reinterpret_s8_s16){ "reinterpret_s8_s16", simd__intrin_reinterpret_s8_s16, 0x0001
, ((void*)0) } ,
8621#endif // simd_sup2
8622
8623#line 519
8624#if 1
8625SIMD_INTRIN_DEF(reinterpret_u16_s16){ "reinterpret_u16_s16", simd__intrin_reinterpret_u16_s16, 0x0001
, ((void*)0) } ,
8626#endif // simd_sup2
8627
8628#line 519
8629#if 1
8630SIMD_INTRIN_DEF(reinterpret_s16_s16){ "reinterpret_s16_s16", simd__intrin_reinterpret_s16_s16, 0x0001
, ((void*)0) } ,
8631#endif // simd_sup2
8632
8633#line 519
8634#if 1
8635SIMD_INTRIN_DEF(reinterpret_u32_s16){ "reinterpret_u32_s16", simd__intrin_reinterpret_u32_s16, 0x0001
, ((void*)0) } ,
8636#endif // simd_sup2
8637
8638#line 519
8639#if 1
8640SIMD_INTRIN_DEF(reinterpret_s32_s16){ "reinterpret_s32_s16", simd__intrin_reinterpret_s32_s16, 0x0001
, ((void*)0) } ,
8641#endif // simd_sup2
8642
8643#line 519
8644#if 1
8645SIMD_INTRIN_DEF(reinterpret_u64_s16){ "reinterpret_u64_s16", simd__intrin_reinterpret_u64_s16, 0x0001
, ((void*)0) } ,
8646#endif // simd_sup2
8647
8648#line 519
8649#if 1
8650SIMD_INTRIN_DEF(reinterpret_s64_s16){ "reinterpret_s64_s16", simd__intrin_reinterpret_s64_s16, 0x0001
, ((void*)0) } ,
8651#endif // simd_sup2
8652
8653#line 519
8654#if 1
8655SIMD_INTRIN_DEF(reinterpret_f32_s16){ "reinterpret_f32_s16", simd__intrin_reinterpret_f32_s16, 0x0001
, ((void*)0) } ,
8656#endif // simd_sup2
8657
8658#line 519
8659#if NPY_SIMD_F641
8660SIMD_INTRIN_DEF(reinterpret_f64_s16){ "reinterpret_f64_s16", simd__intrin_reinterpret_f64_s16, 0x0001
, ((void*)0) } ,
8661#endif // simd_sup2
8662
8663
8664#line 527
8665SIMD_INTRIN_DEF(set_s16){ "set_s16", simd__intrin_set_s16, 0x0001, ((void*)0) } ,
8666
8667#line 527
8668SIMD_INTRIN_DEF(setf_s16){ "setf_s16", simd__intrin_setf_s16, 0x0001, ((void*)0) } ,
8669
8670#line 527
8671SIMD_INTRIN_DEF(setall_s16){ "setall_s16", simd__intrin_setall_s16, 0x0001, ((void*)0) }
,
8672
8673#line 527
8674SIMD_INTRIN_DEF(zero_s16){ "zero_s16", simd__intrin_zero_s16, 0x0001, ((void*)0) } ,
8675
8676#line 527
8677SIMD_INTRIN_DEF(select_s16){ "select_s16", simd__intrin_select_s16, 0x0001, ((void*)0) }
,
8678
8679
8680/***************************
8681 * Reorder
8682 ***************************/
8683#line 536
8684SIMD_INTRIN_DEF(combinel_s16){ "combinel_s16", simd__intrin_combinel_s16, 0x0001, ((void*)
0) } ,
8685
8686#line 536
8687SIMD_INTRIN_DEF(combineh_s16){ "combineh_s16", simd__intrin_combineh_s16, 0x0001, ((void*)
0) } ,
8688
8689#line 536
8690SIMD_INTRIN_DEF(combine_s16){ "combine_s16", simd__intrin_combine_s16, 0x0001, ((void*)0)
} ,
8691
8692#line 536
8693SIMD_INTRIN_DEF(zip_s16){ "zip_s16", simd__intrin_zip_s16, 0x0001, ((void*)0) } ,
8694
8695
8696#if 1
8697SIMD_INTRIN_DEF(rev64_s16){ "rev64_s16", simd__intrin_rev64_s16, 0x0001, ((void*)0) } ,
8698#endif
8699
8700/***************************
8701 * Operators
8702 ***************************/
8703#if 15 > 0
8704#line 550
8705SIMD_INTRIN_DEF(shl_s16){ "shl_s16", simd__intrin_shl_s16, 0x0001, ((void*)0) } ,
8706
8707#line 550
8708SIMD_INTRIN_DEF(shr_s16){ "shr_s16", simd__intrin_shr_s16, 0x0001, ((void*)0) } ,
8709
8710#line 550
8711SIMD_INTRIN_DEF(shli_s16){ "shli_s16", simd__intrin_shli_s16, 0x0001, ((void*)0) } ,
8712
8713#line 550
8714SIMD_INTRIN_DEF(shri_s16){ "shri_s16", simd__intrin_shri_s16, 0x0001, ((void*)0) } ,
8715
8716#endif // shl_imm
8717
8718#line 557
8719SIMD_INTRIN_DEF(and_s16){ "and_s16", simd__intrin_and_s16, 0x0001, ((void*)0) } ,
8720
8721#line 557
8722SIMD_INTRIN_DEF(or_s16){ "or_s16", simd__intrin_or_s16, 0x0001, ((void*)0) } ,
8723
8724#line 557
8725SIMD_INTRIN_DEF(xor_s16){ "xor_s16", simd__intrin_xor_s16, 0x0001, ((void*)0) } ,
8726
8727#line 557
8728SIMD_INTRIN_DEF(not_s16){ "not_s16", simd__intrin_not_s16, 0x0001, ((void*)0) } ,
8729
8730#line 557
8731SIMD_INTRIN_DEF(cmpeq_s16){ "cmpeq_s16", simd__intrin_cmpeq_s16, 0x0001, ((void*)0) } ,
8732
8733#line 557
8734SIMD_INTRIN_DEF(cmpneq_s16){ "cmpneq_s16", simd__intrin_cmpneq_s16, 0x0001, ((void*)0) }
,
8735
8736#line 557
8737SIMD_INTRIN_DEF(cmpgt_s16){ "cmpgt_s16", simd__intrin_cmpgt_s16, 0x0001, ((void*)0) } ,
8738
8739#line 557
8740SIMD_INTRIN_DEF(cmpge_s16){ "cmpge_s16", simd__intrin_cmpge_s16, 0x0001, ((void*)0) } ,
8741
8742#line 557
8743SIMD_INTRIN_DEF(cmplt_s16){ "cmplt_s16", simd__intrin_cmplt_s16, 0x0001, ((void*)0) } ,
8744
8745#line 557
8746SIMD_INTRIN_DEF(cmple_s16){ "cmple_s16", simd__intrin_cmple_s16, 0x0001, ((void*)0) } ,
8747
8748
8749/***************************
8750 * Conversion
8751 ***************************/
8752SIMD_INTRIN_DEF(cvt_s16_b16){ "cvt_s16_b16", simd__intrin_cvt_s16_b16, 0x0001, ((void*)0)
} ,
8753SIMD_INTRIN_DEF(cvt_b16_s16){ "cvt_b16_s16", simd__intrin_cvt_b16_s16, 0x0001, ((void*)0)
} ,
8754#if 0
8755SIMD_INTRIN_DEF(expand_s16_s16){ "expand_s16_s16", simd__intrin_expand_s16_s16, 0x0001, ((void
*)0) } ,
8756#endif // expand_sup
8757/***************************
8758 * Arithmetic
8759 ***************************/
8760#line 574
8761SIMD_INTRIN_DEF(add_s16){ "add_s16", simd__intrin_add_s16, 0x0001, ((void*)0) } ,
8762
8763#line 574
8764SIMD_INTRIN_DEF(sub_s16){ "sub_s16", simd__intrin_sub_s16, 0x0001, ((void*)0) } ,
8765
8766
8767#if 1
8768#line 581
8769SIMD_INTRIN_DEF(adds_s16){ "adds_s16", simd__intrin_adds_s16, 0x0001, ((void*)0) } ,
8770
8771#line 581
8772SIMD_INTRIN_DEF(subs_s16){ "subs_s16", simd__intrin_subs_s16, 0x0001, ((void*)0) } ,
8773
8774#endif // sat_sup
8775
8776#if 1
8777SIMD_INTRIN_DEF(mul_s16){ "mul_s16", simd__intrin_mul_s16, 0x0001, ((void*)0) } ,
8778#endif // mul_sup
8779
8780#if 0
8781SIMD_INTRIN_DEF(div_s16){ "div_s16", simd__intrin_div_s16, 0x0001, ((void*)0) } ,
8782#endif // div_sup
8783
8784#if 1
8785SIMD_INTRIN_DEF(divisor_s16){ "divisor_s16", simd__intrin_divisor_s16, 0x0001, ((void*)0)
} ,
8786SIMD_INTRIN_DEF(divc_s16){ "divc_s16", simd__intrin_divc_s16, 0x0001, ((void*)0) } ,
8787#endif // intdiv_sup
8788
8789#if 0
8790#line 602
8791SIMD_INTRIN_DEF(muladd_s16){ "muladd_s16", simd__intrin_muladd_s16, 0x0001, ((void*)0) }
,
8792
8793#line 602
8794SIMD_INTRIN_DEF(mulsub_s16){ "mulsub_s16", simd__intrin_mulsub_s16, 0x0001, ((void*)0) }
,
8795
8796#line 602
8797SIMD_INTRIN_DEF(nmuladd_s16){ "nmuladd_s16", simd__intrin_nmuladd_s16, 0x0001, ((void*)0)
} ,
8798
8799#line 602
8800SIMD_INTRIN_DEF(nmulsub_s16){ "nmulsub_s16", simd__intrin_nmulsub_s16, 0x0001, ((void*)0)
} ,
8801
8802#endif // fused_sup
8803
8804#if 0
8805SIMD_INTRIN_DEF(sum_s16){ "sum_s16", simd__intrin_sum_s16, 0x0001, ((void*)0) } ,
8806#endif // sum_sup
8807
8808#if 0
8809SIMD_INTRIN_DEF(sumup_s16){ "sumup_s16", simd__intrin_sumup_s16, 0x0001, ((void*)0) } ,
8810#endif // sumup_sup
8811/***************************
8812 * Math
8813 ***************************/
8814#if 0
8815#line 620
8816SIMD_INTRIN_DEF(sqrt_s16){ "sqrt_s16", simd__intrin_sqrt_s16, 0x0001, ((void*)0) } ,
8817
8818#line 620
8819SIMD_INTRIN_DEF(recip_s16){ "recip_s16", simd__intrin_recip_s16, 0x0001, ((void*)0) } ,
8820
8821#line 620
8822SIMD_INTRIN_DEF(abs_s16){ "abs_s16", simd__intrin_abs_s16, 0x0001, ((void*)0) } ,
8823
8824#line 620
8825SIMD_INTRIN_DEF(square_s16){ "square_s16", simd__intrin_square_s16, 0x0001, ((void*)0) }
,
8826
8827#endif
8828
8829#line 627
8830SIMD_INTRIN_DEF(max_s16){ "max_s16", simd__intrin_max_s16, 0x0001, ((void*)0) } ,
8831
8832#line 627
8833SIMD_INTRIN_DEF(min_s16){ "min_s16", simd__intrin_min_s16, 0x0001, ((void*)0) } ,
8834
8835
8836#if 0
8837#line 634
8838SIMD_INTRIN_DEF(maxp_s16){ "maxp_s16", simd__intrin_maxp_s16, 0x0001, ((void*)0) } ,
8839
8840#line 634
8841SIMD_INTRIN_DEF(minp_s16){ "minp_s16", simd__intrin_minp_s16, 0x0001, ((void*)0) } ,
8842
8843#endif
8844
8845/***************************
8846 * Mask operations
8847 ***************************/
8848#line 644
8849 SIMD_INTRIN_DEF(ifadd_s16){ "ifadd_s16", simd__intrin_ifadd_s16, 0x0001, ((void*)0) } ,
8850
8851#line 644
8852 SIMD_INTRIN_DEF(ifsub_s16){ "ifsub_s16", simd__intrin_ifsub_s16, 0x0001, ((void*)0) } ,
8853
8854
8855#endif // simd_sup
8856
8857#line 489
8858#if 1
8859
8860/***************************
8861 * Memory
8862 ***************************/
8863#line 497
8864SIMD_INTRIN_DEF(load_u32){ "load_u32", simd__intrin_load_u32, 0x0001, ((void*)0) } ,
8865
8866#line 497
8867SIMD_INTRIN_DEF(loada_u32){ "loada_u32", simd__intrin_loada_u32, 0x0001, ((void*)0) } ,
8868
8869#line 497
8870SIMD_INTRIN_DEF(loads_u32){ "loads_u32", simd__intrin_loads_u32, 0x0001, ((void*)0) } ,
8871
8872#line 497
8873SIMD_INTRIN_DEF(loadl_u32){ "loadl_u32", simd__intrin_loadl_u32, 0x0001, ((void*)0) } ,
8874
8875#line 497
8876SIMD_INTRIN_DEF(store_u32){ "store_u32", simd__intrin_store_u32, 0x0001, ((void*)0) } ,
8877
8878#line 497
8879SIMD_INTRIN_DEF(storea_u32){ "storea_u32", simd__intrin_storea_u32, 0x0001, ((void*)0) }
,
8880
8881#line 497
8882SIMD_INTRIN_DEF(stores_u32){ "stores_u32", simd__intrin_stores_u32, 0x0001, ((void*)0) }
,
8883
8884#line 497
8885SIMD_INTRIN_DEF(storel_u32){ "storel_u32", simd__intrin_storel_u32, 0x0001, ((void*)0) }
,
8886
8887#line 497
8888SIMD_INTRIN_DEF(storeh_u32){ "storeh_u32", simd__intrin_storeh_u32, 0x0001, ((void*)0) }
,
8889
8890
8891/****************************************
8892 * Non-contiguous/Partial Memory access
8893 ****************************************/
8894#if 1
8895#line 508
8896SIMD_INTRIN_DEF(load_till_u32){ "load_till_u32", simd__intrin_load_till_u32, 0x0001, ((void
*)0) } ,
8897
8898#line 508
8899SIMD_INTRIN_DEF(load_tillz_u32){ "load_tillz_u32", simd__intrin_load_tillz_u32, 0x0001, ((void
*)0) } ,
8900
8901#line 508
8902SIMD_INTRIN_DEF(loadn_u32){ "loadn_u32", simd__intrin_loadn_u32, 0x0001, ((void*)0) } ,
8903
8904#line 508
8905SIMD_INTRIN_DEF(loadn_till_u32){ "loadn_till_u32", simd__intrin_loadn_till_u32, 0x0001, ((void
*)0) } ,
8906
8907#line 508
8908SIMD_INTRIN_DEF(loadn_tillz_u32){ "loadn_tillz_u32", simd__intrin_loadn_tillz_u32, 0x0001, ((
void*)0) } ,
8909
8910#line 508
8911SIMD_INTRIN_DEF(store_till_u32){ "store_till_u32", simd__intrin_store_till_u32, 0x0001, ((void
*)0) } ,
8912
8913#line 508
8914SIMD_INTRIN_DEF(storen_u32){ "storen_u32", simd__intrin_storen_u32, 0x0001, ((void*)0) }
,
8915
8916#line 508
8917SIMD_INTRIN_DEF(storen_till_u32){ "storen_till_u32", simd__intrin_storen_till_u32, 0x0001, ((
void*)0) } ,
8918
8919#endif // ncont_sup
8920
8921/***************************
8922 * Misc
8923 ***************************/
8924#line 519
8925#if 1
8926SIMD_INTRIN_DEF(reinterpret_u8_u32){ "reinterpret_u8_u32", simd__intrin_reinterpret_u8_u32, 0x0001
, ((void*)0) } ,
8927#endif // simd_sup2
8928
8929#line 519
8930#if 1
8931SIMD_INTRIN_DEF(reinterpret_s8_u32){ "reinterpret_s8_u32", simd__intrin_reinterpret_s8_u32, 0x0001
, ((void*)0) } ,
8932#endif // simd_sup2
8933
8934#line 519
8935#if 1
8936SIMD_INTRIN_DEF(reinterpret_u16_u32){ "reinterpret_u16_u32", simd__intrin_reinterpret_u16_u32, 0x0001
, ((void*)0) } ,
8937#endif // simd_sup2
8938
8939#line 519
8940#if 1
8941SIMD_INTRIN_DEF(reinterpret_s16_u32){ "reinterpret_s16_u32", simd__intrin_reinterpret_s16_u32, 0x0001
, ((void*)0) } ,
8942#endif // simd_sup2
8943
8944#line 519
8945#if 1
8946SIMD_INTRIN_DEF(reinterpret_u32_u32){ "reinterpret_u32_u32", simd__intrin_reinterpret_u32_u32, 0x0001
, ((void*)0) } ,
8947#endif // simd_sup2
8948
8949#line 519
8950#if 1
8951SIMD_INTRIN_DEF(reinterpret_s32_u32){ "reinterpret_s32_u32", simd__intrin_reinterpret_s32_u32, 0x0001
, ((void*)0) } ,
8952#endif // simd_sup2
8953
8954#line 519
8955#if 1
8956SIMD_INTRIN_DEF(reinterpret_u64_u32){ "reinterpret_u64_u32", simd__intrin_reinterpret_u64_u32, 0x0001
, ((void*)0) } ,
8957#endif // simd_sup2
8958
8959#line 519
8960#if 1
8961SIMD_INTRIN_DEF(reinterpret_s64_u32){ "reinterpret_s64_u32", simd__intrin_reinterpret_s64_u32, 0x0001
, ((void*)0) } ,
8962#endif // simd_sup2
8963
8964#line 519
8965#if 1
8966SIMD_INTRIN_DEF(reinterpret_f32_u32){ "reinterpret_f32_u32", simd__intrin_reinterpret_f32_u32, 0x0001
, ((void*)0) } ,
8967#endif // simd_sup2
8968
8969#line 519
8970#if NPY_SIMD_F641
8971SIMD_INTRIN_DEF(reinterpret_f64_u32){ "reinterpret_f64_u32", simd__intrin_reinterpret_f64_u32, 0x0001
, ((void*)0) } ,
8972#endif // simd_sup2
8973
8974
8975#line 527
8976SIMD_INTRIN_DEF(set_u32){ "set_u32", simd__intrin_set_u32, 0x0001, ((void*)0) } ,
8977
8978#line 527
8979SIMD_INTRIN_DEF(setf_u32){ "setf_u32", simd__intrin_setf_u32, 0x0001, ((void*)0) } ,
8980
8981#line 527
8982SIMD_INTRIN_DEF(setall_u32){ "setall_u32", simd__intrin_setall_u32, 0x0001, ((void*)0) }
,
8983
8984#line 527
8985SIMD_INTRIN_DEF(zero_u32){ "zero_u32", simd__intrin_zero_u32, 0x0001, ((void*)0) } ,
8986
8987#line 527
8988SIMD_INTRIN_DEF(select_u32){ "select_u32", simd__intrin_select_u32, 0x0001, ((void*)0) }
,
8989
8990
8991/***************************
8992 * Reorder
8993 ***************************/
8994#line 536
8995SIMD_INTRIN_DEF(combinel_u32){ "combinel_u32", simd__intrin_combinel_u32, 0x0001, ((void*)
0) } ,
8996
8997#line 536
8998SIMD_INTRIN_DEF(combineh_u32){ "combineh_u32", simd__intrin_combineh_u32, 0x0001, ((void*)
0) } ,
8999
9000#line 536
9001SIMD_INTRIN_DEF(combine_u32){ "combine_u32", simd__intrin_combine_u32, 0x0001, ((void*)0)
} ,
9002
9003#line 536
9004SIMD_INTRIN_DEF(zip_u32){ "zip_u32", simd__intrin_zip_u32, 0x0001, ((void*)0) } ,
9005
9006
9007#if 1
9008SIMD_INTRIN_DEF(rev64_u32){ "rev64_u32", simd__intrin_rev64_u32, 0x0001, ((void*)0) } ,
9009#endif
9010
9011/***************************
9012 * Operators
9013 ***************************/
9014#if 31 > 0
9015#line 550
9016SIMD_INTRIN_DEF(shl_u32){ "shl_u32", simd__intrin_shl_u32, 0x0001, ((void*)0) } ,
9017
9018#line 550
9019SIMD_INTRIN_DEF(shr_u32){ "shr_u32", simd__intrin_shr_u32, 0x0001, ((void*)0) } ,
9020
9021#line 550
9022SIMD_INTRIN_DEF(shli_u32){ "shli_u32", simd__intrin_shli_u32, 0x0001, ((void*)0) } ,
9023
9024#line 550
9025SIMD_INTRIN_DEF(shri_u32){ "shri_u32", simd__intrin_shri_u32, 0x0001, ((void*)0) } ,
9026
9027#endif // shl_imm
9028
9029#line 557
9030SIMD_INTRIN_DEF(and_u32){ "and_u32", simd__intrin_and_u32, 0x0001, ((void*)0) } ,
9031
9032#line 557
9033SIMD_INTRIN_DEF(or_u32){ "or_u32", simd__intrin_or_u32, 0x0001, ((void*)0) } ,
9034
9035#line 557
9036SIMD_INTRIN_DEF(xor_u32){ "xor_u32", simd__intrin_xor_u32, 0x0001, ((void*)0) } ,
9037
9038#line 557
9039SIMD_INTRIN_DEF(not_u32){ "not_u32", simd__intrin_not_u32, 0x0001, ((void*)0) } ,
9040
9041#line 557
9042SIMD_INTRIN_DEF(cmpeq_u32){ "cmpeq_u32", simd__intrin_cmpeq_u32, 0x0001, ((void*)0) } ,
9043
9044#line 557
9045SIMD_INTRIN_DEF(cmpneq_u32){ "cmpneq_u32", simd__intrin_cmpneq_u32, 0x0001, ((void*)0) }
,
9046
9047#line 557
9048SIMD_INTRIN_DEF(cmpgt_u32){ "cmpgt_u32", simd__intrin_cmpgt_u32, 0x0001, ((void*)0) } ,
9049
9050#line 557
9051SIMD_INTRIN_DEF(cmpge_u32){ "cmpge_u32", simd__intrin_cmpge_u32, 0x0001, ((void*)0) } ,
9052
9053#line 557
9054SIMD_INTRIN_DEF(cmplt_u32){ "cmplt_u32", simd__intrin_cmplt_u32, 0x0001, ((void*)0) } ,
9055
9056#line 557
9057SIMD_INTRIN_DEF(cmple_u32){ "cmple_u32", simd__intrin_cmple_u32, 0x0001, ((void*)0) } ,
9058
9059
9060/***************************
9061 * Conversion
9062 ***************************/
9063SIMD_INTRIN_DEF(cvt_u32_b32){ "cvt_u32_b32", simd__intrin_cvt_u32_b32, 0x0001, ((void*)0)
} ,
9064SIMD_INTRIN_DEF(cvt_b32_u32){ "cvt_b32_u32", simd__intrin_cvt_b32_u32, 0x0001, ((void*)0)
} ,
9065#if 0
9066SIMD_INTRIN_DEF(expand_u32_u32){ "expand_u32_u32", simd__intrin_expand_u32_u32, 0x0001, ((void
*)0) } ,
9067#endif // expand_sup
9068/***************************
9069 * Arithmetic
9070 ***************************/
9071#line 574
9072SIMD_INTRIN_DEF(add_u32){ "add_u32", simd__intrin_add_u32, 0x0001, ((void*)0) } ,
9073
9074#line 574
9075SIMD_INTRIN_DEF(sub_u32){ "sub_u32", simd__intrin_sub_u32, 0x0001, ((void*)0) } ,
9076
9077
9078#if 0
9079#line 581
9080SIMD_INTRIN_DEF(adds_u32){ "adds_u32", simd__intrin_adds_u32, 0x0001, ((void*)0) } ,
9081
9082#line 581
9083SIMD_INTRIN_DEF(subs_u32){ "subs_u32", simd__intrin_subs_u32, 0x0001, ((void*)0) } ,
9084
9085#endif // sat_sup
9086
9087#if 1
9088SIMD_INTRIN_DEF(mul_u32){ "mul_u32", simd__intrin_mul_u32, 0x0001, ((void*)0) } ,
9089#endif // mul_sup
9090
9091#if 0
9092SIMD_INTRIN_DEF(div_u32){ "div_u32", simd__intrin_div_u32, 0x0001, ((void*)0) } ,
9093#endif // div_sup
9094
9095#if 1
9096SIMD_INTRIN_DEF(divisor_u32){ "divisor_u32", simd__intrin_divisor_u32, 0x0001, ((void*)0)
} ,
9097SIMD_INTRIN_DEF(divc_u32){ "divc_u32", simd__intrin_divc_u32, 0x0001, ((void*)0) } ,
9098#endif // intdiv_sup
9099
9100#if 0
9101#line 602
9102SIMD_INTRIN_DEF(muladd_u32){ "muladd_u32", simd__intrin_muladd_u32, 0x0001, ((void*)0) }
,
9103
9104#line 602
9105SIMD_INTRIN_DEF(mulsub_u32){ "mulsub_u32", simd__intrin_mulsub_u32, 0x0001, ((void*)0) }
,
9106
9107#line 602
9108SIMD_INTRIN_DEF(nmuladd_u32){ "nmuladd_u32", simd__intrin_nmuladd_u32, 0x0001, ((void*)0)
} ,
9109
9110#line 602
9111SIMD_INTRIN_DEF(nmulsub_u32){ "nmulsub_u32", simd__intrin_nmulsub_u32, 0x0001, ((void*)0)
} ,
9112
9113#endif // fused_sup
9114
9115#if 1
9116SIMD_INTRIN_DEF(sum_u32){ "sum_u32", simd__intrin_sum_u32, 0x0001, ((void*)0) } ,
9117#endif // sum_sup
9118
9119#if 0
9120SIMD_INTRIN_DEF(sumup_u32){ "sumup_u32", simd__intrin_sumup_u32, 0x0001, ((void*)0) } ,
9121#endif // sumup_sup
9122/***************************
9123 * Math
9124 ***************************/
9125#if 0
9126#line 620
9127SIMD_INTRIN_DEF(sqrt_u32){ "sqrt_u32", simd__intrin_sqrt_u32, 0x0001, ((void*)0) } ,
9128
9129#line 620
9130SIMD_INTRIN_DEF(recip_u32){ "recip_u32", simd__intrin_recip_u32, 0x0001, ((void*)0) } ,
9131
9132#line 620
9133SIMD_INTRIN_DEF(abs_u32){ "abs_u32", simd__intrin_abs_u32, 0x0001, ((void*)0) } ,
9134
9135#line 620
9136SIMD_INTRIN_DEF(square_u32){ "square_u32", simd__intrin_square_u32, 0x0001, ((void*)0) }
,
9137
9138#endif
9139
9140#line 627
9141SIMD_INTRIN_DEF(max_u32){ "max_u32", simd__intrin_max_u32, 0x0001, ((void*)0) } ,
9142
9143#line 627
9144SIMD_INTRIN_DEF(min_u32){ "min_u32", simd__intrin_min_u32, 0x0001, ((void*)0) } ,
9145
9146
9147#if 0
9148#line 634
9149SIMD_INTRIN_DEF(maxp_u32){ "maxp_u32", simd__intrin_maxp_u32, 0x0001, ((void*)0) } ,
9150
9151#line 634
9152SIMD_INTRIN_DEF(minp_u32){ "minp_u32", simd__intrin_minp_u32, 0x0001, ((void*)0) } ,
9153
9154#endif
9155
9156/***************************
9157 * Mask operations
9158 ***************************/
9159#line 644
9160 SIMD_INTRIN_DEF(ifadd_u32){ "ifadd_u32", simd__intrin_ifadd_u32, 0x0001, ((void*)0) } ,
9161
9162#line 644
9163 SIMD_INTRIN_DEF(ifsub_u32){ "ifsub_u32", simd__intrin_ifsub_u32, 0x0001, ((void*)0) } ,
9164
9165
9166#endif // simd_sup
9167
9168#line 489
9169#if 1
9170
9171/***************************
9172 * Memory
9173 ***************************/
9174#line 497
9175SIMD_INTRIN_DEF(load_s32){ "load_s32", simd__intrin_load_s32, 0x0001, ((void*)0) } ,
9176
9177#line 497
9178SIMD_INTRIN_DEF(loada_s32){ "loada_s32", simd__intrin_loada_s32, 0x0001, ((void*)0) } ,
9179
9180#line 497
9181SIMD_INTRIN_DEF(loads_s32){ "loads_s32", simd__intrin_loads_s32, 0x0001, ((void*)0) } ,
9182
9183#line 497
9184SIMD_INTRIN_DEF(loadl_s32){ "loadl_s32", simd__intrin_loadl_s32, 0x0001, ((void*)0) } ,
9185
9186#line 497
9187SIMD_INTRIN_DEF(store_s32){ "store_s32", simd__intrin_store_s32, 0x0001, ((void*)0) } ,
9188
9189#line 497
9190SIMD_INTRIN_DEF(storea_s32){ "storea_s32", simd__intrin_storea_s32, 0x0001, ((void*)0) }
,
9191
9192#line 497
9193SIMD_INTRIN_DEF(stores_s32){ "stores_s32", simd__intrin_stores_s32, 0x0001, ((void*)0) }
,
9194
9195#line 497
9196SIMD_INTRIN_DEF(storel_s32){ "storel_s32", simd__intrin_storel_s32, 0x0001, ((void*)0) }
,
9197
9198#line 497
9199SIMD_INTRIN_DEF(storeh_s32){ "storeh_s32", simd__intrin_storeh_s32, 0x0001, ((void*)0) }
,
9200
9201
9202/****************************************
9203 * Non-contiguous/Partial Memory access
9204 ****************************************/
9205#if 1
9206#line 508
9207SIMD_INTRIN_DEF(load_till_s32){ "load_till_s32", simd__intrin_load_till_s32, 0x0001, ((void
*)0) } ,
9208
9209#line 508
9210SIMD_INTRIN_DEF(load_tillz_s32){ "load_tillz_s32", simd__intrin_load_tillz_s32, 0x0001, ((void
*)0) } ,
9211
9212#line 508
9213SIMD_INTRIN_DEF(loadn_s32){ "loadn_s32", simd__intrin_loadn_s32, 0x0001, ((void*)0) } ,
9214
9215#line 508
9216SIMD_INTRIN_DEF(loadn_till_s32){ "loadn_till_s32", simd__intrin_loadn_till_s32, 0x0001, ((void
*)0) } ,
9217
9218#line 508
9219SIMD_INTRIN_DEF(loadn_tillz_s32){ "loadn_tillz_s32", simd__intrin_loadn_tillz_s32, 0x0001, ((
void*)0) } ,
9220
9221#line 508
9222SIMD_INTRIN_DEF(store_till_s32){ "store_till_s32", simd__intrin_store_till_s32, 0x0001, ((void
*)0) } ,
9223
9224#line 508
9225SIMD_INTRIN_DEF(storen_s32){ "storen_s32", simd__intrin_storen_s32, 0x0001, ((void*)0) }
,
9226
9227#line 508
9228SIMD_INTRIN_DEF(storen_till_s32){ "storen_till_s32", simd__intrin_storen_till_s32, 0x0001, ((
void*)0) } ,
9229
9230#endif // ncont_sup
9231
9232/***************************
9233 * Misc
9234 ***************************/
9235#line 519
9236#if 1
9237SIMD_INTRIN_DEF(reinterpret_u8_s32){ "reinterpret_u8_s32", simd__intrin_reinterpret_u8_s32, 0x0001
, ((void*)0) } ,
9238#endif // simd_sup2
9239
9240#line 519
9241#if 1
9242SIMD_INTRIN_DEF(reinterpret_s8_s32){ "reinterpret_s8_s32", simd__intrin_reinterpret_s8_s32, 0x0001
, ((void*)0) } ,
9243#endif // simd_sup2
9244
9245#line 519
9246#if 1
9247SIMD_INTRIN_DEF(reinterpret_u16_s32){ "reinterpret_u16_s32", simd__intrin_reinterpret_u16_s32, 0x0001
, ((void*)0) } ,
9248#endif // simd_sup2
9249
9250#line 519
9251#if 1
9252SIMD_INTRIN_DEF(reinterpret_s16_s32){ "reinterpret_s16_s32", simd__intrin_reinterpret_s16_s32, 0x0001
, ((void*)0) } ,
9253#endif // simd_sup2
9254
9255#line 519
9256#if 1
9257SIMD_INTRIN_DEF(reinterpret_u32_s32){ "reinterpret_u32_s32", simd__intrin_reinterpret_u32_s32, 0x0001
, ((void*)0) } ,
9258#endif // simd_sup2
9259
9260#line 519
9261#if 1
9262SIMD_INTRIN_DEF(reinterpret_s32_s32){ "reinterpret_s32_s32", simd__intrin_reinterpret_s32_s32, 0x0001
, ((void*)0) } ,
9263#endif // simd_sup2
9264
9265#line 519
9266#if 1
9267SIMD_INTRIN_DEF(reinterpret_u64_s32){ "reinterpret_u64_s32", simd__intrin_reinterpret_u64_s32, 0x0001
, ((void*)0) } ,
9268#endif // simd_sup2
9269
9270#line 519
9271#if 1
9272SIMD_INTRIN_DEF(reinterpret_s64_s32){ "reinterpret_s64_s32", simd__intrin_reinterpret_s64_s32, 0x0001
, ((void*)0) } ,
9273#endif // simd_sup2
9274
9275#line 519
9276#if 1
9277SIMD_INTRIN_DEF(reinterpret_f32_s32){ "reinterpret_f32_s32", simd__intrin_reinterpret_f32_s32, 0x0001
, ((void*)0) } ,
9278#endif // simd_sup2
9279
9280#line 519
9281#if NPY_SIMD_F641
9282SIMD_INTRIN_DEF(reinterpret_f64_s32){ "reinterpret_f64_s32", simd__intrin_reinterpret_f64_s32, 0x0001
, ((void*)0) } ,
9283#endif // simd_sup2
9284
9285
9286#line 527
9287SIMD_INTRIN_DEF(set_s32){ "set_s32", simd__intrin_set_s32, 0x0001, ((void*)0) } ,
9288
9289#line 527
9290SIMD_INTRIN_DEF(setf_s32){ "setf_s32", simd__intrin_setf_s32, 0x0001, ((void*)0) } ,
9291
9292#line 527
9293SIMD_INTRIN_DEF(setall_s32){ "setall_s32", simd__intrin_setall_s32, 0x0001, ((void*)0) }
,
9294
9295#line 527
9296SIMD_INTRIN_DEF(zero_s32){ "zero_s32", simd__intrin_zero_s32, 0x0001, ((void*)0) } ,
9297
9298#line 527
9299SIMD_INTRIN_DEF(select_s32){ "select_s32", simd__intrin_select_s32, 0x0001, ((void*)0) }
,
9300
9301
9302/***************************
9303 * Reorder
9304 ***************************/
9305#line 536
9306SIMD_INTRIN_DEF(combinel_s32){ "combinel_s32", simd__intrin_combinel_s32, 0x0001, ((void*)
0) } ,
9307
9308#line 536
9309SIMD_INTRIN_DEF(combineh_s32){ "combineh_s32", simd__intrin_combineh_s32, 0x0001, ((void*)
0) } ,
9310
9311#line 536
9312SIMD_INTRIN_DEF(combine_s32){ "combine_s32", simd__intrin_combine_s32, 0x0001, ((void*)0)
} ,
9313
9314#line 536
9315SIMD_INTRIN_DEF(zip_s32){ "zip_s32", simd__intrin_zip_s32, 0x0001, ((void*)0) } ,
9316
9317
9318#if 1
9319SIMD_INTRIN_DEF(rev64_s32){ "rev64_s32", simd__intrin_rev64_s32, 0x0001, ((void*)0) } ,
9320#endif
9321
9322/***************************
9323 * Operators
9324 ***************************/
9325#if 31 > 0
9326#line 550
9327SIMD_INTRIN_DEF(shl_s32){ "shl_s32", simd__intrin_shl_s32, 0x0001, ((void*)0) } ,
9328
9329#line 550
9330SIMD_INTRIN_DEF(shr_s32){ "shr_s32", simd__intrin_shr_s32, 0x0001, ((void*)0) } ,
9331
9332#line 550
9333SIMD_INTRIN_DEF(shli_s32){ "shli_s32", simd__intrin_shli_s32, 0x0001, ((void*)0) } ,
9334
9335#line 550
9336SIMD_INTRIN_DEF(shri_s32){ "shri_s32", simd__intrin_shri_s32, 0x0001, ((void*)0) } ,
9337
9338#endif // shl_imm
9339
9340#line 557
9341SIMD_INTRIN_DEF(and_s32){ "and_s32", simd__intrin_and_s32, 0x0001, ((void*)0) } ,
9342
9343#line 557
9344SIMD_INTRIN_DEF(or_s32){ "or_s32", simd__intrin_or_s32, 0x0001, ((void*)0) } ,
9345
9346#line 557
9347SIMD_INTRIN_DEF(xor_s32){ "xor_s32", simd__intrin_xor_s32, 0x0001, ((void*)0) } ,
9348
9349#line 557
9350SIMD_INTRIN_DEF(not_s32){ "not_s32", simd__intrin_not_s32, 0x0001, ((void*)0) } ,
9351
9352#line 557
9353SIMD_INTRIN_DEF(cmpeq_s32){ "cmpeq_s32", simd__intrin_cmpeq_s32, 0x0001, ((void*)0) } ,
9354
9355#line 557
9356SIMD_INTRIN_DEF(cmpneq_s32){ "cmpneq_s32", simd__intrin_cmpneq_s32, 0x0001, ((void*)0) }
,
9357
9358#line 557
9359SIMD_INTRIN_DEF(cmpgt_s32){ "cmpgt_s32", simd__intrin_cmpgt_s32, 0x0001, ((void*)0) } ,
9360
9361#line 557
9362SIMD_INTRIN_DEF(cmpge_s32){ "cmpge_s32", simd__intrin_cmpge_s32, 0x0001, ((void*)0) } ,
9363
9364#line 557
9365SIMD_INTRIN_DEF(cmplt_s32){ "cmplt_s32", simd__intrin_cmplt_s32, 0x0001, ((void*)0) } ,
9366
9367#line 557
9368SIMD_INTRIN_DEF(cmple_s32){ "cmple_s32", simd__intrin_cmple_s32, 0x0001, ((void*)0) } ,
9369
9370
9371/***************************
9372 * Conversion
9373 ***************************/
9374SIMD_INTRIN_DEF(cvt_s32_b32){ "cvt_s32_b32", simd__intrin_cvt_s32_b32, 0x0001, ((void*)0)
} ,
9375SIMD_INTRIN_DEF(cvt_b32_s32){ "cvt_b32_s32", simd__intrin_cvt_b32_s32, 0x0001, ((void*)0)
} ,
9376#if 0
9377SIMD_INTRIN_DEF(expand_s32_s32){ "expand_s32_s32", simd__intrin_expand_s32_s32, 0x0001, ((void
*)0) } ,
9378#endif // expand_sup
9379/***************************
9380 * Arithmetic
9381 ***************************/
9382#line 574
9383SIMD_INTRIN_DEF(add_s32){ "add_s32", simd__intrin_add_s32, 0x0001, ((void*)0) } ,
9384
9385#line 574
9386SIMD_INTRIN_DEF(sub_s32){ "sub_s32", simd__intrin_sub_s32, 0x0001, ((void*)0) } ,
9387
9388
9389#if 0
9390#line 581
9391SIMD_INTRIN_DEF(adds_s32){ "adds_s32", simd__intrin_adds_s32, 0x0001, ((void*)0) } ,
9392
9393#line 581
9394SIMD_INTRIN_DEF(subs_s32){ "subs_s32", simd__intrin_subs_s32, 0x0001, ((void*)0) } ,
9395
9396#endif // sat_sup
9397
9398#if 1
9399SIMD_INTRIN_DEF(mul_s32){ "mul_s32", simd__intrin_mul_s32, 0x0001, ((void*)0) } ,
9400#endif // mul_sup
9401
9402#if 0
9403SIMD_INTRIN_DEF(div_s32){ "div_s32", simd__intrin_div_s32, 0x0001, ((void*)0) } ,
9404#endif // div_sup
9405
9406#if 1
9407SIMD_INTRIN_DEF(divisor_s32){ "divisor_s32", simd__intrin_divisor_s32, 0x0001, ((void*)0)
} ,
9408SIMD_INTRIN_DEF(divc_s32){ "divc_s32", simd__intrin_divc_s32, 0x0001, ((void*)0) } ,
9409#endif // intdiv_sup
9410
9411#if 0
9412#line 602
9413SIMD_INTRIN_DEF(muladd_s32){ "muladd_s32", simd__intrin_muladd_s32, 0x0001, ((void*)0) }
,
9414
9415#line 602
9416SIMD_INTRIN_DEF(mulsub_s32){ "mulsub_s32", simd__intrin_mulsub_s32, 0x0001, ((void*)0) }
,
9417
9418#line 602
9419SIMD_INTRIN_DEF(nmuladd_s32){ "nmuladd_s32", simd__intrin_nmuladd_s32, 0x0001, ((void*)0)
} ,
9420
9421#line 602
9422SIMD_INTRIN_DEF(nmulsub_s32){ "nmulsub_s32", simd__intrin_nmulsub_s32, 0x0001, ((void*)0)
} ,
9423
9424#endif // fused_sup
9425
9426#if 0
9427SIMD_INTRIN_DEF(sum_s32){ "sum_s32", simd__intrin_sum_s32, 0x0001, ((void*)0) } ,
9428#endif // sum_sup
9429
9430#if 0
9431SIMD_INTRIN_DEF(sumup_s32){ "sumup_s32", simd__intrin_sumup_s32, 0x0001, ((void*)0) } ,
9432#endif // sumup_sup
9433/***************************
9434 * Math
9435 ***************************/
9436#if 0
9437#line 620
9438SIMD_INTRIN_DEF(sqrt_s32){ "sqrt_s32", simd__intrin_sqrt_s32, 0x0001, ((void*)0) } ,
9439
9440#line 620
9441SIMD_INTRIN_DEF(recip_s32){ "recip_s32", simd__intrin_recip_s32, 0x0001, ((void*)0) } ,
9442
9443#line 620
9444SIMD_INTRIN_DEF(abs_s32){ "abs_s32", simd__intrin_abs_s32, 0x0001, ((void*)0) } ,
9445
9446#line 620
9447SIMD_INTRIN_DEF(square_s32){ "square_s32", simd__intrin_square_s32, 0x0001, ((void*)0) }
,
9448
9449#endif
9450
9451#line 627
9452SIMD_INTRIN_DEF(max_s32){ "max_s32", simd__intrin_max_s32, 0x0001, ((void*)0) } ,
9453
9454#line 627
9455SIMD_INTRIN_DEF(min_s32){ "min_s32", simd__intrin_min_s32, 0x0001, ((void*)0) } ,
9456
9457
9458#if 0
9459#line 634
9460SIMD_INTRIN_DEF(maxp_s32){ "maxp_s32", simd__intrin_maxp_s32, 0x0001, ((void*)0) } ,
9461
9462#line 634
9463SIMD_INTRIN_DEF(minp_s32){ "minp_s32", simd__intrin_minp_s32, 0x0001, ((void*)0) } ,
9464
9465#endif
9466
9467/***************************
9468 * Mask operations
9469 ***************************/
9470#line 644
9471 SIMD_INTRIN_DEF(ifadd_s32){ "ifadd_s32", simd__intrin_ifadd_s32, 0x0001, ((void*)0) } ,
9472
9473#line 644
9474 SIMD_INTRIN_DEF(ifsub_s32){ "ifsub_s32", simd__intrin_ifsub_s32, 0x0001, ((void*)0) } ,
9475
9476
9477#endif // simd_sup
9478
9479#line 489
9480#if 1
9481
9482/***************************
9483 * Memory
9484 ***************************/
9485#line 497
9486SIMD_INTRIN_DEF(load_u64){ "load_u64", simd__intrin_load_u64, 0x0001, ((void*)0) } ,
9487
9488#line 497
9489SIMD_INTRIN_DEF(loada_u64){ "loada_u64", simd__intrin_loada_u64, 0x0001, ((void*)0) } ,
9490
9491#line 497
9492SIMD_INTRIN_DEF(loads_u64){ "loads_u64", simd__intrin_loads_u64, 0x0001, ((void*)0) } ,
9493
9494#line 497
9495SIMD_INTRIN_DEF(loadl_u64){ "loadl_u64", simd__intrin_loadl_u64, 0x0001, ((void*)0) } ,
9496
9497#line 497
9498SIMD_INTRIN_DEF(store_u64){ "store_u64", simd__intrin_store_u64, 0x0001, ((void*)0) } ,
9499
9500#line 497
9501SIMD_INTRIN_DEF(storea_u64){ "storea_u64", simd__intrin_storea_u64, 0x0001, ((void*)0) }
,
9502
9503#line 497
9504SIMD_INTRIN_DEF(stores_u64){ "stores_u64", simd__intrin_stores_u64, 0x0001, ((void*)0) }
,
9505
9506#line 497
9507SIMD_INTRIN_DEF(storel_u64){ "storel_u64", simd__intrin_storel_u64, 0x0001, ((void*)0) }
,
9508
9509#line 497
9510SIMD_INTRIN_DEF(storeh_u64){ "storeh_u64", simd__intrin_storeh_u64, 0x0001, ((void*)0) }
,
9511
9512
9513/****************************************
9514 * Non-contiguous/Partial Memory access
9515 ****************************************/
9516#if 1
9517#line 508
9518SIMD_INTRIN_DEF(load_till_u64){ "load_till_u64", simd__intrin_load_till_u64, 0x0001, ((void
*)0) } ,
9519
9520#line 508
9521SIMD_INTRIN_DEF(load_tillz_u64){ "load_tillz_u64", simd__intrin_load_tillz_u64, 0x0001, ((void
*)0) } ,
9522
9523#line 508
9524SIMD_INTRIN_DEF(loadn_u64){ "loadn_u64", simd__intrin_loadn_u64, 0x0001, ((void*)0) } ,
9525
9526#line 508
9527SIMD_INTRIN_DEF(loadn_till_u64){ "loadn_till_u64", simd__intrin_loadn_till_u64, 0x0001, ((void
*)0) } ,
9528
9529#line 508
9530SIMD_INTRIN_DEF(loadn_tillz_u64){ "loadn_tillz_u64", simd__intrin_loadn_tillz_u64, 0x0001, ((
void*)0) } ,
9531
9532#line 508
9533SIMD_INTRIN_DEF(store_till_u64){ "store_till_u64", simd__intrin_store_till_u64, 0x0001, ((void
*)0) } ,
9534
9535#line 508
9536SIMD_INTRIN_DEF(storen_u64){ "storen_u64", simd__intrin_storen_u64, 0x0001, ((void*)0) }
,
9537
9538#line 508
9539SIMD_INTRIN_DEF(storen_till_u64){ "storen_till_u64", simd__intrin_storen_till_u64, 0x0001, ((
void*)0) } ,
9540
9541#endif // ncont_sup
9542
9543/***************************
9544 * Misc
9545 ***************************/
9546#line 519
9547#if 1
9548SIMD_INTRIN_DEF(reinterpret_u8_u64){ "reinterpret_u8_u64", simd__intrin_reinterpret_u8_u64, 0x0001
, ((void*)0) } ,
9549#endif // simd_sup2
9550
9551#line 519
9552#if 1
9553SIMD_INTRIN_DEF(reinterpret_s8_u64){ "reinterpret_s8_u64", simd__intrin_reinterpret_s8_u64, 0x0001
, ((void*)0) } ,
9554#endif // simd_sup2
9555
9556#line 519
9557#if 1
9558SIMD_INTRIN_DEF(reinterpret_u16_u64){ "reinterpret_u16_u64", simd__intrin_reinterpret_u16_u64, 0x0001
, ((void*)0) } ,
9559#endif // simd_sup2
9560
9561#line 519
9562#if 1
9563SIMD_INTRIN_DEF(reinterpret_s16_u64){ "reinterpret_s16_u64", simd__intrin_reinterpret_s16_u64, 0x0001
, ((void*)0) } ,
9564#endif // simd_sup2
9565
9566#line 519
9567#if 1
9568SIMD_INTRIN_DEF(reinterpret_u32_u64){ "reinterpret_u32_u64", simd__intrin_reinterpret_u32_u64, 0x0001
, ((void*)0) } ,
9569#endif // simd_sup2
9570
9571#line 519
9572#if 1
9573SIMD_INTRIN_DEF(reinterpret_s32_u64){ "reinterpret_s32_u64", simd__intrin_reinterpret_s32_u64, 0x0001
, ((void*)0) } ,
9574#endif // simd_sup2
9575
9576#line 519
9577#if 1
9578SIMD_INTRIN_DEF(reinterpret_u64_u64){ "reinterpret_u64_u64", simd__intrin_reinterpret_u64_u64, 0x0001
, ((void*)0) } ,
9579#endif // simd_sup2
9580
9581#line 519
9582#if 1
9583SIMD_INTRIN_DEF(reinterpret_s64_u64){ "reinterpret_s64_u64", simd__intrin_reinterpret_s64_u64, 0x0001
, ((void*)0) } ,
9584#endif // simd_sup2
9585
9586#line 519
9587#if 1
9588SIMD_INTRIN_DEF(reinterpret_f32_u64){ "reinterpret_f32_u64", simd__intrin_reinterpret_f32_u64, 0x0001
, ((void*)0) } ,
9589#endif // simd_sup2
9590
9591#line 519
9592#if NPY_SIMD_F641
9593SIMD_INTRIN_DEF(reinterpret_f64_u64){ "reinterpret_f64_u64", simd__intrin_reinterpret_f64_u64, 0x0001
, ((void*)0) } ,
9594#endif // simd_sup2
9595
9596
9597#line 527
9598SIMD_INTRIN_DEF(set_u64){ "set_u64", simd__intrin_set_u64, 0x0001, ((void*)0) } ,
9599
9600#line 527
9601SIMD_INTRIN_DEF(setf_u64){ "setf_u64", simd__intrin_setf_u64, 0x0001, ((void*)0) } ,
9602
9603#line 527
9604SIMD_INTRIN_DEF(setall_u64){ "setall_u64", simd__intrin_setall_u64, 0x0001, ((void*)0) }
,
9605
9606#line 527
9607SIMD_INTRIN_DEF(zero_u64){ "zero_u64", simd__intrin_zero_u64, 0x0001, ((void*)0) } ,
9608
9609#line 527
9610SIMD_INTRIN_DEF(select_u64){ "select_u64", simd__intrin_select_u64, 0x0001, ((void*)0) }
,
9611
9612
9613/***************************
9614 * Reorder
9615 ***************************/
9616#line 536
9617SIMD_INTRIN_DEF(combinel_u64){ "combinel_u64", simd__intrin_combinel_u64, 0x0001, ((void*)
0) } ,
9618
9619#line 536
9620SIMD_INTRIN_DEF(combineh_u64){ "combineh_u64", simd__intrin_combineh_u64, 0x0001, ((void*)
0) } ,
9621
9622#line 536
9623SIMD_INTRIN_DEF(combine_u64){ "combine_u64", simd__intrin_combine_u64, 0x0001, ((void*)0)
} ,
9624
9625#line 536
9626SIMD_INTRIN_DEF(zip_u64){ "zip_u64", simd__intrin_zip_u64, 0x0001, ((void*)0) } ,
9627
9628
9629#if 0
9630SIMD_INTRIN_DEF(rev64_u64){ "rev64_u64", simd__intrin_rev64_u64, 0x0001, ((void*)0) } ,
9631#endif
9632
9633/***************************
9634 * Operators
9635 ***************************/
9636#if 63 > 0
9637#line 550
9638SIMD_INTRIN_DEF(shl_u64){ "shl_u64", simd__intrin_shl_u64, 0x0001, ((void*)0) } ,
9639
9640#line 550
9641SIMD_INTRIN_DEF(shr_u64){ "shr_u64", simd__intrin_shr_u64, 0x0001, ((void*)0) } ,
9642
9643#line 550
9644SIMD_INTRIN_DEF(shli_u64){ "shli_u64", simd__intrin_shli_u64, 0x0001, ((void*)0) } ,
9645
9646#line 550
9647SIMD_INTRIN_DEF(shri_u64){ "shri_u64", simd__intrin_shri_u64, 0x0001, ((void*)0) } ,
9648
9649#endif // shl_imm
9650
9651#line 557
9652SIMD_INTRIN_DEF(and_u64){ "and_u64", simd__intrin_and_u64, 0x0001, ((void*)0) } ,
9653
9654#line 557
9655SIMD_INTRIN_DEF(or_u64){ "or_u64", simd__intrin_or_u64, 0x0001, ((void*)0) } ,
9656
9657#line 557
9658SIMD_INTRIN_DEF(xor_u64){ "xor_u64", simd__intrin_xor_u64, 0x0001, ((void*)0) } ,
9659
9660#line 557
9661SIMD_INTRIN_DEF(not_u64){ "not_u64", simd__intrin_not_u64, 0x0001, ((void*)0) } ,
9662
9663#line 557
9664SIMD_INTRIN_DEF(cmpeq_u64){ "cmpeq_u64", simd__intrin_cmpeq_u64, 0x0001, ((void*)0) } ,
9665
9666#line 557
9667SIMD_INTRIN_DEF(cmpneq_u64){ "cmpneq_u64", simd__intrin_cmpneq_u64, 0x0001, ((void*)0) }
,
9668
9669#line 557
9670SIMD_INTRIN_DEF(cmpgt_u64){ "cmpgt_u64", simd__intrin_cmpgt_u64, 0x0001, ((void*)0) } ,
9671
9672#line 557
9673SIMD_INTRIN_DEF(cmpge_u64){ "cmpge_u64", simd__intrin_cmpge_u64, 0x0001, ((void*)0) } ,
9674
9675#line 557
9676SIMD_INTRIN_DEF(cmplt_u64){ "cmplt_u64", simd__intrin_cmplt_u64, 0x0001, ((void*)0) } ,
9677
9678#line 557
9679SIMD_INTRIN_DEF(cmple_u64){ "cmple_u64", simd__intrin_cmple_u64, 0x0001, ((void*)0) } ,
9680
9681
9682/***************************
9683 * Conversion
9684 ***************************/
9685SIMD_INTRIN_DEF(cvt_u64_b64){ "cvt_u64_b64", simd__intrin_cvt_u64_b64, 0x0001, ((void*)0)
} ,
9686SIMD_INTRIN_DEF(cvt_b64_u64){ "cvt_b64_u64", simd__intrin_cvt_b64_u64, 0x0001, ((void*)0)
} ,
9687#if 0
9688SIMD_INTRIN_DEF(expand_u64_u64){ "expand_u64_u64", simd__intrin_expand_u64_u64, 0x0001, ((void
*)0) } ,
9689#endif // expand_sup
9690/***************************
9691 * Arithmetic
9692 ***************************/
9693#line 574
9694SIMD_INTRIN_DEF(add_u64){ "add_u64", simd__intrin_add_u64, 0x0001, ((void*)0) } ,
9695
9696#line 574
9697SIMD_INTRIN_DEF(sub_u64){ "sub_u64", simd__intrin_sub_u64, 0x0001, ((void*)0) } ,
9698
9699
9700#if 0
9701#line 581
9702SIMD_INTRIN_DEF(adds_u64){ "adds_u64", simd__intrin_adds_u64, 0x0001, ((void*)0) } ,
9703
9704#line 581
9705SIMD_INTRIN_DEF(subs_u64){ "subs_u64", simd__intrin_subs_u64, 0x0001, ((void*)0) } ,
9706
9707#endif // sat_sup
9708
9709#if 0
9710SIMD_INTRIN_DEF(mul_u64){ "mul_u64", simd__intrin_mul_u64, 0x0001, ((void*)0) } ,
9711#endif // mul_sup
9712
9713#if 0
9714SIMD_INTRIN_DEF(div_u64){ "div_u64", simd__intrin_div_u64, 0x0001, ((void*)0) } ,
9715#endif // div_sup
9716
9717#if 1
9718SIMD_INTRIN_DEF(divisor_u64){ "divisor_u64", simd__intrin_divisor_u64, 0x0001, ((void*)0)
} ,
9719SIMD_INTRIN_DEF(divc_u64){ "divc_u64", simd__intrin_divc_u64, 0x0001, ((void*)0) } ,
9720#endif // intdiv_sup
9721
9722#if 0
9723#line 602
9724SIMD_INTRIN_DEF(muladd_u64){ "muladd_u64", simd__intrin_muladd_u64, 0x0001, ((void*)0) }
,
9725
9726#line 602
9727SIMD_INTRIN_DEF(mulsub_u64){ "mulsub_u64", simd__intrin_mulsub_u64, 0x0001, ((void*)0) }
,
9728
9729#line 602
9730SIMD_INTRIN_DEF(nmuladd_u64){ "nmuladd_u64", simd__intrin_nmuladd_u64, 0x0001, ((void*)0)
} ,
9731
9732#line 602
9733SIMD_INTRIN_DEF(nmulsub_u64){ "nmulsub_u64", simd__intrin_nmulsub_u64, 0x0001, ((void*)0)
} ,
9734
9735#endif // fused_sup
9736
9737#if 1
9738SIMD_INTRIN_DEF(sum_u64){ "sum_u64", simd__intrin_sum_u64, 0x0001, ((void*)0) } ,
9739#endif // sum_sup
9740
9741#if 0
9742SIMD_INTRIN_DEF(sumup_u64){ "sumup_u64", simd__intrin_sumup_u64, 0x0001, ((void*)0) } ,
9743#endif // sumup_sup
9744/***************************
9745 * Math
9746 ***************************/
9747#if 0
9748#line 620
9749SIMD_INTRIN_DEF(sqrt_u64){ "sqrt_u64", simd__intrin_sqrt_u64, 0x0001, ((void*)0) } ,
9750
9751#line 620
9752SIMD_INTRIN_DEF(recip_u64){ "recip_u64", simd__intrin_recip_u64, 0x0001, ((void*)0) } ,
9753
9754#line 620
9755SIMD_INTRIN_DEF(abs_u64){ "abs_u64", simd__intrin_abs_u64, 0x0001, ((void*)0) } ,
9756
9757#line 620
9758SIMD_INTRIN_DEF(square_u64){ "square_u64", simd__intrin_square_u64, 0x0001, ((void*)0) }
,
9759
9760#endif
9761
9762#line 627
9763SIMD_INTRIN_DEF(max_u64){ "max_u64", simd__intrin_max_u64, 0x0001, ((void*)0) } ,
9764
9765#line 627
9766SIMD_INTRIN_DEF(min_u64){ "min_u64", simd__intrin_min_u64, 0x0001, ((void*)0) } ,
9767
9768
9769#if 0
9770#line 634
9771SIMD_INTRIN_DEF(maxp_u64){ "maxp_u64", simd__intrin_maxp_u64, 0x0001, ((void*)0) } ,
9772
9773#line 634
9774SIMD_INTRIN_DEF(minp_u64){ "minp_u64", simd__intrin_minp_u64, 0x0001, ((void*)0) } ,
9775
9776#endif
9777
9778/***************************
9779 * Mask operations
9780 ***************************/
9781#line 644
9782 SIMD_INTRIN_DEF(ifadd_u64){ "ifadd_u64", simd__intrin_ifadd_u64, 0x0001, ((void*)0) } ,
9783
9784#line 644
9785 SIMD_INTRIN_DEF(ifsub_u64){ "ifsub_u64", simd__intrin_ifsub_u64, 0x0001, ((void*)0) } ,
9786
9787
9788#endif // simd_sup
9789
9790#line 489
9791#if 1
9792
9793/***************************
9794 * Memory
9795 ***************************/
9796#line 497
9797SIMD_INTRIN_DEF(load_s64){ "load_s64", simd__intrin_load_s64, 0x0001, ((void*)0) } ,
9798
9799#line 497
9800SIMD_INTRIN_DEF(loada_s64){ "loada_s64", simd__intrin_loada_s64, 0x0001, ((void*)0) } ,
9801
9802#line 497
9803SIMD_INTRIN_DEF(loads_s64){ "loads_s64", simd__intrin_loads_s64, 0x0001, ((void*)0) } ,
9804
9805#line 497
9806SIMD_INTRIN_DEF(loadl_s64){ "loadl_s64", simd__intrin_loadl_s64, 0x0001, ((void*)0) } ,
9807
9808#line 497
9809SIMD_INTRIN_DEF(store_s64){ "store_s64", simd__intrin_store_s64, 0x0001, ((void*)0) } ,
9810
9811#line 497
9812SIMD_INTRIN_DEF(storea_s64){ "storea_s64", simd__intrin_storea_s64, 0x0001, ((void*)0) }
,
9813
9814#line 497
9815SIMD_INTRIN_DEF(stores_s64){ "stores_s64", simd__intrin_stores_s64, 0x0001, ((void*)0) }
,
9816
9817#line 497
9818SIMD_INTRIN_DEF(storel_s64){ "storel_s64", simd__intrin_storel_s64, 0x0001, ((void*)0) }
,
9819
9820#line 497
9821SIMD_INTRIN_DEF(storeh_s64){ "storeh_s64", simd__intrin_storeh_s64, 0x0001, ((void*)0) }
,
9822
9823
9824/****************************************
9825 * Non-contiguous/Partial Memory access
9826 ****************************************/
9827#if 1
9828#line 508
9829SIMD_INTRIN_DEF(load_till_s64){ "load_till_s64", simd__intrin_load_till_s64, 0x0001, ((void
*)0) } ,
9830
9831#line 508
9832SIMD_INTRIN_DEF(load_tillz_s64){ "load_tillz_s64", simd__intrin_load_tillz_s64, 0x0001, ((void
*)0) } ,
9833
9834#line 508
9835SIMD_INTRIN_DEF(loadn_s64){ "loadn_s64", simd__intrin_loadn_s64, 0x0001, ((void*)0) } ,
9836
9837#line 508
9838SIMD_INTRIN_DEF(loadn_till_s64){ "loadn_till_s64", simd__intrin_loadn_till_s64, 0x0001, ((void
*)0) } ,
9839
9840#line 508
9841SIMD_INTRIN_DEF(loadn_tillz_s64){ "loadn_tillz_s64", simd__intrin_loadn_tillz_s64, 0x0001, ((
void*)0) } ,
9842
9843#line 508
9844SIMD_INTRIN_DEF(store_till_s64){ "store_till_s64", simd__intrin_store_till_s64, 0x0001, ((void
*)0) } ,
9845
9846#line 508
9847SIMD_INTRIN_DEF(storen_s64){ "storen_s64", simd__intrin_storen_s64, 0x0001, ((void*)0) }
,
9848
9849#line 508
9850SIMD_INTRIN_DEF(storen_till_s64){ "storen_till_s64", simd__intrin_storen_till_s64, 0x0001, ((
void*)0) } ,
9851
9852#endif // ncont_sup
9853
9854/***************************
9855 * Misc
9856 ***************************/
9857#line 519
9858#if 1
9859SIMD_INTRIN_DEF(reinterpret_u8_s64){ "reinterpret_u8_s64", simd__intrin_reinterpret_u8_s64, 0x0001
, ((void*)0) } ,
9860#endif // simd_sup2
9861
9862#line 519
9863#if 1
9864SIMD_INTRIN_DEF(reinterpret_s8_s64){ "reinterpret_s8_s64", simd__intrin_reinterpret_s8_s64, 0x0001
, ((void*)0) } ,
9865#endif // simd_sup2
9866
9867#line 519
9868#if 1
9869SIMD_INTRIN_DEF(reinterpret_u16_s64){ "reinterpret_u16_s64", simd__intrin_reinterpret_u16_s64, 0x0001
, ((void*)0) } ,
9870#endif // simd_sup2
9871
9872#line 519
9873#if 1
9874SIMD_INTRIN_DEF(reinterpret_s16_s64){ "reinterpret_s16_s64", simd__intrin_reinterpret_s16_s64, 0x0001
, ((void*)0) } ,
9875#endif // simd_sup2
9876
9877#line 519
9878#if 1
9879SIMD_INTRIN_DEF(reinterpret_u32_s64){ "reinterpret_u32_s64", simd__intrin_reinterpret_u32_s64, 0x0001
, ((void*)0) } ,
9880#endif // simd_sup2
9881
9882#line 519
9883#if 1
9884SIMD_INTRIN_DEF(reinterpret_s32_s64){ "reinterpret_s32_s64", simd__intrin_reinterpret_s32_s64, 0x0001
, ((void*)0) } ,
9885#endif // simd_sup2
9886
9887#line 519
9888#if 1
9889SIMD_INTRIN_DEF(reinterpret_u64_s64){ "reinterpret_u64_s64", simd__intrin_reinterpret_u64_s64, 0x0001
, ((void*)0) } ,
9890#endif // simd_sup2
9891
9892#line 519
9893#if 1
9894SIMD_INTRIN_DEF(reinterpret_s64_s64){ "reinterpret_s64_s64", simd__intrin_reinterpret_s64_s64, 0x0001
, ((void*)0) } ,
9895#endif // simd_sup2
9896
9897#line 519
9898#if 1
9899SIMD_INTRIN_DEF(reinterpret_f32_s64){ "reinterpret_f32_s64", simd__intrin_reinterpret_f32_s64, 0x0001
, ((void*)0) } ,
9900#endif // simd_sup2
9901
9902#line 519
9903#if NPY_SIMD_F641
9904SIMD_INTRIN_DEF(reinterpret_f64_s64){ "reinterpret_f64_s64", simd__intrin_reinterpret_f64_s64, 0x0001
, ((void*)0) } ,
9905#endif // simd_sup2
9906
9907
9908#line 527
9909SIMD_INTRIN_DEF(set_s64){ "set_s64", simd__intrin_set_s64, 0x0001, ((void*)0) } ,
9910
9911#line 527
9912SIMD_INTRIN_DEF(setf_s64){ "setf_s64", simd__intrin_setf_s64, 0x0001, ((void*)0) } ,
9913
9914#line 527
9915SIMD_INTRIN_DEF(setall_s64){ "setall_s64", simd__intrin_setall_s64, 0x0001, ((void*)0) }
,
9916
9917#line 527
9918SIMD_INTRIN_DEF(zero_s64){ "zero_s64", simd__intrin_zero_s64, 0x0001, ((void*)0) } ,
9919
9920#line 527
9921SIMD_INTRIN_DEF(select_s64){ "select_s64", simd__intrin_select_s64, 0x0001, ((void*)0) }
,
9922
9923
9924/***************************
9925 * Reorder
9926 ***************************/
9927#line 536
9928SIMD_INTRIN_DEF(combinel_s64){ "combinel_s64", simd__intrin_combinel_s64, 0x0001, ((void*)
0) } ,
9929
9930#line 536
9931SIMD_INTRIN_DEF(combineh_s64){ "combineh_s64", simd__intrin_combineh_s64, 0x0001, ((void*)
0) } ,
9932
9933#line 536
9934SIMD_INTRIN_DEF(combine_s64){ "combine_s64", simd__intrin_combine_s64, 0x0001, ((void*)0)
} ,
9935
9936#line 536
9937SIMD_INTRIN_DEF(zip_s64){ "zip_s64", simd__intrin_zip_s64, 0x0001, ((void*)0) } ,
9938
9939
9940#if 0
9941SIMD_INTRIN_DEF(rev64_s64){ "rev64_s64", simd__intrin_rev64_s64, 0x0001, ((void*)0) } ,
9942#endif
9943
9944/***************************
9945 * Operators
9946 ***************************/
9947#if 63 > 0
9948#line 550
9949SIMD_INTRIN_DEF(shl_s64){ "shl_s64", simd__intrin_shl_s64, 0x0001, ((void*)0) } ,
9950
9951#line 550
9952SIMD_INTRIN_DEF(shr_s64){ "shr_s64", simd__intrin_shr_s64, 0x0001, ((void*)0) } ,
9953
9954#line 550
9955SIMD_INTRIN_DEF(shli_s64){ "shli_s64", simd__intrin_shli_s64, 0x0001, ((void*)0) } ,
9956
9957#line 550
9958SIMD_INTRIN_DEF(shri_s64){ "shri_s64", simd__intrin_shri_s64, 0x0001, ((void*)0) } ,
9959
9960#endif // shl_imm
9961
9962#line 557
9963SIMD_INTRIN_DEF(and_s64){ "and_s64", simd__intrin_and_s64, 0x0001, ((void*)0) } ,
9964
9965#line 557
9966SIMD_INTRIN_DEF(or_s64){ "or_s64", simd__intrin_or_s64, 0x0001, ((void*)0) } ,
9967
9968#line 557
9969SIMD_INTRIN_DEF(xor_s64){ "xor_s64", simd__intrin_xor_s64, 0x0001, ((void*)0) } ,
9970
9971#line 557
9972SIMD_INTRIN_DEF(not_s64){ "not_s64", simd__intrin_not_s64, 0x0001, ((void*)0) } ,
9973
9974#line 557
9975SIMD_INTRIN_DEF(cmpeq_s64){ "cmpeq_s64", simd__intrin_cmpeq_s64, 0x0001, ((void*)0) } ,
9976
9977#line 557
9978SIMD_INTRIN_DEF(cmpneq_s64){ "cmpneq_s64", simd__intrin_cmpneq_s64, 0x0001, ((void*)0) }
,
9979
9980#line 557
9981SIMD_INTRIN_DEF(cmpgt_s64){ "cmpgt_s64", simd__intrin_cmpgt_s64, 0x0001, ((void*)0) } ,
9982
9983#line 557
9984SIMD_INTRIN_DEF(cmpge_s64){ "cmpge_s64", simd__intrin_cmpge_s64, 0x0001, ((void*)0) } ,
9985
9986#line 557
9987SIMD_INTRIN_DEF(cmplt_s64){ "cmplt_s64", simd__intrin_cmplt_s64, 0x0001, ((void*)0) } ,
9988
9989#line 557
9990SIMD_INTRIN_DEF(cmple_s64){ "cmple_s64", simd__intrin_cmple_s64, 0x0001, ((void*)0) } ,
9991
9992
9993/***************************
9994 * Conversion
9995 ***************************/
9996SIMD_INTRIN_DEF(cvt_s64_b64){ "cvt_s64_b64", simd__intrin_cvt_s64_b64, 0x0001, ((void*)0)
} ,
9997SIMD_INTRIN_DEF(cvt_b64_s64){ "cvt_b64_s64", simd__intrin_cvt_b64_s64, 0x0001, ((void*)0)
} ,
9998#if 0
9999SIMD_INTRIN_DEF(expand_s64_s64){ "expand_s64_s64", simd__intrin_expand_s64_s64, 0x0001, ((void
*)0) } ,
10000#endif // expand_sup
10001/***************************
10002 * Arithmetic
10003 ***************************/
10004#line 574
10005SIMD_INTRIN_DEF(add_s64){ "add_s64", simd__intrin_add_s64, 0x0001, ((void*)0) } ,
10006
10007#line 574
10008SIMD_INTRIN_DEF(sub_s64){ "sub_s64", simd__intrin_sub_s64, 0x0001, ((void*)0) } ,
10009
10010
10011#if 0
10012#line 581
10013SIMD_INTRIN_DEF(adds_s64){ "adds_s64", simd__intrin_adds_s64, 0x0001, ((void*)0) } ,
10014
10015#line 581
10016SIMD_INTRIN_DEF(subs_s64){ "subs_s64", simd__intrin_subs_s64, 0x0001, ((void*)0) } ,
10017
10018#endif // sat_sup
10019
10020#if 0
10021SIMD_INTRIN_DEF(mul_s64){ "mul_s64", simd__intrin_mul_s64, 0x0001, ((void*)0) } ,
10022#endif // mul_sup
10023
10024#if 0
10025SIMD_INTRIN_DEF(div_s64){ "div_s64", simd__intrin_div_s64, 0x0001, ((void*)0) } ,
10026#endif // div_sup
10027
10028#if 1
10029SIMD_INTRIN_DEF(divisor_s64){ "divisor_s64", simd__intrin_divisor_s64, 0x0001, ((void*)0)
} ,
10030SIMD_INTRIN_DEF(divc_s64){ "divc_s64", simd__intrin_divc_s64, 0x0001, ((void*)0) } ,
10031#endif // intdiv_sup
10032
10033#if 0
10034#line 602
10035SIMD_INTRIN_DEF(muladd_s64){ "muladd_s64", simd__intrin_muladd_s64, 0x0001, ((void*)0) }
,
10036
10037#line 602
10038SIMD_INTRIN_DEF(mulsub_s64){ "mulsub_s64", simd__intrin_mulsub_s64, 0x0001, ((void*)0) }
,
10039
10040#line 602
10041SIMD_INTRIN_DEF(nmuladd_s64){ "nmuladd_s64", simd__intrin_nmuladd_s64, 0x0001, ((void*)0)
} ,
10042
10043#line 602
10044SIMD_INTRIN_DEF(nmulsub_s64){ "nmulsub_s64", simd__intrin_nmulsub_s64, 0x0001, ((void*)0)
} ,
10045
10046#endif // fused_sup
10047
10048#if 0
10049SIMD_INTRIN_DEF(sum_s64){ "sum_s64", simd__intrin_sum_s64, 0x0001, ((void*)0) } ,
10050#endif // sum_sup
10051
10052#if 0
10053SIMD_INTRIN_DEF(sumup_s64){ "sumup_s64", simd__intrin_sumup_s64, 0x0001, ((void*)0) } ,
10054#endif // sumup_sup
10055/***************************
10056 * Math
10057 ***************************/
10058#if 0
10059#line 620
10060SIMD_INTRIN_DEF(sqrt_s64){ "sqrt_s64", simd__intrin_sqrt_s64, 0x0001, ((void*)0) } ,
10061
10062#line 620
10063SIMD_INTRIN_DEF(recip_s64){ "recip_s64", simd__intrin_recip_s64, 0x0001, ((void*)0) } ,
10064
10065#line 620
10066SIMD_INTRIN_DEF(abs_s64){ "abs_s64", simd__intrin_abs_s64, 0x0001, ((void*)0) } ,
10067
10068#line 620
10069SIMD_INTRIN_DEF(square_s64){ "square_s64", simd__intrin_square_s64, 0x0001, ((void*)0) }
,
10070
10071#endif
10072
10073#line 627
10074SIMD_INTRIN_DEF(max_s64){ "max_s64", simd__intrin_max_s64, 0x0001, ((void*)0) } ,
10075
10076#line 627
10077SIMD_INTRIN_DEF(min_s64){ "min_s64", simd__intrin_min_s64, 0x0001, ((void*)0) } ,
10078
10079
10080#if 0
10081#line 634
10082SIMD_INTRIN_DEF(maxp_s64){ "maxp_s64", simd__intrin_maxp_s64, 0x0001, ((void*)0) } ,
10083
10084#line 634
10085SIMD_INTRIN_DEF(minp_s64){ "minp_s64", simd__intrin_minp_s64, 0x0001, ((void*)0) } ,
10086
10087#endif
10088
10089/***************************
10090 * Mask operations
10091 ***************************/
10092#line 644
10093 SIMD_INTRIN_DEF(ifadd_s64){ "ifadd_s64", simd__intrin_ifadd_s64, 0x0001, ((void*)0) } ,
10094
10095#line 644
10096 SIMD_INTRIN_DEF(ifsub_s64){ "ifsub_s64", simd__intrin_ifsub_s64, 0x0001, ((void*)0) } ,
10097
10098
10099#endif // simd_sup
10100
10101#line 489
10102#if 1
10103
10104/***************************
10105 * Memory
10106 ***************************/
10107#line 497
10108SIMD_INTRIN_DEF(load_f32){ "load_f32", simd__intrin_load_f32, 0x0001, ((void*)0) } ,
10109
10110#line 497
10111SIMD_INTRIN_DEF(loada_f32){ "loada_f32", simd__intrin_loada_f32, 0x0001, ((void*)0) } ,
10112
10113#line 497
10114SIMD_INTRIN_DEF(loads_f32){ "loads_f32", simd__intrin_loads_f32, 0x0001, ((void*)0) } ,
10115
10116#line 497
10117SIMD_INTRIN_DEF(loadl_f32){ "loadl_f32", simd__intrin_loadl_f32, 0x0001, ((void*)0) } ,
10118
10119#line 497
10120SIMD_INTRIN_DEF(store_f32){ "store_f32", simd__intrin_store_f32, 0x0001, ((void*)0) } ,
10121
10122#line 497
10123SIMD_INTRIN_DEF(storea_f32){ "storea_f32", simd__intrin_storea_f32, 0x0001, ((void*)0) }
,
10124
10125#line 497
10126SIMD_INTRIN_DEF(stores_f32){ "stores_f32", simd__intrin_stores_f32, 0x0001, ((void*)0) }
,
10127
10128#line 497
10129SIMD_INTRIN_DEF(storel_f32){ "storel_f32", simd__intrin_storel_f32, 0x0001, ((void*)0) }
,
10130
10131#line 497
10132SIMD_INTRIN_DEF(storeh_f32){ "storeh_f32", simd__intrin_storeh_f32, 0x0001, ((void*)0) }
,
10133
10134
10135/****************************************
10136 * Non-contiguous/Partial Memory access
10137 ****************************************/
10138#if 1
10139#line 508
10140SIMD_INTRIN_DEF(load_till_f32){ "load_till_f32", simd__intrin_load_till_f32, 0x0001, ((void
*)0) } ,
10141
10142#line 508
10143SIMD_INTRIN_DEF(load_tillz_f32){ "load_tillz_f32", simd__intrin_load_tillz_f32, 0x0001, ((void
*)0) } ,
10144
10145#line 508
10146SIMD_INTRIN_DEF(loadn_f32){ "loadn_f32", simd__intrin_loadn_f32, 0x0001, ((void*)0) } ,
10147
10148#line 508
10149SIMD_INTRIN_DEF(loadn_till_f32){ "loadn_till_f32", simd__intrin_loadn_till_f32, 0x0001, ((void
*)0) } ,
10150
10151#line 508
10152SIMD_INTRIN_DEF(loadn_tillz_f32){ "loadn_tillz_f32", simd__intrin_loadn_tillz_f32, 0x0001, ((
void*)0) } ,
10153
10154#line 508
10155SIMD_INTRIN_DEF(store_till_f32){ "store_till_f32", simd__intrin_store_till_f32, 0x0001, ((void
*)0) } ,
10156
10157#line 508
10158SIMD_INTRIN_DEF(storen_f32){ "storen_f32", simd__intrin_storen_f32, 0x0001, ((void*)0) }
,
10159
10160#line 508
10161SIMD_INTRIN_DEF(storen_till_f32){ "storen_till_f32", simd__intrin_storen_till_f32, 0x0001, ((
void*)0) } ,
10162
10163#endif // ncont_sup
10164
10165/***************************
10166 * Misc
10167 ***************************/
10168#line 519
10169#if 1
10170SIMD_INTRIN_DEF(reinterpret_u8_f32){ "reinterpret_u8_f32", simd__intrin_reinterpret_u8_f32, 0x0001
, ((void*)0) } ,
10171#endif // simd_sup2
10172
10173#line 519
10174#if 1
10175SIMD_INTRIN_DEF(reinterpret_s8_f32){ "reinterpret_s8_f32", simd__intrin_reinterpret_s8_f32, 0x0001
, ((void*)0) } ,
10176#endif // simd_sup2
10177
10178#line 519
10179#if 1
10180SIMD_INTRIN_DEF(reinterpret_u16_f32){ "reinterpret_u16_f32", simd__intrin_reinterpret_u16_f32, 0x0001
, ((void*)0) } ,
10181#endif // simd_sup2
10182
10183#line 519
10184#if 1
10185SIMD_INTRIN_DEF(reinterpret_s16_f32){ "reinterpret_s16_f32", simd__intrin_reinterpret_s16_f32, 0x0001
, ((void*)0) } ,
10186#endif // simd_sup2
10187
10188#line 519
10189#if 1
10190SIMD_INTRIN_DEF(reinterpret_u32_f32){ "reinterpret_u32_f32", simd__intrin_reinterpret_u32_f32, 0x0001
, ((void*)0) } ,
10191#endif // simd_sup2
10192
10193#line 519
10194#if 1
10195SIMD_INTRIN_DEF(reinterpret_s32_f32){ "reinterpret_s32_f32", simd__intrin_reinterpret_s32_f32, 0x0001
, ((void*)0) } ,
10196#endif // simd_sup2
10197
10198#line 519
10199#if 1
10200SIMD_INTRIN_DEF(reinterpret_u64_f32){ "reinterpret_u64_f32", simd__intrin_reinterpret_u64_f32, 0x0001
, ((void*)0) } ,
10201#endif // simd_sup2
10202
10203#line 519
10204#if 1
10205SIMD_INTRIN_DEF(reinterpret_s64_f32){ "reinterpret_s64_f32", simd__intrin_reinterpret_s64_f32, 0x0001
, ((void*)0) } ,
10206#endif // simd_sup2
10207
10208#line 519
10209#if 1
10210SIMD_INTRIN_DEF(reinterpret_f32_f32){ "reinterpret_f32_f32", simd__intrin_reinterpret_f32_f32, 0x0001
, ((void*)0) } ,
10211#endif // simd_sup2
10212
10213#line 519
10214#if NPY_SIMD_F641
10215SIMD_INTRIN_DEF(reinterpret_f64_f32){ "reinterpret_f64_f32", simd__intrin_reinterpret_f64_f32, 0x0001
, ((void*)0) } ,
10216#endif // simd_sup2
10217
10218
10219#line 527
10220SIMD_INTRIN_DEF(set_f32){ "set_f32", simd__intrin_set_f32, 0x0001, ((void*)0) } ,
10221
10222#line 527
10223SIMD_INTRIN_DEF(setf_f32){ "setf_f32", simd__intrin_setf_f32, 0x0001, ((void*)0) } ,
10224
10225#line 527
10226SIMD_INTRIN_DEF(setall_f32){ "setall_f32", simd__intrin_setall_f32, 0x0001, ((void*)0) }
,
10227
10228#line 527
10229SIMD_INTRIN_DEF(zero_f32){ "zero_f32", simd__intrin_zero_f32, 0x0001, ((void*)0) } ,
10230
10231#line 527
10232SIMD_INTRIN_DEF(select_f32){ "select_f32", simd__intrin_select_f32, 0x0001, ((void*)0) }
,
10233
10234
10235/***************************
10236 * Reorder
10237 ***************************/
10238#line 536
10239SIMD_INTRIN_DEF(combinel_f32){ "combinel_f32", simd__intrin_combinel_f32, 0x0001, ((void*)
0) } ,
10240
10241#line 536
10242SIMD_INTRIN_DEF(combineh_f32){ "combineh_f32", simd__intrin_combineh_f32, 0x0001, ((void*)
0) } ,
10243
10244#line 536
10245SIMD_INTRIN_DEF(combine_f32){ "combine_f32", simd__intrin_combine_f32, 0x0001, ((void*)0)
} ,
10246
10247#line 536
10248SIMD_INTRIN_DEF(zip_f32){ "zip_f32", simd__intrin_zip_f32, 0x0001, ((void*)0) } ,
10249
10250
10251#if 1
10252SIMD_INTRIN_DEF(rev64_f32){ "rev64_f32", simd__intrin_rev64_f32, 0x0001, ((void*)0) } ,
10253#endif
10254
10255/***************************
10256 * Operators
10257 ***************************/
10258#if 0 > 0
10259#line 550
10260SIMD_INTRIN_DEF(shl_f32){ "shl_f32", simd__intrin_shl_f32, 0x0001, ((void*)0) } ,
10261
10262#line 550
10263SIMD_INTRIN_DEF(shr_f32){ "shr_f32", simd__intrin_shr_f32, 0x0001, ((void*)0) } ,
10264
10265#line 550
10266SIMD_INTRIN_DEF(shli_f32){ "shli_f32", simd__intrin_shli_f32, 0x0001, ((void*)0) } ,
10267
10268#line 550
10269SIMD_INTRIN_DEF(shri_f32){ "shri_f32", simd__intrin_shri_f32, 0x0001, ((void*)0) } ,
10270
10271#endif // shl_imm
10272
10273#line 557
10274SIMD_INTRIN_DEF(and_f32){ "and_f32", simd__intrin_and_f32, 0x0001, ((void*)0) } ,
10275
10276#line 557
10277SIMD_INTRIN_DEF(or_f32){ "or_f32", simd__intrin_or_f32, 0x0001, ((void*)0) } ,
10278
10279#line 557
10280SIMD_INTRIN_DEF(xor_f32){ "xor_f32", simd__intrin_xor_f32, 0x0001, ((void*)0) } ,
10281
10282#line 557
10283SIMD_INTRIN_DEF(not_f32){ "not_f32", simd__intrin_not_f32, 0x0001, ((void*)0) } ,
10284
10285#line 557
10286SIMD_INTRIN_DEF(cmpeq_f32){ "cmpeq_f32", simd__intrin_cmpeq_f32, 0x0001, ((void*)0) } ,
10287
10288#line 557
10289SIMD_INTRIN_DEF(cmpneq_f32){ "cmpneq_f32", simd__intrin_cmpneq_f32, 0x0001, ((void*)0) }
,
10290
10291#line 557
10292SIMD_INTRIN_DEF(cmpgt_f32){ "cmpgt_f32", simd__intrin_cmpgt_f32, 0x0001, ((void*)0) } ,
10293
10294#line 557
10295SIMD_INTRIN_DEF(cmpge_f32){ "cmpge_f32", simd__intrin_cmpge_f32, 0x0001, ((void*)0) } ,
10296
10297#line 557
10298SIMD_INTRIN_DEF(cmplt_f32){ "cmplt_f32", simd__intrin_cmplt_f32, 0x0001, ((void*)0) } ,
10299
10300#line 557
10301SIMD_INTRIN_DEF(cmple_f32){ "cmple_f32", simd__intrin_cmple_f32, 0x0001, ((void*)0) } ,
10302
10303
10304/***************************
10305 * Conversion
10306 ***************************/
10307SIMD_INTRIN_DEF(cvt_f32_b32){ "cvt_f32_b32", simd__intrin_cvt_f32_b32, 0x0001, ((void*)0)
} ,
10308SIMD_INTRIN_DEF(cvt_b32_f32){ "cvt_b32_f32", simd__intrin_cvt_b32_f32, 0x0001, ((void*)0)
} ,
10309#if 0
10310SIMD_INTRIN_DEF(expand_f32_f32){ "expand_f32_f32", simd__intrin_expand_f32_f32, 0x0001, ((void
*)0) } ,
10311#endif // expand_sup
10312/***************************
10313 * Arithmetic
10314 ***************************/
10315#line 574
10316SIMD_INTRIN_DEF(add_f32){ "add_f32", simd__intrin_add_f32, 0x0001, ((void*)0) } ,
10317
10318#line 574
10319SIMD_INTRIN_DEF(sub_f32){ "sub_f32", simd__intrin_sub_f32, 0x0001, ((void*)0) } ,
10320
10321
10322#if 0
10323#line 581
10324SIMD_INTRIN_DEF(adds_f32){ "adds_f32", simd__intrin_adds_f32, 0x0001, ((void*)0) } ,
10325
10326#line 581
10327SIMD_INTRIN_DEF(subs_f32){ "subs_f32", simd__intrin_subs_f32, 0x0001, ((void*)0) } ,
10328
10329#endif // sat_sup
10330
10331#if 1
10332SIMD_INTRIN_DEF(mul_f32){ "mul_f32", simd__intrin_mul_f32, 0x0001, ((void*)0) } ,
10333#endif // mul_sup
10334
10335#if 1
10336SIMD_INTRIN_DEF(div_f32){ "div_f32", simd__intrin_div_f32, 0x0001, ((void*)0) } ,
10337#endif // div_sup
10338
10339#if 0
10340SIMD_INTRIN_DEF(divisor_f32){ "divisor_f32", simd__intrin_divisor_f32, 0x0001, ((void*)0)
} ,
10341SIMD_INTRIN_DEF(divc_f32){ "divc_f32", simd__intrin_divc_f32, 0x0001, ((void*)0) } ,
10342#endif // intdiv_sup
10343
10344#if 1
10345#line 602
10346SIMD_INTRIN_DEF(muladd_f32){ "muladd_f32", simd__intrin_muladd_f32, 0x0001, ((void*)0) }
,
10347
10348#line 602
10349SIMD_INTRIN_DEF(mulsub_f32){ "mulsub_f32", simd__intrin_mulsub_f32, 0x0001, ((void*)0) }
,
10350
10351#line 602
10352SIMD_INTRIN_DEF(nmuladd_f32){ "nmuladd_f32", simd__intrin_nmuladd_f32, 0x0001, ((void*)0)
} ,
10353
10354#line 602
10355SIMD_INTRIN_DEF(nmulsub_f32){ "nmulsub_f32", simd__intrin_nmulsub_f32, 0x0001, ((void*)0)
} ,
10356
10357#endif // fused_sup
10358
10359#if 1
10360SIMD_INTRIN_DEF(sum_f32){ "sum_f32", simd__intrin_sum_f32, 0x0001, ((void*)0) } ,
10361#endif // sum_sup
10362
10363#if 0
10364SIMD_INTRIN_DEF(sumup_f32){ "sumup_f32", simd__intrin_sumup_f32, 0x0001, ((void*)0) } ,
10365#endif // sumup_sup
10366/***************************
10367 * Math
10368 ***************************/
10369#if 1
10370#line 620
10371SIMD_INTRIN_DEF(sqrt_f32){ "sqrt_f32", simd__intrin_sqrt_f32, 0x0001, ((void*)0) } ,
10372
10373#line 620
10374SIMD_INTRIN_DEF(recip_f32){ "recip_f32", simd__intrin_recip_f32, 0x0001, ((void*)0) } ,
10375
10376#line 620
10377SIMD_INTRIN_DEF(abs_f32){ "abs_f32", simd__intrin_abs_f32, 0x0001, ((void*)0) } ,
10378
10379#line 620
10380SIMD_INTRIN_DEF(square_f32){ "square_f32", simd__intrin_square_f32, 0x0001, ((void*)0) }
,
10381
10382#endif
10383
10384#line 627
10385SIMD_INTRIN_DEF(max_f32){ "max_f32", simd__intrin_max_f32, 0x0001, ((void*)0) } ,
10386
10387#line 627
10388SIMD_INTRIN_DEF(min_f32){ "min_f32", simd__intrin_min_f32, 0x0001, ((void*)0) } ,
10389
10390
10391#if 1
10392#line 634
10393SIMD_INTRIN_DEF(maxp_f32){ "maxp_f32", simd__intrin_maxp_f32, 0x0001, ((void*)0) } ,
10394
10395#line 634
10396SIMD_INTRIN_DEF(minp_f32){ "minp_f32", simd__intrin_minp_f32, 0x0001, ((void*)0) } ,
10397
10398#endif
10399
10400/***************************
10401 * Mask operations
10402 ***************************/
10403#line 644
10404 SIMD_INTRIN_DEF(ifadd_f32){ "ifadd_f32", simd__intrin_ifadd_f32, 0x0001, ((void*)0) } ,
10405
10406#line 644
10407 SIMD_INTRIN_DEF(ifsub_f32){ "ifsub_f32", simd__intrin_ifsub_f32, 0x0001, ((void*)0) } ,
10408
10409
10410#endif // simd_sup
10411
10412#line 489
10413#if NPY_SIMD_F641
10414
10415/***************************
10416 * Memory
10417 ***************************/
10418#line 497
10419SIMD_INTRIN_DEF(load_f64){ "load_f64", simd__intrin_load_f64, 0x0001, ((void*)0) } ,
10420
10421#line 497
10422SIMD_INTRIN_DEF(loada_f64){ "loada_f64", simd__intrin_loada_f64, 0x0001, ((void*)0) } ,
10423
10424#line 497
10425SIMD_INTRIN_DEF(loads_f64){ "loads_f64", simd__intrin_loads_f64, 0x0001, ((void*)0) } ,
10426
10427#line 497
10428SIMD_INTRIN_DEF(loadl_f64){ "loadl_f64", simd__intrin_loadl_f64, 0x0001, ((void*)0) } ,
10429
10430#line 497
10431SIMD_INTRIN_DEF(store_f64){ "store_f64", simd__intrin_store_f64, 0x0001, ((void*)0) } ,
10432
10433#line 497
10434SIMD_INTRIN_DEF(storea_f64){ "storea_f64", simd__intrin_storea_f64, 0x0001, ((void*)0) }
,
10435
10436#line 497
10437SIMD_INTRIN_DEF(stores_f64){ "stores_f64", simd__intrin_stores_f64, 0x0001, ((void*)0) }
,
10438
10439#line 497
10440SIMD_INTRIN_DEF(storel_f64){ "storel_f64", simd__intrin_storel_f64, 0x0001, ((void*)0) }
,
10441
10442#line 497
10443SIMD_INTRIN_DEF(storeh_f64){ "storeh_f64", simd__intrin_storeh_f64, 0x0001, ((void*)0) }
,
10444
10445
10446/****************************************
10447 * Non-contiguous/Partial Memory access
10448 ****************************************/
10449#if 1
10450#line 508
10451SIMD_INTRIN_DEF(load_till_f64){ "load_till_f64", simd__intrin_load_till_f64, 0x0001, ((void
*)0) } ,
10452
10453#line 508
10454SIMD_INTRIN_DEF(load_tillz_f64){ "load_tillz_f64", simd__intrin_load_tillz_f64, 0x0001, ((void
*)0) } ,
10455
10456#line 508
10457SIMD_INTRIN_DEF(loadn_f64){ "loadn_f64", simd__intrin_loadn_f64, 0x0001, ((void*)0) } ,
10458
10459#line 508
10460SIMD_INTRIN_DEF(loadn_till_f64){ "loadn_till_f64", simd__intrin_loadn_till_f64, 0x0001, ((void
*)0) } ,
10461
10462#line 508
10463SIMD_INTRIN_DEF(loadn_tillz_f64){ "loadn_tillz_f64", simd__intrin_loadn_tillz_f64, 0x0001, ((
void*)0) } ,
10464
10465#line 508
10466SIMD_INTRIN_DEF(store_till_f64){ "store_till_f64", simd__intrin_store_till_f64, 0x0001, ((void
*)0) } ,
10467
10468#line 508
10469SIMD_INTRIN_DEF(storen_f64){ "storen_f64", simd__intrin_storen_f64, 0x0001, ((void*)0) }
,
10470
10471#line 508
10472SIMD_INTRIN_DEF(storen_till_f64){ "storen_till_f64", simd__intrin_storen_till_f64, 0x0001, ((
void*)0) } ,
10473
10474#endif // ncont_sup
10475
10476/***************************
10477 * Misc
10478 ***************************/
10479#line 519
10480#if 1
10481SIMD_INTRIN_DEF(reinterpret_u8_f64){ "reinterpret_u8_f64", simd__intrin_reinterpret_u8_f64, 0x0001
, ((void*)0) } ,
10482#endif // simd_sup2
10483
10484#line 519
10485#if 1
10486SIMD_INTRIN_DEF(reinterpret_s8_f64){ "reinterpret_s8_f64", simd__intrin_reinterpret_s8_f64, 0x0001
, ((void*)0) } ,
10487#endif // simd_sup2
10488
10489#line 519
10490#if 1
10491SIMD_INTRIN_DEF(reinterpret_u16_f64){ "reinterpret_u16_f64", simd__intrin_reinterpret_u16_f64, 0x0001
, ((void*)0) } ,
10492#endif // simd_sup2
10493
10494#line 519
10495#if 1
10496SIMD_INTRIN_DEF(reinterpret_s16_f64){ "reinterpret_s16_f64", simd__intrin_reinterpret_s16_f64, 0x0001
, ((void*)0) } ,
10497#endif // simd_sup2
10498
10499#line 519
10500#if 1
10501SIMD_INTRIN_DEF(reinterpret_u32_f64){ "reinterpret_u32_f64", simd__intrin_reinterpret_u32_f64, 0x0001
, ((void*)0) } ,
10502#endif // simd_sup2
10503
10504#line 519
10505#if 1
10506SIMD_INTRIN_DEF(reinterpret_s32_f64){ "reinterpret_s32_f64", simd__intrin_reinterpret_s32_f64, 0x0001
, ((void*)0) } ,
10507#endif // simd_sup2
10508
10509#line 519
10510#if 1
10511SIMD_INTRIN_DEF(reinterpret_u64_f64){ "reinterpret_u64_f64", simd__intrin_reinterpret_u64_f64, 0x0001
, ((void*)0) } ,
10512#endif // simd_sup2
10513
10514#line 519
10515#if 1
10516SIMD_INTRIN_DEF(reinterpret_s64_f64){ "reinterpret_s64_f64", simd__intrin_reinterpret_s64_f64, 0x0001
, ((void*)0) } ,
10517#endif // simd_sup2
10518
10519#line 519
10520#if 1
10521SIMD_INTRIN_DEF(reinterpret_f32_f64){ "reinterpret_f32_f64", simd__intrin_reinterpret_f32_f64, 0x0001
, ((void*)0) } ,
10522#endif // simd_sup2
10523
10524#line 519
10525#if NPY_SIMD_F641
10526SIMD_INTRIN_DEF(reinterpret_f64_f64){ "reinterpret_f64_f64", simd__intrin_reinterpret_f64_f64, 0x0001
, ((void*)0) } ,
10527#endif // simd_sup2
10528
10529
10530#line 527
10531SIMD_INTRIN_DEF(set_f64){ "set_f64", simd__intrin_set_f64, 0x0001, ((void*)0) } ,
10532
10533#line 527
10534SIMD_INTRIN_DEF(setf_f64){ "setf_f64", simd__intrin_setf_f64, 0x0001, ((void*)0) } ,
10535
10536#line 527
10537SIMD_INTRIN_DEF(setall_f64){ "setall_f64", simd__intrin_setall_f64, 0x0001, ((void*)0) }
,
10538
10539#line 527
10540SIMD_INTRIN_DEF(zero_f64){ "zero_f64", simd__intrin_zero_f64, 0x0001, ((void*)0) } ,
10541
10542#line 527
10543SIMD_INTRIN_DEF(select_f64){ "select_f64", simd__intrin_select_f64, 0x0001, ((void*)0) }
,
10544
10545
10546/***************************
10547 * Reorder
10548 ***************************/
10549#line 536
10550SIMD_INTRIN_DEF(combinel_f64){ "combinel_f64", simd__intrin_combinel_f64, 0x0001, ((void*)
0) } ,
10551
10552#line 536
10553SIMD_INTRIN_DEF(combineh_f64){ "combineh_f64", simd__intrin_combineh_f64, 0x0001, ((void*)
0) } ,
10554
10555#line 536
10556SIMD_INTRIN_DEF(combine_f64){ "combine_f64", simd__intrin_combine_f64, 0x0001, ((void*)0)
} ,
10557
10558#line 536
10559SIMD_INTRIN_DEF(zip_f64){ "zip_f64", simd__intrin_zip_f64, 0x0001, ((void*)0) } ,
10560
10561
10562#if 0
10563SIMD_INTRIN_DEF(rev64_f64){ "rev64_f64", simd__intrin_rev64_f64, 0x0001, ((void*)0) } ,
10564#endif
10565
10566/***************************
10567 * Operators
10568 ***************************/
10569#if 0 > 0
10570#line 550
10571SIMD_INTRIN_DEF(shl_f64){ "shl_f64", simd__intrin_shl_f64, 0x0001, ((void*)0) } ,
10572
10573#line 550
10574SIMD_INTRIN_DEF(shr_f64){ "shr_f64", simd__intrin_shr_f64, 0x0001, ((void*)0) } ,
10575
10576#line 550
10577SIMD_INTRIN_DEF(shli_f64){ "shli_f64", simd__intrin_shli_f64, 0x0001, ((void*)0) } ,
10578
10579#line 550
10580SIMD_INTRIN_DEF(shri_f64){ "shri_f64", simd__intrin_shri_f64, 0x0001, ((void*)0) } ,
10581
10582#endif // shl_imm
10583
10584#line 557
10585SIMD_INTRIN_DEF(and_f64){ "and_f64", simd__intrin_and_f64, 0x0001, ((void*)0) } ,
10586
10587#line 557
10588SIMD_INTRIN_DEF(or_f64){ "or_f64", simd__intrin_or_f64, 0x0001, ((void*)0) } ,
10589
10590#line 557
10591SIMD_INTRIN_DEF(xor_f64){ "xor_f64", simd__intrin_xor_f64, 0x0001, ((void*)0) } ,
10592
10593#line 557
10594SIMD_INTRIN_DEF(not_f64){ "not_f64", simd__intrin_not_f64, 0x0001, ((void*)0) } ,
10595
10596#line 557
10597SIMD_INTRIN_DEF(cmpeq_f64){ "cmpeq_f64", simd__intrin_cmpeq_f64, 0x0001, ((void*)0) } ,
10598
10599#line 557
10600SIMD_INTRIN_DEF(cmpneq_f64){ "cmpneq_f64", simd__intrin_cmpneq_f64, 0x0001, ((void*)0) }
,
10601
10602#line 557
10603SIMD_INTRIN_DEF(cmpgt_f64){ "cmpgt_f64", simd__intrin_cmpgt_f64, 0x0001, ((void*)0) } ,
10604
10605#line 557
10606SIMD_INTRIN_DEF(cmpge_f64){ "cmpge_f64", simd__intrin_cmpge_f64, 0x0001, ((void*)0) } ,
10607
10608#line 557
10609SIMD_INTRIN_DEF(cmplt_f64){ "cmplt_f64", simd__intrin_cmplt_f64, 0x0001, ((void*)0) } ,
10610
10611#line 557
10612SIMD_INTRIN_DEF(cmple_f64){ "cmple_f64", simd__intrin_cmple_f64, 0x0001, ((void*)0) } ,
10613
10614
10615/***************************
10616 * Conversion
10617 ***************************/
10618SIMD_INTRIN_DEF(cvt_f64_b64){ "cvt_f64_b64", simd__intrin_cvt_f64_b64, 0x0001, ((void*)0)
} ,
10619SIMD_INTRIN_DEF(cvt_b64_f64){ "cvt_b64_f64", simd__intrin_cvt_b64_f64, 0x0001, ((void*)0)
} ,
10620#if 0
10621SIMD_INTRIN_DEF(expand_f64_f64){ "expand_f64_f64", simd__intrin_expand_f64_f64, 0x0001, ((void
*)0) } ,
10622#endif // expand_sup
10623/***************************
10624 * Arithmetic
10625 ***************************/
10626#line 574
10627SIMD_INTRIN_DEF(add_f64){ "add_f64", simd__intrin_add_f64, 0x0001, ((void*)0) } ,
10628
10629#line 574
10630SIMD_INTRIN_DEF(sub_f64){ "sub_f64", simd__intrin_sub_f64, 0x0001, ((void*)0) } ,
10631
10632
10633#if 0
10634#line 581
10635SIMD_INTRIN_DEF(adds_f64){ "adds_f64", simd__intrin_adds_f64, 0x0001, ((void*)0) } ,
10636
10637#line 581
10638SIMD_INTRIN_DEF(subs_f64){ "subs_f64", simd__intrin_subs_f64, 0x0001, ((void*)0) } ,
10639
10640#endif // sat_sup
10641
10642#if 1
10643SIMD_INTRIN_DEF(mul_f64){ "mul_f64", simd__intrin_mul_f64, 0x0001, ((void*)0) } ,
10644#endif // mul_sup
10645
10646#if 1
10647SIMD_INTRIN_DEF(div_f64){ "div_f64", simd__intrin_div_f64, 0x0001, ((void*)0) } ,
10648#endif // div_sup
10649
10650#if 0
10651SIMD_INTRIN_DEF(divisor_f64){ "divisor_f64", simd__intrin_divisor_f64, 0x0001, ((void*)0)
} ,
10652SIMD_INTRIN_DEF(divc_f64){ "divc_f64", simd__intrin_divc_f64, 0x0001, ((void*)0) } ,
10653#endif // intdiv_sup
10654
10655#if 1
10656#line 602
10657SIMD_INTRIN_DEF(muladd_f64){ "muladd_f64", simd__intrin_muladd_f64, 0x0001, ((void*)0) }
,
10658
10659#line 602
10660SIMD_INTRIN_DEF(mulsub_f64){ "mulsub_f64", simd__intrin_mulsub_f64, 0x0001, ((void*)0) }
,
10661
10662#line 602
10663SIMD_INTRIN_DEF(nmuladd_f64){ "nmuladd_f64", simd__intrin_nmuladd_f64, 0x0001, ((void*)0)
} ,
10664
10665#line 602
10666SIMD_INTRIN_DEF(nmulsub_f64){ "nmulsub_f64", simd__intrin_nmulsub_f64, 0x0001, ((void*)0)
} ,
10667
10668#endif // fused_sup
10669
10670#if 1
10671SIMD_INTRIN_DEF(sum_f64){ "sum_f64", simd__intrin_sum_f64, 0x0001, ((void*)0) } ,
10672#endif // sum_sup
10673
10674#if 0
10675SIMD_INTRIN_DEF(sumup_f64){ "sumup_f64", simd__intrin_sumup_f64, 0x0001, ((void*)0) } ,
10676#endif // sumup_sup
10677/***************************
10678 * Math
10679 ***************************/
10680#if 1
10681#line 620
10682SIMD_INTRIN_DEF(sqrt_f64){ "sqrt_f64", simd__intrin_sqrt_f64, 0x0001, ((void*)0) } ,
10683
10684#line 620
10685SIMD_INTRIN_DEF(recip_f64){ "recip_f64", simd__intrin_recip_f64, 0x0001, ((void*)0) } ,
10686
10687#line 620
10688SIMD_INTRIN_DEF(abs_f64){ "abs_f64", simd__intrin_abs_f64, 0x0001, ((void*)0) } ,
10689
10690#line 620
10691SIMD_INTRIN_DEF(square_f64){ "square_f64", simd__intrin_square_f64, 0x0001, ((void*)0) }
,
10692
10693#endif
10694
10695#line 627
10696SIMD_INTRIN_DEF(max_f64){ "max_f64", simd__intrin_max_f64, 0x0001, ((void*)0) } ,
10697
10698#line 627
10699SIMD_INTRIN_DEF(min_f64){ "min_f64", simd__intrin_min_f64, 0x0001, ((void*)0) } ,
10700
10701
10702#if 1
10703#line 634
10704SIMD_INTRIN_DEF(maxp_f64){ "maxp_f64", simd__intrin_maxp_f64, 0x0001, ((void*)0) } ,
10705
10706#line 634
10707SIMD_INTRIN_DEF(minp_f64){ "minp_f64", simd__intrin_minp_f64, 0x0001, ((void*)0) } ,
10708
10709#endif
10710
10711/***************************
10712 * Mask operations
10713 ***************************/
10714#line 644
10715 SIMD_INTRIN_DEF(ifadd_f64){ "ifadd_f64", simd__intrin_ifadd_f64, 0x0001, ((void*)0) } ,
10716
10717#line 644
10718 SIMD_INTRIN_DEF(ifsub_f64){ "ifsub_f64", simd__intrin_ifsub_f64, 0x0001, ((void*)0) } ,
10719
10720
10721#endif // simd_sup
10722
10723/*************************************************************************
10724 * Variant
10725 ************************************************************************/
10726SIMD_INTRIN_DEF(cleanup){ "cleanup", simd__intrin_cleanup, 0x0001, ((void*)0) } ,
10727
10728/*************************************************************************
10729 * A special section for f32/f64 intrinsics outside the main repeater
10730 ************************************************************************/
10731/***************************
10732 * Operators
10733 ***************************/
10734// check special cases
10735SIMD_INTRIN_DEF(notnan_f32){ "notnan_f32", simd__intrin_notnan_f32, 0x0001, ((void*)0) }
,
10736#if NPY_SIMD_F641
10737 SIMD_INTRIN_DEF(notnan_f64){ "notnan_f64", simd__intrin_notnan_f64, 0x0001, ((void*)0) }
,
10738#endif
10739/***************************
10740 * Conversions
10741 ***************************/
10742// round to nearest integer (assume even)
10743SIMD_INTRIN_DEF(round_s32_f32){ "round_s32_f32", simd__intrin_round_s32_f32, 0x0001, ((void
*)0) } ,
10744#if NPY_SIMD_F641
10745 SIMD_INTRIN_DEF(round_s32_f64){ "round_s32_f64", simd__intrin_round_s32_f64, 0x0001, ((void
*)0) } ,
10746#endif
10747
10748/*************************************************************************
10749 * A special section for boolean intrinsics outside the main repeater
10750 ************************************************************************/
10751/***************************
10752 * Operators
10753 ***************************/
10754// Logical
10755#line 684
10756SIMD_INTRIN_DEF(and_b8){ "and_b8", simd__intrin_and_b8, 0x0001, ((void*)0) } ,
10757SIMD_INTRIN_DEF(or_b8){ "or_b8", simd__intrin_or_b8, 0x0001, ((void*)0) } ,
10758SIMD_INTRIN_DEF(xor_b8){ "xor_b8", simd__intrin_xor_b8, 0x0001, ((void*)0) } ,
10759SIMD_INTRIN_DEF(not_b8){ "not_b8", simd__intrin_not_b8, 0x0001, ((void*)0) } ,
10760
10761#line 684
10762SIMD_INTRIN_DEF(and_b16){ "and_b16", simd__intrin_and_b16, 0x0001, ((void*)0) } ,
10763SIMD_INTRIN_DEF(or_b16){ "or_b16", simd__intrin_or_b16, 0x0001, ((void*)0) } ,
10764SIMD_INTRIN_DEF(xor_b16){ "xor_b16", simd__intrin_xor_b16, 0x0001, ((void*)0) } ,
10765SIMD_INTRIN_DEF(not_b16){ "not_b16", simd__intrin_not_b16, 0x0001, ((void*)0) } ,
10766
10767#line 684
10768SIMD_INTRIN_DEF(and_b32){ "and_b32", simd__intrin_and_b32, 0x0001, ((void*)0) } ,
10769SIMD_INTRIN_DEF(or_b32){ "or_b32", simd__intrin_or_b32, 0x0001, ((void*)0) } ,
10770SIMD_INTRIN_DEF(xor_b32){ "xor_b32", simd__intrin_xor_b32, 0x0001, ((void*)0) } ,
10771SIMD_INTRIN_DEF(not_b32){ "not_b32", simd__intrin_not_b32, 0x0001, ((void*)0) } ,
10772
10773#line 684
10774SIMD_INTRIN_DEF(and_b64){ "and_b64", simd__intrin_and_b64, 0x0001, ((void*)0) } ,
10775SIMD_INTRIN_DEF(or_b64){ "or_b64", simd__intrin_or_b64, 0x0001, ((void*)0) } ,
10776SIMD_INTRIN_DEF(xor_b64){ "xor_b64", simd__intrin_xor_b64, 0x0001, ((void*)0) } ,
10777SIMD_INTRIN_DEF(not_b64){ "not_b64", simd__intrin_not_b64, 0x0001, ((void*)0) } ,
10778
10779/***************************
10780 * Conversions
10781 ***************************/
10782// Convert mask vector to integer bitfield
10783#line 696
10784SIMD_INTRIN_DEF(tobits_b8){ "tobits_b8", simd__intrin_tobits_b8, 0x0001, ((void*)0) } ,
10785
10786#line 696
10787SIMD_INTRIN_DEF(tobits_b16){ "tobits_b16", simd__intrin_tobits_b16, 0x0001, ((void*)0) }
,
10788
10789#line 696
10790SIMD_INTRIN_DEF(tobits_b32){ "tobits_b32", simd__intrin_tobits_b32, 0x0001, ((void*)0) }
,
10791
10792#line 696
10793SIMD_INTRIN_DEF(tobits_b64){ "tobits_b64", simd__intrin_tobits_b64, 0x0001, ((void*)0) }
,
10794
10795
10796/************************************************************************/
10797{NULL((void*)0), NULL((void*)0), 0, NULL((void*)0)}
10798}; // PyMethodDef
10799
10800#endif // NPY_SIMD
10801
10802//#########################################################################
10803//## Defining a separate module for each target
10804//#########################################################################
10805NPY_VISIBILITY_HIDDEN__attribute__((visibility("hidden"))) PyObject *
10806NPY_CPU_DISPATCH_CURFX(simd_create_module)simd_create_module(void)
10807{
10808 static struct PyModuleDef defs = {
10809 .m_base = PyModuleDef_HEAD_INIT{ { 1, ((void*)0) }, ((void*)0), 0, ((void*)0), },
10810 .m_size = -1,
10811 #ifdef NPY__CPU_TARGET_CURRENT
10812 .m_name = "numpy.core._simd." NPY_TOSTRING(NPY__CPU_TARGET_CURRENT)"NPY__CPU_TARGET_CURRENT",
10813 #else
10814 .m_name = "numpy.core._simd.baseline",
10815 #endif
10816 #if NPY_SIMD128
10817 .m_methods = simd__intrinsics_methods
10818 #else
10819 .m_methods = NULL((void*)0)
10820 #endif
10821 };
10822 PyObject *m = PyModule_Create(&defs)PyModule_Create2(&defs, 1013);
10823 if (m == NULL((void*)0)) {
10824 return NULL((void*)0);
10825 }
10826 if (PyModule_AddIntConstant(m, "simd", NPY_SIMD128)) {
10827 goto err;
10828 }
10829 if (PyModule_AddIntConstant(m, "simd_f64", NPY_SIMD_F641)) {
10830 goto err;
10831 }
10832 if (PyModule_AddIntConstant(m, "simd_fma3", NPY_SIMD_FMA30)) {
10833 goto err;
10834 }
10835 if (PyModule_AddIntConstant(m, "simd_width", NPY_SIMD_WIDTH16)) {
10836 goto err;
10837 }
10838#if NPY_SIMD128
10839 if (PySIMDVectorType_Init(m)) {
10840 goto err;
10841 }
10842 #line 748
10843 if (PyModule_AddIntConstant(m, "nlanes_u8", npyv_nlanes_u816)) {
10844 goto err;
10845 }
10846
10847#line 748
10848 if (PyModule_AddIntConstant(m, "nlanes_s8", npyv_nlanes_s816)) {
10849 goto err;
10850 }
10851
10852#line 748
10853 if (PyModule_AddIntConstant(m, "nlanes_u16", npyv_nlanes_u168)) {
10854 goto err;
10855 }
10856
10857#line 748
10858 if (PyModule_AddIntConstant(m, "nlanes_s16", npyv_nlanes_s168)) {
10859 goto err;
10860 }
10861
10862#line 748
10863 if (PyModule_AddIntConstant(m, "nlanes_u32", npyv_nlanes_u324)) {
10864 goto err;
10865 }
10866
10867#line 748
10868 if (PyModule_AddIntConstant(m, "nlanes_s32", npyv_nlanes_s324)) {
10869 goto err;
10870 }
10871
10872#line 748
10873 if (PyModule_AddIntConstant(m, "nlanes_u64", npyv_nlanes_u642)) {
10874 goto err;
10875 }
10876
10877#line 748
10878 if (PyModule_AddIntConstant(m, "nlanes_s64", npyv_nlanes_s642)) {
10879 goto err;
10880 }
10881
10882#line 748
10883 if (PyModule_AddIntConstant(m, "nlanes_f32", npyv_nlanes_f324)) {
10884 goto err;
10885 }
10886
10887#line 748
10888 if (PyModule_AddIntConstant(m, "nlanes_f64", npyv_nlanes_f642)) {
10889 goto err;
10890 }
10891
10892#endif // NPY_SIMD
10893 return m;
10894err:
10895 Py_DECREF(m)_Py_DECREF(((PyObject*)(m)));
10896 return NULL((void*)0);
10897}
10898

numpy/core/src/_simd/_simd_convert.inc

1/**
2 * This file is included by `_simd.dispatch.c.src`. Its contents are affected by the simd configuration, and
3 * therefore must be built multiple times. Making it a standalone `.c` file with `NPY_VISIBILITY_HIDDEN`
4 * symbols would require judicious use of `NPY_CPU_DISPATCH_DECLARE` and `NPY_CPU_DISPATCH_CURFX`, which was
5 * deemed too harmful to readability.
6 */
7/************************************
8 ** Protected Definitions
9 ************************************/
10static simd_data
11simd_scalar_from_number(PyObject *obj, simd_data_type dtype)
12{
13 const simd_data_info *info = simd_data_getinfo(dtype);
14 assert(info->is_scalar && info->lane_size > 0)((void) (0));
15 simd_data data;
16 if (info->is_float) {
17 data.f64 = PyFloat_AsDouble(obj);
18 if (dtype == simd_data_f32){
19 data.f32 = (float)data.f64;
20 }
21 } else {
22 data.u64 = PyLong_AsUnsignedLongLongMask(obj);
23 }
24 return data;
25}
26
27static PyObject *
28simd_scalar_to_number(simd_data data, simd_data_type dtype)
29{
30 const simd_data_info *info = simd_data_getinfo(dtype);
31 assert(info->is_scalar && info->lane_size > 0)((void) (0));
32 if (info->is_float) {
33 if (dtype == simd_data_f32) {
34 return PyFloat_FromDouble(data.f32);
35 }
36 return PyFloat_FromDouble(data.f64);
37 }
38 int leftb = (sizeof(npyv_lanetype_u64) - info->lane_size) * 8;
39 data.u64 <<= leftb;
40 if (info->is_signed) {
41 return PyLong_FromLongLong(data.s64 >> leftb);
42 }
43 return PyLong_FromUnsignedLongLong(data.u64 >> leftb);
44}
45
// Hidden allocation header stored immediately *before* the aligned buffer
// handed out by simd_sequence_new().  `len` is the number of lanes and
// `ptr` is the original (unaligned) malloc pointer needed by
// simd_sequence_free(); both are read back via negative indexing (hdr[-1]).
46typedef struct {
47 Py_ssize_t len;
48 void *ptr;
49} simd__alloc_data;
50
51static void *
52simd_sequence_new(Py_ssize_t len, simd_data_type dtype)
53{
54 const simd_data_info *info = simd_data_getinfo(dtype);
55 assert(len > 0 && info->is_sequence && info->lane_size > 0)((void) (0));
56 size_t size = sizeof(simd__alloc_data) + len * info->lane_size + NPY_SIMD_WIDTH16;
57 void *ptr = malloc(size);
58 if (ptr == NULL((void*)0)) {
59 return PyErr_NoMemory();
60 }
61 // align the pointer
62 simd__alloc_data *a_ptr = (simd__alloc_data *)(
63 ((uintptr_t)ptr + sizeof(simd__alloc_data) + NPY_SIMD_WIDTH16) & ~(uintptr_t)(NPY_SIMD_WIDTH16-1)
64 );
65 a_ptr[-1].len = len;
66 a_ptr[-1].ptr = ptr;
67 return a_ptr;
68}
69
70static Py_ssize_t
71simd_sequence_len(void const *ptr)
72{
73 return ((simd__alloc_data const*)ptr)[-1].len;
74}
75
76static void
77simd_sequence_free(void *ptr)
78{
79 free(((simd__alloc_data *)ptr)[-1].ptr);
80}
81
82static void *
83simd_sequence_from_iterable(PyObject *obj, simd_data_type dtype, Py_ssize_t min_size)
84{
85 const simd_data_info *info = simd_data_getinfo(dtype);
86 assert(info->is_sequence && info->lane_size > 0)((void) (0));
87 PyObject *seq_obj = PySequence_Fast(obj, "expected a sequence");
2
Calling 'PySequence_Fast'
4
Returning from 'PySequence_Fast'
11
PyObject ownership leak with reference count of 1
88 if (seq_obj == NULL((void*)0)) {
5
Assuming 'seq_obj' is not equal to NULL
6
Taking false branch
89 return NULL((void*)0);
90 }
91 Py_ssize_t seq_size = PySequence_Fast_GET_SIZE(seq_obj)(((((((PyObject*)(seq_obj))->ob_type))->tp_flags & (
(1UL << 25))) != 0) ? (((void) (0)), (((PyVarObject*)(seq_obj
))->ob_size)) : (((PyVarObject*)((((void) (0)), (PyTupleObject
*)(seq_obj))))->ob_size))
;
7
Assuming the condition is false
8
'?' condition is false
92 if (seq_size < min_size) {
9
Assuming 'seq_size' is < 'min_size'
10
Taking true branch
93 PyErr_Format(PyExc_ValueError,
94 "minimum acceptable size of the required sequence is %d, given(%d)",
95 min_size, seq_size
96 );
97 return NULL((void*)0);
98 }
99 npyv_lanetype_u8 *dst = simd_sequence_new(seq_size, dtype);
100 if (dst == NULL((void*)0)) {
101 return NULL((void*)0);
102 }
103 PyObject **seq_items = PySequence_Fast_ITEMS(seq_obj)(((((((PyObject*)(seq_obj))->ob_type))->tp_flags & (
(1UL << 25))) != 0) ? ((PyListObject *)(seq_obj))->ob_item
: ((PyTupleObject *)(seq_obj))->ob_item)
;
104 for (Py_ssize_t i = 0; i < seq_size; ++i) {
105 simd_data data = simd_scalar_from_number(seq_items[i], info->to_scalar);
106 npyv_lanetype_u8 *sdst = dst + i * info->lane_size;
107 memcpy(sdst, &data.u64, info->lane_size);
108 }
109 Py_DECREF(seq_obj)_Py_DECREF(((PyObject*)(seq_obj)));
110
111 if (PyErr_Occurred()) {
112 simd_sequence_free(dst);
113 return NULL((void*)0);
114 }
115 return dst;
116}
117
118static int
119simd_sequence_fill_iterable(PyObject *obj, const void *ptr, simd_data_type dtype)
120{
121 const simd_data_info *info = simd_data_getinfo(dtype);
122 if (!PySequence_Check(obj)) {
123 PyErr_Format(PyExc_TypeError,
124 "a sequence object is required to fill %s", info->pyname
125 );
126 return -1;
127 }
128 const npyv_lanetype_u8 *src = ptr;
129 Py_ssize_t seq_len = simd_sequence_len(ptr);
130 for (Py_ssize_t i = 0; i < seq_len; ++i) {
131 const npyv_lanetype_u8 *ssrc = src + i * info->lane_size;
132 simd_data data;
133 memcpy(&data.u64, ssrc, info->lane_size);
134 PyObject *item = simd_scalar_to_number(data, info->to_scalar);
135 if (item == NULL((void*)0)) {
136 return -1;
137 }
138 int res = PySequence_SetItem(obj, i, item);
139 Py_DECREF(item)_Py_DECREF(((PyObject*)(item)));
140 if (res < 0) {
141 return -1;
142 }
143 }
144 return 0;
145}
146
147static PyObject *
148simd_sequence_to_list(const void *ptr, simd_data_type dtype)
149{
150 PyObject *list = PyList_New(simd_sequence_len(ptr));
151 if (list == NULL((void*)0)) {
152 return NULL((void*)0);
153 }
154 if (simd_sequence_fill_iterable(list, ptr, dtype) < 0) {
155 Py_DECREF(list)_Py_DECREF(((PyObject*)(list)));
156 return NULL((void*)0);
157 }
158 return list;
159}
160
161static simd_data
162simd_vectorx_from_tuple(PyObject *obj, simd_data_type dtype)
163{
164 const simd_data_info *info = simd_data_getinfo(dtype);
165 // NPYV currently only supports x2 and x3
166 assert(info->is_vectorx > 1 && info->is_vectorx < 4)((void) (0));
167
168 simd_data data = {.u64 = 0};
169 if (!PyTuple_Check(obj)((((((PyObject*)(obj))->ob_type))->tp_flags & ((1UL
<< 26))) != 0)
|| PyTuple_GET_SIZE(obj)(((PyVarObject*)((((void) (0)), (PyTupleObject *)(obj))))->
ob_size)
!= info->is_vectorx) {
170 PyErr_Format(PyExc_TypeError,
171 "a tuple of %d vector type %s is required",
172 info->is_vectorx, simd_data_getinfo(info->to_vector)->pyname
173 );
174 return data;
175 }
176 for (int i = 0; i < info->is_vectorx; ++i) {
177 PyObject *item = PyTuple_GET_ITEM(obj, i)((((void) (0)), (PyTupleObject *)(obj))->ob_item[i]);
178 // get the max multi-vec and let the compiler do the rest
179 data.vu64x3.val[i] = PySIMDVector_AsData((PySIMDVectorObject*)item, info->to_vector).vu64;
180 if (PyErr_Occurred()) {
181 return data;
182 }
183 }
184 return data;
185}
186
187static PyObject *
188simd_vectorx_to_tuple(simd_data data, simd_data_type dtype)
189{
190 const simd_data_info *info = simd_data_getinfo(dtype);
191 // NPYV currently only supports x2 and x3
192 assert(info->is_vectorx > 1 && info->is_vectorx < 4)((void) (0));
193
194 PyObject *tuple = PyTuple_New(info->is_vectorx);
195 if (tuple == NULL((void*)0)) {
196 return NULL((void*)0);
197 }
198 for (int i = 0; i < info->is_vectorx; ++i) {
199 // get the max multi-vector and let the compiler handle the rest
200 simd_data vdata = {.vu64 = data.vu64x3.val[i]};
201 PyObject *item = (PyObject*)PySIMDVector_FromData(vdata, info->to_vector);
202 if (item == NULL((void*)0)) {
203 // TODO: improve log add item number
204 Py_DECREF(tuple)_Py_DECREF(((PyObject*)(tuple)));
205 return NULL((void*)0);
206 }
207 PyTuple_SET_ITEM(tuple, i, item)PyTuple_SetItem(tuple, i, item);
208 }
209 return tuple;
210}

/opt/pyrefcon/lib/pyrefcon/models/models/PySequence_Fast.model

1#ifndef PySequence_Fast
2struct _object;
3typedef struct _object PyObject;
4PyObject* clang_analyzer_PyObject_New_Reference();
5PyObject* PySequence_Fast(PyObject *o, const char *m) {
6 return clang_analyzer_PyObject_New_Reference();
3
Setting reference count to 1
7}
8#else
9#warning "API PySequence_Fast is defined as a macro."
10#endif