 NPY_FINLINE npyv_f32
 simd_heaviside_f32(npyv_f32 x, npyv_f32 h)
 {
-    // TODO: your implmentation
+    npyv_f32 zeros = npyv_setall_f32(0.0f);
+    npyv_f32 ones  = npyv_setall_f32(1.0f);
+    // the npyv_cmp* intrinsics return boolean vectors (npyv_b32), not npyv_f32
+    npyv_b32 gt_zero = npyv_cmpgt_f32(x, zeros);
+    npyv_b32 eq_zero = npyv_cmpeq_f32(x, zeros);
+    npyv_b32 lt_zero = npyv_cmplt_f32(x, zeros);
+    // blend: 1.0 where x > 0, h where x == 0, 0.0 where x < 0;
+    // lanes that match no comparison (NaN) fall through to x itself
+    npyv_f32 res = npyv_select_f32(gt_zero, ones, x);
+    res = npyv_select_f32(eq_zero, h, res);
+    res = npyv_select_f32(lt_zero, zeros, res);
+    return res;
 }
 #endif

 #if NPY_SIMD_F64
 NPY_FINLINE npyv_f64
 simd_heaviside_f64(npyv_f64 x, npyv_f64 h)
 {
-    // TODO: your implmentation
+    npyv_f64 zeros = npyv_setall_f64(0.0);
+    npyv_f64 ones  = npyv_setall_f64(1.0);
+    // the npyv_cmp* intrinsics return boolean vectors (npyv_b64), not npyv_f64
+    npyv_b64 gt_zero = npyv_cmpgt_f64(x, zeros);
+    npyv_b64 eq_zero = npyv_cmpeq_f64(x, zeros);
+    npyv_b64 lt_zero = npyv_cmplt_f64(x, zeros);
+    // blend: 1.0 where x > 0, h where x == 0, 0.0 where x < 0;
+    // lanes that match no comparison (NaN) fall through to x itself
+    npyv_f64 res = npyv_select_f64(gt_zero, ones, x);
+    res = npyv_select_f64(eq_zero, h, res);
+    res = npyv_select_f64(lt_zero, zeros, res);
+    return res;
 }
 #endif

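Because the npyv_cmp* intrinsics yield boolean vectors (npyv_b32/npyv_b64), the results are combined with npyv_select_f32/npyv_select_f64 rather than bitwise and/or on float vectors, and letting unmatched lanes fall back to x keeps NaN inputs propagating as NaN, in line with the scalar npy_heaviside.

For a quick sanity check, a harness along these lines could exercise the f32 kernel against the expected behaviour of np.heaviside (0 for x < 0, h for x == 0, 1 for x > 0, NaN passed through). This is only a sketch under assumptions: it would sit in the same translation unit as the kernels above, inside NumPy's source tree where "simd/simd.h" and the npyv_ load/store intrinsics are available, and the main driver plus the chosen inputs are illustrative, not part of this change.

#include <stdio.h>
#include <math.h>
#include "simd/simd.h"   // NumPy's universal intrinsics (internal header)

#if NPY_SIMD  // assumed guard, mirroring the guard around the f32 kernel above
int main(void)
{
    // fill one register's worth of lanes with negatives, zero, positives, and a NaN
    float in[npyv_nlanes_f32], out[npyv_nlanes_f32];
    for (int i = 0; i < npyv_nlanes_f32; ++i) {
        in[i] = (float)(i - 2);
    }
    in[0] = NAN;

    npyv_f32 x = npyv_load_f32(in);
    npyv_f32 h = npyv_setall_f32(0.5f);   // value returned where x == 0
    npyv_store_f32(out, simd_heaviside_f32(x, h));

    for (int i = 0; i < npyv_nlanes_f32; ++i) {
        printf("heaviside(%g, 0.5) = %g\n", in[i], out[i]);
    }
    npyv_cleanup();
    return 0;
}
#endif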