@@ -54,6 +54,39 @@ void frolladaptivefun(rollfun_t rfun, unsigned int algo, const double *x, uint64
     snprintf(end(ans->message[0]), 500, _("%s: processing fun %d algo %u took %.3fs\n"), __func__, rfun, algo, omp_get_wtime()-tic);
 }

+#undef MEAN_WINDOW_STEP_VALUE
+#define MEAN_WINDOW_STEP_VALUE \
+  if (wn>0) { \
+    if (narm) { \
+      if (wpinf > 0) { \
+        if (wninf > 0) { \
+          ans->dbl_v[i] = R_NaN; \
+        } else { \
+          ans->dbl_v[i] = R_PosInf; \
+        } \
+      } else if (wninf > 0) { \
+        ans->dbl_v[i] = R_NegInf; \
+      } else { \
+        int thisk = k[i] - ((int) wn); \
+        ans->dbl_v[i] = thisk==0 ? R_NaN : ws/thisk; \
+      } \
+    } else { \
+      ans->dbl_v[i] = NA_REAL; \
+    } \
+  } else { \
+    if (wpinf > 0) { \
+      if (wninf > 0) { \
+        ans->dbl_v[i] = R_NaN; \
+      } else { \
+        ans->dbl_v[i] = R_PosInf; \
+      } \
+    } else if (wninf > 0) { \
+      ans->dbl_v[i] = R_NegInf; \
+    } else { \
+      ans->dbl_v[i] = ws/k[i]; /* for k[i]==0 it turns into 0/0, as expected */ \
+    } \
+  }
+
 /* fast rolling adaptive mean - fast
  * when no info on NF (has.nf argument) then assume no NFs run faster version
  * adaptive rollmean implemented as cumsum first pass, then diff cumsum by indexes `i` to `i-k[i]`
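The cumsum-then-diff idea named in that comment, reduced to a minimal standalone sketch: one pass builds a running prefix sum, then each adaptive window ending at `i` is the difference of two prefix sums. This is only an illustration of the technique, not the data.table code path: the function name is made up, there is no NaN/Inf bookkeeping, and it assumes every window fits, i.e. 0 < k[i] <= i+1.

#include <stdint.h>
#include <stdlib.h>

// illustrative sketch of an adaptive rolling mean via prefix sums (no NA/Inf handling)
static void adaptive_mean_sketch(const double *x, uint64_t nx, const int *k, double *out) {
  double *cs = malloc((nx+1)*sizeof(double));   // cs[j] = sum of x[0..j-1]
  if (!cs) return;
  cs[0] = 0.0;
  for (uint64_t i=0; i<nx; i++)
    cs[i+1] = cs[i] + x[i];                     // single cumsum pass
  for (uint64_t i=0; i<nx; i++) {
    uint64_t w = (uint64_t)k[i];                // window ends at i and covers x[i-k[i]+1..i]
    out[i] = (cs[i+1] - cs[i+1-w]) / w;         // diff of prefix sums, divided by window size
  }
  free(cs);
}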
@@ -128,40 +161,6 @@ void frolladaptivemeanFast(const double *x, uint64_t nx, ans_t *ans, const int *
     cpinf[i] = pinf;
     cninf[i] = ninf;
   }
-
-  #undef MEAN_WINDOW_STEP_VALUE
-  #define MEAN_WINDOW_STEP_VALUE \
-    if (wn>0) { \
-      if (narm) { \
-        if (wpinf > 0) { \
-          if (wninf > 0) { \
-            ans->dbl_v[i] = R_NaN; \
-          } else { \
-            ans->dbl_v[i] = R_PosInf; \
-          } \
-        } else if (wninf > 0) { \
-          ans->dbl_v[i] = R_NegInf; \
-        } else { \
-          int thisk = k[i] - ((int) wn); \
-          ans->dbl_v[i] = thisk==0 ? R_NaN : ws/thisk; \
-        } \
-      } else { \
-        ans->dbl_v[i] = NA_REAL; \
-      } \
-    } else { \
-      if (wpinf > 0) { \
-        if (wninf > 0) { \
-          ans->dbl_v[i] = R_NaN; \
-        } else { \
-          ans->dbl_v[i] = R_PosInf; \
-        } \
-      } else if (wninf > 0) { \
-        ans->dbl_v[i] = R_NegInf; \
-      } else { \
-        ans->dbl_v[i] = ws/k[i]; /* for k[i]==0 it turns into 0/0, as expected */ \
-      } \
-    }
-
   #pragma omp parallel for num_threads(getDTthreads(nx, true))
   for (uint64_t i=0; i<nx; i++) { // loop over observations to calculate final answer
     uint64_t wn, wpinf, wninf;
@@ -279,6 +278,39 @@ void frolladaptivemeanExact(const double *x, uint64_t nx, ans_t *ans, const int
   } // end of truehasnf
 }

+#undef SUM_WINDOW_STEP_VALUE
+#define SUM_WINDOW_STEP_VALUE \
+  if (wn>0) { \
+    if (narm) { \
+      if (wpinf > 0) { \
+        if (wninf > 0) { \
+          ans->dbl_v[i] = R_NaN; \
+        } else { \
+          ans->dbl_v[i] = R_PosInf; \
+        } \
+      } else if (wninf > 0) { \
+        ans->dbl_v[i] = R_NegInf; \
+      } else { \
+        int thisk = k[i] - ((int) wn); \
+        ans->dbl_v[i] = thisk==0 ? 0.0 : ws; \
+      } \
+    } else { \
+      ans->dbl_v[i] = NA_REAL; \
+    } \
+  } else { \
+    if (wpinf > 0) { \
+      if (wninf > 0) { \
+        ans->dbl_v[i] = R_NaN; \
+      } else { \
+        ans->dbl_v[i] = R_PosInf; \
+      } \
+    } else if (wninf > 0) { \
+      ans->dbl_v[i] = R_NegInf; \
+    } else { \
+      ans->dbl_v[i] = ws; /* for k[i]==0 it turns into 0, as expected */ \
+    } \
+  }
+
 /* fast rolling adaptive sum - fast
  * same as adaptive mean fast
  */
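The SUM macro above differs from the MEAN one only in the final value: with na.rm it returns the window sum ws as-is (or 0 when every element is missing), while the mean divides ws by the number of non-missing elements k[i]-wn (NaN when that count is zero). A small standalone check of that arithmetic, using made-up variable names rather than the actual code path:

#include <math.h>
#include <stdio.h>

int main(void) {
  // one hypothetical adaptive window of width 4 with two missing values
  double win[] = {1.0, NAN, 2.0, NAN};
  int k = 4, wn = 0;                       // wn counts the missing (NaN/NA) elements
  double ws = 0.0;                         // ws accumulates the non-missing values
  for (int j=0; j<k; j++) {
    if (isnan(win[j])) wn++; else ws += win[j];
  }
  int thisk = k - wn;                      // effective window size after na.rm
  double mean = thisk==0 ? NAN : ws/thisk; // 1.5 here; NaN if the whole window is missing
  double sum  = thisk==0 ? 0.0 : ws;       // 3 here; 0 if the whole window is missing
  printf("mean=%g sum=%g\n", mean, sum);   // prints: mean=1.5 sum=3
  return 0;
}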
@@ -351,40 +383,6 @@ void frolladaptivesumFast(const double *x, uint64_t nx, ans_t *ans, const int *k
     cpinf[i] = pinf;
     cninf[i] = ninf;
   }
-
-  #undef SUM_WINDOW_STEP_VALUE
-  #define SUM_WINDOW_STEP_VALUE \
-    if (wn>0) { \
-      if (narm) { \
-        if (wpinf > 0) { \
-          if (wninf > 0) { \
-            ans->dbl_v[i] = R_NaN; \
-          } else { \
-            ans->dbl_v[i] = R_PosInf; \
-          } \
-        } else if (wninf > 0) { \
-          ans->dbl_v[i] = R_NegInf; \
-        } else { \
-          int thisk = k[i] - ((int) wn); \
-          ans->dbl_v[i] = thisk==0 ? 0.0 : ws; \
-        } \
-      } else { \
-        ans->dbl_v[i] = NA_REAL; \
-      } \
-    } else { \
-      if (wpinf > 0) { \
-        if (wninf > 0) { \
-          ans->dbl_v[i] = R_NaN; \
-        } else { \
-          ans->dbl_v[i] = R_PosInf; \
-        } \
-      } else if (wninf > 0) { \
-        ans->dbl_v[i] = R_NegInf; \
-      } else { \
-        ans->dbl_v[i] = ws; /* for k[i]==0 it turns into 0, as expected */ \
-      } \
-    }
-
   #pragma omp parallel for num_threads(getDTthreads(nx, true))
   for (uint64_t i=0; i<nx; i++) {
     uint64_t wn, wpinf, wninf;
@@ -637,6 +635,27 @@ void frolladaptiveminExact(const double *x, uint64_t nx, ans_t *ans, const int *
   }
 }

+#undef PROD_WINDOW_STEP_VALUE
+#define PROD_WINDOW_STEP_VALUE \
+  if (wn>0) { \
+    if (narm) { \
+      if (wpinf == 0 && wninf == 0) { \
+        int thisk = k[i] - ((int) wn); \
+        ans->dbl_v[i] = thisk==0 ? 1.0 : (double) ws; \
+      } else { \
+        ans->dbl_v[i] = (wninf+(ws<0))%2 ? R_NegInf : R_PosInf; \
+      } \
+    } else { \
+      ans->dbl_v[i] = NA_REAL; \
+    } \
+  } else { \
+    if (wpinf == 0 && wninf == 0) { \
+      ans->dbl_v[i] = (double) ws; /* k[i]==0 produces 1.0 */ \
+    } else { \
+      ans->dbl_v[i] = (wninf+(ws<0))%2 ? R_NegInf : R_PosInf; \
+    } \
+  }
+
 /* fast rolling adaptive prod - fast
  * same as adaptive mean fast
  */
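When the window contains an infinity (and, under na.rm, the NaNs have been discounted), the PROD macro above decides the sign by parity: the product is -Inf exactly when the count of -Inf factors plus one-if-the-finite-part-is-negative is odd, which is what `(wninf+(ws<0))%2` encodes. A standalone check of that rule on one made-up window:

#include <math.h>
#include <stdio.h>

int main(void) {
  // hypothetical window {2, -Inf, -3}: one -Inf, finite part 2*(-3) = -6
  int wninf = 1;                           // count of -Inf values in the window
  double ws = 2.0 * -3.0;                  // product of the finite values
  double rule   = (wninf + (ws < 0)) % 2 ? -INFINITY : INFINITY;
  double direct = 2.0 * -INFINITY * -3.0;  // evaluate the product directly
  printf("rule=%g direct=%g\n", rule, direct); // both print inf
  return 0;
}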
@@ -709,28 +728,6 @@ void frolladaptiveprodFast(const double *x, uint64_t nx, ans_t *ans, const int *
     cpinf[i] = pinf;
     cninf[i] = ninf;
   }
-
-  #undef PROD_WINDOW_STEP_VALUE
-  #define PROD_WINDOW_STEP_VALUE \
-    if (wn>0) { \
-      if (narm) { \
-        if (wpinf == 0 && wninf == 0) { \
-          int thisk = k[i] - ((int) wn); \
-          ans->dbl_v[i] = thisk==0 ? 1.0 : (double) ws; \
-        } else { \
-          ans->dbl_v[i] = (wninf+(ws<0))%2 ? R_NegInf : R_PosInf; \
-        } \
-      } else { \
-        ans->dbl_v[i] = NA_REAL; \
-      } \
-    } else { \
-      if (wpinf == 0 && wninf == 0) { \
-        ans->dbl_v[i] = (double) ws; /* k[i]==0 produces 1.0 */ \
-      } else { \
-        ans->dbl_v[i] = (wninf+(ws<0))%2 ? R_NegInf : R_PosInf; \
-      } \
-    }
-
   #pragma omp parallel for num_threads(getDTthreads(nx, true))
   for (uint64_t i=0; i<nx; i++) {
     uint64_t wn, wpinf, wninf;