OpenShot Audio Library | OpenShotAudio 0.4.0
juce_Oversampling.cpp
/*
  ==============================================================================

   This file is part of the JUCE library.
   Copyright (c) 2022 - Raw Material Software Limited

   JUCE is an open source library subject to commercial or open-source
   licensing.

   By using JUCE, you agree to the terms of both the JUCE 7 End-User License
   Agreement and JUCE Privacy Policy.

   End User License Agreement: www.juce.com/juce-7-licence
   Privacy Policy: www.juce.com/juce-privacy-policy

   Or: You may also use this code under the terms of the GPL v3 (see
   www.gnu.org/licenses).

   JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
   EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
   DISCLAIMED.

  ==============================================================================
*/

namespace juce::dsp
{

template <typename SampleType>
struct Oversampling<SampleType>::OversamplingStage
{
    OversamplingStage (size_t numChans, size_t newFactor) : numChannels (numChans), factor (newFactor) {}
    virtual ~OversamplingStage() {}

    //==============================================================================
    virtual SampleType getLatencyInSamples() const = 0;

    virtual void initProcessing (size_t maximumNumberOfSamplesBeforeOversampling)
    {
        buffer.setSize (static_cast<int> (numChannels),
                        static_cast<int> (maximumNumberOfSamplesBeforeOversampling * factor),
                        false, false, true);
    }

    virtual void reset()
    {
        buffer.clear();
    }

    AudioBlock<SampleType> getProcessedSamples (size_t numSamples)
    {
        return AudioBlock<SampleType> (buffer).getSubBlock (0, numSamples);
    }

    virtual void processSamplesUp (const AudioBlock<const SampleType>&) = 0;
    virtual void processSamplesDown (AudioBlock<SampleType>&) = 0;

    AudioBuffer<SampleType> buffer;
    size_t numChannels, factor;
};


//==============================================================================
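/*  Dummy 1x stage: it simply copies the incoming block into the stage buffer on
    the way up and copies it back on the way down, adding no latency. */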
template <typename SampleType>
struct OversamplingDummy final : public Oversampling<SampleType>::OversamplingStage
{
    using ParentType = typename Oversampling<SampleType>::OversamplingStage;

    OversamplingDummy (size_t numChans) : ParentType (numChans, 1) {}

    //==============================================================================
    SampleType getLatencyInSamples() const override
    {
        return 0;
    }

    void processSamplesUp (const AudioBlock<const SampleType>& inputBlock) override
    {
        jassert (inputBlock.getNumChannels() <= static_cast<size_t> (ParentType::buffer.getNumChannels()));
        jassert (inputBlock.getNumSamples() * ParentType::factor <= static_cast<size_t> (ParentType::buffer.getNumSamples()));

        for (size_t channel = 0; channel < inputBlock.getNumChannels(); ++channel)
            ParentType::buffer.copyFrom (static_cast<int> (channel), 0,
                                         inputBlock.getChannelPointer (channel), static_cast<int> (inputBlock.getNumSamples()));
    }

    void processSamplesDown (AudioBlock<SampleType>& outputBlock) override
    {
        jassert (outputBlock.getNumChannels() <= static_cast<size_t> (ParentType::buffer.getNumChannels()));
        jassert (outputBlock.getNumSamples() * ParentType::factor <= static_cast<size_t> (ParentType::buffer.getNumSamples()));

        outputBlock.copyFrom (ParentType::getProcessedSamples (outputBlock.getNumSamples()));
    }

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (OversamplingDummy)
};

//==============================================================================
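/*  2x oversampling stage built on half-band equiripple FIR filters. The
    convolutions below exploit half-band symmetry: every other coefficient is
    zero, so the loops step by two and the centre tap is handled on its own. */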
template <typename SampleType>
struct Oversampling2TimesEquirippleFIR final : public Oversampling<SampleType>::OversamplingStage
{
    using ParentType = typename Oversampling<SampleType>::OversamplingStage;

    Oversampling2TimesEquirippleFIR (size_t numChans,
                                     SampleType normalisedTransitionWidthUp,
                                     SampleType stopbandAmplitudedBUp,
                                     SampleType normalisedTransitionWidthDown,
                                     SampleType stopbandAmplitudedBDown)
        : ParentType (numChans, 2)
    {
        coefficientsUp = *FilterDesign<SampleType>::designFIRLowpassHalfBandEquirippleMethod (normalisedTransitionWidthUp, stopbandAmplitudedBUp);
        coefficientsDown = *FilterDesign<SampleType>::designFIRLowpassHalfBandEquirippleMethod (normalisedTransitionWidthDown, stopbandAmplitudedBDown);

        auto N = coefficientsUp.getFilterOrder() + 1;
        stateUp.setSize (static_cast<int> (this->numChannels), static_cast<int> (N));

        N = coefficientsDown.getFilterOrder() + 1;
        auto Ndiv2 = N / 2;
        auto Ndiv4 = Ndiv2 / 2;

        stateDown.setSize (static_cast<int> (this->numChannels), static_cast<int> (N));
        stateDown2.setSize (static_cast<int> (this->numChannels), static_cast<int> (Ndiv4 + 1));

        position.resize (static_cast<int> (this->numChannels));
    }

    //==============================================================================
    SampleType getLatencyInSamples() const override
    {
        return static_cast<SampleType> (coefficientsUp.getFilterOrder() + coefficientsDown.getFilterOrder()) * 0.5f;
    }

    void reset() override
    {
        ParentType::reset();

        stateUp.clear();
        stateDown.clear();
        stateDown2.clear();

        position.fill (0);
    }

    void processSamplesUp (const AudioBlock<const SampleType>& inputBlock) override
    {
        jassert (inputBlock.getNumChannels() <= static_cast<size_t> (ParentType::buffer.getNumChannels()));
        jassert (inputBlock.getNumSamples() * ParentType::factor <= static_cast<size_t> (ParentType::buffer.getNumSamples()));

        // Initialization
        auto fir = coefficientsUp.getRawCoefficients();
        auto N = coefficientsUp.getFilterOrder() + 1;
        auto Ndiv2 = N / 2;
        auto numSamples = inputBlock.getNumSamples();

        // Processing
        for (size_t channel = 0; channel < inputBlock.getNumChannels(); ++channel)
        {
            auto bufferSamples = ParentType::buffer.getWritePointer (static_cast<int> (channel));
            auto buf = stateUp.getWritePointer (static_cast<int> (channel));
            auto samples = inputBlock.getChannelPointer (channel);

            for (size_t i = 0; i < numSamples; ++i)
            {
                // Input
                buf[N - 1] = 2 * samples[i];

                // Convolution
                auto out = static_cast<SampleType> (0.0);

                for (size_t k = 0; k < Ndiv2; k += 2)
                    out += (buf[k] + buf[N - k - 1]) * fir[k];

                // Outputs
                bufferSamples[i << 1] = out;
                bufferSamples[(i << 1) + 1] = buf[Ndiv2 + 1] * fir[Ndiv2];

                // Shift data
                for (size_t k = 0; k < N - 2; k += 2)
                    buf[k] = buf[k + 2];
            }
        }
    }

    void processSamplesDown (AudioBlock<SampleType>& outputBlock) override
    {
        jassert (outputBlock.getNumChannels() <= static_cast<size_t> (ParentType::buffer.getNumChannels()));
        jassert (outputBlock.getNumSamples() * ParentType::factor <= static_cast<size_t> (ParentType::buffer.getNumSamples()));

        // Initialization
        auto fir = coefficientsDown.getRawCoefficients();
        auto N = coefficientsDown.getFilterOrder() + 1;
        auto Ndiv2 = N / 2;
        auto Ndiv4 = Ndiv2 / 2;
        auto numSamples = outputBlock.getNumSamples();

        // Processing
        for (size_t channel = 0; channel < outputBlock.getNumChannels(); ++channel)
        {
            auto bufferSamples = ParentType::buffer.getWritePointer (static_cast<int> (channel));
            auto buf = stateDown.getWritePointer (static_cast<int> (channel));
            auto buf2 = stateDown2.getWritePointer (static_cast<int> (channel));
            auto samples = outputBlock.getChannelPointer (channel);
            auto pos = position.getUnchecked (static_cast<int> (channel));

            for (size_t i = 0; i < numSamples; ++i)
            {
                // Input
                buf[N - 1] = bufferSamples[i << 1];

                // Convolution
                auto out = static_cast<SampleType> (0.0);

                for (size_t k = 0; k < Ndiv2; k += 2)
                    out += (buf[k] + buf[N - k - 1]) * fir[k];

                // Output
                out += buf2[pos] * fir[Ndiv2];
                buf2[pos] = bufferSamples[(i << 1) + 1];

                samples[i] = out;

                // Shift data
                for (size_t k = 0; k < N - 2; ++k)
                    buf[k] = buf[k + 2];

                // Circular buffer
                pos = (pos == 0 ? Ndiv4 : pos - 1);
            }

            position.setUnchecked (static_cast<int> (channel), pos);
        }

    }

private:
    //==============================================================================
    FIR::Coefficients<SampleType> coefficientsUp, coefficientsDown;
    AudioBuffer<SampleType> stateUp, stateDown, stateDown2;
    Array<size_t> position;

    //==============================================================================
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (Oversampling2TimesEquirippleFIR)
};


//==============================================================================
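/*  2x oversampling stage built from two cascades of first-order allpass
    sections (a direct path and a one-sample-delayed path), the classic
    polyphase IIR half-band structure. On the way down the two paths are
    averaged to produce each output sample. */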
template <typename SampleType>
struct Oversampling2TimesPolyphaseIIR final : public Oversampling<SampleType>::OversamplingStage
{
    using ParentType = typename Oversampling<SampleType>::OversamplingStage;

    Oversampling2TimesPolyphaseIIR (size_t numChans,
                                    SampleType normalisedTransitionWidthUp,
                                    SampleType stopbandAmplitudedBUp,
                                    SampleType normalisedTransitionWidthDown,
                                    SampleType stopbandAmplitudedBDown)
        : ParentType (numChans, 2)
    {
        auto structureUp = FilterDesign<SampleType>::designIIRLowpassHalfBandPolyphaseAllpassMethod (normalisedTransitionWidthUp, stopbandAmplitudedBUp);
        auto coeffsUp = getCoefficients (structureUp);
        latency = static_cast<SampleType> (-(coeffsUp.getPhaseForFrequency (0.0001, 1.0)) / (0.0001 * MathConstants<double>::twoPi));

        auto structureDown = FilterDesign<SampleType>::designIIRLowpassHalfBandPolyphaseAllpassMethod (normalisedTransitionWidthDown, stopbandAmplitudedBDown);
        auto coeffsDown = getCoefficients (structureDown);
        latency += static_cast<SampleType> (-(coeffsDown.getPhaseForFrequency (0.0001, 1.0)) / (0.0001 * MathConstants<double>::twoPi));

        for (auto i = 0; i < structureUp.directPath.size(); ++i)
            coefficientsUp.add (structureUp.directPath.getObjectPointer (i)->coefficients[0]);

        for (auto i = 1; i < structureUp.delayedPath.size(); ++i)
            coefficientsUp.add (structureUp.delayedPath.getObjectPointer (i)->coefficients[0]);

        for (auto i = 0; i < structureDown.directPath.size(); ++i)
            coefficientsDown.add (structureDown.directPath.getObjectPointer (i)->coefficients[0]);

        for (auto i = 1; i < structureDown.delayedPath.size(); ++i)
            coefficientsDown.add (structureDown.delayedPath.getObjectPointer (i)->coefficients[0]);

        v1Up.setSize (static_cast<int> (this->numChannels), coefficientsUp.size());
        v1Down.setSize (static_cast<int> (this->numChannels), coefficientsDown.size());
        delayDown.resize (static_cast<int> (this->numChannels));
    }

    //==============================================================================
    SampleType getLatencyInSamples() const override
    {
        return latency;
    }

    void reset() override
    {
        ParentType::reset();
        v1Up.clear();
        v1Down.clear();
        delayDown.fill (0);
    }

    void processSamplesUp (const AudioBlock<const SampleType>& inputBlock) override
    {
        jassert (inputBlock.getNumChannels() <= static_cast<size_t> (ParentType::buffer.getNumChannels()));
        jassert (inputBlock.getNumSamples() * ParentType::factor <= static_cast<size_t> (ParentType::buffer.getNumSamples()));

        // Initialization
        auto coeffs = coefficientsUp.getRawDataPointer();
        auto numStages = coefficientsUp.size();
        auto delayedStages = numStages / 2;
        auto directStages = numStages - delayedStages;
        auto numSamples = inputBlock.getNumSamples();

        // Processing
        for (size_t channel = 0; channel < inputBlock.getNumChannels(); ++channel)
        {
            auto bufferSamples = ParentType::buffer.getWritePointer (static_cast<int> (channel));
            auto lv1 = v1Up.getWritePointer (static_cast<int> (channel));
            auto samples = inputBlock.getChannelPointer (channel);

            for (size_t i = 0; i < numSamples; ++i)
            {
                // Direct path cascaded allpass filters
                auto input = samples[i];

                for (auto n = 0; n < directStages; ++n)
                {
                    auto alpha = coeffs[n];
                    auto output = alpha * input + lv1[n];
                    lv1[n] = input - alpha * output;
                    input = output;
                }

                // Output
                bufferSamples[i << 1] = input;

                // Delayed path cascaded allpass filters
                input = samples[i];

                for (auto n = directStages; n < numStages; ++n)
                {
                    auto alpha = coeffs[n];
                    auto output = alpha * input + lv1[n];
                    lv1[n] = input - alpha * output;
                    input = output;
                }

                // Output
                bufferSamples[(i << 1) + 1] = input;
            }
        }

       #if JUCE_DSP_ENABLE_SNAP_TO_ZERO
        snapToZero (true);
       #endif
    }

    void processSamplesDown (AudioBlock<SampleType>& outputBlock) override
    {
        jassert (outputBlock.getNumChannels() <= static_cast<size_t> (ParentType::buffer.getNumChannels()));
        jassert (outputBlock.getNumSamples() * ParentType::factor <= static_cast<size_t> (ParentType::buffer.getNumSamples()));

        // Initialization
        auto coeffs = coefficientsDown.getRawDataPointer();
        auto numStages = coefficientsDown.size();
        auto delayedStages = numStages / 2;
        auto directStages = numStages - delayedStages;
        auto numSamples = outputBlock.getNumSamples();

        // Processing
        for (size_t channel = 0; channel < outputBlock.getNumChannels(); ++channel)
        {
            auto bufferSamples = ParentType::buffer.getWritePointer (static_cast<int> (channel));
            auto lv1 = v1Down.getWritePointer (static_cast<int> (channel));
            auto samples = outputBlock.getChannelPointer (channel);
            auto delay = delayDown.getUnchecked (static_cast<int> (channel));

            for (size_t i = 0; i < numSamples; ++i)
            {
                // Direct path cascaded allpass filters
                auto input = bufferSamples[i << 1];

                for (auto n = 0; n < directStages; ++n)
                {
                    auto alpha = coeffs[n];
                    auto output = alpha * input + lv1[n];
                    lv1[n] = input - alpha * output;
                    input = output;
                }

                auto directOut = input;

                // Delayed path cascaded allpass filters
                input = bufferSamples[(i << 1) + 1];

                for (auto n = directStages; n < numStages; ++n)
                {
                    auto alpha = coeffs[n];
                    auto output = alpha * input + lv1[n];
                    lv1[n] = input - alpha * output;
                    input = output;
                }

                // Output
                samples[i] = (delay + directOut) * static_cast<SampleType> (0.5);
                delay = input;
            }

            delayDown.setUnchecked (static_cast<int> (channel), delay);
        }

       #if JUCE_DSP_ENABLE_SNAP_TO_ZERO
        snapToZero (false);
       #endif
    }

    void snapToZero (bool snapUpProcessing)
    {
        if (snapUpProcessing)
        {
            for (auto channel = 0; channel < ParentType::buffer.getNumChannels(); ++channel)
            {
                auto lv1 = v1Up.getWritePointer (channel);
                auto numStages = coefficientsUp.size();

                for (auto n = 0; n < numStages; ++n)
                    util::snapToZero (lv1[n]);
            }
        }
        else
        {
            for (auto channel = 0; channel < ParentType::buffer.getNumChannels(); ++channel)
            {
                auto lv1 = v1Down.getWritePointer (channel);
                auto numStages = coefficientsDown.size();

                for (auto n = 0; n < numStages; ++n)
                    util::snapToZero (lv1[n]);
            }
        }
    }

private:
    //==============================================================================
    IIR::Coefficients<SampleType> getCoefficients (typename FilterDesign<SampleType>::IIRPolyphaseAllpassStructure& structure) const
    {
        constexpr auto one = static_cast<SampleType> (1.0);

        Polynomial<SampleType> numerator1 ({ one }), denominator1 ({ one }),
                               numerator2 ({ one }), denominator2 ({ one });

        for (auto* i : structure.directPath)
        {
            auto coeffs = i->getRawCoefficients();

            if (i->getFilterOrder() == 1)
            {
                numerator1   = numerator1  .getProductWith (Polynomial<SampleType> ({ coeffs[0], coeffs[1] }));
                denominator1 = denominator1.getProductWith (Polynomial<SampleType> ({ one, coeffs[2] }));
            }
            else
            {
                numerator1   = numerator1  .getProductWith (Polynomial<SampleType> ({ coeffs[0], coeffs[1], coeffs[2] }));
                denominator1 = denominator1.getProductWith (Polynomial<SampleType> ({ one, coeffs[3], coeffs[4] }));
            }
        }

        for (auto* i : structure.delayedPath)
        {
            auto coeffs = i->getRawCoefficients();

            if (i->getFilterOrder() == 1)
            {
                numerator2   = numerator2  .getProductWith (Polynomial<SampleType> ({ coeffs[0], coeffs[1] }));
                denominator2 = denominator2.getProductWith (Polynomial<SampleType> ({ one, coeffs[2] }));
            }
            else
            {
                numerator2   = numerator2  .getProductWith (Polynomial<SampleType> ({ coeffs[0], coeffs[1], coeffs[2] }));
                denominator2 = denominator2.getProductWith (Polynomial<SampleType> ({ one, coeffs[3], coeffs[4] }));
            }
        }

        auto numeratorf1 = numerator1.getProductWith (denominator2);
        auto numeratorf2 = numerator2.getProductWith (denominator1);
        auto numerator = numeratorf1.getSumWith (numeratorf2);
        auto denominator = denominator1.getProductWith (denominator2);

        IIR::Coefficients<SampleType> coeffs;

        coeffs.coefficients.clear();
        auto inversion = one / denominator[0];

        for (int i = 0; i <= numerator.getOrder(); ++i)
            coeffs.coefficients.add (numerator[i] * inversion);

        for (int i = 1; i <= denominator.getOrder(); ++i)
            coeffs.coefficients.add (denominator[i] * inversion);

        return coeffs;
    }

    //==============================================================================
    Array<SampleType> coefficientsUp, coefficientsDown;
    SampleType latency;

    AudioBuffer<SampleType> v1Up, v1Down;
    Array<SampleType> delayDown;

    //==============================================================================
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (Oversampling2TimesPolyphaseIIR)
};


//==============================================================================
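/*  Oversampling: the public class. Each entry in 'stages' is a 2x stage, so a
    constructor factor of N produces an overall oversampling ratio of 2^N
    (a factor of 0 installs the pass-through dummy stage). */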
template <typename SampleType>
Oversampling<SampleType>::Oversampling (size_t newNumChannels)
    : numChannels (newNumChannels)
{
    jassert (numChannels > 0);

    addDummyOversamplingStage();
}

template <typename SampleType>
Oversampling<SampleType>::Oversampling (size_t newNumChannels, size_t newFactor,
                                        FilterType newType, bool isMaximumQuality,
                                        bool useIntegerLatency)
    : numChannels (newNumChannels), shouldUseIntegerLatency (useIntegerLatency)
{
    jassert (isPositiveAndBelow (newFactor, 5) && numChannels > 0);

    if (newFactor == 0)
    {
        addDummyOversamplingStage();
    }
    else if (newType == FilterType::filterHalfBandPolyphaseIIR)
    {
        for (size_t n = 0; n < newFactor; ++n)
        {
            auto twUp   = (isMaximumQuality ? 0.10f : 0.12f) * (n == 0 ? 0.5f : 1.0f);
            auto twDown = (isMaximumQuality ? 0.12f : 0.15f) * (n == 0 ? 0.5f : 1.0f);

            auto gaindBStartUp    = (isMaximumQuality ? -90.0f : -70.0f);
            auto gaindBStartDown  = (isMaximumQuality ? -75.0f : -60.0f);
            auto gaindBFactorUp   = (isMaximumQuality ? 10.0f : 8.0f);
            auto gaindBFactorDown = (isMaximumQuality ? 10.0f : 8.0f);

            addOversamplingStage (FilterType::filterHalfBandPolyphaseIIR,
                                  twUp, gaindBStartUp + gaindBFactorUp * (float) n,
                                  twDown, gaindBStartDown + gaindBFactorDown * (float) n);
        }
    }
    else if (newType == FilterType::filterHalfBandFIREquiripple)
    {
        for (size_t n = 0; n < newFactor; ++n)
        {
            auto twUp   = (isMaximumQuality ? 0.10f : 0.12f) * (n == 0 ? 0.5f : 1.0f);
            auto twDown = (isMaximumQuality ? 0.12f : 0.15f) * (n == 0 ? 0.5f : 1.0f);

            auto gaindBStartUp    = (isMaximumQuality ? -90.0f : -70.0f);
            auto gaindBStartDown  = (isMaximumQuality ? -75.0f : -60.0f);
            auto gaindBFactorUp   = (isMaximumQuality ? 10.0f : 8.0f);
            auto gaindBFactorDown = (isMaximumQuality ? 10.0f : 8.0f);

            addOversamplingStage (FilterType::filterHalfBandFIREquiripple,
                                  twUp, gaindBStartUp + gaindBFactorUp * (float) n,
                                  twDown, gaindBStartDown + gaindBFactorDown * (float) n);
        }
    }
}

template <typename SampleType>
Oversampling<SampleType>::~Oversampling()
{
    stages.clear();
}

//==============================================================================
template <typename SampleType>
void Oversampling<SampleType>::addDummyOversamplingStage()
{
    stages.add (new OversamplingDummy<SampleType> (numChannels));
}

template <typename SampleType>
void Oversampling<SampleType>::addOversamplingStage (FilterType type,
                                                     float normalisedTransitionWidthUp,
                                                     float stopbandAmplitudedBUp,
                                                     float normalisedTransitionWidthDown,
                                                     float stopbandAmplitudedBDown)
{
    if (type == FilterType::filterHalfBandPolyphaseIIR)
    {
        stages.add (new Oversampling2TimesPolyphaseIIR<SampleType> (numChannels,
                                                                    normalisedTransitionWidthUp, stopbandAmplitudedBUp,
                                                                    normalisedTransitionWidthDown, stopbandAmplitudedBDown));
    }
    else
    {
        stages.add (new Oversampling2TimesEquirippleFIR<SampleType> (numChannels,
                                                                     normalisedTransitionWidthUp, stopbandAmplitudedBUp,
                                                                     normalisedTransitionWidthDown, stopbandAmplitudedBDown));
    }

    factorOversampling *= 2;
}

template <typename SampleType>
void Oversampling<SampleType>::clearOversamplingStages()
{
    stages.clear();
    factorOversampling = 1u;
}

//==============================================================================
template <typename SampleType>
void Oversampling<SampleType>::setUsingIntegerLatency (bool useIntegerLatency) noexcept
{
    shouldUseIntegerLatency = useIntegerLatency;
}

template <typename SampleType>
SampleType Oversampling<SampleType>::getLatencyInSamples() const noexcept
{
    auto latency = getUncompensatedLatency();
    return shouldUseIntegerLatency ? latency + fractionalDelay : latency;
}

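/*  Example: with two 2x stages whose filters report latencies l1 and l2 (each
    measured at that stage's own, oversampled rate), the total accumulated below,
    referred to the input rate, is l1 / 2 + l2 / 4. */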
template <typename SampleType>
SampleType Oversampling<SampleType>::getUncompensatedLatency() const noexcept
{
    auto latency = static_cast<SampleType> (0);
    size_t order = 1;

    for (auto* stage : stages)
    {
        order *= stage->factor;
        latency += stage->getLatencyInSamples() / static_cast<SampleType> (order);
    }

    return latency;
}

template <typename SampleType>
size_t Oversampling<SampleType>::getOversamplingFactor() const noexcept
{
    return factorOversampling;
}

//==============================================================================
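/*  initProcessing sizes every stage's internal buffer for the growing block
    size (multiplied by each stage's factor), prepares the fractional-delay
    compensation line, then resets all state. */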
template <typename SampleType>
void Oversampling<SampleType>::initProcessing (size_t maximumNumberOfSamplesBeforeOversampling)
{
    jassert (! stages.isEmpty());
    auto currentNumSamples = maximumNumberOfSamplesBeforeOversampling;

    for (auto* stage : stages)
    {
        stage->initProcessing (currentNumSamples);
        currentNumSamples *= stage->factor;
    }

    ProcessSpec spec = { 0.0, (uint32) maximumNumberOfSamplesBeforeOversampling, (uint32) numChannels };
    delay.prepare (spec);
    updateDelayLine();

    isReady = true;
    reset();
}

template <typename SampleType>
void Oversampling<SampleType>::reset() noexcept
{
    jassert (! stages.isEmpty());

    if (isReady)
        for (auto* stage : stages)
            stage->reset();

    delay.reset();
}

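/*  processSamplesUp pushes the block through each stage in turn; every stage
    filters and interpolates into its own buffer, and the returned AudioBlock
    refers to the last stage's buffer at the full oversampled rate. */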
template <typename SampleType>
AudioBlock<SampleType> Oversampling<SampleType>::processSamplesUp (const AudioBlock<const SampleType>& inputBlock) noexcept
{
    jassert (! stages.isEmpty());

    if (! isReady)
        return {};

    auto* firstStage = stages.getUnchecked (0);
    firstStage->processSamplesUp (inputBlock);
    auto block = firstStage->getProcessedSamples (inputBlock.getNumSamples() * firstStage->factor);

    for (int i = 1; i < stages.size(); ++i)
    {
        stages[i]->processSamplesUp (block);
        block = stages[i]->getProcessedSamples (block.getNumSamples() * stages[i]->factor);
    }

    return block;
}

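/*  processSamplesDown walks the stages in reverse order, each stage filtering
    and decimating the previous stage's buffer, and finally writes into
    outputBlock, applying the fractional-delay compensation when integer
    latency is requested. */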
template <typename SampleType>
void Oversampling<SampleType>::processSamplesDown (AudioBlock<SampleType>& outputBlock) noexcept
{
    jassert (! stages.isEmpty());

    if (! isReady)
        return;

    auto currentNumSamples = outputBlock.getNumSamples();

    for (int n = 0; n < stages.size() - 1; ++n)
        currentNumSamples *= stages.getUnchecked (n)->factor;

    for (int n = stages.size() - 1; n > 0; --n)
    {
        auto& stage = *stages.getUnchecked (n);
        auto audioBlock = stages.getUnchecked (n - 1)->getProcessedSamples (currentNumSamples);
        stage.processSamplesDown (audioBlock);

        currentNumSamples /= stage.factor;
    }

    stages.getFirst()->processSamplesDown (outputBlock);

    if (shouldUseIntegerLatency && fractionalDelay > static_cast<SampleType> (0.0))
    {
        auto context = ProcessContextReplacing<SampleType> (outputBlock);
        delay.process (context);
    }
}

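/*  updateDelayLine sets the compensating delay to the amount needed to round
    the total latency up to the next integer; fractional values below 0.618 are
    increased by one full sample before being handed to the delay line. */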
template <typename SampleType>
void Oversampling<SampleType>::updateDelayLine()
{
    auto latency = getUncompensatedLatency();
    fractionalDelay = static_cast<SampleType> (1.0) - (latency - std::floor (latency));

    if (approximatelyEqual (fractionalDelay, static_cast<SampleType> (1.0)))
        fractionalDelay = static_cast<SampleType> (0.0);
    else if (fractionalDelay < static_cast<SampleType> (0.618))
        fractionalDelay += static_cast<SampleType> (1.0);

    delay.setDelay (fractionalDelay);
}

template class Oversampling<float>;
template class Oversampling<double>;

} // namespace juce::dsp
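A minimal usage sketch, not part of the JUCE sources above, showing how the Oversampling class is typically driven from a processor. The OversampledWaveshaper wrapper, the module include path, the channel count, the single 2x stage (factor parameter 1) and the tanh nonlinearity are illustrative assumptions only:

    #include <cmath>
    #include <juce_dsp/juce_dsp.h>

    struct OversampledWaveshaper
    {
        // 2 channels, one 2x stage, polyphase IIR filters,
        // maximum quality, integer total latency.
        juce::dsp::Oversampling<float> oversampling { 2, 1,
            juce::dsp::Oversampling<float>::filterHalfBandPolyphaseIIR, true, true };

        void prepare (const juce::dsp::ProcessSpec& spec)
        {
            // Size the stage buffers for the largest block the host will send.
            oversampling.initProcessing ((size_t) spec.maximumBlockSize);
            // oversampling.getLatencyInSamples() would be reported to the host here.
        }

        void process (juce::dsp::AudioBlock<float> block)
        {
            // Upsample, run the nonlinearity at the higher rate, then
            // filter and decimate back into the original block.
            auto oversampledBlock = oversampling.processSamplesUp (block);

            for (size_t ch = 0; ch < oversampledBlock.getNumChannels(); ++ch)
            {
                auto* data = oversampledBlock.getChannelPointer (ch);

                for (size_t i = 0; i < oversampledBlock.getNumSamples(); ++i)
                    data[i] = std::tanh (data[i]);
            }

            oversampling.processSamplesDown (block);
        }
    };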