blob: 435d676f13ac81dfffed1d72f620088369dd2bae [file] [log] [blame]
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

Henrik Kjellanderff761fb2015-11-04 08:31:52 +010011#include "webrtc/modules/include/module_common_types.h"
12#include "webrtc/modules/utility/include/audio_frame_operations.h"
solenberg1c2af8e2016-03-24 10:36:00 -070013#include "webrtc/base/checks.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000014
15namespace webrtc {
solenberg1c2af8e2016-03-24 10:36:00 -070016namespace {
17
18// 2.7ms @ 48kHz, 4ms @ 32kHz, 8ms @ 16kHz.
19const size_t kMuteFadeFrames = 128;
20const float kMuteFadeInc = 1.0f / kMuteFadeFrames;
21
22} // namespace {
niklase@google.com470e71d2011-07-07 08:21:25 +000023
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000024void AudioFrameOperations::MonoToStereo(const int16_t* src_audio,
Peter Kastingdce40cf2015-08-24 14:52:23 -070025 size_t samples_per_channel,
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000026 int16_t* dst_audio) {
Peter Kastingdce40cf2015-08-24 14:52:23 -070027 for (size_t i = 0; i < samples_per_channel; i++) {
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000028 dst_audio[2 * i] = src_audio[i];
29 dst_audio[2 * i + 1] = src_audio[i];
30 }
31}
32
33int AudioFrameOperations::MonoToStereo(AudioFrame* frame) {
34 if (frame->num_channels_ != 1) {
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +000035 return -1;
36 }
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000037 if ((frame->samples_per_channel_ * 2) >= AudioFrame::kMaxDataSizeSamples) {
38 // Not enough memory to expand from mono to stereo.
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +000039 return -1;
40 }
niklase@google.com470e71d2011-07-07 08:21:25 +000041
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000042 int16_t data_copy[AudioFrame::kMaxDataSizeSamples];
43 memcpy(data_copy, frame->data_,
44 sizeof(int16_t) * frame->samples_per_channel_);
45 MonoToStereo(data_copy, frame->samples_per_channel_, frame->data_);
46 frame->num_channels_ = 2;
niklase@google.com470e71d2011-07-07 08:21:25 +000047
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +000048 return 0;
niklase@google.com470e71d2011-07-07 08:21:25 +000049}
50
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000051void AudioFrameOperations::StereoToMono(const int16_t* src_audio,
Peter Kastingdce40cf2015-08-24 14:52:23 -070052 size_t samples_per_channel,
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000053 int16_t* dst_audio) {
Peter Kastingdce40cf2015-08-24 14:52:23 -070054 for (size_t i = 0; i < samples_per_channel; i++) {
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000055 dst_audio[i] = (src_audio[2 * i] + src_audio[2 * i + 1]) >> 1;
56 }
57}
58
59int AudioFrameOperations::StereoToMono(AudioFrame* frame) {
60 if (frame->num_channels_ != 2) {
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +000061 return -1;
62 }
niklase@google.com470e71d2011-07-07 08:21:25 +000063
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000064 StereoToMono(frame->data_, frame->samples_per_channel_, frame->data_);
65 frame->num_channels_ = 1;
niklase@google.com470e71d2011-07-07 08:21:25 +000066
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +000067 return 0;
niklase@google.com470e71d2011-07-07 08:21:25 +000068}
69
andrew@webrtc.org02d71742012-04-24 19:47:00 +000070void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +000071 if (frame->num_channels_ != 2) return;
andrew@webrtc.org1c7bfe02012-04-26 00:20:28 +000072
Peter Kastingdce40cf2015-08-24 14:52:23 -070073 for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +000074 int16_t temp_data = frame->data_[i];
75 frame->data_[i] = frame->data_[i + 1];
76 frame->data_[i + 1] = temp_data;
andrew@webrtc.org02d71742012-04-24 19:47:00 +000077 }
78}
79
// Mutes |frame| based on the mute state of the previous and current frames:
//  - both unmuted: frame is left untouched;
//  - both muted: frame data is zeroed;
//  - transition: a linear gain ramp (fade-out when entering mute, fade-in
//    when leaving it) of up to kMuteFadeFrames samples is applied so the
//    transition does not click.
// Requires a non-null, interleaved frame.
void AudioFrameOperations::Mute(AudioFrame* frame, bool previous_frame_muted,
                                bool current_frame_muted) {
  RTC_DCHECK(frame);
  RTC_DCHECK(frame->interleaved_);
  if (!previous_frame_muted && !current_frame_muted) {
    // Not muted, don't touch.
  } else if (previous_frame_muted && current_frame_muted) {
    // Frame fully muted.
    size_t total_samples = frame->samples_per_channel_ * frame->num_channels_;
    RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples, total_samples);
    memset(frame->data_, 0, sizeof(frame->data_[0]) * total_samples);
  } else {
    // Limit number of samples to fade, if frame isn't long enough.
    size_t count = kMuteFadeFrames;
    float inc = kMuteFadeInc;
    if (frame->samples_per_channel_ < kMuteFadeFrames) {
      count = frame->samples_per_channel_;
      if (count > 0) {
        // Rescale the step so the ramp still covers the full [0, 1] gain
        // range over the shorter fade.
        inc = 1.0f / count;
      }
    }

    size_t start = 0;
    size_t end = count;
    float start_g = 0.0f;
    if (current_frame_muted) {
      // Fade out the last |count| samples of frame.
      RTC_DCHECK(!previous_frame_muted);
      start = frame->samples_per_channel_ - count;
      end = frame->samples_per_channel_;
      start_g = 1.0f;
      inc = -inc;  // Ramp runs downward from 1.0 toward 0.0.
    } else {
      // Fade in the first |count| samples of frame.
      RTC_DCHECK(previous_frame_muted);
    }

    // Perform fade. The gain is advanced *before* each multiply, so the
    // first faded sample gets start_g + inc and, after |count| steps, the
    // ramp lands exactly on 1.0 (fade-in) or 0.0 (fade-out). The int16_t
    // *= float product is truncated back to int16_t on assignment.
    size_t channels = frame->num_channels_;
    for (size_t j = 0; j < channels; ++j) {
      float g = start_g;
      // Step by |channels| so each pass touches one channel's samples in
      // the interleaved buffer; |j| selects the channel offset.
      for (size_t i = start * channels; i < end * channels; i += channels) {
        g += inc;
        frame->data_[i + j] *= g;
      }
    }
  }
}
128
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000129int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000130 if (frame.num_channels_ != 2) {
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000131 return -1;
132 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000133
Peter Kastingdce40cf2015-08-24 14:52:23 -0700134 for (size_t i = 0; i < frame.samples_per_channel_; i++) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000135 frame.data_[2 * i] =
136 static_cast<int16_t>(left * frame.data_[2 * i]);
137 frame.data_[2 * i + 1] =
138 static_cast<int16_t>(right * frame.data_[2 * i + 1]);
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000139 }
140 return 0;
niklase@google.com470e71d2011-07-07 08:21:25 +0000141}
142
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000143int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) {
144 int32_t temp_data = 0;
niklase@google.com470e71d2011-07-07 08:21:25 +0000145
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000146 // Ensure that the output result is saturated [-32768, +32767].
Peter Kastingdce40cf2015-08-24 14:52:23 -0700147 for (size_t i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000148 i++) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000149 temp_data = static_cast<int32_t>(scale * frame.data_[i]);
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000150 if (temp_data < -32768) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000151 frame.data_[i] = -32768;
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000152 } else if (temp_data > 32767) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000153 frame.data_[i] = 32767;
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000154 } else {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000155 frame.data_[i] = static_cast<int16_t>(temp_data);
niklase@google.com470e71d2011-07-07 08:21:25 +0000156 }
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000157 }
158 return 0;
niklase@google.com470e71d2011-07-07 08:21:25 +0000159}
160
pbos@webrtc.orgd900e8b2013-07-03 15:12:26 +0000161} // namespace webrtc