blob: 102407d0f0f0fd223f6eaf9836dfa2b18278a5f7 [file] [log] [blame]
niklase@google.com470e71d2011-07-07 08:21:25 +00001/*
andrew@webrtc.org02d71742012-04-24 19:47:00 +00002 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
niklase@google.com470e71d2011-07-07 08:21:25 +00003 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
Henrik Kjellanderff761fb2015-11-04 08:31:52 +010011#include "webrtc/modules/include/module_common_types.h"
12#include "webrtc/modules/utility/include/audio_frame_operations.h"
solenberg1c2af8e2016-03-24 10:36:00 -070013#include "webrtc/base/checks.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000014
namespace webrtc {
namespace {

// Number of samples (per channel) over which Mute() performs its linear
// fade-in/fade-out: 2.7ms @ 48kHz, 4ms @ 32kHz, 8ms @ 16kHz.
const size_t kMuteFadeFrames = 128;
// Per-sample gain step so a full fade spans [0, 1] over kMuteFadeFrames.
const float kMuteFadeInc = 1.0f / kMuteFadeFrames;

}  // namespace
niklase@google.com470e71d2011-07-07 08:21:25 +000023
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000024void AudioFrameOperations::MonoToStereo(const int16_t* src_audio,
Peter Kastingdce40cf2015-08-24 14:52:23 -070025 size_t samples_per_channel,
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000026 int16_t* dst_audio) {
Peter Kastingdce40cf2015-08-24 14:52:23 -070027 for (size_t i = 0; i < samples_per_channel; i++) {
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000028 dst_audio[2 * i] = src_audio[i];
29 dst_audio[2 * i + 1] = src_audio[i];
30 }
31}
32
33int AudioFrameOperations::MonoToStereo(AudioFrame* frame) {
34 if (frame->num_channels_ != 1) {
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +000035 return -1;
36 }
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000037 if ((frame->samples_per_channel_ * 2) >= AudioFrame::kMaxDataSizeSamples) {
38 // Not enough memory to expand from mono to stereo.
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +000039 return -1;
40 }
niklase@google.com470e71d2011-07-07 08:21:25 +000041
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000042 int16_t data_copy[AudioFrame::kMaxDataSizeSamples];
43 memcpy(data_copy, frame->data_,
44 sizeof(int16_t) * frame->samples_per_channel_);
45 MonoToStereo(data_copy, frame->samples_per_channel_, frame->data_);
46 frame->num_channels_ = 2;
niklase@google.com470e71d2011-07-07 08:21:25 +000047
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +000048 return 0;
niklase@google.com470e71d2011-07-07 08:21:25 +000049}
50
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000051void AudioFrameOperations::StereoToMono(const int16_t* src_audio,
Peter Kastingdce40cf2015-08-24 14:52:23 -070052 size_t samples_per_channel,
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000053 int16_t* dst_audio) {
Peter Kastingdce40cf2015-08-24 14:52:23 -070054 for (size_t i = 0; i < samples_per_channel; i++) {
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000055 dst_audio[i] = (src_audio[2 * i] + src_audio[2 * i + 1]) >> 1;
56 }
57}
58
59int AudioFrameOperations::StereoToMono(AudioFrame* frame) {
60 if (frame->num_channels_ != 2) {
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +000061 return -1;
62 }
niklase@google.com470e71d2011-07-07 08:21:25 +000063
andrew@webrtc.org4ecea3e2012-06-27 03:25:31 +000064 StereoToMono(frame->data_, frame->samples_per_channel_, frame->data_);
65 frame->num_channels_ = 1;
niklase@google.com470e71d2011-07-07 08:21:25 +000066
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +000067 return 0;
niklase@google.com470e71d2011-07-07 08:21:25 +000068}
69
andrew@webrtc.org02d71742012-04-24 19:47:00 +000070void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +000071 if (frame->num_channels_ != 2) return;
andrew@webrtc.org1c7bfe02012-04-26 00:20:28 +000072
Peter Kastingdce40cf2015-08-24 14:52:23 -070073 for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +000074 int16_t temp_data = frame->data_[i];
75 frame->data_[i] = frame->data_[i + 1];
76 frame->data_[i + 1] = temp_data;
andrew@webrtc.org02d71742012-04-24 19:47:00 +000077 }
78}
79
// Mutes |frame| according to the mute state of the previous and current
// frame:
//  - unmuted -> unmuted: frame is left untouched.
//  - muted   -> muted:   all samples are zeroed.
//  - transition:         a linear fade (up to kMuteFadeFrames samples per
//                        channel) is applied — fade-out at the end of the
//                        frame when muting, fade-in at the start when
//                        unmuting.
void AudioFrameOperations::Mute(AudioFrame* frame, bool previous_frame_muted,
                                bool current_frame_muted) {
  RTC_DCHECK(frame);
  if (!previous_frame_muted && !current_frame_muted) {
    // Not muted, don't touch.
  } else if (previous_frame_muted && current_frame_muted) {
    // Frame fully muted.
    size_t total_samples = frame->samples_per_channel_ * frame->num_channels_;
    RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples, total_samples);
    memset(frame->data_, 0, sizeof(frame->data_[0]) * total_samples);
  } else {
    // Limit number of samples to fade, if frame isn't long enough.
    size_t count = kMuteFadeFrames;
    float inc = kMuteFadeInc;
    if (frame->samples_per_channel_ < kMuteFadeFrames) {
      count = frame->samples_per_channel_;
      if (count > 0) {
        inc = 1.0f / count;
      }
    }

    // Fade parameters: [start, end) is the per-channel sample range to
    // process, start_g the gain before the first increment is applied.
    size_t start = 0;
    size_t end = count;
    float start_g = 0.0f;
    if (current_frame_muted) {
      // Fade out the last |count| samples of frame.
      RTC_DCHECK(!previous_frame_muted);
      start = frame->samples_per_channel_ - count;
      end = frame->samples_per_channel_;
      start_g = 1.0f;
      inc = -inc;  // Descend from 1.0 toward 0.
    } else {
      // Fade in the first |count| samples of frame.
      RTC_DCHECK(previous_frame_muted);
    }

    // Perform fade. Note that |g| is incremented before the multiply, so a
    // fade-in ramps from inc up to 1.0 and a fade-out ramps down to 0.0 on
    // the final sample. Samples are interleaved, hence the stride of
    // |channels| with offset |j| per channel.
    size_t channels = frame->num_channels_;
    for (size_t j = 0; j < channels; ++j) {
      float g = start_g;
      for (size_t i = start * channels; i < end * channels; i += channels) {
        g += inc;
        frame->data_[i + j] *= g;
      }
    }
  }
}
127
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000128int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000129 if (frame.num_channels_ != 2) {
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000130 return -1;
131 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000132
Peter Kastingdce40cf2015-08-24 14:52:23 -0700133 for (size_t i = 0; i < frame.samples_per_channel_; i++) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000134 frame.data_[2 * i] =
135 static_cast<int16_t>(left * frame.data_[2 * i]);
136 frame.data_[2 * i + 1] =
137 static_cast<int16_t>(right * frame.data_[2 * i + 1]);
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000138 }
139 return 0;
niklase@google.com470e71d2011-07-07 08:21:25 +0000140}
141
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000142int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) {
143 int32_t temp_data = 0;
niklase@google.com470e71d2011-07-07 08:21:25 +0000144
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000145 // Ensure that the output result is saturated [-32768, +32767].
Peter Kastingdce40cf2015-08-24 14:52:23 -0700146 for (size_t i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000147 i++) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000148 temp_data = static_cast<int32_t>(scale * frame.data_[i]);
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000149 if (temp_data < -32768) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000150 frame.data_[i] = -32768;
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000151 } else if (temp_data > 32767) {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000152 frame.data_[i] = 32767;
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000153 } else {
andrew@webrtc.org63a50982012-05-02 23:56:37 +0000154 frame.data_[i] = static_cast<int16_t>(temp_data);
niklase@google.com470e71d2011-07-07 08:21:25 +0000155 }
andrew@webrtc.org9c4f6a52012-04-26 22:32:03 +0000156 }
157 return 0;
niklase@google.com470e71d2011-07-07 08:21:25 +0000158}
159
pbos@webrtc.orgd900e8b2013-07-03 15:12:26 +0000160} // namespace webrtc