henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 1 | // libjingle |
| 2 | // Copyright 2010 Google Inc. |
| 3 | // |
| 4 | // Redistribution and use in source and binary forms, with or without |
| 5 | // modification, are permitted provided that the following conditions are met: |
| 6 | // |
| 7 | // 1. Redistributions of source code must retain the above copyright notice, |
| 8 | // this list of conditions and the following disclaimer. |
| 9 | // 2. Redistributions in binary form must reproduce the above copyright notice, |
| 10 | // this list of conditions and the following disclaimer in the documentation |
| 11 | // and/or other materials provided with the distribution. |
| 12 | // 3. The name of the author may not be used to endorse or promote products |
| 13 | // derived from this software without specific prior written permission. |
| 14 | // |
| 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
| 16 | // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
| 17 | // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO |
| 18 | // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 19 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
| 20 | // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; |
| 21 | // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
| 22 | // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR |
| 23 | // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF |
| 24 | // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 25 | |
| 26 | #include "talk/media/base/videoadapter.h" |
| 27 | |
| 28 | #include <limits.h> // For INT_MAX |
| 29 | |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 30 | #include "talk/base/logging.h" |
| 31 | #include "talk/base/timeutils.h" |
henrike@webrtc.org | 28654cb | 2013-07-22 21:07:49 +0000 | [diff] [blame] | 32 | #include "talk/media/base/constants.h" |
henrike@webrtc.org | 28e2075 | 2013-07-10 00:45:36 +0000 | [diff] [blame] | 33 | #include "talk/media/base/videoframe.h" |
| 34 | |
| 35 | namespace cricket { |
| 36 | |
// TODO(fbarchard): Make downgrades settable
static const int kMaxCpuDowngrades = 2;  // Downgrade at most 2 times for CPU.
// The number of milliseconds of data to require before acting on cpu sampling
// information.
static const size_t kCpuLoadMinSampleTime = 5000;
// The amount of weight to give to each new cpu load sample. The lower the
// value, the slower we'll adapt to changing cpu conditions.
static const float kCpuLoadWeightCoefficient = 0.4f;
// The seed value for the cpu load moving average.
static const float kCpuLoadInitialAverage = 0.5f;

// TODO(fbarchard): Consider making scale factor table settable, to allow
// application to select quality vs performance tradeoff.
// TODO(fbarchard): Add framerate scaling to tables for 1/2 framerate.
// List of scale factors that adapter will scale by.
// The tables are read-only at runtime (FindClosestScale/FindLowerScale only
// index into them), so they are declared const to catch accidental writes.
#if defined(IOS) || defined(ANDROID)
// Mobile needs 1/4 scale for VGA (640 x 360) to QQVGA (160 x 90)
// or 1/4 scale for HVGA (480 x 270) to QQHVGA (120 x 67)
static const int kMinNumPixels = 120 * 67;
static const float kScaleFactors[] = {
  1.f / 1.f,   // Full size.
  3.f / 4.f,   // 3/4 scale.
  1.f / 2.f,   // 1/2 scale.
  3.f / 8.f,   // 3/8 scale.
  1.f / 4.f,   // 1/4 scale.
};
#else
// Desktop needs 1/8 scale for HD (1280 x 720) to QQVGA (160 x 90)
static const int kMinNumPixels = 160 * 100;
static const float kScaleFactors[] = {
  1.f / 1.f,   // Full size.
  3.f / 4.f,   // 3/4 scale.
  1.f / 2.f,   // 1/2 scale.
  3.f / 8.f,   // 3/8 scale.
  1.f / 4.f,   // 1/4 scale.
  3.f / 16.f,  // 3/16 scale.
  1.f / 8.f    // 1/8 scale.
};
#endif
| 78 | |
| 79 | // Find the scale factor that, when applied to width and height, is closest |
| 80 | // to num_pixels. |
| 81 | float VideoAdapter::FindClosestScale(int width, int height, |
| 82 | int target_num_pixels) { |
| 83 | if (!target_num_pixels) { |
| 84 | return 0.f; |
| 85 | } |
| 86 | int best_distance = INT_MAX; |
| 87 | int best_index = kNumScaleFactors - 1; // Default to max scale. |
| 88 | for (int i = 0; i < kNumScaleFactors; ++i) { |
| 89 | int test_num_pixels = static_cast<int>(width * kScaleFactors[i] * |
| 90 | height * kScaleFactors[i]); |
| 91 | int diff = test_num_pixels - target_num_pixels; |
| 92 | if (diff < 0) { |
| 93 | diff = -diff; |
| 94 | } |
| 95 | if (diff < best_distance) { |
| 96 | best_distance = diff; |
| 97 | best_index = i; |
| 98 | if (best_distance == 0) { // Found exact match. |
| 99 | break; |
| 100 | } |
| 101 | } |
| 102 | } |
| 103 | return kScaleFactors[best_index]; |
| 104 | } |
| 105 | |
| 106 | // Finds the scale factor that, when applied to width and height, produces |
| 107 | // fewer than num_pixels. |
| 108 | float VideoAdapter::FindLowerScale(int width, int height, |
| 109 | int target_num_pixels) { |
| 110 | if (!target_num_pixels) { |
| 111 | return 0.f; |
| 112 | } |
| 113 | int best_distance = INT_MAX; |
| 114 | int best_index = kNumScaleFactors - 1; // Default to max scale. |
| 115 | for (int i = 0; i < kNumScaleFactors; ++i) { |
| 116 | int test_num_pixels = static_cast<int>(width * kScaleFactors[i] * |
| 117 | height * kScaleFactors[i]); |
| 118 | int diff = target_num_pixels - test_num_pixels; |
| 119 | if (diff >= 0 && diff < best_distance) { |
| 120 | best_distance = diff; |
| 121 | best_index = i; |
| 122 | if (best_distance == 0) { // Found exact match. |
| 123 | break; |
| 124 | } |
| 125 | } |
| 126 | } |
| 127 | return kScaleFactors[best_index]; |
| 128 | } |
| 129 | |
// There are several frame sizes used by Adapter. This explains them:
// input_format - set once by server to the frame size expected from the
//   camera.
// output_format - size that output would like to be. Includes framerate.
// output_num_pixels - size that output should be constrained to. Used to
//   compute output_format from in_frame.
// in_frame - actual camera-captured frame size, which is typically the same
//   as input_format. This can also be rotated or cropped for aspect ratio.
// out_frame - actual frame output by adapter. Should be a direct scale of
//   in_frame, maintaining rotation and aspect ratio.
// OnOutputFormatRequest - server requests that you send this resolution,
//   based on view requests.
// OnEncoderResolutionRequest - encoder requests that you send this
//   resolution, based on bandwidth.
// OnCpuLoadUpdated - cpu monitor requests that you send this resolution,
//   based on cpu load.
///////////////////////////////////////////////////////////////////////
// Implementation of VideoAdapter
// Starts unconstrained (INT_MAX pixels), with normal (non-black) output and
// no accumulated interval for framerate-based frame dropping.
VideoAdapter::VideoAdapter()
    : output_num_pixels_(INT_MAX),
      black_output_(false),
      is_black_(false),
      interval_next_frame_(0) {
}
| 154 | |
// Nothing to release explicitly; members (including output_frame_) clean up
// through their own destructors.
VideoAdapter::~VideoAdapter() {
}
| 157 | |
// Records the dimensions of an actual captured frame as the current input
// format. Note: only width/height are updated here; the input interval is
// left as previously set.
void VideoAdapter::SetInputFormat(const VideoFrame& in_frame) {
  talk_base::CritScope cs(&critical_section_);
  input_format_.width = static_cast<int>(in_frame.GetWidth());
  input_format_.height = static_cast<int>(in_frame.GetHeight());
}
| 163 | |
// Sets the full input format (size and frame interval). The output interval
// is clamped to be at least the input interval, since the adapter can only
// drop frames, never synthesize new ones.
void VideoAdapter::SetInputFormat(const VideoFormat& format) {
  talk_base::CritScope cs(&critical_section_);
  input_format_ = format;
  output_format_.interval = talk_base::_max(
      output_format_.interval, input_format_.interval);
}
| 170 | |
// Sets the desired output format. Derives the overall pixel constraint from
// the requested size, and clamps the output interval to at least the input
// interval (output fps cannot exceed input fps).
void VideoAdapter::SetOutputFormat(const VideoFormat& format) {
  talk_base::CritScope cs(&critical_section_);
  output_format_ = format;
  output_num_pixels_ = output_format_.width * output_format_.height;
  output_format_.interval = talk_base::_max(
      output_format_.interval, input_format_.interval);
}
| 178 | |
// Accessor for the current input format. The returned reference aliases a
// member, so it is only stable while no other thread mutates the adapter.
const VideoFormat& VideoAdapter::input_format() {
  talk_base::CritScope cs(&critical_section_);
  return input_format_;
}
| 183 | |
// Accessor for the current output format. Same aliasing caveat as
// input_format(): the reference outlives the lock taken here.
const VideoFormat& VideoAdapter::output_format() {
  talk_base::CritScope cs(&critical_section_);
  return output_format_;
}
| 188 | |
// Toggles black-frame output: when enabled, adapted frames are replaced by a
// black frame of the same size and timestamps (see StretchToOutputFrame).
void VideoAdapter::SetBlackOutput(bool black) {
  talk_base::CritScope cs(&critical_section_);
  black_output_ = black;
}
| 193 | |
// Constrain output resolution to this many pixels overall. 0 makes
// AdaptFrame drop every frame; INT_MAX means unconstrained.
// NOTE(review): unlike the setters above, this does not take
// critical_section_ — presumably callers serialize access themselves;
// confirm before relying on cross-thread use.
void VideoAdapter::SetOutputNumPixels(int num_pixels) {
  output_num_pixels_ = num_pixels;
}
| 198 | |
// Returns the current overall pixel constraint (see SetOutputNumPixels).
int VideoAdapter::GetOutputNumPixels() const {
  return output_num_pixels_;
}
| 202 | |
// TODO(fbarchard): Add AdaptFrameRate function that only drops frames but
// not resolution.
// Adapts one captured frame. Depending on state, the frame is either dropped
// (for framerate reduction, or unconditionally when the pixel constraint is
// 0), or scaled to honor output_num_pixels_. Returns false on bad arguments
// or a failed stretch. Returns true with *out_frame == NULL when the frame
// is deliberately dropped — callers must check for NULL, not just the return.
bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
                              const VideoFrame** out_frame) {
  talk_base::CritScope cs(&critical_section_);
  if (!in_frame || !out_frame) {
    return false;
  }

  // Update input to actual frame dimensions.
  SetInputFormat(*in_frame);

  // Drop the input frame if necessary.
  bool should_drop = false;
  if (!output_num_pixels_) {
    // Drop all frames as the output format is 0x0.
    should_drop = true;
  } else {
    // Drop some frames based on input fps and output fps.
    // Normally output fps is less than input fps.
    // TODO(fbarchard): Consider adjusting interval to reflect the adjusted
    // interval between frames after dropping some frames.
    interval_next_frame_ += input_format_.interval;
    if (output_format_.interval > 0) {
      if (interval_next_frame_ >= output_format_.interval) {
        // Enough input time has accumulated for one output frame; keep this
        // frame and carry the remainder forward.
        interval_next_frame_ %= output_format_.interval;
      } else {
        should_drop = true;
      }
    }
  }
  if (should_drop) {
    // A dropped frame is not an error: report success with NULL output.
    *out_frame = NULL;
    return true;
  }

  if (output_num_pixels_) {
    // Recompute the output size from the actual frame dimensions each time,
    // so rotation or cropping by the capturer is tracked frame by frame.
    float scale = VideoAdapter::FindClosestScale(
        static_cast<int>(in_frame->GetWidth()),
        static_cast<int>(in_frame->GetHeight()),
        output_num_pixels_);
    // +.5f rounds to nearest when truncating to int.
    output_format_.width = static_cast<int>(in_frame->GetWidth() * scale + .5f);
    output_format_.height = static_cast<int>(in_frame->GetHeight() * scale +
                                             .5f);
  }

  if (!StretchToOutputFrame(in_frame)) {
    return false;
  }

  *out_frame = output_frame_.get();
  return true;
}
| 256 | |
// Scales (or blackens) *in_frame into the cached output_frame_ at the
// current output_format_ size, reallocating the cached frame only when the
// size changes. Returns false if the initial stretch fails.
bool VideoAdapter::StretchToOutputFrame(const VideoFrame* in_frame) {
  int output_width = output_format_.width;
  int output_height = output_format_.height;

  // Create and stretch the output frame if it has not been created yet or its
  // size is not same as the expected.
  bool stretched = false;
  if (!output_frame_ ||
      output_frame_->GetWidth() != static_cast<size_t>(output_width) ||
      output_frame_->GetHeight() != static_cast<size_t>(output_height)) {
    output_frame_.reset(
        in_frame->Stretch(output_width, output_height, true, true));
    if (!output_frame_) {
      LOG(LS_WARNING) << "Adapter failed to stretch frame to "
                      << output_width << "x" << output_height;
      return false;
    }
    stretched = true;
    is_black_ = false;
  }

  if (!black_output_) {
    if (!stretched) {
      // The output frame does not need to be blackened and has not been
      // stretched from the input frame yet, so stretch the input frame. This
      // is the most common case.
      in_frame->StretchToFrame(output_frame_.get(), true, true);
    }
    is_black_ = false;
  } else {
    if (!is_black_) {
      // Blacken only once per black period; subsequent frames just refresh
      // the timestamps below.
      output_frame_->SetToBlack();
      is_black_ = true;
    }
    output_frame_->SetElapsedTime(in_frame->GetElapsedTime());
    output_frame_->SetTimeStamp(in_frame->GetTimeStamp());
  }

  return true;
}
| 297 | |
///////////////////////////////////////////////////////////////////////
// Implementation of CoordinatedVideoAdapter
// Starts with cpu adaptation and smoothing disabled, bandwidth (GD) and view
// adaptation enabled, and every desired pixel count unconstrained (INT_MAX).
CoordinatedVideoAdapter::CoordinatedVideoAdapter()
    : cpu_adaptation_(false),
      cpu_smoothing_(false),
      gd_adaptation_(true),
      view_adaptation_(true),
      view_switch_(false),
      cpu_downgrade_count_(0),
      cpu_adapt_wait_time_(0),  // 0 = no sample window started yet.
      high_system_threshold_(kHighSystemCpuThreshold),
      low_system_threshold_(kLowSystemCpuThreshold),
      process_threshold_(kProcessCpuThreshold),
      view_desired_num_pixels_(INT_MAX),
      view_desired_interval_(0),
      encoder_desired_num_pixels_(INT_MAX),
      cpu_desired_num_pixels_(INT_MAX),
      adapt_reason_(0),
      system_load_average_(kCpuLoadInitialAverage) {
}
| 318 | |
| 319 | // Helper function to UPGRADE or DOWNGRADE a number of pixels |
| 320 | void CoordinatedVideoAdapter::StepPixelCount( |
| 321 | CoordinatedVideoAdapter::AdaptRequest request, |
| 322 | int* num_pixels) { |
| 323 | switch (request) { |
| 324 | case CoordinatedVideoAdapter::DOWNGRADE: |
| 325 | *num_pixels /= 2; |
| 326 | break; |
| 327 | |
| 328 | case CoordinatedVideoAdapter::UPGRADE: |
| 329 | *num_pixels *= 2; |
| 330 | break; |
| 331 | |
| 332 | default: // No change in pixel count |
| 333 | break; |
| 334 | } |
| 335 | return; |
| 336 | } |
| 337 | |
| 338 | // Find the adaptation request of the cpu based on the load. Return UPGRADE if |
| 339 | // the load is low, DOWNGRADE if the load is high, and KEEP otherwise. |
| 340 | CoordinatedVideoAdapter::AdaptRequest CoordinatedVideoAdapter::FindCpuRequest( |
| 341 | int current_cpus, int max_cpus, |
| 342 | float process_load, float system_load) { |
| 343 | // Downgrade if system is high and plugin is at least more than midrange. |
| 344 | if (system_load >= high_system_threshold_ * max_cpus && |
| 345 | process_load >= process_threshold_ * current_cpus) { |
| 346 | return CoordinatedVideoAdapter::DOWNGRADE; |
| 347 | // Upgrade if system is low. |
| 348 | } else if (system_load < low_system_threshold_ * max_cpus) { |
| 349 | return CoordinatedVideoAdapter::UPGRADE; |
| 350 | } |
| 351 | return CoordinatedVideoAdapter::KEEP; |
| 352 | } |
| 353 | |
// A remote view request for a new resolution. No-op unless view adaptation
// is enabled; otherwise records the view's desired pixel count and interval,
// then re-runs AdaptToMinimumFormat to combine all constraints.
void CoordinatedVideoAdapter::OnOutputFormatRequest(const VideoFormat& format) {
  talk_base::CritScope cs(&request_critical_section_);
  if (!view_adaptation_) {
    return;
  }
  // Set output for initial aspect ratio in mediachannel unittests.
  // The pixel constraint is saved and restored so only the aspect/interval
  // of |format| take effect here; pixels are decided by AdaptToMinimumFormat.
  int old_num_pixels = GetOutputNumPixels();
  SetOutputFormat(format);
  SetOutputNumPixels(old_num_pixels);
  view_desired_num_pixels_ = format.width * format.height;
  view_desired_interval_ = format.interval;
  int new_width, new_height;
  bool changed = AdaptToMinimumFormat(&new_width, &new_height);
  LOG(LS_INFO) << "VAdapt View Request: "
               << format.width << "x" << format.height
               << " Pixels: " << view_desired_num_pixels_
               << " Changed: " << (changed ? "true" : "false")
               << " To: " << new_width << "x" << new_height;
}
| 374 | |
// A Bandwidth GD request for new resolution. |request| asks to step the
// encoder's desired pixel count up or down from (width x height); requests
// that no longer match the current output size are considered stale and
// ignored. Always re-runs AdaptToMinimumFormat afterwards.
void CoordinatedVideoAdapter::OnEncoderResolutionRequest(
    int width, int height, AdaptRequest request) {
  talk_base::CritScope cs(&request_critical_section_);
  if (!gd_adaptation_) {
    return;
  }
  int old_encoder_desired_num_pixels = encoder_desired_num_pixels_;
  if (KEEP != request) {
    int new_encoder_desired_num_pixels = width * height;
    int old_num_pixels = GetOutputNumPixels();
    if (new_encoder_desired_num_pixels != old_num_pixels) {
      // The encoder is reacting to a resolution we have already moved away
      // from; ignore the request.
      LOG(LS_VERBOSE) << "VAdapt GD resolution stale. Ignored";
    } else {
      // Update the encoder desired format based on the request.
      encoder_desired_num_pixels_ = new_encoder_desired_num_pixels;
      StepPixelCount(request, &encoder_desired_num_pixels_);
    }
  }
  int new_width, new_height;
  bool changed = AdaptToMinimumFormat(&new_width, &new_height);

  // Ignore up or keep if no change.
  // Roll back the desired pixel count so a no-op request leaves state intact.
  if (DOWNGRADE != request && view_switch_ && !changed) {
    encoder_desired_num_pixels_ = old_encoder_desired_num_pixels;
    LOG(LS_VERBOSE) << "VAdapt ignoring GD request.";
  }

  LOG(LS_INFO) << "VAdapt GD Request: "
               << (DOWNGRADE == request ? "down" :
                   (UPGRADE == request ? "up" : "keep"))
               << " From: " << width << "x" << height
               << " Pixels: " << encoder_desired_num_pixels_
               << " Changed: " << (changed ? "true" : "false")
               << " To: " << new_width << "x" << new_height;
}
| 411 | |
// A CPU request for new resolution. Feeds the new load sample into the
// moving average, rate-limits adaptation to one step per
// kCpuLoadMinSampleTime, updates the downgrade counter (at most
// kMaxCpuDowngrades steps down), and re-runs AdaptToMinimumFormat.
void CoordinatedVideoAdapter::OnCpuLoadUpdated(
    int current_cpus, int max_cpus, float process_load, float system_load) {
  talk_base::CritScope cs(&request_critical_section_);
  if (!cpu_adaptation_) {
    return;
  }
  // Update the moving average of system load. Even if we aren't smoothing,
  // we'll still calculate this information, in case smoothing is later enabled.
  system_load_average_ = kCpuLoadWeightCoefficient * system_load +
      (1.0f - kCpuLoadWeightCoefficient) * system_load_average_;
  if (cpu_smoothing_) {
    system_load = system_load_average_;
  }
  // If we haven't started taking samples yet, wait until we have at least
  // the correct number of samples per the wait time.
  if (cpu_adapt_wait_time_ == 0) {
    cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
  }
  AdaptRequest request = FindCpuRequest(current_cpus, max_cpus,
                                        process_load, system_load);
  // Make sure we're not adapting too quickly: suppress the request while the
  // current sample window has not yet elapsed.
  if (request != KEEP) {
    if (talk_base::TimeIsLater(talk_base::Time(),
                               cpu_adapt_wait_time_)) {
      LOG(LS_VERBOSE) << "VAdapt CPU load high/low but do not adapt until "
                      << talk_base::TimeUntil(cpu_adapt_wait_time_) << " ms";
      request = KEEP;
    }
  }

  // Update how many times we have downgraded due to the cpu load.
  switch (request) {
    case DOWNGRADE:
      // Ignore downgrades if we have downgraded the maximum times.
      if (cpu_downgrade_count_ < kMaxCpuDowngrades) {
        ++cpu_downgrade_count_;
      } else {
        LOG(LS_VERBOSE) << "VAdapt CPU load high but do not downgrade "
                           "because maximum downgrades reached";
        SignalCpuAdaptationUnable();
      }
      break;
    case UPGRADE:
      if (cpu_downgrade_count_ > 0) {
        // Only step back up when cpu is actually the limiting factor; an
        // upgrade would otherwise have no visible effect.
        bool is_min = IsMinimumFormat(cpu_desired_num_pixels_);
        if (is_min) {
          --cpu_downgrade_count_;
        } else {
          LOG(LS_VERBOSE) << "VAdapt CPU load low but do not upgrade "
                             "because cpu is not limiting resolution";
        }
      } else {
        LOG(LS_VERBOSE) << "VAdapt CPU load low but do not upgrade "
                           "because minimum downgrades reached";
      }
      break;
    case KEEP:
    default:
      break;
  }
  if (KEEP != request) {
    // TODO(fbarchard): compute stepping up/down from OutputNumPixels but
    // clamp to inputpixels / 4 (2 steps)
    // Desired budget = input pixels halved once per downgrade (right shift);
    // zero downgrades means unconstrained.
    cpu_desired_num_pixels_ = cpu_downgrade_count_ == 0 ? INT_MAX :
        static_cast<int>(input_format().width * input_format().height >>
                         cpu_downgrade_count_);
  }
  int new_width, new_height;
  bool changed = AdaptToMinimumFormat(&new_width, &new_height);
  LOG(LS_INFO) << "VAdapt CPU Request: "
               << (DOWNGRADE == request ? "down" :
                   (UPGRADE == request ? "up" : "keep"))
               << " Process: " << process_load
               << " System: " << system_load
               << " Steps: " << cpu_downgrade_count_
               << " Changed: " << (changed ? "true" : "false")
               << " To: " << new_width << "x" << new_height;
}
| 491 | |
// Called by cpu adapter on up requests. Returns true if an output sized for
// |pixels| (snapped to the closest supported scale of the input) would be no
// larger than the current output constraint — i.e. whether cpu is among the
// factors currently limiting resolution.
bool CoordinatedVideoAdapter::IsMinimumFormat(int pixels) {
  // Find closest scale factor that matches input resolution to min_num_pixels
  // and set that for output resolution. This is not needed for VideoAdapter,
  // but provides feedback to unittests and users on expected resolution.
  // Actual resolution is based on input frame.
  VideoFormat new_output = output_format();
  VideoFormat input = input_format();
  if (input_format().IsSize0x0()) {
    // No frame has been seen yet; use the requested output as a stand-in.
    input = new_output;
  }
  float scale = 1.0f;
  if (!input.IsSize0x0()) {
    scale = FindClosestScale(input.width,
                             input.height,
                             pixels);
  }
  // +.5f rounds to nearest when truncating to int.
  new_output.width = static_cast<int>(input.width * scale + .5f);
  new_output.height = static_cast<int>(input.height * scale + .5f);
  int new_pixels = new_output.width * new_output.height;
  int num_pixels = GetOutputNumPixels();
  return new_pixels <= num_pixels;
}
| 515 | |
// Called by all coordinators when there is a change. Combines the view,
// encoder-bandwidth (GD) and cpu desired pixel counts, takes the minimum,
// records which constraints were binding in adapt_reason_, and applies the
// resulting output format. Outputs the new dimensions through
// new_width/new_height and returns true if the output pixel count changed.
bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
                                                   int* new_height) {
  VideoFormat new_output = output_format();
  VideoFormat input = input_format();
  if (input_format().IsSize0x0()) {
    // No input seen yet; use the current output as a stand-in.
    input = new_output;
  }
  int old_num_pixels = GetOutputNumPixels();
  // Find resolution that respects ViewRequest or less pixels.
  int view_desired_num_pixels = view_desired_num_pixels_;
  int min_num_pixels = view_desired_num_pixels_;
  if (!input.IsSize0x0()) {
    // Snap the view request down to the nearest supported scale of the input.
    float scale = FindLowerScale(input.width, input.height, min_num_pixels);
    min_num_pixels = view_desired_num_pixels =
        static_cast<int>(input.width * input.height * scale * scale + .5f);
  }
  // Reduce resolution further, if necessary, based on encoder bandwidth (GD).
  if (encoder_desired_num_pixels_ &&
      (encoder_desired_num_pixels_ < min_num_pixels)) {
    min_num_pixels = encoder_desired_num_pixels_;
  }
  // Reduce resolution further, if necessary, based on CPU.
  if (cpu_adaptation_ && cpu_desired_num_pixels_ &&
      (cpu_desired_num_pixels_ < min_num_pixels)) {
    min_num_pixels = cpu_desired_num_pixels_;
  }

  // Determine which factors are keeping adapter resolution low.
  // Caveat: Does not consider framerate.
  adapt_reason_ = static_cast<AdaptReason>(0);
  if (view_desired_num_pixels == min_num_pixels) {
    adapt_reason_ |= ADAPTREASON_VIEW;
  }
  if (encoder_desired_num_pixels_ == min_num_pixels) {
    adapt_reason_ |= ADAPTREASON_BANDWIDTH;
  }
  if (cpu_desired_num_pixels_ == min_num_pixels) {
    adapt_reason_ |= ADAPTREASON_CPU;
  }

  // Prevent going below QQVGA.
  if (min_num_pixels > 0 && min_num_pixels < kMinNumPixels) {
    min_num_pixels = kMinNumPixels;
  }
  SetOutputNumPixels(min_num_pixels);

  // Find closest scale factor that matches input resolution to min_num_pixels
  // and set that for output resolution. This is not needed for VideoAdapter,
  // but provides feedback to unittests and users on expected resolution.
  // Actual resolution is based on input frame.
  float scale = 1.0f;
  if (!input.IsSize0x0()) {
    scale = FindClosestScale(input.width, input.height, min_num_pixels);
  }
  if (scale == 1.0f) {
    // Full size output means nothing is actually constraining resolution.
    adapt_reason_ = 0;
  }
  *new_width = new_output.width = static_cast<int>(input.width * scale + .5f);
  *new_height = new_output.height = static_cast<int>(input.height * scale +
                                                     .5f);
  new_output.interval = view_desired_interval_;
  SetOutputFormat(new_output);
  int new_num_pixels = GetOutputNumPixels();
  bool changed = new_num_pixels != old_num_pixels;

  // Human-readable names indexed by the ADAPTREASON_* bitmask.
  static const char* kReasons[8] = {
    "None",
    "CPU",
    "BANDWIDTH",
    "CPU+BANDWIDTH",
    "VIEW",
    "CPU+VIEW",
    "BANDWIDTH+VIEW",
    "CPU+BANDWIDTH+VIEW",
  };

  LOG(LS_VERBOSE) << "VAdapt Status View: " << view_desired_num_pixels_
                  << " GD: " << encoder_desired_num_pixels_
                  << " CPU: " << cpu_desired_num_pixels_
                  << " Pixels: " << min_num_pixels
                  << " Input: " << input.width
                  << "x" << input.height
                  << " Scale: " << scale
                  << " Resolution: " << new_output.width
                  << "x" << new_output.height
                  << " Changed: " << (changed ? "true" : "false")
                  << " Reason: " << kReasons[adapt_reason_];

  if (changed) {
    // When any adaptation occurs, historic CPU load levels are no longer
    // accurate. Clear out our state so we can re-learn at the new normal.
    cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
    system_load_average_ = kCpuLoadInitialAverage;
  }

  return changed;
}
| 614 | |
| 615 | } // namespace cricket |