// libjingle
// Copyright 2010 Google Inc.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
// 3. The name of the author may not be used to endorse or promote products
//    derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "talk/media/base/videoadapter.h"

#include <limits.h>  // For INT_MAX

#include "talk/media/base/constants.h"
#include "talk/base/logging.h"
#include "talk/base/timeutils.h"
#include "talk/media/base/videoframe.h"

namespace cricket {

// TODO(fbarchard): Make downgrades settable.
static const int kMaxCpuDowngrades = 2;  // Downgrade at most 2 times for CPU.
static const int kDefaultDowngradeWaitTimeMs = 2000;

// TODO(fbarchard): Consider making the scale factor table settable, to allow
// the application to select the quality vs. performance tradeoff.
// TODO(fbarchard): Add framerate scaling to the tables for 1/2 framerate.
// List of scale factors that the adapter will scale by.
#if defined(IOS) || defined(ANDROID)
// Mobile needs 1/4 scale for VGA (640 x 360) to QQVGA (160 x 90),
// or 1/4 scale for HVGA (480 x 270) to QQHVGA (120 x 67).
static const int kMinNumPixels = 120 * 67;
static float kScaleFactors[] = {
  1.f / 1.f,   // Full size.
  3.f / 4.f,   // 3/4 scale.
  1.f / 2.f,   // 1/2 scale.
  3.f / 8.f,   // 3/8 scale.
  1.f / 4.f,   // 1/4 scale.
};
#else
// Desktop needs 1/8 scale for HD (1280 x 720) to QQVGA (160 x 90).
static const int kMinNumPixels = 160 * 100;
static float kScaleFactors[] = {
  1.f / 1.f,   // Full size.
  3.f / 4.f,   // 3/4 scale.
  1.f / 2.f,   // 1/2 scale.
  3.f / 8.f,   // 3/8 scale.
  1.f / 4.f,   // 1/4 scale.
  3.f / 16.f,  // 3/16 scale.
  1.f / 8.f    // 1/8 scale.
};
#endif

static const int kNumScaleFactors = ARRAY_SIZE(kScaleFactors);

// Find the scale factor that, when applied to width and height, produces
// a pixel count closest to target_num_pixels.
float VideoAdapter::FindClosestScale(int width, int height,
                                     int target_num_pixels) {
  if (!target_num_pixels) {
    return 0.f;
  }
  int best_distance = INT_MAX;
  int best_index = kNumScaleFactors - 1;  // Default to max scale.
  for (int i = 0; i < kNumScaleFactors; ++i) {
    int test_num_pixels = static_cast<int>(width * kScaleFactors[i] *
                                           height * kScaleFactors[i]);
    int diff = test_num_pixels - target_num_pixels;
    if (diff < 0) {
      diff = -diff;
    }
    if (diff < best_distance) {
      best_distance = diff;
      best_index = i;
      if (best_distance == 0) {  // Found exact match.
        break;
      }
    }
  }
  return kScaleFactors[best_index];
}
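
// Illustrative example (not exercised in this file): for a 1280x720 input and
// a target of 640 * 360 = 230400 pixels, the 1/2 entry of the table above
// yields exactly 230400 pixels, so FindClosestScale(1280, 720, 230400)
// returns 0.5f.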

// Find the scale factor that, when applied to width and height, produces
// no more pixels than target_num_pixels while staying as close to it as
// possible.
float VideoAdapter::FindLowerScale(int width, int height,
                                   int target_num_pixels) {
  if (!target_num_pixels) {
    return 0.f;
  }
  int best_distance = INT_MAX;
  int best_index = kNumScaleFactors - 1;  // Default to max scale.
  for (int i = 0; i < kNumScaleFactors; ++i) {
    int test_num_pixels = static_cast<int>(width * kScaleFactors[i] *
                                           height * kScaleFactors[i]);
    int diff = target_num_pixels - test_num_pixels;
    if (diff >= 0 && diff < best_distance) {
      best_distance = diff;
      best_index = i;
      if (best_distance == 0) {  // Found exact match.
        break;
      }
    }
  }
  return kScaleFactors[best_index];
}
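
// Illustrative contrast with FindClosestScale: for a 1280x720 input and a
// target of 400000 pixels, FindClosestScale picks 3/4 (518400 pixels, just
// above the target), while FindLowerScale picks 1/2 (230400 pixels) because
// it only considers scales whose pixel count does not exceed the target.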

// There are several frame sizes used by the adapter. This explains them:
// input_format - set once by the server to the frame size expected from the
//   camera.
// output_format - the size the output would like to be. Includes framerate.
// output_num_pixels - the size the output should be constrained to. Used to
//   compute output_format from in_frame.
// in_frame - actual camera captured frame size, which is typically the same
//   as input_format. This can also be rotated or cropped for aspect ratio.
// out_frame - actual frame output by the adapter. Should be a direct scale of
//   in_frame, maintaining rotation and aspect ratio.
// OnOutputFormatRequest - the server requests that you send this resolution
//   based on view requests.
// OnEncoderResolutionRequest - the encoder requests that you send this
//   resolution based on bandwidth.
// OnCpuLoadUpdated - the cpu monitor requests that you send this resolution
//   based on cpu load.
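
// Rough usage sketch (illustrative only, not part of this file; assumes the
// VideoFormat(width, height, interval, fourcc) constructor, the
// VideoFormat::FpsToInterval() helper, and a captured_frame obtained from a
// capturer):
//   VideoAdapter adapter;
//   adapter.SetOutputFormat(VideoFormat(
//       640, 360, VideoFormat::FpsToInterval(15), FOURCC_I420));
//   const VideoFrame* out_frame = NULL;
//   if (adapter.AdaptFrame(captured_frame, &out_frame)) {
//     // out_frame is NULL if the frame was dropped; otherwise it points to
//     // the adapted frame, which is owned by the adapter.
//   }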

///////////////////////////////////////////////////////////////////////
// Implementation of VideoAdapter
VideoAdapter::VideoAdapter()
    : output_num_pixels_(INT_MAX),
      black_output_(false),
      is_black_(false),
      interval_next_frame_(0) {
}

VideoAdapter::~VideoAdapter() {
}

void VideoAdapter::SetInputFormat(const VideoFrame& in_frame) {
  talk_base::CritScope cs(&critical_section_);
  input_format_.width = in_frame.GetWidth();
  input_format_.height = in_frame.GetHeight();
}

void VideoAdapter::SetInputFormat(const VideoFormat& format) {
  talk_base::CritScope cs(&critical_section_);
  input_format_ = format;
  output_format_.interval = talk_base::_max(
      output_format_.interval, input_format_.interval);
}

void VideoAdapter::SetOutputFormat(const VideoFormat& format) {
  talk_base::CritScope cs(&critical_section_);
  output_format_ = format;
  output_num_pixels_ = output_format_.width * output_format_.height;
  output_format_.interval = talk_base::_max(
      output_format_.interval, input_format_.interval);
}

const VideoFormat& VideoAdapter::input_format() {
  talk_base::CritScope cs(&critical_section_);
  return input_format_;
}

const VideoFormat& VideoAdapter::output_format() {
  talk_base::CritScope cs(&critical_section_);
  return output_format_;
}

void VideoAdapter::SetBlackOutput(bool black) {
  talk_base::CritScope cs(&critical_section_);
  black_output_ = black;
}

// Constrain output resolution to this many pixels overall
void VideoAdapter::SetOutputNumPixels(int num_pixels) {
  output_num_pixels_ = num_pixels;
}

int VideoAdapter::GetOutputNumPixels() const {
  return output_num_pixels_;
}

// TODO(fbarchard): Add AdaptFrameRate function that only drops frames but
// not resolution.
bool VideoAdapter::AdaptFrame(const VideoFrame* in_frame,
                              const VideoFrame** out_frame) {
  talk_base::CritScope cs(&critical_section_);
  if (!in_frame || !out_frame) {
    return false;
  }

  // Update input to actual frame dimensions.
  SetInputFormat(*in_frame);

  // Drop the input frame if necessary.
  bool should_drop = false;
  if (!output_num_pixels_) {
    // Drop all frames as the output format is 0x0.
    should_drop = true;
  } else {
    // Drop some frames based on input fps and output fps.
    // Normally output fps is less than input fps.
    // TODO(fbarchard): Consider adjusting interval to reflect the adjusted
    // interval between frames after dropping some frames.
    interval_next_frame_ += input_format_.interval;
    if (output_format_.interval > 0) {
      if (interval_next_frame_ >= output_format_.interval) {
        interval_next_frame_ %= output_format_.interval;
      } else {
        should_drop = true;
      }
    }
  }
  if (should_drop) {
    *out_frame = NULL;
    return true;
  }

  if (output_num_pixels_) {
    float scale = VideoAdapter::FindClosestScale(in_frame->GetWidth(),
                                                 in_frame->GetHeight(),
                                                 output_num_pixels_);
    output_format_.width = static_cast<int>(in_frame->GetWidth() * scale +
                                            .5f);
    output_format_.height = static_cast<int>(in_frame->GetHeight() * scale +
                                             .5f);
  }

  if (!StretchToOutputFrame(in_frame)) {
    return false;
  }

  *out_frame = output_frame_.get();
  return true;
}
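
// Illustrative example of the dropping logic above: with a 30 fps input and a
// requested 15 fps output, output_format_.interval is twice
// input_format_.interval, so interval_next_frame_ reaches the output interval
// only on every second input frame and roughly every other frame is dropped.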

bool VideoAdapter::StretchToOutputFrame(const VideoFrame* in_frame) {
  int output_width = output_format_.width;
  int output_height = output_format_.height;

  // Create and stretch the output frame if it has not been created yet, or if
  // its size is not the same as expected.
  bool stretched = false;
  if (!output_frame_ ||
      output_frame_->GetWidth() != static_cast<size_t>(output_width) ||
      output_frame_->GetHeight() != static_cast<size_t>(output_height)) {
    output_frame_.reset(
        in_frame->Stretch(output_width, output_height, true, true));
    if (!output_frame_) {
      LOG(LS_WARNING) << "Adapter failed to stretch frame to "
                      << output_width << "x" << output_height;
      return false;
    }
    stretched = true;
    is_black_ = false;
  }

  if (!black_output_) {
    if (!stretched) {
      // The output frame does not need to be blackened and has not yet been
      // stretched from the input frame, so stretch the input frame. This is
      // the most common case.
      in_frame->StretchToFrame(output_frame_.get(), true, true);
    }
    is_black_ = false;
  } else {
    if (!is_black_) {
      output_frame_->SetToBlack();
      is_black_ = true;
    }
    output_frame_->SetElapsedTime(in_frame->GetElapsedTime());
    output_frame_->SetTimeStamp(in_frame->GetTimeStamp());
  }

  return true;
}

///////////////////////////////////////////////////////////////////////
// Implementation of CoordinatedVideoAdapter
CoordinatedVideoAdapter::CoordinatedVideoAdapter()
    : cpu_adaptation_(false),
      gd_adaptation_(true),
      view_adaptation_(true),
      view_switch_(false),
      cpu_downgrade_count_(0),
      cpu_downgrade_wait_time_(0),
      high_system_threshold_(kHighSystemCpuThreshold),
      low_system_threshold_(kLowSystemCpuThreshold),
      process_threshold_(kProcessCpuThreshold),
      view_desired_num_pixels_(INT_MAX),
      view_desired_interval_(0),
      encoder_desired_num_pixels_(INT_MAX),
      cpu_desired_num_pixels_(INT_MAX),
      adapt_reason_(0) {
}

// Helper function to UPGRADE or DOWNGRADE a number of pixels
void CoordinatedVideoAdapter::StepPixelCount(
    CoordinatedVideoAdapter::AdaptRequest request,
    int* num_pixels) {
  switch (request) {
    case CoordinatedVideoAdapter::DOWNGRADE:
      *num_pixels /= 2;
      break;

    case CoordinatedVideoAdapter::UPGRADE:
      *num_pixels *= 2;
      break;

    default:  // No change in pixel count
      break;
  }
  return;
}
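
// Illustrative example: starting from 640 * 360 = 230400 pixels, a DOWNGRADE
// request steps the count down to 115200, an UPGRADE request steps it back up
// to 230400, and KEEP leaves it unchanged.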

// Find the cpu adaptation request based on the load. Return UPGRADE if the
// load is low, DOWNGRADE if the load is high, and KEEP otherwise.
CoordinatedVideoAdapter::AdaptRequest CoordinatedVideoAdapter::FindCpuRequest(
    int current_cpus, int max_cpus,
    float process_load, float system_load) {
  // Downgrade if the system load is high and this process's load is at least
  // at the per-process threshold.
  if (system_load >= high_system_threshold_ * max_cpus &&
      process_load >= process_threshold_ * current_cpus) {
    return CoordinatedVideoAdapter::DOWNGRADE;
  // Upgrade if the system load is low.
  } else if (system_load < low_system_threshold_ * max_cpus) {
    return CoordinatedVideoAdapter::UPGRADE;
  }
  return CoordinatedVideoAdapter::KEEP;
}
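
// Illustrative example with hypothetical threshold values (the real ones come
// from constants.h): if high_system_threshold_ were 0.85,
// low_system_threshold_ were 0.50 and process_threshold_ were 0.10 on a
// machine with max_cpus = current_cpus = 4, then a system_load of 3.6 with a
// process_load of 0.5 would return DOWNGRADE, a system_load of 1.5 would
// return UPGRADE, and a system_load between those two points would return
// KEEP.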

// A remote view request for a new resolution.
void CoordinatedVideoAdapter::OnOutputFormatRequest(const VideoFormat& format) {
  talk_base::CritScope cs(&request_critical_section_);
  if (!view_adaptation_) {
    return;
  }
  // Set output for initial aspect ratio in mediachannel unittests.
  int old_num_pixels = GetOutputNumPixels();
  SetOutputFormat(format);
  SetOutputNumPixels(old_num_pixels);
  view_desired_num_pixels_ = format.width * format.height;
  view_desired_interval_ = format.interval;
  int new_width, new_height;
  bool changed = AdaptToMinimumFormat(&new_width, &new_height);
  LOG(LS_INFO) << "VAdapt View Request: "
               << format.width << "x" << format.height
               << " Pixels: " << view_desired_num_pixels_
               << " Changed: " << (changed ? "true" : "false")
               << " To: " << new_width << "x" << new_height;
}

// A bandwidth GD request for a new resolution.
void CoordinatedVideoAdapter::OnEncoderResolutionRequest(
    int width, int height, AdaptRequest request) {
  talk_base::CritScope cs(&request_critical_section_);
  if (!gd_adaptation_) {
    return;
  }
  int old_encoder_desired_num_pixels = encoder_desired_num_pixels_;
  if (KEEP != request) {
    int new_encoder_desired_num_pixels = width * height;
    int old_num_pixels = GetOutputNumPixels();
    if (new_encoder_desired_num_pixels != old_num_pixels) {
      LOG(LS_VERBOSE) << "VAdapt GD resolution stale. Ignored";
    } else {
      // Update the encoder desired format based on the request.
      encoder_desired_num_pixels_ = new_encoder_desired_num_pixels;
      StepPixelCount(request, &encoder_desired_num_pixels_);
    }
  }
  int new_width, new_height;
  bool changed = AdaptToMinimumFormat(&new_width, &new_height);

  // Ignore an upgrade or keep request if nothing changed.
  if (DOWNGRADE != request && view_switch_ && !changed) {
    encoder_desired_num_pixels_ = old_encoder_desired_num_pixels;
    LOG(LS_VERBOSE) << "VAdapt ignoring GD request.";
  }

  LOG(LS_INFO) << "VAdapt GD Request: "
               << (DOWNGRADE == request ? "down" :
                   (UPGRADE == request ? "up" : "keep"))
               << " From: " << width << "x" << height
               << " Pixels: " << encoder_desired_num_pixels_
               << " Changed: " << (changed ? "true" : "false")
               << " To: " << new_width << "x" << new_height;
}

// A CPU request for a new resolution.
void CoordinatedVideoAdapter::OnCpuLoadUpdated(
    int current_cpus, int max_cpus, float process_load, float system_load) {
  talk_base::CritScope cs(&request_critical_section_);
  if (!cpu_adaptation_) {
    return;
  }
  AdaptRequest request = FindCpuRequest(current_cpus, max_cpus,
                                        process_load, system_load);
  // Update how many times we have downgraded due to the cpu load.
  switch (request) {
    case DOWNGRADE:
      if (cpu_downgrade_count_ < kMaxCpuDowngrades) {
        // Ignore the downgrade if we downgraded recently and are still inside
        // the wait period.
        if (cpu_downgrade_wait_time_ != 0 &&
            talk_base::TimeIsLater(talk_base::Time(),
                                   cpu_downgrade_wait_time_)) {
          LOG(LS_VERBOSE) << "VAdapt CPU load high but do not downgrade until "
                          << talk_base::TimeUntil(cpu_downgrade_wait_time_)
                          << " ms.";
          request = KEEP;
        } else {
          ++cpu_downgrade_count_;
        }
      } else {
        LOG(LS_VERBOSE) << "VAdapt CPU load high but do not downgrade "
                           "because maximum downgrades reached";
        SignalCpuAdaptationUnable();
      }
      break;
    case UPGRADE:
      if (cpu_downgrade_count_ > 0) {
        bool is_min = IsMinimumFormat(cpu_desired_num_pixels_);
        if (is_min) {
          --cpu_downgrade_count_;
        } else {
          LOG(LS_VERBOSE) << "VAdapt CPU load low but do not upgrade "
                             "because cpu is not limiting resolution";
        }
      } else {
        LOG(LS_VERBOSE) << "VAdapt CPU load low but do not upgrade "
                           "because there are no CPU downgrades to undo";
      }
      break;
    case KEEP:
    default:
      break;
  }
  if (KEEP != request) {
    // TODO(fbarchard): compute stepping up/down from OutputNumPixels but
    // clamp to inputpixels / 4 (2 steps)
    cpu_desired_num_pixels_ = cpu_downgrade_count_ == 0 ? INT_MAX :
        static_cast<int>(input_format().width * input_format().height >>
                         cpu_downgrade_count_);
  }
  int new_width, new_height;
  bool changed = AdaptToMinimumFormat(&new_width, &new_height);
  LOG(LS_INFO) << "VAdapt CPU Request: "
               << (DOWNGRADE == request ? "down" :
                   (UPGRADE == request ? "up" : "keep"))
               << " Process: " << process_load
               << " System: " << system_load
               << " Steps: " << cpu_downgrade_count_
               << " Changed: " << (changed ? "true" : "false")
               << " To: " << new_width << "x" << new_height;
}

// Called by the cpu adapter on upgrade requests.
bool CoordinatedVideoAdapter::IsMinimumFormat(int pixels) {
  // Find the closest scale factor that matches the input resolution to
  // |pixels| and compute the resolution that would result. The actual output
  // resolution is based on the input frame; this is only used to decide
  // whether the given pixel count is currently the limiting constraint.
  VideoFormat new_output = output_format();
  VideoFormat input = input_format();
  if (input_format().IsSize0x0()) {
    input = new_output;
  }
  float scale = 1.0f;
  if (!input.IsSize0x0()) {
    scale = FindClosestScale(input.width,
                             input.height,
                             pixels);
  }
  new_output.width = static_cast<int>(input.width * scale + .5f);
  new_output.height = static_cast<int>(input.height * scale + .5f);
  int new_pixels = new_output.width * new_output.height;
  int num_pixels = GetOutputNumPixels();
  return new_pixels <= num_pixels;
}

// Called by all coordinators when there is a change.
bool CoordinatedVideoAdapter::AdaptToMinimumFormat(int* new_width,
                                                   int* new_height) {
  VideoFormat new_output = output_format();
  VideoFormat input = input_format();
  if (input_format().IsSize0x0()) {
    input = new_output;
  }
  int old_num_pixels = GetOutputNumPixels();
  // Find the resolution that respects the view request, or fewer pixels.
  int view_desired_num_pixels = view_desired_num_pixels_;
  int min_num_pixels = view_desired_num_pixels_;
  if (!input.IsSize0x0()) {
    float scale = FindLowerScale(input.width, input.height, min_num_pixels);
    min_num_pixels = view_desired_num_pixels =
        static_cast<int>(input.width * input.height * scale * scale + .5f);
  }
  // Reduce the resolution further, if necessary, based on encoder bandwidth
  // (GD).
  if (encoder_desired_num_pixels_ &&
      (encoder_desired_num_pixels_ < min_num_pixels)) {
    min_num_pixels = encoder_desired_num_pixels_;
  }
  // Reduce the resolution further, if necessary, based on CPU.
  if (cpu_adaptation_ && cpu_desired_num_pixels_ &&
      (cpu_desired_num_pixels_ < min_num_pixels)) {
    min_num_pixels = cpu_desired_num_pixels_;
    // Update cpu_downgrade_wait_time_ if we are going to downgrade video.
    cpu_downgrade_wait_time_ =
        talk_base::TimeAfter(kDefaultDowngradeWaitTimeMs);
  }

  // Determine which factors are keeping the adapter resolution low.
  // Caveat: Does not consider framerate.
  adapt_reason_ = static_cast<AdaptReason>(0);
  if (view_desired_num_pixels == min_num_pixels) {
    adapt_reason_ |= ADAPTREASON_VIEW;
  }
  if (encoder_desired_num_pixels_ == min_num_pixels) {
    adapt_reason_ |= ADAPTREASON_BANDWIDTH;
  }
  if (cpu_desired_num_pixels_ == min_num_pixels) {
    adapt_reason_ |= ADAPTREASON_CPU;
  }

  // Prevent going below QQVGA.
  if (min_num_pixels > 0 && min_num_pixels < kMinNumPixels) {
    min_num_pixels = kMinNumPixels;
  }
  SetOutputNumPixels(min_num_pixels);

  // Find the closest scale factor that matches the input resolution to
  // min_num_pixels and set that as the output resolution. This is not needed
  // for VideoAdapter, but provides feedback to unittests and users on the
  // expected resolution. The actual resolution is based on the input frame.
  float scale = 1.0f;
  if (!input.IsSize0x0()) {
    scale = FindClosestScale(input.width, input.height, min_num_pixels);
  }
  if (scale == 1.0f) {
    adapt_reason_ = 0;
  }
  *new_width = new_output.width = static_cast<int>(input.width * scale + .5f);
  *new_height = new_output.height = static_cast<int>(input.height * scale +
                                                     .5f);
  new_output.interval = view_desired_interval_;
  SetOutputFormat(new_output);
  int new_num_pixels = GetOutputNumPixels();
  bool changed = new_num_pixels != old_num_pixels;

  static const char* kReasons[8] = {
    "None",
    "CPU",
    "BANDWIDTH",
    "CPU+BANDWIDTH",
    "VIEW",
    "CPU+VIEW",
    "BANDWIDTH+VIEW",
    "CPU+BANDWIDTH+VIEW",
  };

  LOG(LS_VERBOSE) << "VAdapt Status View: " << view_desired_num_pixels_
                  << " GD: " << encoder_desired_num_pixels_
                  << " CPU: " << cpu_desired_num_pixels_
                  << " Pixels: " << min_num_pixels
                  << " Input: " << input.width
                  << "x" << input.height
                  << " Scale: " << scale
                  << " Resolution: " << new_output.width
                  << "x" << new_output.height
                  << " Changed: " << (changed ? "true" : "false")
                  << " Reason: " << kReasons[adapt_reason_];
  return changed;
}
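
// Illustrative walk-through (assuming the desktop scale table and the default
// constructor state, i.e. encoder and cpu constraints at INT_MAX and cpu
// adaptation disabled): with a 1280x720 input and a view request of 640x360,
// view_desired_num_pixels_ is 230400, FindLowerScale() returns 1/2, no other
// constraint is lower, so min_num_pixels stays 230400, adapt_reason_ becomes
// ADAPTREASON_VIEW, and the new output resolution is 640x360.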

}  // namespace cricket