Ram Chandrasekar | 613e1f4 | 2022-08-25 00:02:12 +0000 | [diff] [blame] | 1 | # Copyright 2022 The ChromiumOS Authors |
| 2 | # Use of this source code is governed by a BSD-style license that can be |
| 3 | # found in the LICENSE file. |
| 4 | |
| 5 | """Parse and operate based on disk layout files. |
| 6 | |
| 7 | For information on the JSON format, see: |
| 8 | http://dev.chromium.org/chromium-os/developer-guide/disk-layout-format |
| 9 | |
| 10 | The --adjust_part flag takes arguments like: |
| 11 | <label>:<op><size> |
| 12 | Where: |
| 13 | <label> is a label name as found in the disk layout file |
| 14 | <op> is one of the three: + - = |
| 15 | <size> is a number followed by an optional size qualifier: |
| 16 | B, KiB, MiB, GiB, TiB: bytes, kibi-, mebi-, gibi-, tebi- (base 1024) |
| 17 | B, K, M, G, T: short hand for above |
| 18 | B, KB, MB, GB, TB: bytes, kilo-, mega-, giga-, tera- (base 1000) |
| 19 | |
| 20 | This will set the ROOT-A partition size to 1 gibibytes (1024 * 1024 * 1024 * 1): |
| 21 | --adjust_part ROOT-A:=1GiB |
| 22 | This will grow the ROOT-A partition size by 500 mebibytes (1024 * 1024 * 500): |
| 23 | --adjust_part ROOT-A:+500MiB |
This will shrink the ROOT-A partition size by 20 mebibytes (1024 * 1024 * 20):
| 25 | --adjust_part ROOT-A:-20MiB |
| 26 | """ |
| 27 | |
| 28 | import argparse |
| 29 | import copy |
| 30 | import inspect |
| 31 | import json |
| 32 | import math |
| 33 | import os |
| 34 | from pathlib import Path |
| 35 | import re |
| 36 | import sys |
| 37 | |
| 38 | from chromite.lib import constants |
| 39 | |
| 40 | |
class ConfigNotFound(Exception):
    """A referenced disk layout configuration file does not exist."""


class PartitionNotFound(Exception):
    """No partition matching the requested label was found."""


class InvalidLayout(Exception):
    """The layout configuration is malformed or fails validation."""


class InvalidAdjustment(Exception):
    """An --adjust_part specification could not be parsed or applied."""


class InvalidSize(Exception):
    """A partition or filesystem size fails a size or alignment check."""


class ConflictingOptions(Exception):
    """Conflicting options were specified."""


class ConflictingPartitionOrder(Exception):
    """The partition order in the parent and child layout don't match."""


class MismatchedRootfsFormat(Exception):
    """Rootfs partitions are in different formats."""


class MismatchedRootfsBlocks(Exception):
    """Rootfs partitions have different numbers of reserved erase blocks."""


class MissingEraseBlockField(Exception):
    """Partition has reserved erase blocks but not other fields needed."""


class ExcessFailureProbability(Exception):
    """Chances are high that the partition will have too many bad blocks."""


class UnalignedPartition(Exception):
    """Partition size does not divide erase block size."""


class ExpandNandImpossible(Exception):
    """Partition is raw NAND and marked with the incompatible expand feature."""


class ExcessPartitionSize(Exception):
    """Partitions sum to more than the size of the whole device."""
| 96 | |
# Names of the two layouts with special meaning: "common" is merged into every
# other layout, and "base" must always be present.
COMMON_LAYOUT = "common"
BASE_LAYOUT = "base"
# Blocks of the partition entry array.
SIZE_OF_PARTITION_ENTRY_ARRAY_BYTES = 16 * 1024
# Protective MBR and GPT header sizes, in sectors.
SIZE_OF_PMBR = 1
SIZE_OF_GPT_HEADER = 1
DEFAULT_SECTOR_SIZE = 512
# Largest sector size we support, in bytes.
MAX_SECTOR_SIZE = 8 * 1024
# NOTE(review): despite the name, this is used as a byte offset
# (4 * 8 KiB = 32 KiB) in _GetPartitionStartByteOffset — confirm units.
START_SECTOR = 4 * MAX_SECTOR_SIZE
# Space reserved at the end of the disk for the secondary GPT header plus its
# partition entry array, in bytes.
SECONDARY_GPT_BYTES = (
    SIZE_OF_PARTITION_ENTRY_ARRAY_BYTES + SIZE_OF_GPT_HEADER * MAX_SECTOR_SIZE
)
| 109 | |
| 110 | |
def ParseHumanNumber(operand):
    """Parse a human friendly number

    This handles things like 4GiB and 4MB and such. See the usage string for
    full details on all the formats supported.

    Args:
        operand: The number to parse (may be an int or string)

    Returns:
        An integer

    Raises:
        InvalidAdjustment: If the size suffix is not recognized.
    """
    operand = str(operand)
    negative = -1 if operand.startswith("-") else 1
    if negative == -1:
        operand = operand[1:]
    # Everything past the leading digits is treated as the size suffix.
    operand_digits = re.sub(r"\D", r"", operand)

    size_factor = block_factor = 1
    suffix = operand[len(operand_digits) :].strip()
    if suffix:
        # The first letter selects the exponent: B/K/M/G/T -> 0..4.
        size_factors = {
            "B": 0,
            "K": 1,
            "M": 2,
            "G": 3,
            "T": 4,
        }
        try:
            size_factor = size_factors[suffix[0].upper()]
        except KeyError as e:
            # Chain the original KeyError for easier debugging.
            raise InvalidAdjustment("Unknown size type %s" % suffix) from e
        # A bare "B" (bytes) must not carry a trailing qualifier.
        if size_factor == 0 and len(suffix) > 1:
            raise InvalidAdjustment("Unknown size type %s" % suffix)
        # The remainder selects the base: "K" -> 1024, "KB" -> 1000,
        # "KiB" -> 1024.
        block_factors = {
            "": 1024,
            "B": 1000,
            "IB": 1024,
        }
        try:
            block_factor = block_factors[suffix[1:].upper()]
        except KeyError as e:
            raise InvalidAdjustment("Unknown size type %s" % suffix) from e

    return int(operand_digits) * pow(block_factor, size_factor) * negative
| 156 | |
| 157 | |
def ProduceHumanNumber(number):
    """A simple reverse of ParseHumanNumber, converting a number to human form.

    Args:
        number: A number (int) to be converted to human form.

    Returns:
        A string, such as "1 KiB", that satisfies the condition
        ParseHumanNumber(ProduceHumanNumber(i)) == i.
    """
    # Largest units first, binary before decimal at each magnitude, so the
    # most compact exact representation wins.
    units = (
        (2**40, "Ti"),
        (10**12, "T"),
        (2**30, "Gi"),
        (10**9, "G"),
        (2**20, "Mi"),
        (10**6, "M"),
        (2**10, "Ki"),
        (10**3, "K"),
    )
    for divisor, prefix in units:
        quotient, remainder = divmod(number, divisor)
        if not remainder:
            return "%d %sB" % (quotient, prefix)
    # No unit divides evenly; fall back to the plain integer.
    return str(number)
| 182 | |
| 183 | |
def ParseRelativeNumber(max_number, number):
    """Return the number that is relative to |max_number| by |number|

    We support three forms:
    90% - |number| is a percentage of |max_number|
    100 - |number| is the answer already (and |max_number| is ignored)
    -90 - |number| is subtracted from |max_number|

    Args:
        max_number: The limit to use when |number| is negative or a percent
        number: The (possibly relative) number to parse
            (may be an int or string).

    Returns:
        The resolved integer value.
    """
    max_number = int(max_number)
    number = str(number)
    if number.endswith("%"):
        # Fix: the percent text must be converted to a number before
        # dividing; the old code divided the *string* by 100, which raised
        # TypeError for every percentage input.
        percent = float(number[:-1]) / 100
        return int(max_number * percent)

    number = ParseHumanNumber(number)
    if number < 0:
        return max_number + number
    return number
| 208 | |
| 209 | |
def _ApplyLayoutOverrides(layout_to_override, layout):
    """Applies |layout| overrides on to |layout_to_override|.

    First add missing partition from layout to layout_to_override.
    Then, update partitions in layout_to_override with layout information.

    Args:
        layout_to_override: The base list of partition dicts; modified in
            place.
        layout: The list of overriding partition dicts; not modified.

    Raises:
        ConflictingPartitionOrder: If partitions present in both layouts
            appear in different relative orders.
    """
    # First check that all the partitions defined in both layouts are defined in
    # the same order in each layout. Otherwise, the order in which they end up
    # in the merged layout doesn't match what the user sees in the child layout.
    common_nums = set.intersection(
        {part["num"] for part in layout_to_override if "num" in part},
        {part["num"] for part in layout if "num" in part},
    )
    layout_to_override_order = [
        part["num"]
        for part in layout_to_override
        if part.get("num") in common_nums
    ]
    layout_order = [
        part["num"] for part in layout if part.get("num") in common_nums
    ]
    if layout_order != layout_to_override_order:
        raise ConflictingPartitionOrder(
            "Layouts share partitions %s but they are in different order: "
            "layout_to_override: %s, layout: %s"
            % (
                sorted(common_nums),
                [part.get("num") for part in layout_to_override],
                [part.get("num") for part in layout],
            )
        )

    # Merge layouts with the partitions in the same order they are in both
    # layouts.
    # part_index walks layout_to_override while we iterate layout; both are in
    # the same relative order (checked above), so a single forward pass works.
    part_index = 0
    for part_to_apply in layout:
        num = part_to_apply.get("num")

        if part_index == len(layout_to_override):
            # The part_to_apply is past the list of partitions to override, this
            # means that is a new partition added at the end.
            # Need of deepcopy, in case we change layout later.
            layout_to_override.append(copy.deepcopy(part_to_apply))
        elif layout_to_override[part_index].get("num") is None and num is None:
            # Allow modifying gaps after a partition.
            # TODO(deymo): Drop support for "gap" partitions and use alignment
            # instead.
            layout_to_override[part_index].update(part_to_apply)
        elif num in common_nums:
            # Skip ahead to the matching partition; anything skipped exists
            # only in the base layout and is kept as-is.
            while layout_to_override[part_index].get("num") != num:
                part_index += 1
            layout_to_override[part_index].update(part_to_apply)
        else:
            # Need of deepcopy, in case we change layout later.
            layout_to_override.insert(part_index, copy.deepcopy(part_to_apply))
        part_index += 1
| 266 | |
| 267 | |
def LoadJSONWithComments(filename):
    """Loads a JSON file ignoring lines with comments.

    RFC 7159 doesn't allow comments on the file JSON format. This functions
    loads a JSON file removing all the comment lines. A comment line is any
    line starting with # and optionally indented with whitespaces. Note that
    inline comments are not supported.

    Args:
        filename: The input filename.

    Returns:
        The parsed JSON object.
    """
    regex = re.compile(r"^\s*#.*")
    # Use an explicit encoding so parsing does not depend on the host locale.
    with open(filename, encoding="utf-8") as f:
        source = "".join(regex.sub("", line) for line in f)
    return json.loads(source)
| 286 | |
| 287 | |
def _LoadStackedPartitionConfig(filename):
    """Loads a partition table and its possible parent tables.

    This does very little validation. It's just enough to walk all of the
    parent files and merges them with the current config. Overall
    validation is left to the caller.

    Args:
        filename: Filename to load into object.

    Returns:
        Object containing disk layout configuration

    Raises:
        ConfigNotFound: If |filename| (or, recursively, a parent file) does
            not exist.
    """
    if not os.path.exists(filename):
        raise ConfigNotFound("Partition config %s was not found!" % filename)
    config = LoadJSONWithComments(filename)

    # Let's first apply our new configs onto base.
    common_layout = config["layouts"].setdefault(COMMON_LAYOUT, [])
    for layout_name, layout in config["layouts"].items():
        # Don't apply on yourself.
        if layout_name == COMMON_LAYOUT or layout_name == "_comment":
            continue

        # Need to copy a list of dicts so make a deep copy.
        working_layout = copy.deepcopy(common_layout)
        _ApplyLayoutOverrides(working_layout, layout)
        config["layouts"][layout_name] = working_layout

    dirname = os.path.dirname(filename)
    # Now let's inherit the values from all our parents.
    # Each parent is itself loaded recursively, so grandparents are merged
    # before this level's overrides are applied.
    for parent in config.get("parent", "").split():
        parent_filename = os.path.join(dirname, parent)
        if not os.path.exists(parent_filename):
            # Try loading from src/scripts/build_library directory.
            parent_filename = (
                Path(constants.CROSUTILS_DIR) / "build_library" / parent
            )
        parent_config = _LoadStackedPartitionConfig(parent_filename)

        # First if the parent is missing any fields the new config has,
        # fill them in.
        for key in config.keys():
            if key == "parent":
                continue
            elif key == "metadata":
                # We handle this especially to allow for inner metadata fields
                # to be added / modified.
                parent_config.setdefault(key, {})
                parent_config[key].update(config[key])
            else:
                parent_config.setdefault(key, config[key])

        # The overrides work by taking the parent_config, apply the new config
        # layout info, and return the resulting config which is stored in the
        # parent config.

        # So there's an issue where an inheriting layout file may contain new
        # layouts not previously defined in the parent layout. Since we are
        # building these layout files based on the parent configs and overriding
        # new values, we first add the new layouts not previously defined in the
        # parent config using a copy of the base layout from that parent config.
        parent_layouts = set(parent_config["layouts"])
        config_layouts = set(config["layouts"])
        new_layouts = config_layouts - parent_layouts

        # Actually add the copy. Use a copy such that each is unique.
        parent_cmn_layout = parent_config["layouts"].setdefault(
            COMMON_LAYOUT, []
        )
        for layout_name in new_layouts:
            parent_config["layouts"][layout_name] = copy.deepcopy(
                parent_cmn_layout
            )

        # Iterate through each layout in the parent config and apply the new
        # layout.
        common_layout = config["layouts"].setdefault(COMMON_LAYOUT, [])
        for layout_name, parent_layout in parent_config["layouts"].items():
            if layout_name == "_comment":
                continue

            layout_override = config["layouts"].setdefault(layout_name, [])
            # Non-common layouts get the child's common overrides first, then
            # the layout-specific overrides on top.
            if layout_name != COMMON_LAYOUT:
                _ApplyLayoutOverrides(parent_layout, common_layout)

            _ApplyLayoutOverrides(parent_layout, layout_override)

        config = parent_config

    # The parent chain has been fully folded in; drop the bookkeeping key.
    config.pop("parent", None)
    return config
| 380 | |
| 381 | |
def LoadPartitionConfig(filename):
    """Loads a partition tables configuration file into a Python object.

    Args:
        filename: Filename to load into object

    Returns:
        Object containing disk layout configuration

    Raises:
        InvalidLayout: If the config has unknown keys, missing layouts, or a
            missing required entry.
        InvalidSize: If a filesystem size is non-positive, larger than its
            partition, or misaligned.
    """

    # Whitelists of recognized top-level and per-partition keys.
    valid_keys = set(("_comment", "metadata", "layouts", "parent"))
    valid_layout_keys = set(
        (
            "_comment",
            "num",
            "fs_blocks",
            "fs_block_size",
            "fs_align",
            "bytes",
            "uuid",
            "label",
            "format",
            "fs_format",
            "type",
            "features",
            "size",
            "fs_size",
            "fs_options",
            "erase_block_size",
            "hybrid_mbr",
            "reserved_erase_blocks",
            "max_bad_erase_blocks",
            "external_gpt",
            "page_size",
            "size_min",
            "fs_size_min",
        )
    )
    valid_features = set(("expand", "last_partition"))

    config = _LoadStackedPartitionConfig(filename)
    # Any KeyError below (e.g. a partition missing "type") is reported as an
    # InvalidLayout by the except clause at the bottom.
    try:
        metadata = config["metadata"]
        metadata["fs_block_size"] = ParseHumanNumber(metadata["fs_block_size"])
        if metadata.get("fs_align") is None:
            metadata["fs_align"] = metadata["fs_block_size"]
        else:
            metadata["fs_align"] = ParseHumanNumber(metadata["fs_align"])

        if (metadata["fs_align"] < metadata["fs_block_size"]) or (
            metadata["fs_align"] % metadata["fs_block_size"]
        ):
            raise InvalidLayout("fs_align must be a multiple of fs_block_size")

        unknown_keys = set(config.keys()) - valid_keys
        if unknown_keys:
            raise InvalidLayout("Unknown items: %r" % unknown_keys)

        if len(config["layouts"]) <= 0:
            raise InvalidLayout('Missing "layouts" entries')

        if not BASE_LAYOUT in config["layouts"].keys():
            raise InvalidLayout('Missing "base" config in "layouts"')

        # Validate and normalize every partition of every layout: human sizes
        # become byte counts, and size/alignment invariants are enforced.
        for layout_name, layout in config["layouts"].items():
            if layout_name == "_comment":
                continue

            for part in layout:
                unknown_keys = set(part.keys()) - valid_layout_keys
                if unknown_keys:
                    raise InvalidLayout(
                        "Unknown items in layout %s: %r"
                        % (layout_name, unknown_keys)
                    )

                if part.get("num") == "metadata" and "type" not in part:
                    part["type"] = "blank"

                if part["type"] != "blank":
                    for s in ("num", "label"):
                        if not s in part:
                            raise InvalidLayout(
                                'Layout "%s" missing "%s"' % (layout_name, s)
                            )

                if "size" in part:
                    # "size" (human form) becomes "bytes", clamped up to
                    # "size_min" when given.
                    part["bytes"] = ParseHumanNumber(part["size"])
                    if "size_min" in part:
                        size_min = ParseHumanNumber(part["size_min"])
                        if part["bytes"] < size_min:
                            part["bytes"] = size_min
                elif part.get("num") != "metadata":
                    # Placeholder size for partitions that declare none.
                    part["bytes"] = 1

                if "fs_size" in part:
                    part["fs_bytes"] = ParseHumanNumber(part["fs_size"])
                    if "fs_size_min" in part:
                        fs_size_min = ParseHumanNumber(part["fs_size_min"])
                        if part["fs_bytes"] < fs_size_min:
                            part["fs_bytes"] = fs_size_min
                    if part["fs_bytes"] <= 0:
                        raise InvalidSize(
                            'File system size "%s" must be positive'
                            % part["fs_size"]
                        )
                    if part["fs_bytes"] > part["bytes"]:
                        raise InvalidSize(
                            "Filesystem may not be larger than partition: "
                            "%s %s: %d > %d"
                            % (
                                layout_name,
                                part["label"],
                                part["fs_bytes"],
                                part["bytes"],
                            )
                        )
                    if part["fs_bytes"] % metadata["fs_align"] != 0:
                        raise InvalidSize(
                            'File system size: "%s" (%s bytes) is not an '
                            "even multiple of fs_align: %s"
                            % (
                                part["fs_size"],
                                part["fs_bytes"],
                                metadata["fs_align"],
                            )
                        )
                    if part.get("format") == "ubi":
                        part_meta = GetMetadataPartition(layout)
                        page_size = ParseHumanNumber(part_meta["page_size"])
                        eb_size = ParseHumanNumber(
                            part_meta["erase_block_size"]
                        )
                        # UBI loses two pages per erase block to metadata.
                        ubi_eb_size = eb_size - 2 * page_size
                        if (part["fs_bytes"] % ubi_eb_size) != 0:
                            # Trim fs_bytes to multiple of UBI eraseblock size.
                            fs_bytes = part["fs_bytes"] - (
                                part["fs_bytes"] % ubi_eb_size
                            )
                            raise InvalidSize(
                                'File system size: "%s" (%d bytes) is not a '
                                "multiple of UBI erase block size (%d). "
                                'Please set "fs_size" to "%s" in the "common"'
                                " layout instead."
                                % (
                                    part["fs_size"],
                                    part["fs_bytes"],
                                    ubi_eb_size,
                                    ProduceHumanNumber(fs_bytes),
                                )
                            )

                if "fs_blocks" in part:
                    # fs_blocks may be relative (e.g. "90%") to the maximum
                    # number of blocks the partition can hold.
                    max_fs_blocks = part["bytes"] // metadata["fs_block_size"]
                    part["fs_blocks"] = ParseRelativeNumber(
                        max_fs_blocks, part["fs_blocks"]
                    )
                    part["fs_bytes"] = (
                        part["fs_blocks"] * metadata["fs_block_size"]
                    )
                    if part["fs_bytes"] % metadata["fs_align"] != 0:
                        raise InvalidSize(
                            'File system size: "%s" (%s bytes) is not an even '
                            "multiple of fs_align: %s"
                            % (
                                part["fs_blocks"],
                                part["fs_bytes"],
                                metadata["fs_align"],
                            )
                        )

                    if part["fs_bytes"] > part["bytes"]:
                        raise InvalidLayout(
                            "Filesystem may not be larger than partition: "
                            "%s %s: %d > %d"
                            % (
                                layout_name,
                                part["label"],
                                part["fs_bytes"],
                                part["bytes"],
                            )
                        )
                if "erase_block_size" in part:
                    part["erase_block_size"] = ParseHumanNumber(
                        part["erase_block_size"]
                    )
                if "page_size" in part:
                    part["page_size"] = ParseHumanNumber(part["page_size"])

                part.setdefault("features", [])
                unknown_features = set(part["features"]) - valid_features
                if unknown_features:
                    raise InvalidLayout(
                        "%s: Unknown features: %s"
                        % (part["label"], unknown_features)
                    )
    except KeyError as e:
        raise InvalidLayout("Layout is missing required entries: %s" % e)

    return config
| 582 | |
| 583 | |
| 584 | def _GetPrimaryEntryArrayPaddingBytes(config): |
| 585 | """Return the start LBA of the primary partition entry array. |
| 586 | |
| 587 | Normally this comes after the primary GPT header but can be adjusted by |
| 588 | setting the "primary_entry_array_padding_bytes" key under "metadata" in |
| 589 | the config. |
| 590 | |
| 591 | Args: |
| 592 | config: The config dictionary. |
| 593 | |
| 594 | Returns: |
| 595 | The position of the primary partition entry array. |
| 596 | """ |
| 597 | |
| 598 | return config["metadata"].get("primary_entry_array_padding_bytes", 0) |
| 599 | |
| 600 | |
def _HasBadEraseBlocks(partitions):
    """Whether the metadata partition declares a max_bad_erase_blocks field."""
    metadata = GetMetadataPartition(partitions)
    return "max_bad_erase_blocks" in metadata
| 603 | |
| 604 | |
def _HasExternalGpt(partitions):
    """Whether the metadata partition declares an external GPT (default no)."""
    metadata = GetMetadataPartition(partitions)
    return metadata.get("external_gpt", False)
| 607 | |
| 608 | |
def _GetPartitionStartByteOffset(config, partitions):
    """Return the first usable location (LBA) for partitions.

    This value is the byte offset after the PMBR, the primary GPT header, and
    partition entry array.

    We round it up to 32K bytes to maintain the same layout as before in the
    normal (no padding between the primary GPT header and its partition entry
    array) case.

    Args:
        config: The config dictionary.
        partitions: List of partitions to process

    Returns:
        A suitable byte offset for partitions.
    """
    # With an external GPT the partitions' actual data starts at offset 0 —
    # no room needs to be reserved for the GPT structures on this device.
    if _HasExternalGpt(partitions):
        return 0
    return START_SECTOR + _GetPrimaryEntryArrayPaddingBytes(config)
| 634 | |
| 635 | |
def GetTableTotals(config, partitions):
    """Calculates total sizes/counts for a partition table.

    Args:
        config: The config dictionary.
        partitions: List of partitions to process

    Returns:
        Dict containing totals data

    Raises:
        InvalidLayout: If more than one partition expands or more than one is
            marked as the last partition.
    """
    meta = config["metadata"]
    align_losses = 0
    totals = {
        "expand_count": 0,
        "expand_min": 0,
        "last_partition_count": 0,
        # Start counting from the first usable byte on the device.
        "byte_count": _GetPartitionStartByteOffset(config, partitions),
    }

    # Total up the size of all non-expanding partitions to get the minimum
    # required disk size.
    for part in partitions:
        if part.get("num") == "metadata":
            continue

        # Filesystem-bearing partitions get aligned to fs_align; everything
        # else only to the filesystem block size.
        if part.get("type") in ("data", "rootfs") and part["bytes"] > 1:
            align_losses += meta["fs_align"]
        else:
            align_losses += meta["fs_block_size"]

        features = part["features"]
        if "expand" in features:
            totals["expand_count"] += 1
            totals["expand_min"] += part["bytes"]
        else:
            totals["byte_count"] += part["bytes"]
        if "last_partition" in features:
            totals["last_partition_count"] += 1

    # Account for the secondary GPT header and table.
    totals["byte_count"] += SECONDARY_GPT_BYTES

    # At present, only one expanding partition is permitted.
    # Whilst it'd be possible to have two, we don't need this yet
    # and it complicates things, so it's been left out for now.
    if totals["expand_count"] > 1:
        raise InvalidLayout(
            "1 expand partition allowed, %d requested" % totals["expand_count"]
        )

    # Only one partition can be last on the disk.
    if totals["last_partition_count"] > 1:
        raise InvalidLayout(
            "Only one last partition allowed, %d requested"
            % totals["last_partition_count"]
        )

    # We lose some extra bytes from the alignment which are now not considered
    # in min_disk_size because partitions are aligned on the fly. Adding
    # align_losses corrects for the loss.
    totals["min_disk_size"] = (
        totals["byte_count"] + totals["expand_min"] + align_losses
    )

    return totals
| 703 | |
| 704 | |
def GetPartitionTable(options, config, image_type):
    """Generates requested image_type layout from a layout configuration.

    This loads the base table and then overlays the requested layout over
    the base layout.

    Args:
        options: Flags passed to the script
        config: Partition configuration file object
        image_type: Type of image eg base/test/dev/factory_install

    Returns:
        Object representing a selected partition table

    Raises:
        InvalidLayout: If |image_type| is unknown or an fs_options entry is
            malformed.
        InvalidAdjustment: If an --adjust_part argument is incomplete.
    """

    # We make a deep copy so that changes to the dictionaries in this list do
    # not persist across calls.
    try:
        partitions = copy.deepcopy(config["layouts"][image_type])
    except KeyError as e:
        raise InvalidLayout("Unknown layout: %s" % image_type) from e
    metadata = config["metadata"]

    # Convert fs_options to a string.
    for partition in partitions:
        fs_options = partition.get("fs_options", "")
        if isinstance(fs_options, dict):
            # A dict maps each fs_format to its options; select the entry for
            # this partition's filesystem format.
            fs_format = partition.get("fs_format")
            fs_options = fs_options.get(fs_format, "")
        elif not isinstance(fs_options, str):
            # Fixed message: this validates fs_options, not fs_format.
            raise InvalidLayout(
                "Partition number %s: fs_options must be a string or "
                "dict, not %s" % (partition.get("num"), type(fs_options))
            )
        # The options end up inside a generated shell script, so quotes would
        # break the quoting there.
        if '"' in fs_options or "'" in fs_options:
            raise InvalidLayout(
                "Partition number %s: fs_options cannot have quotes"
                % partition.get("num")
            )
        partition["fs_options"] = fs_options

    for adjustment_str in options.adjust_part.split():
        adjustment = adjustment_str.split(":")
        if len(adjustment) < 2:
            raise InvalidAdjustment(
                'Adjustment "%s" is incomplete' % adjustment_str
            )

        label = adjustment[0]
        operator = adjustment[1][0]
        operand = adjustment[1][1:]
        ApplyPartitionAdjustment(partitions, metadata, label, operator, operand)

    return partitions
| 759 | |
| 760 | |
def ApplyPartitionAdjustment(partitions, metadata, label, operator, operand):
    """Applies an adjustment to a partition specified by label

    Args:
        partitions: Partition table to modify
        metadata: Partition table metadata
        label: The label of the partition to adjust
        operator: Type of adjustment (+/-/=)
        operand: How much to adjust by

    Raises:
        ValueError: If |operator| is not one of +, - or =.
    """
    partition = GetPartitionByLabel(partitions, label)
    delta = ParseHumanNumber(operand)

    if operator == "=":
        partition["bytes"] = delta
    elif operator == "+":
        partition["bytes"] += delta
    elif operator == "-":
        partition["bytes"] -= delta
    else:
        raise ValueError("unknown operator %s" % operator)

    if partition["type"] == "rootfs":
        # If we're adjusting a rootFS partition, we assume the full partition
        # size specified is being used for the filesystem, minus the space
        # reserved for the hashpad.
        partition["fs_bytes"] = partition["bytes"]
        partition["fs_blocks"] = (
            partition["fs_bytes"] // metadata["fs_block_size"]
        )
        # Grow the partition itself by 15% to leave room for the hashpad.
        partition["bytes"] = int(partition["bytes"] * 1.15)
| 794 | |
| 795 | |
def GetPartitionTableFromConfig(options, layout_filename, image_type):
    """Loads a partition table and returns a given partition table type

    Args:
        options: Flags passed to the script
        layout_filename: The filename to load tables from
        image_type: The type of partition table to return
    """
    # Load the stacked config, then project out the requested layout.
    return GetPartitionTable(
        options, LoadPartitionConfig(layout_filename), image_type
    )
| 809 | |
| 810 | |
def GetScriptShell():
    """Loads and returns the skeleton script for our output script.

    Returns:
        A string containing the skeleton script
    """
    template_path = Path(constants.CHROMITE_DIR) / "sdk/cgpt_shell.sh"
    with open(template_path, "r") as f:
        contents = f.read()

    # Before we return, insert the path to this tool so somebody reading the
    # script later can tell where it was generated.
    return contents.replace("@SCRIPT_GENERATOR@", str(template_path))
| 829 | |
| 830 | |
def GetFullPartitionSize(partition, metadata):
    """Get the size of the partition including metadata/reserved space in bytes.

    The partition only has to be bigger for raw NAND devices. Formula:
    - Add UBI per-block metadata (2 pages) if partition is UBI
    - Round up to erase block size
    - Add UBI per-partition metadata (4 blocks) if partition is UBI
    - Add reserved erase blocks
    """
    eb_size = metadata.get("erase_block_size", 0)
    total = partition["bytes"]

    # Not raw NAND: the partition needs no extra overhead.
    if eb_size == 0:
        return total

    # See "Flash space overhead" in
    # http://www.linux-mtd.infradead.org/doc/ubi.html
    # for overhead calculations.
    is_ubi = partition.get("format") == "ubi"
    page_size = metadata.get("page_size", 0)

    if is_ubi:
        # Two pages of every erase block are consumed by UBI metadata.
        usable_eb_size = eb_size - 2 * page_size
        needed_blocks = -(-total // usable_eb_size)
        total += needed_blocks * 2 * page_size

    # Round up to a whole number of erase blocks.
    total = -(-total // eb_size) * eb_size

    if is_ubi:
        # UBI additionally reserves four erase blocks per partition.
        total += 4 * eb_size

    total += partition.get("reserved_erase_blocks", 0) * eb_size
    return total
| 867 | |
| 868 | |
def WriteLayoutFunction(options, slines, func, image_type, config):
    """Writes a shell script function to write out a given partition table.

    Appends a complete ``write_<func>_table()`` shell function to slines;
    the function invokes ${GPT} (cgpt) to create the partition table.

    Args:
        options: Flags passed to the script
        slines: lines to write to the script
        func: function of the layout:
            for removable storage device: 'partition',
            for the fixed storage device: 'base'
        image_type: Type of image eg base/test/dev/factory_install
        config: Partition configuration file object
    """

    # Template for one cgpt add invocation; %-filled per partition below.
    gpt_add = '${GPT} add -i %d -b $(( curr / block_size )) -s ${blocks} -t %s \
-l "%s" ${target}'
    partitions = GetPartitionTable(options, config, image_type)
    metadata = GetMetadataPartition(partitions)
    partition_totals = GetTableTotals(config, partitions)
    # Shell snippet that rounds `curr` up to the filesystem alignment.
    fs_align_snippet = [
        "if [ $(( curr %% %d )) -gt 0 ]; then" % config["metadata"]["fs_align"],
        "  : $(( curr += %d - curr %% %d ))"
        % ((config["metadata"]["fs_align"],) * 2),
        "fi",
    ]

    lines = [
        "write_%s_table() {" % func,
    ]

    if _HasExternalGpt(partitions):
        # Read GPT from device to get size, then wipe it out and operate
        # on GPT in tmpfs. We don't rely on cgpt's ability to deal
        # directly with the GPT on SPI NOR flash because rewriting the
        # table so many times would take a long time (>30min).
        # Also, wiping out the previous GPT with create_image won't work
        # for NAND and there's no equivalent via cgpt.
        # NOTE: the %s in the stat line is stat's own format spec, not
        # Python formatting -- the string is emitted verbatim.
        lines += [
            "gptfile=$(mktemp)",
            "flashrom -r -iRW_GPT:${gptfile}",
            "gptsize=$(stat ${gptfile} --format %s)",
            "dd if=/dev/zero of=${gptfile} bs=${gptsize} count=1",
            'target="-D %d ${gptfile}"' % metadata["bytes"],
        ]
    else:
        lines += [
            'local target="$1"',
            'create_image "${target}" %d' % partition_totals["min_disk_size"],
        ]

    lines += [
        "local blocks",
        'block_size=$(blocksize "${target}")',
        'numsecs=$(numsectors "${target}")',
    ]

    # ${target} is referenced unquoted because it may expand into multiple
    # arguments in the case of NAND
    lines += [
        "local curr=%d" % _GetPartitionStartByteOffset(config, partitions),
        "# Make sure Padding is block_size aligned.",
        "if [ $(( %d & (block_size - 1) )) -gt 0 ]; then"
        % _GetPrimaryEntryArrayPaddingBytes(config),
        '  echo "Primary Entry Array padding is not block aligned." >&2',
        "  exit 1",
        "fi",
        "# Create the GPT headers and tables. Pad the primary ones.",
        "${GPT} create -p $(( %d / block_size )) ${target}"
        % _GetPrimaryEntryArrayPaddingBytes(config),
    ]

    # NOTE(review): metadata was already fetched above; this re-fetch is
    # redundant but harmless.
    metadata = GetMetadataPartition(partitions)
    stateful = None
    last_part = None
    # Set up the expanding partition size and write out all the cgpt add
    # commands.
    for partition in partitions:
        if partition.get("num") == "metadata":
            continue

        partition["var"] = GetFullPartitionSize(partition, metadata)
        if "expand" in partition["features"]:
            # Defer the expanding ("stateful") partition; it is sized to
            # fill the remaining space after all fixed partitions.
            stateful = partition
            continue

        # Save the last partition to place at the end of the disk..
        if "last_partition" in partition["features"]:
            last_part = partition
            continue

        if (
            partition.get("type") in ["data", "rootfs"]
            and partition["bytes"] > 1
        ):
            lines += fs_align_snippet

        # NOTE(review): metadata entries were skipped at the top of the
        # loop, so the second half of this condition is always true here.
        if partition["var"] != 0 and partition.get("num") != "metadata":
            lines += [
                "blocks=$(( %s / block_size ))" % partition["var"],
                "if [ $(( %s %% block_size )) -gt 0 ]; then" % partition["var"],
                "  : $(( blocks += 1 ))",
                "fi",
            ]

        if partition["type"] != "blank":
            lines += [
                gpt_add
                % (partition["num"], partition["type"], partition["label"]),
            ]

        # Increment the curr counter ready for the next partition.
        if partition["var"] != 0 and partition.get("num") != "metadata":
            lines += [
                ": $(( curr += blocks * block_size ))",
            ]

    if stateful is not None:
        # The expanding partition consumes everything up to the secondary
        # GPT at the end of the disk, minus any reserved last partition.
        lines += fs_align_snippet + [
            "blocks=$(( numsecs - (curr + %d) / block_size ))"
            % SECONDARY_GPT_BYTES,
        ]
        if last_part is not None:
            lines += [
                "reserved_blocks=$(( (%s + block_size - 1) / block_size ))"
                % last_part["var"],
                ": $(( blocks = blocks - reserved_blocks ))",
            ]
        lines += [
            gpt_add % (stateful["num"], stateful["type"], stateful["label"]),
            ": $(( curr += blocks * block_size ))",
        ]

    if last_part is not None:
        lines += [
            "reserved_blocks=$(( (%s + block_size - 1) / block_size ))"
            % last_part["var"],
            "blocks=$((reserved_blocks))",
            gpt_add % (last_part["num"], last_part["type"], last_part["label"]),
        ]

    # Set default priorities and retry counter on kernel partitions.
    tries = 15
    prio = 15
    # The order of partition numbers in this loop matters.
    # Make sure partition #2 is the first one, since it will be marked as
    # default bootable partition.
    for partition in GetPartitionsByType(partitions, "kernel"):
        lines += [
            "${GPT} add -i %s -S 0 -T %i -P %i ${target}"
            % (partition["num"], tries, prio)
        ]
        prio = 0
        # When not writing 'base' function, make sure the other partitions are
        # marked as non-bootable (retry count == 0), since the USB layout
        # doesn't have any valid data in slots B & C. But with base function,
        # called by chromeos-install script, the KERNEL A partition is
        # replicated into both slots A & B, so we should leave both bootable
        # for error recovery in this case.
        if func != "base":
            tries = 0

    efi_partitions = GetPartitionsByType(partitions, "efi")
    if efi_partitions:
        lines += [
            "${GPT} boot -p -b $2 -i %d ${target}" % efi_partitions[0]["num"],
            "${GPT} add -i %s -B 1 ${target}" % efi_partitions[0]["num"],
        ]
    else:
        # Provide a PMBR all the time for boot loaders (like u-boot)
        # that expect one to always be there.
        lines += [
            "${GPT} boot -p -b $2 ${target}",
        ]

    if metadata.get("hybrid_mbr"):
        lines += ["install_hybrid_mbr ${target}"]
    lines += ["${GPT} show ${target}"]

    if _HasExternalGpt(partitions):
        # Write the rebuilt GPT back to SPI NOR flash in one shot.
        lines += ["flashrom -w -iRW_GPT:${gptfile} --noverify-all"]

    slines += "%s\n}\n\n" % "\n  ".join(lines)
| 1050 | |
| 1051 | |
def WritePartitionSizesFunction(
    options, slines, func, image_type, config, data
):
    """Writes out the partition size variable that can be extracted by a caller.

    Appends a ``load_<func>_vars()`` shell function to slines that exports
    one variable per partition attribute (size, format, options, ...), and
    mirrors the same key/value pairs into `data` for JSON output.

    Args:
        options: Flags passed to the script
        slines: lines to write to the script file
        func: function of the layout:
            for removable storage device: 'partition',
            for the fixed storage device: 'base'
        image_type: Type of image eg base/test/dev/factory_install
        config: Partition configuration file object
        data: data dict we will write to a json file
    """
    func_name = "load_%s_vars" % func
    lines = [
        "%s() {" % func_name,
        'DEFAULT_ROOTDEV="%s"'
        % config["metadata"].get("rootdev_%s" % func, ""),
    ]

    data[func_name] = {}
    data[func_name]["DEFAULT_ROOTDEV"] = "%s" % config["metadata"].get(
        "rootdev_%s" % func, ""
    )

    partitions = GetPartitionTable(options, config, image_type)
    for partition in partitions:
        # The pseudo "metadata" partition is not written to disk; skip it.
        if partition.get("num") == "metadata":
            continue
        # Emit each variable twice: once keyed by label (e.g. ROOT_A) and
        # once keyed by number (e.g. 3), so callers can use either.
        for key in ("label", "num"):
            if key in partition:
                # Shell identifiers cannot contain '-', so ROOT-A -> ROOT_A.
                shell_label = str(partition[key]).replace("-", "_").upper()
                part_bytes = partition["bytes"]
                reserved_ebs = partition.get("reserved_erase_blocks", 0)
                fs_bytes = partition.get("fs_bytes", part_bytes)
                part_format = partition.get("format", "")
                fs_format = partition.get("fs_format", "")
                fs_options = partition.get("fs_options", "")
                partition_num = partition.get("num", "")
                # args are emitted unquoted; sargs are emitted quoted
                # because their values may contain spaces.
                args = [
                    ("PARTITION_SIZE_", part_bytes),
                    ("RESERVED_EBS_", reserved_ebs),
                    ("DATA_SIZE_", fs_bytes),
                    ("FORMAT_", part_format),
                    ("FS_FORMAT_", fs_format),
                ]
                sargs = [
                    ("FS_OPTIONS_", fs_options),
                    ("PARTITION_NUM_", partition_num),
                ]
                for arg, value in args:
                    label = arg + shell_label
                    lines += [
                        "%s=%s" % (label, value),
                    ]
                    data[func_name][label] = "%s" % value
                for arg, value in sargs:
                    label = arg + shell_label
                    lines += [
                        '%s="%s"' % (label, value),
                    ]
                    data[func_name][label] = "%s" % value
    slines += "%s\n}\n\n" % "\n  ".join(lines)
| 1117 | |
| 1118 | |
def GetPartitionByNumber(partitions, num):
    """Given a partition table and number returns the partition object.

    Args:
        partitions: List of partitions to search in
        num: Number of partition to find (int or numeric string)

    Returns:
        An object for the selected partition

    Raises:
        PartitionNotFound: if no partition carries the requested number
    """
    # The generator coerces num lazily, per candidate, which keeps the
    # original behavior of never touching num when partitions is empty.
    match = next(
        (part for part in partitions if part.get("num") == int(num)), None
    )
    if match is None:
        raise PartitionNotFound("Partition %s not found" % num)
    return match
| 1134 | |
| 1135 | |
def GetPartitionsByType(partitions, typename):
    """Given a partition table and type returns the partitions of the type.

    Partitions are sorted in num order.

    Args:
        partitions: List of partitions to search in
        typename: The type of partitions to select

    Returns:
        A list of partitions of the type
    """
    matched = [part for part in partitions if part.get("type") == typename]
    matched.sort(key=lambda part: part.get("num"))
    return matched
| 1153 | |
| 1154 | |
def GetMetadataPartition(partitions):
    """Given a partition table returns the metadata partition object.

    Args:
        partitions: List of partitions to search in

    Returns:
        An object for the metadata partition, or an empty dict when the
        table has no metadata entry
    """
    return next(
        (part for part in partitions if part.get("num") == "metadata"), {}
    )
| 1169 | |
| 1170 | |
def GetPartitionByLabel(partitions, label):
    """Given a partition table and label returns the partition object.

    Args:
        partitions: List of partitions to search in
        label: Label of partition to find

    Returns:
        An object for the selected partition

    Raises:
        PartitionNotFound: if no partition carries the requested label
    """
    for candidate in partitions:
        # Unlabeled entries (e.g. blanks) can never match.
        if "label" in candidate and candidate["label"] == label:
            return candidate

    raise PartitionNotFound('Partition "%s" not found' % label)
| 1188 | |
| 1189 | |
def WritePartitionScript(
    options, image_type, layout_filename, sfilename, vfilename
):
    """Writes a shell script with functions for the base and requested layouts.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        sfilename: Filename to write the finished script to
        vfilename: Filename to write the partition variables json data to
    """
    config = LoadPartitionConfig(layout_filename)

    with open(sfilename, "w") as f, open(vfilename, "w") as jFile:
        # Start from the static skeleton script, then append the generated
        # write_*_table() / load_*_vars() functions below it.
        script_shell = GetScriptShell()
        f.write(script_shell)

        data = {}
        slines = []
        # Always emit the 'base' layout plus the requested image layout.
        for func, layout in (("base", BASE_LAYOUT), ("partition", image_type)):
            WriteLayoutFunction(options, slines, func, layout, config)
            WritePartitionSizesFunction(
                options, slines, func, layout, config, data
            )

        f.write("".join(slines))
        # Mirror the shell variables into JSON for non-shell consumers.
        json.dump(data, jFile)

        # TODO: Backwards compat. Should be killed off once we update
        # cros_generate_update_payload to use the new code.
        partitions = GetPartitionTable(options, config, BASE_LAYOUT)
        partition = GetPartitionByLabel(partitions, "ROOT-A")
        f.write("ROOTFS_PARTITION_SIZE=%s\n" % (partition["bytes"],))
| 1224 | |
| 1225 | |
def GetBlockSize(_options, layout_filename):
    """Returns the partition table block size.

    Args:
        _options: Flags passed to the script (unused)
        layout_filename: Path to partition configuration file

    Returns:
        Block size of all partitions in the layout
    """
    return LoadPartitionConfig(layout_filename)["metadata"]["block_size"]
| 1239 | |
| 1240 | |
def GetFilesystemBlockSize(_options, layout_filename):
    """Returns the filesystem block size.

    This is used for all partitions in the table that have filesystems.

    Args:
        _options: Flags passed to the script (unused)
        layout_filename: Path to partition configuration file

    Returns:
        Block size of all filesystems in the layout
    """
    return LoadPartitionConfig(layout_filename)["metadata"]["fs_block_size"]
| 1256 | |
| 1257 | |
def GetImageTypes(_options, layout_filename):
    """Returns a list of all the image types in the layout.

    Args:
        _options: Flags passed to the script (unused)
        layout_filename: Path to partition configuration file

    Returns:
        Space-separated string of all image type names
    """
    layouts = LoadPartitionConfig(layout_filename)["layouts"]
    # Iterating the dict yields its keys in insertion order.
    return " ".join(layouts)
| 1271 | |
| 1272 | |
def GetType(options, image_type, layout_filename, num):
    """Returns the type of a given partition for a given layout.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        num: Number of the partition you want to read from

    Returns:
        Type of the specified partition.
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    return GetPartitionByNumber(table, num).get("type")
| 1290 | |
| 1291 | |
def GetPartitions(options, image_type, layout_filename):
    """Returns the partition numbers for the image_type.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file

    Returns:
        A space delimited string of partition numbers.
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    # The pseudo "metadata" entry and number-less entries are omitted.
    numbers = [
        str(entry["num"])
        for entry in table
        if "num" in entry and entry["num"] != "metadata"
    ]
    return " ".join(numbers)
| 1311 | |
| 1312 | |
def GetUUID(options, image_type, layout_filename, num):
    """Returns the filesystem UUID of a given partition for a given layout type.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        num: Number of the partition you want to read from

    Returns:
        UUID of specified partition. Defaults to random if not set.
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    entry = GetPartitionByNumber(table, num)
    return entry.get("uuid", "random")
| 1330 | |
| 1331 | |
def GetPartitionSize(options, image_type, layout_filename, num):
    """Returns the partition size of a given partition for a given layout type.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        num: Number of the partition you want to read from

    Returns:
        Size of selected partition in bytes
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    return GetPartitionByNumber(table, num)["bytes"]
| 1351 | |
| 1352 | |
def GetFilesystemFormat(options, image_type, layout_filename, num):
    """Returns the filesystem format of a partition for a given layout type.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        num: Number of the partition you want to read from

    Returns:
        Format of the selected partition's filesystem
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    return GetPartitionByNumber(table, num).get("fs_format")
| 1372 | |
| 1373 | |
def GetFormat(options, image_type, layout_filename, num):
    """Returns the format of a given partition for a given layout type.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        num: Number of the partition you want to read from

    Returns:
        Format of the selected partition's filesystem
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    return GetPartitionByNumber(table, num).get("format")
| 1393 | |
| 1394 | |
def GetFilesystemOptions(options, image_type, layout_filename, num):
    """Returns the filesystem options of a given partition and layout type.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        num: Number of the partition you want to read from

    Returns:
        The selected partition's filesystem options
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    return GetPartitionByNumber(table, num).get("fs_options")
| 1414 | |
| 1415 | |
def GetFilesystemSize(options, image_type, layout_filename, num):
    """Returns the filesystem size of a given partition for a given layout type.

    If no filesystem size is specified, returns the partition size.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        num: Number of the partition you want to read from

    Returns:
        Size of selected partition filesystem in bytes
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    entry = GetPartitionByNumber(table, num)
    # Fall back to the raw partition size when no explicit fs size is set.
    return entry["fs_bytes"] if "fs_bytes" in entry else entry["bytes"]
| 1440 | |
| 1441 | |
def GetLabel(options, image_type, layout_filename, num):
    """Returns the label for a given partition.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        num: Number of the partition you want to read from

    Returns:
        Label of selected partition, or 'UNTITLED' if none specified
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    return GetPartitionByNumber(table, num).get("label", "UNTITLED")
| 1464 | |
| 1465 | |
def GetNumber(options, image_type, layout_filename, label):
    """Returns the partition number of a given label.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        label: Label of the partition you want to look up

    Returns:
        The number of the partition corresponding to the label.
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    return GetPartitionByLabel(table, label)["num"]
| 1484 | |
| 1485 | |
def GetReservedEraseBlocks(options, image_type, layout_filename, num):
    """Returns the number of erase blocks reserved in the partition.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
        num: Number of the partition you want to read from

    Returns:
        Number of reserved erase blocks (0 when none are declared)
    """
    table = GetPartitionTableFromConfig(options, layout_filename, image_type)
    return GetPartitionByNumber(table, num).get("reserved_erase_blocks", 0)
| 1506 | |
| 1507 | |
def _DumpLayout(options, config, image_type):
    """Prints out a human readable disk layout in on-disk order.

    Args:
        options: Flags passed to the script.
        config: Partition configuration file object.
        image_type: Type of image e.g. base/test/dev/factory_install.
    """
    try:
        partitions = GetPartitionTable(options, config, image_type)
    except InvalidLayout as e:
        # A bad layout is a user-facing error; report it cleanly instead
        # of dumping a traceback.
        print(str(e), file=sys.stderr)
        sys.exit(1)

    # Size the label/type columns to the widest value present.
    # NOTE(review): max() raises ValueError if no partition defines a
    # label/type at all -- presumably every valid layout has at least one.
    label_len = max(len(x["label"]) for x in partitions if "label" in x)
    type_len = max(len(x["type"]) for x in partitions if "type" in x)

    msg = "num:%4s label:%-*s type:%-*s size:%-10s fs_size:%-10s features:%s"

    print("\n%s Layout Data" % image_type.upper())
    for partition in partitions:
        # The pseudo "metadata" partition is not on disk; skip it.
        if partition.get("num") == "metadata":
            continue

        size = ProduceHumanNumber(partition["bytes"])
        if "fs_bytes" in partition:
            fs_size = ProduceHumanNumber(partition["fs_bytes"])
        else:
            fs_size = "auto"

        print(
            msg
            % (
                partition.get("num", "auto"),
                label_len,
                partition.get("label", ""),
                type_len,
                partition.get("type", ""),
                size,
                fs_size,
                partition.get("features", []),
            )
        )
| 1551 | |
| 1552 | |
def DoDebugOutput(options, layout_filename, image_type):
    """Prints out a human readable disk layout in on-disk order.

    Args:
        options: Flags passed to the script
        layout_filename: Path to partition configuration file
        image_type: Type of image e.g. ALL/LIST/base/test/dev/factory_install
    """
    # 'LIST' only asks for the names of the available layouts.
    if image_type == "LIST":
        print(GetImageTypes(options, layout_filename))
        return

    config = LoadPartitionConfig(layout_filename)

    # Print out non-layout options first.
    print("Config Data")
    field_fmt = "field:%-14s value:%s"
    for key, value in config.items():
        if key in ("layouts", "_comment"):
            continue
        print(field_fmt % (key, value))

    # Then dump either every layout or just the requested one.
    layouts = config["layouts"] if image_type == "ALL" else [image_type]
    for layout in layouts:
        _DumpLayout(options, config, layout)
| 1579 | |
| 1580 | |
def CheckRootfsPartitionsMatch(partitions):
    """Checks that rootfs partitions are substitutable with each other.

    This function asserts that either all rootfs partitions are in the same
    format or none have a format, and it asserts that have the same number of
    reserved erase blocks.

    Raises:
        MismatchedRootfsFormat: two rootfs partitions declare different
            formats.
        MismatchedRootfsBlocks: two rootfs partitions declare different
            reserved erase block counts.
    """
    fmt_seen = None
    ebs_seen = -1
    for part in partitions:
        if part.get("type") != "rootfs":
            continue

        fmt = part.get("format", "")
        ebs = part.get("reserved_erase_blocks", 0)

        # The first rootfs encountered establishes the baseline.
        if fmt_seen is None:
            fmt_seen = fmt
            ebs_seen = ebs

        if fmt != fmt_seen:
            raise MismatchedRootfsFormat(
                'mismatched rootfs formats: "%s" and "%s"'
                % (fmt_seen, fmt)
            )

        if ebs != ebs_seen:
            raise MismatchedRootfsBlocks(
                "mismatched rootfs reserved erase block counts: %s and %s"
                % (ebs_seen, ebs)
            )
| 1612 | |
| 1613 | |
def Combinations(n, k):
    """Calculate the binomial coefficient, i.e., "n choose k"

    This calculates the number of ways that k items can be chosen from
    a set of size n. For example, if there are n blocks and k of them
    are bad, then this returns the number of ways that the bad blocks
    can be distributed over the device.
    See http://en.wikipedia.org/wiki/Binomial_coefficient

    For convenience to the caller, this function allows impossible cases
    as input and returns 0 for them.

    Args:
        n: Size of the set being chosen from.
        k: Number of items to choose.

    Returns:
        The exact integer binomial coefficient, or 0 when k < 0 or n < k.
    """
    # math.comb rejects negative arguments, so screen out the impossible
    # cases first; the guard guarantees 0 <= k <= n (and hence n >= 0) on
    # the math.comb path. math.comb computes the exact integer result
    # without the huge intermediate factorials of the naive formula.
    if k < 0 or n < k:
        return 0
    return math.comb(n, k)
| 1629 | |
| 1630 | |
def CheckReservedEraseBlocks(partitions):
    """Checks that the reserved_erase_blocks in each partition is good.

    This function checks that a reasonable value was given for the reserved
    erase block count. In particular, it checks that there's a less than
    1 in 100k probability that, if the manufacturer's maximum bad erase
    block count is met, and assuming bad blocks are uniformly randomly
    distributed, then more bad blocks will fall in this partition than are
    reserved. Smaller partitions need a larger reserve percentage.

    We take the number of reserved blocks as a parameter in disk_layout.json
    rather than just calculating the value so that it can be tweaked
    explicitly along with others in squeezing the image onto flash. But
    we check it so that users have an easy method for determining what's
    acceptable--just try out a new value and do ./build_image.

    Args:
        partitions: List of partition objects from the parsed layout.

    Raises:
        MissingEraseBlockField: a metadata field needed for the check is
            absent.
        ExcessFailureProbability: the reserve is too small for the
            1-in-100k failure bound.
    """
    for partition in partitions:
        # Only partitions that declare a reserve or sit on UBI/NAND
        # media are subject to the bad-block analysis.
        if "reserved_erase_blocks" in partition or partition.get("format") in (
            "ubi",
            "nand",
        ):
            # Zero-byte partitions cannot accumulate bad blocks; skip.
            if partition.get("bytes", 0) == 0:
                continue
            metadata = GetMetadataPartition(partitions)
            if (
                not _HasBadEraseBlocks(partitions)
                or "reserved_erase_blocks" not in partition
                or "bytes" not in metadata
                or "erase_block_size" not in metadata
                or "page_size" not in metadata
            ):
                raise MissingEraseBlockField(
                    "unable to check if partition %s will have too many bad "
                    "blocks due to missing metadata field" % partition["label"]
                )

            reserved = partition["reserved_erase_blocks"]
            erase_block_size = metadata["erase_block_size"]
            device_erase_blocks = metadata["bytes"] // erase_block_size
            device_bad_blocks = metadata["max_bad_erase_blocks"]
            distributions = Combinations(device_erase_blocks, device_bad_blocks)
            partition_erase_blocks = partition["bytes"] // erase_block_size
            # The idea is to calculate the number of ways that there could be
            # reserved or more bad blocks inside the partition, assuming that
            # there are device_bad_blocks in the device in total
            # (the worst case). To get the probability, we divide this count by
            # the total number of ways that the bad blocks can be distributed on
            # the whole device. To find the first number, we sum over
            # increasing values for the count of bad blocks within the partition
            # the number of ways that those bad blocks can be inside the
            # partition, multiplied by the number of ways that the remaining
            # blocks can be distributed outside of the partition.
            ways_for_failure = sum(
                Combinations(partition_erase_blocks, partition_bad_blocks)
                * Combinations(
                    device_erase_blocks - partition_erase_blocks,
                    device_bad_blocks - partition_bad_blocks,
                )
                for partition_bad_blocks in range(
                    reserved + 1, device_bad_blocks + 1
                )
            )
            probability = ways_for_failure / distributions
            # Reject reserves whose exhaustion chance exceeds 1 in 100,000.
            if probability > 0.00001:
                raise ExcessFailureProbability(
                    "excessive probability %f of too many "
                    "bad blocks in partition %s"
                    % (probability, partition["label"])
                )
| 1700 | |
| 1701 | |
def CheckSimpleNandProperties(partitions):
    """Checks that NAND partitions are erase-block-aligned and not expand

    Args:
        partitions: List of partition dicts from the disk layout.

    Raises:
        UnalignedPartition: If a partition's size is not a whole multiple
            of the device erase block size.
        ExpandNandImpossible: If any partition uses the 'expand' feature,
            which is not supported on raw NAND.
    """
    if not _HasBadEraseBlocks(partitions):
        # Not a raw NAND layout; these constraints do not apply.
        return
    metadata = GetMetadataPartition(partitions)
    # The erase block size is a device-wide property from the metadata
    # partition, so look it up once instead of on every loop iteration.
    erase_block_size = metadata["erase_block_size"]
    for partition in partitions:
        if partition["bytes"] % erase_block_size != 0:
            # The message previously had the relation backwards; the check
            # requires the erase block size to evenly divide the partition.
            raise UnalignedPartition(
                "partition size %s is not a multiple of erase block size %s"
                % (partition["bytes"], erase_block_size)
            )
        if "expand" in partition["features"]:
            raise ExpandNandImpossible(
                "expand partitions may not be used with raw NAND"
            )
| 1718 | |
| 1719 | |
def CheckTotalSize(partitions):
    """Checks that the sum size of all partitions fits within the device"""
    metadata = GetMetadataPartition(partitions)
    if "bytes" not in metadata:
        # No declared device capacity, so there is nothing to validate.
        return
    capacity = metadata["bytes"]
    total = 0
    for partition in partitions:
        # The metadata pseudo-partition does not consume space on disk.
        if partition.get("num") == "metadata":
            continue
        total += GetFullPartitionSize(partition, metadata)
    if total > capacity:
        raise ExcessPartitionSize("capacity = %d, total=%d" % (capacity, total))
| 1733 | |
| 1734 | |
def Validate(options, image_type, layout_filename):
    """Validates a layout file, used before reading sizes to check for errors.

    Args:
        options: Flags passed to the script
        image_type: Type of image eg base/test/dev/factory_install
        layout_filename: Path to partition configuration file
    """
    partitions = GetPartitionTableFromConfig(
        options, layout_filename, image_type
    )
    # Run every consistency check in order; the first failure raises.
    for check in (
        CheckRootfsPartitionsMatch,
        CheckTotalSize,
        CheckSimpleNandProperties,
        CheckReservedEraseBlocks,
    ):
        check(partitions)
| 1750 | |
| 1751 | |
class ArgsAction(argparse.Action):  # pylint: disable=no-init
    """Helper to add all arguments to an args array.

    ArgumentParser does not let you specify the same dest for multiple args.
    We take care of appending to the 'args' array ourselves here.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # Accumulate each parsed value onto a single shared list so every
        # positional ends up in namespace.args, in parse order.
        try:
            collected = namespace.args
        except AttributeError:
            collected = []
        collected.append(values)
        namespace.args = collected
| 1763 | |
| 1764 | |
class HelpAllAction(argparse.Action):
    """Display all subcommands help in one go."""

    def __init__(self, *args, **kwargs):
        # This option never consumes arguments; force nargs=0 and reject
        # any caller-supplied value to avoid silently overriding it.
        if "nargs" in kwargs:
            raise ValueError("nargs not allowed")
        kwargs["nargs"] = 0
        super().__init__(*args, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Print the summary of every subcommand, then exit successfully."""
        print("%s\nCommands:" % (parser.description,), end="")
        # namespace.help_all holds the subparsers object (set as this
        # option's default); choices maps command names to subparsers.
        # NOTE: the previous redundant getattr of help_all was removed --
        # its result was immediately clobbered by the loop variable.
        for key, subparser in namespace.help_all.choices.items():
            # Should we include the desc of each arg too ?
            print(
                "\n %s %s\n %s"
                % (
                    key,
                    subparser.get_default("help_all"),
                    subparser.description,
                )
            )
        sys.exit(0)
| 1788 | |
| 1789 | |
def GetParser():
    """Return a parser for the CLI."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "--adjust_part",
        metavar="SPEC",
        default="",
        help="adjust partition sizes",
    )

    # Map each subcommand name to the function implementing it.
    action_map = {
        "write": WritePartitionScript,
        "readblocksize": GetBlockSize,
        "readfsblocksize": GetFilesystemBlockSize,
        "readpartsize": GetPartitionSize,
        "readformat": GetFormat,
        "readfsformat": GetFilesystemFormat,
        "readfssize": GetFilesystemSize,
        "readimagetypes": GetImageTypes,
        "readfsoptions": GetFilesystemOptions,
        "readlabel": GetLabel,
        "readnumber": GetNumber,
        "readreservederaseblocks": GetReservedEraseBlocks,
        "readtype": GetType,
        "readpartitionnums": GetPartitions,
        "readuuid": GetUUID,
        "debug": DoDebugOutput,
        "validate": Validate,
    }

    # Subparsers are required by default under Python 2. Python 3 changed to
    # not required, but didn't include a required option until 3.7. Setting
    # the required member works in all versions (and setting dest name).
    subparsers = parser.add_subparsers(title="Commands", dest="command")
    subparsers.required = True

    for command, handler in sorted(action_map.items()):
        # The first docstring line is the summary shown to the user; the
        # remainder documents each argument as "name: description" lines.
        summary, remainder = handler.__doc__.split("\n", 1)
        arg_docs = {}
        for doc_line in remainder.splitlines():
            if ":" not in doc_line:
                continue
            arg_name, arg_text = doc_line.split(":", 1)
            arg_docs[arg_name.strip()] = arg_text.strip()

        # Drop the leading parameter (the parsed options object); the rest
        # of the handler's parameters become CLI positionals.
        positionals = inspect.getfullargspec(handler).args[1:]

        subparser = subparsers.add_parser(
            command, description=summary, help=summary
        )
        subparser.set_defaults(
            callback=handler,
            help_all=" ".join("<%s>" % x for x in positionals),
        )
        for positional in positionals:
            subparser.add_argument(
                positional, action=ArgsAction, help=arg_docs[positional]
            )

    parser.add_argument(
        "--help-all",
        action=HelpAllAction,
        default=subparsers,
        help="show all commands and their help in one screen",
    )

    return parser
| 1858 | |
| 1859 | |
def main(argv):
    """Entry point: parse CLI arguments and run the chosen subcommand.

    Args:
        argv: Command line arguments, excluding the program name.
    """
    opts = GetParser().parse_args(argv)
    # Subcommand callbacks may return a value to print, or None for none.
    result = opts.callback(opts, *opts.args)
    if result is not None:
        print(result)
| 1867 | |
| 1868 | |
# Script entry point; main() returns None, so the exit status is 0.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))