/* Copyright 2018 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

#include "action_descriptor.h"
#include "chipdrivers.h"
#include "flash.h"
#include "layout.h"
#include "platform.h"
#include "programmer.h"


/*
 * This global variable is used to communicate the type of ICH found on the
 * device. When running on non-Intel platforms the default value of
 * CHIPSET_ICH_UNKNOWN is used.
 */
extern enum ich_chipset ich_generation;

/*
 * Unfortunate global state.
 */
static bool dry_run = false;

/*
 * This module analyses the contents of 'before' and 'after' flash images
 * and, based on the images' differences, prepares a list of processing
 * actions to take.
 *
 * The goal is to prepare actions using the chip's erase capability in the
 * most efficient way: erasing the smallest possible portions of the chip
 * gives the highest granularity, but if many small areas need to be erased,
 * erasing a larger area, even if re-writing it completely, is more
 * efficient. The break-even point is somewhere around 60%; the code below
 * uses an admittedly arbitrary 70% rule of thumb.
 *
 * Each flash chip description in flash.c includes a set of erase command
 * descriptors, different commands allowing to erase blocks of different
 * fixed sizes. Sometimes the erase command for a certain block size does not
 * cover the entire chip. This module preprocesses the flash chip description
 * to compile an array of erase commands with their block size indices such
 * that it is guaranteed that the command can be used to erase anywhere in
 * the chip where erase is required based on the differences between 'before'
 * and 'after' images.
 *
 * 'eraser_index' below is the index into the 'block_erasers' array of the
 * flash chip descriptor; it points to the function to use to erase the block
 * of a certain size.
 *
 * The erase command could potentially operate on blocks of different sizes;
 * 'region_index' is the index into the 'block_erasers.eraseblocks' array
 * which defines what block size would be used by this erase command.
 */
struct eraser {
	int eraser_index;
	int region_index;
};
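
/*
 * Illustrative sketch (hypothetical values, not taken from any real chip
 * descriptor): for a part offering a 4 KiB sector erase in block_erasers[0]
 * and a 64 KiB block erase in block_erasers[2], fill_sorted_erasers() below
 * could produce:
 *
 *	struct eraser sorted_erasers[] = {
 *		{ .eraser_index = 0, .region_index = 0 },  (4 KiB blocks)
 *		{ .eraser_index = 2, .region_index = 0 },  (64 KiB blocks)
 *	};
 */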

/*
 * A helper structure which holds information about blocks of a given size
 * which require writing and/or erasing.
 *
 * The actual map of the blocks is pointed at by the 'block_map' field, one
 * byte per block. A block might need an erase, or just a write, depending on
 * the contents of the 'before' and 'after' flash images.
 *
 * The 'limit' field holds the number of blocks of this size which is
 * equivalent to one block of the next larger size in terms of the time
 * required for erasing/programming.
 */
struct range_map {
	size_t block_size;
	int limit;
	struct b_map {
		uint8_t need_change:1;
		uint8_t need_erase:1;
	} *block_map;
};
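
/*
 * Worked example (hypothetical sizes): with 4 KiB blocks folding into 64 KiB
 * blocks, the 70% rule of thumb used in fill_action_descriptor() gives
 *
 *	limit = (65536 / 4096) * 7 / 10 = 11
 *
 * i.e. once 12 or more of the 16 nested 4 KiB blocks need erasing, a single
 * 64 KiB erase is considered cheaper than many small ones.
 */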

/*
 * A debug function printing out the array of processing units from the
 * action descriptor.
 */
static void dump_descriptor(struct action_descriptor *descriptor)
{
	struct processing_unit *pu = descriptor->processing_units;

	while (pu->num_blocks) {
		msg_pdbg("%06zx..%06zx %6zx x %zd eraser %d\n", pu->offset,
			 pu->offset + pu->num_blocks * pu->block_size - 1,
			 pu->block_size, pu->num_blocks,
			 pu->block_eraser_index);
		pu++;
	}
}
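
/*
 * Sample output (hypothetical run): one dirty 4 KiB block at offset 0x1000
 * and two dirty 64 KiB blocks starting at 0x20000 would print as:
 *
 *	001000..001fff   1000 x 1 eraser 0
 *	020000..03ffff  10000 x 2 eraser 2
 */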

/*
 * Do not allow the use of unsupported eraser functions.
 *
 * On some Intel platforms the ICH SPI controller restricts the set of SPI
 * command codes the AP can issue, in particular limiting the set of erase
 * functions to just two of them.
 *
 * This function creates a local copy of the flash chip descriptor found in
 * the main table, filtering out unsupported erase function pointers when
 * necessary.
 *
 * flash: pointer to the master flash context, including the original chip
 *        descriptor.
 * chip: pointer to a flash chip descriptor copy, potentially with just a
 *       subset of erasers included.
 */
static void fix_erasers_if_needed(struct flashchip *chip,
				  struct flashctx *flash)
{
	int i;

	/* Need to copy no matter what. */
	*chip = *flash->chip;

#if IS_X86
	/*
	 * ich_generation is set to the chipset type when running on an x86
	 * device, even when flashrom was invoked to program the EC.
	 *
	 * But the ICH type does not affect the EC programming path, so there
	 * is no need to check if the eraser is supported in that case.
	 */
	if ((ich_generation == CHIPSET_ICH_UNKNOWN) || programming_ec()) {
		msg_pdbg("%s: kept all erasers\n", __func__);
		return;
	}
#else
	msg_pdbg("%s: kept all erasers on non-x86\n", __func__);
	return;
#endif /* !IS_X86 */

	/*
	 * We are dealing with an Intel controller; different chipsets allow
	 * different erase commands. Let's check the commands and allow only
	 * those which the controller accepts.
	 */
	dry_run = true;
	for (i = 0; i < NUM_ERASEFUNCTIONS; i++) {

		/* Skip empty eraser slots. */
		if (!chip->block_erasers[i].block_erase)
			continue;

		if (!chip->block_erasers[i].block_erase
		    (flash, 0, flash->chip->total_size * 1024)) {
			msg_pdbg("%s: kept eraser at %d\n", __func__, i);
			continue;
		}

		chip->block_erasers[i].block_erase = NULL;
	}
	dry_run = false;
}
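
/*
 * Driver-side sketch (hypothetical helper name): an opcode-restricted SPI
 * driver can honor this probe by validating the erase opcode and returning
 * before any bus traffic, e.g.:
 *
 *	if (!opcode_allowed_by_controller(op))
 *		return 1;	(probe fails, eraser gets filtered out)
 *	if (is_dry_run())
 *		return 0;	(opcode accepted, nothing actually erased)
 */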

/*
 * Prepare a list of erasers available on this chip, sorted by block size,
 * from lower to higher.
 *
 * @flash          pointer to the flash context
 * @erase_size     maximum offset which needs to be erased
 * @sorted_erasers pointer to the array of eraser structures, large enough to
 *                 fit NUM_ERASEFUNCTIONS elements.
 *
 * Returns number of elements put into the 'sorted_erasers' array.
 */
static size_t fill_sorted_erasers(struct flashctx *flash,
				  size_t erase_size,
				  struct eraser *sorted_erasers)
{
	size_t j, k;
	size_t chip_eraser;
	size_t chip_region;
	struct flashchip chip; /* Local copy, potentially altered. */
	/*
	 * In case the chip description does not include any functions
	 * covering the entire space (this could happen when the description
	 * comes from the Chrome OS TP driver, for instance), make a best
	 * effort.
	 *
	 * The structure below saves information about the eraser which
	 * covers most of the chip space; it is used if no valid functions
	 * were found, which allows programming to succeed.
	 *
	 * The issue is to be investigated further under b/110474116.
	 */
	struct {
		int max_total;
		int alt_function;
		int alt_region;
	} fallback = {};

	fix_erasers_if_needed(&chip, flash);

	/* Iterate over all available erase functions/block sizes. */
	for (j = k = 0; k < NUM_ERASEFUNCTIONS; k++) {
		size_t new_block_size;
		size_t m, n;

		/* Make sure there is a function in this slot. */
		if (!chip.block_erasers[k].block_erase)
			continue;

		/*
		 * Make sure there is a (block size * count) combination which
		 * would erase up to the required offset into the chip.
		 *
		 * If this is not the case, but the current total size exceeds
		 * the previously saved fallback total size, make the current
		 * block the best available fallback case.
		 */
		for (n = 0; n < NUM_ERASEREGIONS; n++) {
			const struct eraseblock *eb =
				chip.block_erasers[k].eraseblocks + n;
			size_t total = eb->size * eb->count;

			if (total >= erase_size)
				break;

			if (total > (size_t)fallback.max_total) {
				fallback.max_total = total;
				fallback.alt_region = n;
				fallback.alt_function = k;
			}
		}

		if (n == NUM_ERASEREGIONS) {
			/*
			 * This function will not erase far enough into the
			 * chip.
			 */
			continue;
		}

		new_block_size = chip.block_erasers[k].eraseblocks[n].size;

		/*
		 * Place this block in the sorted position in the
		 * sorted_erasers array.
		 */
		for (m = 0; m < j; m++) {
			size_t old_block_size;

			chip_eraser = sorted_erasers[m].eraser_index;
			chip_region = sorted_erasers[m].region_index;

			old_block_size = chip.block_erasers
				[chip_eraser].eraseblocks[chip_region].size;

			if (old_block_size < new_block_size)
				continue;

			/* Do not keep duplicates in the sorted array. */
			if (old_block_size == new_block_size) {
				j--;
				break;
			}

			memmove(sorted_erasers + m + 1,
				sorted_erasers + m,
				sizeof(sorted_erasers[0]) * (j - m));
			break;
		}
		sorted_erasers[m].eraser_index = k;
		sorted_erasers[m].region_index = n;
		j++;
	}

	if (j) {
		msg_pdbg("%s: found %zd valid erasers\n", __func__, j);
		return j;
	}

	if (!fallback.max_total) {
		msg_cerr("No erasers found for this chip (%s:%s)!\n",
			 chip.vendor, chip.name);
		exit(1);
	}

	sorted_erasers[0].eraser_index = fallback.alt_function;
	sorted_erasers[0].region_index = fallback.alt_region;
	msg_pwarn("%s: using fallback eraser: "
		  "region %d, function %d total %#x vs %#zx\n",
		  __func__, fallback.alt_region, fallback.alt_function,
		  fallback.max_total, erase_size);

	return 1;
}
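
/*
 * Example (hypothetical chip): if erasers with 64 KiB, 4 KiB and a second
 * 4 KiB block size survive the checks above, the insertion sort yields a
 * two-entry array ordered {4 KiB, 64 KiB}; only one of the equal-sized
 * 4 KiB entries is kept, courtesy of the duplicate check.
 */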

/*
 * When it is determined that the larger block will have to be erased because
 * a large enough number of the blocks of the previous smaller size need to
 * be erased, all blocks of smaller sizes falling into the range of addresses
 * of this larger block will not have to be erased/written individually, so
 * they need to be unmarked for erase/change.
 *
 * This function recursively invokes itself to clean all smaller size blocks
 * which are in the range of the current larger block.
 *
 * @upper_level_map pointer to the element of the range map array where the
 *                  current block belongs.
 * @block_index     index of the current block in the map of the blocks of
 *                  the current range map element.
 * @i               index of this range map in the array of range maps,
 *                  guaranteed to be 1 or above, so that there is always a
 *                  smaller block size range map at i - 1.
 */
static void clear_all_nested(struct range_map *upper_level_map,
			     size_t block_index,
			     unsigned i)
{
	struct range_map *this_level_map = upper_level_map - 1;
	size_t range_start;
	size_t range_end;
	size_t j;

	range_start = upper_level_map->block_size * block_index;
	range_end = range_start + upper_level_map->block_size;

	for (j = range_start / this_level_map->block_size;
	     j < range_end / this_level_map->block_size;
	     j++) {
		this_level_map->block_map[j].need_change = 0;
		this_level_map->block_map[j].need_erase = 0;
		if (i > 1)
			clear_all_nested(this_level_map, j, i - 1);
	}
}
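
/*
 * Worked example (hypothetical sizes): with 64 KiB blocks at map level 1 and
 * 4 KiB blocks at level 0, clearing 64 KiB block_index 2 covers addresses
 * 0x20000..0x2ffff and resets need_erase/need_change on 4 KiB map entries 32
 * through 47.
 */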

/*
 * Once all lowest range size blocks which need to be erased have been
 * identified, we need to see if there are so many of them that they may be
 * folded into larger size blocks, so that a single larger erase operation is
 * required instead of many smaller ones.
 *
 * @maps      pointer to the array of range_map structures, sorted by block
 *            size from lower to higher; only the lowest size block map has
 *            been filled up.
 * @num_maps  number of elements in the maps array.
 * @chip_size size of the flash chip, in bytes.
 */
static void fold_range_maps(struct range_map *maps,
			    size_t num_maps,
			    size_t chip_size)
{
	size_t block_index;
	unsigned i;
	struct range_map *map;

	/*
	 * First go from bottom to top, marking larger size blocks which need
	 * to be erased based on the count of smaller size blocks marked for
	 * erasing which fall into the range of addresses covered by the
	 * larger size block.
	 *
	 * Start from the second element of the array, as the first element
	 * is the only one filled up so far.
	 */
	for (i = 1; i < num_maps; i++) {
		int block_mult;

		map = maps + i;

		/* How many smaller size blocks fit into this block. */
		block_mult = map->block_size / map[-1].block_size;

		for (block_index = 0;
		     block_index < (chip_size/map->block_size);
		     block_index++) {
			int lower_start;
			int lower_end;
			int lower_index;
			int erase_marked_blocks;
			int change_marked_blocks;

			lower_start = block_index * block_mult;
			lower_end = lower_start + block_mult;
			erase_marked_blocks = 0;
			change_marked_blocks = 0;

			for (lower_index = lower_start;
			     lower_index < lower_end;
			     lower_index++) {

				if (map[-1].block_map[lower_index].need_erase)
					erase_marked_blocks++;

				if (map[-1].block_map[lower_index].need_change)
					change_marked_blocks++;
			}

			/*
			 * Mark the larger block for erasing; if any of the
			 * smaller size blocks was marked as 'need_change',
			 * mark the larger size block as well.
			 */
			if (erase_marked_blocks > map[-1].limit) {
				map->block_map[block_index].need_erase = 1;
				map->block_map[block_index].need_change =
					change_marked_blocks ? 1 : 0;
			}
		}
	}

	/*
	 * Now let's go from larger to smaller block sizes, to make sure that
	 * all nested blocks of a bigger block marked for erasing are not
	 * marked for erasing any more; erasing the encompassing block is
	 * sure to erase all nested blocks of all smaller sizes.
	 */
	for (i = num_maps - 1; i > 0; i--) {
		map = maps + i;

		for (block_index = 0;
		     block_index < (chip_size/map->block_size);
		     block_index++) {
			if (!map->block_map[block_index].need_erase)
				continue;

			clear_all_nested(map, block_index, i);
		}
	}
}
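
/*
 * Worked example (hypothetical 4 KiB -> 64 KiB fold, limit = 11): if 12 of
 * the 16 smaller blocks under 64 KiB block 0 are marked need_erase, the
 * upward pass marks the 64 KiB block instead, and the downward pass then
 * clears all 16 nested 4 KiB entries, leaving a single large erase.
 */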

/*
 * A function to fill the processing_units array of the action descriptor
 * with a set of processing units, which describe flash chip blocks which
 * need to be erased/programmed to accomplish the action requested by the
 * user when invoking flashrom.
 *
 * This set of processing units is determined based on comparing old and new
 * flash contents.
 *
 * First, blocks which are required to be erased and/or written are
 * identified at the finest block size granularity.
 *
 * Then the distribution of those blocks is analyzed, and if enough of the
 * smaller blocks in a single larger block address range need to be erased,
 * the larger block is marked for erasing.
 *
 * This same process is applied again to increasingly larger block sizes
 * until the largest granularity blocks are marked as appropriate.
 *
 * After this the range map array is scanned from larger block sizes to
 * smaller; each time a larger block marked for erasing is detected, all
 * smaller size blocks in the same address range are unmarked for erasing.
 *
 * In the end only blocks which need to be modified remain marked, and at the
 * finest possible granularity. The list of these blocks is added to the
 * 'processing_units' array of the descriptor and becomes the list of actions
 * to be taken to program the flash chip.
 *
 * @descriptor         descriptor structure to fill, allocated by the caller.
 * @sorted_erasers     pointer to an array of eraser descriptors, sorted by
 *                     block size.
 * @chip_erasers       pointer to the array of erasers from this flash
 *                     chip's descriptor.
 * @chip_size          size of this chip in bytes
 * @num_sorted_erasers size of the sorted_erasers array
 * @erased_value       value contained in all bytes of the erased flash
 */
static void fill_action_descriptor(struct action_descriptor *descriptor,
				   struct eraser *sorted_erasers,
				   struct block_eraser* chip_erasers,
				   size_t chip_size,
				   size_t num_sorted_erasers,
				   unsigned erased_value)
{
	const uint8_t *newc;
	const uint8_t *oldc;
	int consecutive_blocks;
	size_t block_size;
	struct b_map *block_map;
	struct range_map range_maps[num_sorted_erasers];
	unsigned i;
	unsigned pu_index;

	/*
	 * This array has enough room to hold helper structures, one for each
	 * available block size.
	 */
	memset(range_maps, 0, sizeof(range_maps));

	/*
	 * Initialize the range_maps array: allocate space for block_map
	 * arrays on every entry (block maps are used to keep track of blocks
	 * which need to be erased/written) and calculate the limit where
	 * smaller blocks should be replaced by the next larger size block.
	 */
	for (i = 0; i < num_sorted_erasers; i++) {
		size_t larger_block_size;
		size_t map_size;
		size_t num_blocks;
		unsigned function;
		unsigned region;

		function = sorted_erasers[i].eraser_index;
		region = sorted_erasers[i].region_index;
		block_size = chip_erasers[function].eraseblocks[region].size;

		range_maps[i].block_size = block_size;

		/*
		 * Allocate room for the map where blocks which require
		 * writing/erasing will be marked.
		 */
		num_blocks = chip_size/block_size;
		map_size = num_blocks * sizeof(struct b_map);
		range_maps[i].block_map = malloc(map_size);
		if (!range_maps[i].block_map) {
			msg_cerr("%s: Failed to allocate %zd bytes\n",
				 __func__, map_size);
			exit(1);
		}
		memset(range_maps[i].block_map, 0, map_size);

		/*
		 * The limit is calculated for all block sizes but the
		 * largest one, because there is no way to further
		 * consolidate the largest blocks.
		 */
		if (i < (num_sorted_erasers - 1)) {
			function = sorted_erasers[i + 1].eraser_index;
			region = sorted_erasers[i + 1].region_index;
			larger_block_size = chip_erasers
				[function].eraseblocks[region].size;

			/*
			 * How many of the smaller size blocks have to be
			 * erased before it is worth moving to the larger
			 * size.
			 *
			 * The admittedly arbitrary rule of thumb here is if
			 * 70% or more of the smaller size blocks need to be
			 * erased, forget the smaller size blocks and move to
			 * the larger size one.
			 */
			range_maps[i].limit = ((larger_block_size /
						block_size) * 7) / 10;
		}
	}

	/* Cache pointers to 'before' and 'after' contents. */
	oldc = descriptor->oldcontents;
	newc = descriptor->newcontents;

	/* Now, let's fill up the map for the smallest block size. */
	block_size = range_maps[0].block_size;
	block_map = range_maps[0].block_map;
	for (i = 0; i < chip_size; i++) {
		int block_index;

		if (oldc[i] == newc[i])
			continue;

		block_index = i/block_size;

		if (oldc[i] != erased_value)
			block_map[block_index].need_erase = 1;

		if (newc[i] != erased_value)
			block_map[block_index].need_change = 1;

		if (block_map[block_index].need_erase &&
		    block_map[block_index].need_change) {
			/* Can move to the next block. */
			i += range_maps[0].block_size;
			i &= ~(range_maps[0].block_size - 1);
			i--; /* Adjust for the increment in the for loop. */
		}
	}

	/* Now let's see what can be folded into larger blocks. */
	fold_range_maps(range_maps, num_sorted_erasers, chip_size);

	/* Finally we can fill the action descriptor. */
	consecutive_blocks = 0;
	pu_index = 0; /* Number of initialized processing units. */
	for (i = 0; i < num_sorted_erasers; i++) {
		size_t j;
		struct processing_unit *pu;
		size_t map_size = chip_size/range_maps[i].block_size;

		for (j = 0; j < map_size; j++) {

			block_map = range_maps[i].block_map + j;

			if (block_map->need_erase || block_map->need_change) {
				consecutive_blocks++;
				continue;
			}

			if (!consecutive_blocks)
				continue;

			/* Add a programming/erasing unit. */
			pu = descriptor->processing_units + pu_index++;

			pu->block_size = range_maps[i].block_size;
			pu->offset = (j - consecutive_blocks) * pu->block_size;
			pu->num_blocks = consecutive_blocks;
			pu->block_eraser_index = sorted_erasers[i].eraser_index;
			pu->block_region_index = sorted_erasers[i].region_index;

			consecutive_blocks = 0;
		}

		free(range_maps[i].block_map);

		if (!consecutive_blocks)
			continue;

		/*
		 * Add the last programming/erasing unit for the current
		 * block size.
		 */
		pu = descriptor->processing_units + pu_index++;

		pu->block_size = range_maps[i].block_size;
		pu->offset = (j - consecutive_blocks) * pu->block_size;
		pu->num_blocks = consecutive_blocks;
		pu->block_eraser_index = sorted_erasers[i].eraser_index;
		pu->block_region_index = sorted_erasers[i].region_index;
		consecutive_blocks = 0;
	}

	/* Terminate the list with a zero-length sentinel. */
	descriptor->processing_units[pu_index].num_blocks = 0;
}
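
/*
 * For illustration (hypothetical diff): two dirty 4 KiB blocks at map
 * indices 1 and 2 plus one folded 64 KiB block at index 4 would yield two
 * processing units,
 *
 *	{ .offset = 0x1000,  .block_size = 0x1000,  .num_blocks = 2, ... }
 *	{ .offset = 0x40000, .block_size = 0x10000, .num_blocks = 1, ... }
 *
 * followed by the num_blocks == 0 sentinel.
 */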

/*
 * In case a layout is used, return the largest offset of the end of all
 * included sections. If no layout is used, return zero.
 */
static size_t top_section_offset(const struct flashrom_layout *layout)
{
	size_t top = 0;
	size_t i;

	for (i = 0; i < layout->num_entries; i++) {

		if (!layout->entries[i].included)
			continue;

		if (layout->entries[i].end > top)
			top = layout->entries[i].end;
	}

	return top;
}

bool is_dry_run(void)
{
	return dry_run;
}

struct action_descriptor *prepare_action_descriptor(struct flashctx *flash,
						    void *oldcontents,
						    void *newcontents,
						    int do_diff)
{
	struct eraser sorted_erasers[NUM_ERASEFUNCTIONS];
	size_t i;
	size_t num_erasers;
	int max_units;
	size_t block_size = 0;
	struct action_descriptor *descriptor;
	size_t chip_size = flash->chip->total_size * 1024;

	/*
	 * Find the maximum size of the area which might have to be erased;
	 * this is needed to ensure that the picked erase function can go all
	 * the way to the required offset, as some of the erase functions
	 * operate only on a part of the chip starting at offset zero.
	 *
	 * Not an efficient way to do it, but this is acceptable on the host.
	 */
	if (do_diff) {
		/*
		 * If we are doing diffs, look for the largest offset where
		 * a difference is; this is the highest offset which might
		 * need to be erased.
		 */
		for (i = 0; i < chip_size; i++)
			if (((uint8_t *)newcontents)[i] !=
			    ((uint8_t *)oldcontents)[i])
				block_size = i + 1;
	} else {
		/*
		 * We are not doing diffs; if the user specified sections to
		 * program, use the highest offset of the highest section as
		 * the limit.
		 */
		block_size = top_section_offset(get_layout(flash));

		if (!block_size)
			/* The user did not specify any sections. */
			block_size = chip_size;
	}

	num_erasers = fill_sorted_erasers(flash, block_size, sorted_erasers);

	/*
	 * Let's allocate enough memory for the worst case action descriptor
	 * size, when we need to program half the chip using the smallest
	 * block size.
	 */
	block_size = flash->chip->block_erasers
		[sorted_erasers[0].eraser_index].eraseblocks
		[sorted_erasers[0].region_index].size;
	max_units = chip_size / (2 * block_size) + 1;
	descriptor = malloc(sizeof(struct action_descriptor) +
			    sizeof(struct processing_unit) * max_units);
	if (!descriptor) {
		msg_cerr("Failed to allocate room for %d processing units!\n",
			 max_units);
		exit(1);
	}

	descriptor->newcontents = newcontents;
	descriptor->oldcontents = oldcontents;

	fill_action_descriptor(descriptor, sorted_erasers,
			       flash->chip->block_erasers, chip_size,
			       num_erasers, ERASED_VALUE(flash));

	dump_descriptor(descriptor);

	return descriptor;
}
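
#if 0
/*
 * A minimal usage sketch (not compiled; a hypothetical caller with a
 * populated flashctx and two chip-sized buffers holding the current and
 * desired images):
 */
static void example(struct flashctx *flash, uint8_t *before, uint8_t *after)
{
	struct action_descriptor *d;
	struct processing_unit *pu;

	d = prepare_action_descriptor(flash, before, after, 1 /* do_diff */);

	/* Walk the processing units until the num_blocks == 0 sentinel. */
	for (pu = d->processing_units; pu->num_blocks; pu++) {
		/*
		 * Erase/program pu->num_blocks blocks of pu->block_size
		 * bytes starting at pu->offset, using
		 * flash->chip->block_erasers[pu->block_eraser_index].
		 */
	}

	free(d);
}
#endif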