
    list_for_each_entry(seb, &si->corr, u.list)
        printk(KERN_CONT " %d", seb->pnum);
    printk(KERN_CONT "\n");
}

/*
 * In case of unknown erase counter we use the mean erase counter
 * value.
 */
ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
    ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
        if (seb->ec == UBI_SCAN_UNKNOWN_EC)
            seb->ec = si->mean_ec;
}
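
For context, si->mean_ec is derived earlier in ubi_scan() from the running sum and count of all valid erase counters seen while scanning; a minimal sketch of that step:

/*
 * Average of all valid erase counters; used as a stand-in for
 * any PEB whose EC header could not be read.
 */
if (si->ec_count)
    si->mean_ec = div_u64(si->ec_sum, si->ec_count);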

list_for_each_entry(seb, &si->free, u.list) {
    if (seb->ec == UBI_SCAN_UNKNOWN_EC)
        seb->ec = si->mean_ec;
}

list_for_each_entry(seb, &si->corr, u.list)
    if (seb->ec == UBI_SCAN_UNKNOWN_EC)
        seb->ec = si->mean_ec;

list_for_each_entry(seb, &si->erase, u.list)
    if (seb->ec == UBI_SCAN_UNKNOWN_EC)
        seb->ec = si->mean_ec;

err = paranoid_check_si(ubi, si);
if (err) {
    if (err > 0)
        err = -EINVAL;
    goto out_vidh;
}

ec = be64_to_cpu(ech->ec);
if (ec > UBI_MAX_ERASECOUNTER) {
    /*
     * Erase counter overflow. The EC headers have 64 bits
     * reserved, but we anyway make use of only 31 bit
     * values, as this seems to be enough for any existing
     * flash. Upgrade UBI and use 64-bit erase counters
     * internally.
     */
    ubi_err("erase counter overflow, max is %d",
            UBI_MAX_ERASECOUNTER);
    ubi_dbg_dump_ec_hdr(ech);
    return -EINVAL;
}
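
UBI_MAX_ERASECOUNTER is defined in ubi-media.h as 0x7FFFFFFF, the largest positive 31-bit value, so any counter above it trips this overflow check even though the on-flash EC field is 64 bits wide.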

/*
 * Make sure that all PEBs have the same image sequence number.
 * This allows us to detect situations when users flash UBI
 * images incorrectly, so that the flash has the new UBI image
 * and leftovers from the old one. This feature was added
 * relatively recently, and the sequence number was always
 * zero, because old UBI implementations always set it to zero.
 * For this reason, we do not panic if some PEBs have zero
 * sequence number, while other PEBs have non-zero sequence
 * number.
 */
image_seq = be32_to_cpu(ech->image_seq);
if (!ubi->image_seq && image_seq)
    ubi->image_seq = image_seq;
if (ubi->image_seq && image_seq &&
    ubi->image_seq != image_seq) {
    ubi_err("bad image sequence number %d in PEB %d, "
            "expected %d", image_seq, pnum, ubi->image_seq);
    ubi_dbg_dump_ec_hdr(ech);
    return -EINVAL;
}
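
A concrete failure this catches: if a user reflashes only part of the device with a new UBI image, the untouched PEBs still carry the old image's sequence number, and the mismatch is detected here instead of silently mixing two images.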

/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
    ubi_msg("\"delete\" compatible internal volume %d:%d"
            " found, remove it", vol_id, lnum);
    err = add_to_list(si, pnum, ec, &si->corr);
    if (err)
        return err;
    break;

case UBI_COMPAT_RO:
    ubi_msg("read-only compatible internal volume %d:%d"
            " found, switch to read-only mode", vol_id, lnum);
    ubi->ro_mode = 1;
    break;

case UBI_COMPAT_PRESERVE:
    ubi_msg("\"preserve\" compatible internal volume %d:%d"
            " found", vol_id, lnum);
    err = add_to_list(si, pnum, ec, &si->alien);
    if (err)
        return err;
    si->alien_peb_count += 1;
    return 0;

case UBI_COMPAT_REJECT:
    ubi_err("incompatible internal volume %d:%d found",
            vol_id, lnum);
    return -EINVAL;
}
}

if (cmp_res < 0)
    return cmp_res;

if (cmp_res & 1) {
    /*
     * This logical eraseblock is newer than the one
     * found earlier.
     */
    err = validate_vid_hdr(vid_hdr, sv, pnum);
    if (err)
        return err;

if (cmp_res & 4)
    err = add_to_list(si, seb->pnum, seb->ec, &si->corr);
else
    err = add_to_list(si, seb->pnum, seb->ec, &si->erase);
if (err)
    return err;

seb->ec = ec;
seb->pnum = pnum;
seb->scrub = ((cmp_res & 2) || bitflips);
seb->sqnum = sqnum;

if (sv->highest_lnum == lnum)
    sv->last_data_size = be32_to_cpu(vid_hdr->data_size);

return 0;

} else {
    /*
     * This logical eraseblock is older than the one found
     * previously.
     */

    err = -ENOTSUPP;
    goto out_free;
}

err = init_constants_sb(c);
if (err)
    goto out_free;

sz = ALIGN(c->max_idx_node_sz, c->min_io_size);
sz = ALIGN(sz + c->max_idx_node_sz, c->min_io_size);
c->cbuf = kmalloc(sz, GFP_NOFS);
if (!c->cbuf) {
    err = -ENOMEM;
    goto out_free;
}
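
ALIGN() rounds a size up to the next multiple of a power-of-two boundary, so c->cbuf ends up large enough for two worst-case index nodes, each padded out to the minimal I/O unit. A sketch with hypothetical numbers:

/*
 * For a power-of-two 'a', the kernel's ALIGN() is effectively:
 *   #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))
 *
 * E.g. (hypothetical values) with min_io_size = 2048 and
 * max_idx_node_sz = 4144: the first ALIGN yields 6144, and
 * ALIGN(6144 + 4144, 2048) yields 12288 for the final 'sz'.
 */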

sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id);
if (!mounted_read_only) {
    err = alloc_wbufs(c);
    if (err)
        goto out_cbuf;

    /* Create background thread */
    c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name);
    if (IS_ERR(c->bgt)) {
        err = PTR_ERR(c->bgt);
        c->bgt = NULL;
        ubifs_err("cannot spawn \"%s\", error %d",
                  c->bgt_name, err);
        goto out_wbufs;
    }
    wake_up_process(c->bgt);
}

err = ubifs_read_master(c);
if (err)
    goto out_master;

init_constants_master(c);

if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
    ubifs_msg("recovery needed");
    c->need_recovery = 1;
    if (!mounted_read_only) {
        err = ubifs_recover_inl_heads(c, c->sbuf);
        if (err)
            goto out_master;
    }
} else if (!mounted_read_only) {
    /*
     * Set the "dirty" flag so that if we reboot uncleanly we
     * will notice this immediately on the next mount.
     */
    c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
    err = ubifs_write_master(c);
    if (err)
        goto out_master;
}
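
This is the classic dirty-flag technique: the flag is cleared again only on a clean unmount, so finding UBIFS_MST_DIRTY already set in the master node proves the previous session ended uncleanly and recovery is needed.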

err = ubifs_lpt_init(c, 1, !mounted_read_only);
if (err)
    goto out_lpt;

err = dbg_check_idx_size(c, c->old_idx_sz);
if (err)
    goto out_lpt;

err = ubifs_replay_journal(c);
if (err)
    goto out_journal;

/* Calculate 'min_idx_lebs' after journal replay */
c->min_idx_lebs = ubifs_calc_min_idx_lebs(c);

err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only);
if (err)
    goto out_orphans;

if (!mounted_read_only) {
    int lnum;

    err = check_free_space(c);
    if (err)
        goto out_orphans;

    /* Check for enough log space */
    lnum = c->lhead_lnum + 1;
    if (lnum >= UBIFS_LOG_LNUM + c->log_lebs)
        lnum = UBIFS_LOG_LNUM;
    if (lnum == c->ltail_lnum) {
        err = ubifs_consolidate_log(c);
        if (err)
            goto out_orphans;
    }
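
The log lives in the fixed LEB range [UBIFS_LOG_LNUM, UBIFS_LOG_LNUM + log_lebs - 1] and is treated as a circular buffer: advancing the head past the last log LEB wraps it back to UBIFS_LOG_LNUM, and if the next head position would collide with the tail, the log is full and must be consolidated before the file system goes writable.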

    if (c->need_recovery) {
        err = ubifs_recover_size(c);
        if (err)
            goto out_orphans;
        err = ubifs_rcvry_gc_commit(c);
    } else {
        err = take_gc_lnum(c);
        if (err)
            goto out_orphans;

        /*
         * GC LEB may contain garbage if there was an unclean
         * reboot, and it should be un-mapped.
         */
        err = ubifs_leb_unmap(c, c->gc_lnum);
        if (err)
            return err;
    }

    err = dbg_check_lprops(c);
    if (err)
        goto out_orphans;
} else if (c->need_recovery) {
    err = ubifs_recover_size(c);
    if (err)
        goto out_orphans;
} else {
    /*
     * Even if we mount read-only, we have to set space in GC LEB
     * to proper value because this affects UBIFS free space
     * reporting. We do not want to have a situation when
     * re-mounting from R/O to R/W changes amount of free space.
     */
    err = take_gc_lnum(c);
    if (err)
        goto out_orphans;
}

spin_lock(&ubifs_infos_lock);
list_add_tail(&c->infos_list, &ubifs_infos);
spin_unlock(&ubifs_infos_lock);

if (c->need_recovery) {
    if (mounted_read_only)
        ubifs_msg("recovery deferred");
    else {
        c->need_recovery = 0;
        ubifs_msg("recovery completed");
        /*
         * GC LEB has to be empty and taken at this point. But
         * the journal head LEBs may also be accounted as
         * "empty taken" if they are empty.
         */
        ubifs_assert(c->lst.taken_empty_lebs > 0);
    }
} else
    ubifs_assert(c->lst.taken_empty_lebs > 0);

err = dbg_check_filesystem(c);
if (err)
    goto out_infos;

err = dbg_debugfs_init_fs(c);
if (err)
    goto out_infos;

c->always_chk_crc = 0;

ubifs_msg("mounted UBI device %d, volume %d, name "%s"", c->_num, c->_id, c->); if (mounted_read_only) ubifs_msg("mounted read-only"); x = (long long)c->main_lebs * c->leb_size; ubifs_msg("file system size: %lld bytes (%lld KiB, %lld MiB, %d " "LEBs)", x, x >> 10, x >> 20, c->main_lebs);

x = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
ubifs_msg("journal size: %lld bytes (%lld KiB, %lld MiB, %d "
          "LEBs)", x, x >> 10, x >> 20, c->log_lebs + c->max_bud_cnt);
ubifs_msg("media format: w%d/r%d (latest is w%d/r%d)",
          c->fmt_version, c->ro_compat_version,
          UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
ubifs_msg("default compressor: %s", ubifs_compr_name(c->default_compr));
ubifs_msg("reserved for root: %llu bytes (%llu KiB)",
          c->report_rp_size, c->report_rp_size >> 10);

dbg_msg("compiled on: " __DATE__ " at " __TIME__); dbg_msg("min. I/O unit size: %d bytes", c->min_io_size); dbg_msg("LEB size: %d bytes (%d KiB)", c->leb_size, c->leb_size >> 10); dbg_msg("data journal heads: %d", c->jhead_cnt - NONDATA_JHEADS_CNT); dbg_msg("UUID: %02X%02X%02X%02X-%02X%02X" "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X", c->uuid[0], c->uuid[1], c->uuid[2], c->uuid[3], c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7], c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11], c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]); dbg_msg("big_lpt %d", c->big_lpt); dbg_msg("log LEBs: %d (%d - %d)", c->log_lebs, UBIFS_LOG_LNUM, c->log_last); dbg_msg("LPT area LEBs: %d (%d - %d)", c->lpt_lebs, c->lpt_first, c->lpt_last); dbg_msg("orphan area LEBs: %d (%d - %d)", c->orph_lebs, c->orph_first, c->orph_last); dbg_msg("main area LEBs: %d (%d - %d)", c->main_lebs, c->main_first, c->leb_cnt - 1); dbg_msg("index LEBs: %d", c->_lebs); dbg_msg("total index bytes: %lld (%lld KiB, %lld MiB)", c->old_idx_sz, c->old_idx_sz >> 10, c->old_idx_sz >> 20);

dbg_msg("key hash type: %d", c->key_hash_type); dbg_msg("tree fanout: %d", c->fanout); dbg_msg("reserved GC LEB: %d", c->gc_lnum); dbg_msg("first main LEB: %d", c->main_first); dbg_msg("max. znode size %d", c->max_znode_sz); dbg_msg("max. index node size %d", c->max_idx_node_sz); dbg_msg("node sizes: data %zu, inode %zu, dentry %zu", UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ); dbg_msg("node sizes: trun %zu, sb %zu, master %zu", UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ); dbg_msg("node sizes: ref %zu, cmt. start %zu, orph %zu", UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ); dbg_msg("max. node sizes: data %zu, inode %zu dentry %zu",

dbg_msg("dead watermark: %d", c->dead_wm); dbg_msg("dark watermark: %d", c->dark_wm); dbg_msg("LEB overhead: %d", c->leb_overhead); x = (long long)c->main_lebs * c->dark_wm; dbg_msg("max. dark space: %lld (%lld KiB, %lld MiB)", x, x >> 10, x >> 20); dbg_msg("maximum bud bytes: %lld (%lld KiB, %lld MiB)", c->max_bud_bytes, c->max_bud_bytes >> 10, c->max_bud_bytes >> 20); dbg_msg("BG commit bud bytes: %lld (%lld KiB, %lld MiB)", c->bg_bud_bytes, c->bg_bud_bytes >> 10, c->bg_bud_bytes >> 20); dbg_msg("current bud bytes %lld (%lld KiB, %lld MiB)", c->bud_bytes, c->bud_bytes >> 10, c->bud_bytes >> 20); dbg_msg("max. seq. number: %llu", c->max_sqnum); dbg_msg("commit number: %llu", c->cmt_no);

return 0;

jnl_lebs = UBIFS_MIN_JNL_LEBS;
if (jnl_lebs * c->leb_size > DEFAULT_MAX_JNL)
    jnl_lebs = DEFAULT_MAX_JNL / c->leb_size;

/*
 * The log should be large enough to fit reference nodes for all bud
 * LEBs. Because buds do not have to start from the beginning of LEBs
 * (half of the LEB may contain committed data), the log should
 * generally be larger, make it twice as large.
 */
tmp = 2 * (c->ref_node_alsz * jnl_lebs) + c->leb_size - 1;
log_lebs = tmp / c->leb_size;
/* Plus one LEB reserved for commit */
log_lebs += 1;
if (c->leb_cnt - min_leb_cnt > 8) {
    /* And some extra space to allow writes while committing */
    log_lebs += 1;
    min_leb_cnt += 1;
}
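
A worked example with hypothetical geometry: for leb_size = 128 KiB, jnl_lebs = 16 and ref_node_alsz = 64 bytes, tmp = 2 * (64 * 16) + 131071 = 133119, so log_lebs = 133119 / 131072 = 1; adding the reserved commit LEB gives 2, and on a device with more than 8 spare LEBs one more is added, for 3 log LEBs in total.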

max_buds = jnl_lebs - log_lebs;
if (max_buds < UBIFS_MIN_BUD_LEBS)
    max_buds = UBIFS_MIN_BUD_LEBS;

/*
 * Orphan nodes are stored in a separate area. One node can store a lot
 * of orphan inode numbers, but when a new orphan comes we just add a new
 * orphan node. At some point the nodes are consolidated into one
 * orphan node.
 */
// An orphan is an inode number whose inode node has been committed to the
// index with a link count of zero. That happens when an open file is
// deleted (unlinked) and then a commit is run.
// The orphan area is a fixed number of LEBs situated between the LPT area
// and the main area.
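
The on-flash record behind this, as declared in ubifs-media.h, is just a common header plus a flexible array of inode numbers:

/*
 * Orphan node: cmt_no holds the commit number (its top bit marks the
 * last orphan node of a commit); inos[] lists orphaned inode numbers.
 */
struct ubifs_orph_node {
    struct ubifs_ch ch;     /* common UBIFS node header */
    __le64 cmt_no;          /* commit number */
    __le64 inos[];          /* orphaned inode numbers */
} __attribute__ ((packed));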

dbg_gen("LEB Properties Tree created (LEBs %d-%d)", lpt_first,
        lpt_first + lpt_lebs - 1);

main_first = c->leb_cnt - main_lebs;

/* Create default superblock */
tmp = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size);
sup = kzalloc(tmp, GFP_KERNEL);
if (!sup)
    return -ENOMEM;

tmp64 = (long long)max_buds * c->leb_size;
if (big_lpt)
    sup_flags |= UBIFS_FLG_BIGLPT;

/* Initialize the superblock node */
sup->ch.node_type  = UBIFS_SB_NODE;
sup->key_hash      = UBIFS_KEY_HASH_R5;
sup->flags         = cpu_to_le32(sup_flags);
sup->min_io_size   = cpu_to_le32(c->min_io_size);
sup->leb_size      = cpu_to_le32(c->leb_size);
sup->leb_cnt       = cpu_to_le32(c->leb_cnt);
sup->max_leb_cnt   = cpu_to_le32(c->max_leb_cnt);
sup->max_bud_bytes = cpu_to_le64(tmp64);
sup->log_lebs      = cpu_to_le32(log_lebs);
sup->lpt_lebs      = cpu_to_le32(lpt_lebs);
sup->orph_lebs     = cpu_to_le32(orph_lebs);
sup->jhead_cnt     = cpu_to_le32(DEFAULT_JHEADS_CNT);
sup->fanout        = cpu_to_le32(DEFAULT_FANOUT);
sup->lsave_cnt     = cpu_to_le32(c->lsave_cnt);
sup->fmt_version   = cpu_to_le32(UBIFS_FORMAT_VERSION);
sup->time_gran     = cpu_to_le32(DEFAULT_TIME_GRAN);
if (c->mount_opts.override_compr)
    sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
else
    sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);
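
Note that every field goes through cpu_to_le32()/cpu_to_le64(): the UBIFS media format is defined as little-endian, so the superblock is stored the same way regardless of host byte order.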
