diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 25e7f5bc2dad..85e221f7d240 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -338,6 +338,7 @@ Currently, the following pairs of encryption modes are supported:
 - AES-128-CBC for contents and AES-128-CTS-CBC for filenames
 - Adiantum for both contents and filenames
 - AES-256-XTS for contents and AES-256-HCTR2 for filenames (v2 policies only)
+- SM4-XTS for contents and SM4-CTS-CBC for filenames (v2 policies only)
 
 If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair.
 
@@ -369,6 +370,12 @@ CONFIG_CRYPTO_HCTR2 must be enabled. Also, fast implementations of
 XCTR and POLYVAL should be enabled, e.g. CRYPTO_POLYVAL_ARM64_CE and
 CRYPTO_AES_ARM64_CE_BLK for ARM64.
 
+SM4 is a Chinese block cipher that is an alternative to AES. It has
+not seen as much security review as AES, and it only has a 128-bit key
+size. It may be useful in cases where its use is mandated.
+Otherwise, it should not be used. For SM4 support to be available, it
+also needs to be enabled in the kernel crypto API.
+
 New encryption modes can be added relatively easily, without changes
 to individual filesystems. However, authenticated encryption (AE)
 modes are not currently supported because of the difficulty of dealing
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index eef0096db5f8..24e7f1fae38b 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -27,7 +27,7 @@ void flush_kernel_icache_range_asm(unsigned long, unsigned long);
 void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
 void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
-void flush_kernel_dcache_page_asm(void *);
+void flush_kernel_dcache_page_asm(const void *addr);
 void flush_kernel_icache_page(void *);
 
 /* Cache flush operations */
@@ -36,7 +36,7 @@ void flush_cache_all_local(void);
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 
-void flush_kernel_dcache_page_addr(void *addr);
+void flush_kernel_dcache_page_addr(const void *addr);
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
@@ -97,7 +97,7 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
 }
 
 #define ARCH_HAS_FLUSH_ON_KUNMAP
-static inline void kunmap_flush_on_unmap(void *addr)
+static inline void kunmap_flush_on_unmap(const void *addr)
 {
 	flush_kernel_dcache_page_addr(addr);
 }
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 394e6e14e5c4..f82ff6681a55 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -448,7 +448,7 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
 extern void clear_user_page_asm(void *, unsigned long);
 extern void copy_user_page_asm(void *, void *, unsigned long);
 
-void flush_kernel_dcache_page_addr(void *addr)
+void flush_kernel_dcache_page_addr(const void *addr)
 {
 	unsigned long flags;
 
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 47c9b672690d..c0bc79f923f1 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -39,6 +39,13 @@ const struct blk_crypto_mode blk_crypto_modes[] = {
 		.security_strength = 32,
 		.ivsize = 32,
 	},
+	[BLK_ENCRYPTION_MODE_SM4_XTS] = {
+		.name = "SM4-XTS",
+		.cipher_str = "xts(sm4)",
+		.keysize = 32,
+		.security_strength = 16,
+		.ivsize = 16,
+	},
 };
 
 /*
diff --git
a/fs/crypto/fname.c b/fs/crypto/fname.c index 14e0ef5e9a20..12bd61d20f69 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -86,7 +86,8 @@ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) /** * fscrypt_fname_encrypt() - encrypt a filename * @inode: inode of the parent directory (for regular filenames) - * or of the symlink (for symlink targets) + * or of the symlink (for symlink targets). Key must already be + * set up. * @iname: the filename to encrypt * @out: (output) the encrypted filename * @olen: size of the encrypted filename. It must be at least @iname->len. @@ -137,6 +138,7 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, return 0; } +EXPORT_SYMBOL_GPL(fscrypt_fname_encrypt); /** * fname_decrypt() - decrypt a filename @@ -264,9 +266,9 @@ static int fscrypt_base64url_decode(const char *src, int srclen, u8 *dst) return bp - dst; } -bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy, - u32 orig_len, u32 max_len, - u32 *encrypted_len_ret) +bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy, + u32 orig_len, u32 max_len, + u32 *encrypted_len_ret) { int padding = 4 << (fscrypt_policy_flags(policy) & FSCRYPT_POLICY_FLAGS_PAD_MASK); @@ -280,6 +282,29 @@ bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy, return true; } +/** + * fscrypt_fname_encrypted_size() - calculate length of encrypted filename + * @inode: parent inode of dentry name being encrypted. Key must + * already be set up. + * @orig_len: length of the original filename + * @max_len: maximum length to return + * @encrypted_len_ret: where calculated length should be returned (on success) + * + * Filenames that are shorter than the maximum length may have their lengths + * increased slightly by encryption, due to padding that is applied. + * + * Return: false if the orig_len is greater than max_len. Otherwise, true and + * fill out encrypted_len_ret with the length (up to max_len). 
+ */ +bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, + u32 max_len, u32 *encrypted_len_ret) +{ + return __fscrypt_fname_encrypted_size(&inode->i_crypt_info->ci_policy, + orig_len, max_len, + encrypted_len_ret); +} +EXPORT_SYMBOL_GPL(fscrypt_fname_encrypted_size); + /** * fscrypt_fname_alloc_buffer() - allocate a buffer for presented filenames * @max_encrypted_len: maximum length of encrypted filenames the buffer will be @@ -435,8 +460,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, return ret; if (fscrypt_has_encryption_key(dir)) { - if (!fscrypt_fname_encrypted_size(&dir->i_crypt_info->ci_policy, - iname->len, NAME_MAX, + if (!fscrypt_fname_encrypted_size(dir, iname->len, NAME_MAX, &fname->crypto_buf.len)) return -ENAMETOOLONG; fname->crypto_buf.name = kmalloc(fname->crypto_buf.len, diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index bbc6a0450103..98d1901db9cb 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -318,14 +318,11 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, const struct fscrypt_info *ci); /* fname.c */ -int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, - u8 *out, unsigned int olen); -bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy, - u32 orig_len, u32 max_len, - u32 *encrypted_len_ret); +bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy, + u32 orig_len, u32 max_len, + u32 *encrypted_len_ret); /* hkdf.c */ - struct fscrypt_hkdf { struct crypto_shash *hmac_tfm; }; @@ -498,13 +495,7 @@ struct fscrypt_master_key_secret { struct fscrypt_master_key { /* - * Back-pointer to the super_block of the filesystem to which this - * master key has been added. Only valid if ->mk_active_refs > 0. - */ - struct super_block *mk_sb; - - /* - * Link in ->mk_sb->s_master_keys->key_hashtable. + * Link in ->s_master_keys->key_hashtable. * Only valid if ->mk_active_refs > 0. */ struct hlist_node mk_node; @@ -515,7 +506,7 @@ struct fscrypt_master_key { /* * Active and structural reference counts. An active ref guarantees * that the struct continues to exist, continues to be in the keyring - * ->mk_sb->s_master_keys, and that any embedded subkeys (e.g. + * ->s_master_keys, and that any embedded subkeys (e.g. * ->mk_direct_keys) that have been prepared continue to exist. * A structural ref only guarantees that the struct continues to exist. * @@ -628,7 +619,8 @@ static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec) void fscrypt_put_master_key(struct fscrypt_master_key *mk); -void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk); +void fscrypt_put_master_key_activeref(struct super_block *sb, + struct fscrypt_master_key *mk); struct fscrypt_master_key * fscrypt_find_master_key(struct super_block *sb, diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c index be5c650e4957..7b8c5a1104b5 100644 --- a/fs/crypto/hooks.c +++ b/fs/crypto/hooks.c @@ -224,9 +224,9 @@ int fscrypt_prepare_symlink(struct inode *dir, const char *target, * counting it (even though it is meaningless for ciphertext) is simpler * for now since filesystems will assume it is there and subtract it. 
*/ - if (!fscrypt_fname_encrypted_size(policy, len, - max_len - sizeof(struct fscrypt_symlink_data), - &disk_link->len)) + if (!__fscrypt_fname_encrypted_size(policy, len, + max_len - sizeof(struct fscrypt_symlink_data), + &disk_link->len)) return -ENAMETOOLONG; disk_link->len += sizeof(struct fscrypt_symlink_data); diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 672c3fbda0f3..d43c7e3bfe31 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -79,10 +79,9 @@ void fscrypt_put_master_key(struct fscrypt_master_key *mk) call_rcu(&mk->mk_rcu_head, fscrypt_free_master_key); } -void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk) +void fscrypt_put_master_key_activeref(struct super_block *sb, + struct fscrypt_master_key *mk) { - struct super_block *sb = mk->mk_sb; - struct fscrypt_keyring *keyring = sb->s_master_keys; size_t i; if (!refcount_dec_and_test(&mk->mk_active_refs)) @@ -93,9 +92,9 @@ void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk) * destroying any subkeys embedded in it. */ - spin_lock(&keyring->lock); + spin_lock(&sb->s_master_keys->lock); hlist_del_rcu(&mk->mk_node); - spin_unlock(&keyring->lock); + spin_unlock(&sb->s_master_keys->lock); /* * ->mk_active_refs == 0 implies that ->mk_secret is not present and @@ -243,7 +242,7 @@ void fscrypt_destroy_keyring(struct super_block *sb) WARN_ON(refcount_read(&mk->mk_struct_refs) != 1); WARN_ON(!is_master_key_secret_present(&mk->mk_secret)); wipe_master_key_secret(&mk->mk_secret); - fscrypt_put_master_key_activeref(mk); + fscrypt_put_master_key_activeref(sb, mk); } } kfree_sensitive(keyring); @@ -424,7 +423,6 @@ static int add_new_master_key(struct super_block *sb, if (!mk) return -ENOMEM; - mk->mk_sb = sb; init_rwsem(&mk->mk_sem); refcount_set(&mk->mk_struct_refs, 1); mk->mk_spec = *mk_spec; @@ -1099,7 +1097,7 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) err = -ENOKEY; if (is_master_key_secret_present(&mk->mk_secret)) { wipe_master_key_secret(&mk->mk_secret); - fscrypt_put_master_key_activeref(mk); + fscrypt_put_master_key_activeref(sb, mk); err = 0; } inodes_remain = refcount_read(&mk->mk_active_refs) > 0; diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 2c375f09af1e..6d4ae17dbf22 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -44,6 +44,21 @@ struct fscrypt_mode fscrypt_modes[] = { .security_strength = 16, .ivsize = 16, }, + [FSCRYPT_MODE_SM4_XTS] = { + .friendly_name = "SM4-XTS", + .cipher_str = "xts(sm4)", + .keysize = 32, + .security_strength = 16, + .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_SM4_XTS, + }, + [FSCRYPT_MODE_SM4_CTS] = { + .friendly_name = "SM4-CTS-CBC", + .cipher_str = "cts(cbc(sm4))", + .keysize = 16, + .security_strength = 16, + .ivsize = 16, + }, [FSCRYPT_MODE_ADIANTUM] = { .friendly_name = "Adiantum", .cipher_str = "adiantum(xchacha12,aes)", @@ -556,7 +571,7 @@ static void put_crypt_info(struct fscrypt_info *ci) spin_lock(&mk->mk_decrypted_inodes_lock); list_del(&ci->ci_master_key_link); spin_unlock(&mk->mk_decrypted_inodes_lock); - fscrypt_put_master_key_activeref(mk); + fscrypt_put_master_key_activeref(ci->ci_inode->i_sb, mk); } memzero_explicit(ci, sizeof(*ci)); kmem_cache_free(fscrypt_info_cachep, ci); diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index b1f3427c4b46..410d744cfb52 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -61,6 +61,13 @@ fscrypt_get_dummy_policy(struct super_block *sb) return sb->s_cop->get_dummy_policy(sb); } +/* + * Return %true if the 
given combination of encryption modes is supported for v1 + * (and later) encryption policies. + * + * Do *not* add anything new here, since v1 encryption policies are deprecated. + * New combinations of modes should go in fscrypt_valid_enc_modes_v2() only. + */ static bool fscrypt_valid_enc_modes_v1(u32 contents_mode, u32 filenames_mode) { if (contents_mode == FSCRYPT_MODE_AES_256_XTS && @@ -83,6 +90,11 @@ static bool fscrypt_valid_enc_modes_v2(u32 contents_mode, u32 filenames_mode) if (contents_mode == FSCRYPT_MODE_AES_256_XTS && filenames_mode == FSCRYPT_MODE_AES_256_HCTR2) return true; + + if (contents_mode == FSCRYPT_MODE_SM4_XTS && + filenames_mode == FSCRYPT_MODE_SM4_CTS) + return true; + return fscrypt_valid_enc_modes_v1(contents_mode, filenames_mode); } @@ -693,6 +705,32 @@ const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir) return fscrypt_get_dummy_policy(dir->i_sb); } +/** + * fscrypt_context_for_new_inode() - create an encryption context for a new inode + * @ctx: where context should be written + * @inode: inode from which to fetch policy and nonce + * + * Given an in-core "prepared" (via fscrypt_prepare_new_inode) inode, + * generate a new context and write it to ctx. ctx _must_ be at least + * FSCRYPT_SET_CONTEXT_MAX_SIZE bytes. + * + * Return: size of the resulting context or a negative error code. + */ +int fscrypt_context_for_new_inode(void *ctx, struct inode *inode) +{ + struct fscrypt_info *ci = inode->i_crypt_info; + + BUILD_BUG_ON(sizeof(union fscrypt_context) != + FSCRYPT_SET_CONTEXT_MAX_SIZE); + + /* fscrypt_prepare_new_inode() should have set up the key already. */ + if (WARN_ON_ONCE(!ci)) + return -ENOKEY; + + return fscrypt_new_context(ctx, &ci->ci_policy, ci->ci_nonce); +} +EXPORT_SYMBOL_GPL(fscrypt_context_for_new_inode); + /** * fscrypt_set_context() - Set the fscrypt context of a new inode * @inode: a new inode @@ -709,12 +747,9 @@ int fscrypt_set_context(struct inode *inode, void *fs_data) union fscrypt_context ctx; int ctxsize; - /* fscrypt_prepare_new_inode() should have set up the key already. */ - if (WARN_ON_ONCE(!ci)) - return -ENOKEY; - - BUILD_BUG_ON(sizeof(ctx) != FSCRYPT_SET_CONTEXT_MAX_SIZE); - ctxsize = fscrypt_new_context(&ctx, &ci->ci_policy, ci->ci_nonce); + ctxsize = fscrypt_context_for_new_inode(&ctx, inode); + if (ctxsize < 0) + return ctxsize; /* * This may be the first time the inode number is available, so do any diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index 35b1894eef3b..6d4479962498 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -76,14 +76,10 @@ static void __read_end_io(struct bio *bio) bio_for_each_segment_all(bv, bio, iter_all) { page = bv->bv_page; - /* PG_error was set if verity failed. */ - if (bio->bi_status || PageError(page)) { + if (bio->bi_status) ClearPageUptodate(page); - /* will re-read again later */ - ClearPageError(page); - } else { + else SetPageUptodate(page); - } unlock_page(page); } if (bio->bi_private) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 0a51841b5cf5..181ecbd15db0 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -1707,50 +1707,27 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task) } } -/* - * Update and unlock the cluster's pagecache pages, and release the reference to - * the decompress_io_ctx that was being held for I/O completion. 
- */ -static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, - bool in_task) -{ - int i; - - for (i = 0; i < dic->cluster_size; i++) { - struct page *rpage = dic->rpages[i]; - - if (!rpage) - continue; - - /* PG_error was set if verity failed. */ - if (failed || PageError(rpage)) { - ClearPageUptodate(rpage); - /* will re-read again later */ - ClearPageError(rpage); - } else { - SetPageUptodate(rpage); - } - unlock_page(rpage); - } - - f2fs_put_dic(dic, in_task); -} - static void f2fs_verify_cluster(struct work_struct *work) { struct decompress_io_ctx *dic = container_of(work, struct decompress_io_ctx, verity_work); int i; - /* Verify the cluster's decompressed pages with fs-verity. */ + /* Verify, update, and unlock the decompressed pages. */ for (i = 0; i < dic->cluster_size; i++) { struct page *rpage = dic->rpages[i]; - if (rpage && !fsverity_verify_page(rpage)) - SetPageError(rpage); + if (!rpage) + continue; + + if (fsverity_verify_page(rpage)) + SetPageUptodate(rpage); + else + ClearPageUptodate(rpage); + unlock_page(rpage); } - __f2fs_decompress_end_io(dic, false, true); + f2fs_put_dic(dic, true); } /* @@ -1760,6 +1737,8 @@ static void f2fs_verify_cluster(struct work_struct *work) void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, bool in_task) { + int i; + if (!failed && dic->need_verity) { /* * Note that to avoid deadlocks, the verity work can't be done @@ -1769,9 +1748,28 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, */ INIT_WORK(&dic->verity_work, f2fs_verify_cluster); fsverity_enqueue_verify_work(&dic->verity_work); - } else { - __f2fs_decompress_end_io(dic, failed, in_task); + return; } + + /* Update and unlock the cluster's pagecache pages. */ + for (i = 0; i < dic->cluster_size; i++) { + struct page *rpage = dic->rpages[i]; + + if (!rpage) + continue; + + if (failed) + ClearPageUptodate(rpage); + else + SetPageUptodate(rpage); + unlock_page(rpage); + } + + /* + * Release the reference to the decompress_io_ctx that was being held + * for I/O completion. + */ + f2fs_put_dic(dic, in_task); } /* diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 5ce01485979d..6ef1089df4c7 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -115,43 +115,56 @@ struct bio_post_read_ctx { struct f2fs_sb_info *sbi; struct work_struct work; unsigned int enabled_steps; + /* + * decompression_attempted keeps track of whether + * f2fs_end_read_compressed_page() has been called on the pages in the + * bio that belong to a compressed cluster yet. + */ + bool decompression_attempted; block_t fs_blkaddr; }; +/* + * Update and unlock a bio's pages, and free the bio. + * + * This marks pages up-to-date only if there was no error in the bio (I/O error, + * decryption error, or verity error), as indicated by bio->bi_status. + * + * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk) + * aren't marked up-to-date here, as decompression is done on a per-compression- + * cluster basis rather than a per-bio basis. Instead, we only must do two + * things for each compressed page here: call f2fs_end_read_compressed_page() + * with failed=true if an error occurred before it would have normally gotten + * called (i.e., I/O error or decryption error, but *not* verity error), and + * release the bio's reference to the decompress_io_ctx of the page's cluster. 
+ */ static void f2fs_finish_read_bio(struct bio *bio, bool in_task) { struct bio_vec *bv; struct bvec_iter_all iter_all; + struct bio_post_read_ctx *ctx = bio->bi_private; - /* - * Update and unlock the bio's pagecache pages, and put the - * decompression context for any compressed pages. - */ bio_for_each_segment_all(bv, bio, iter_all) { struct page *page = bv->bv_page; if (f2fs_is_compressed_page(page)) { - if (bio->bi_status) + if (ctx && !ctx->decompression_attempted) f2fs_end_read_compressed_page(page, true, 0, in_task); f2fs_put_page_dic(page, in_task); continue; } - /* PG_error was set if verity failed. */ - if (bio->bi_status || PageError(page)) { + if (bio->bi_status) ClearPageUptodate(page); - /* will re-read again later */ - ClearPageError(page); - } else { + else SetPageUptodate(page); - } dec_page_count(F2FS_P_SB(page), __read_io_type(page)); unlock_page(page); } - if (bio->bi_private) - mempool_free(bio->bi_private, bio_post_read_ctx_pool); + if (ctx) + mempool_free(ctx, bio_post_read_ctx_pool); bio_put(bio); } @@ -184,8 +197,10 @@ static void f2fs_verify_bio(struct work_struct *work) struct page *page = bv->bv_page; if (!f2fs_is_compressed_page(page) && - !fsverity_verify_page(page)) - SetPageError(page); + !fsverity_verify_page(page)) { + bio->bi_status = BLK_STS_IOERR; + break; + } } } else { fsverity_verify_bio(bio); @@ -244,6 +259,8 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx, blkaddr++; } + ctx->decompression_attempted = true; + /* * Optimization: if all the bio's pages are compressed, then scheduling * the per-bio verity work is unnecessary, as verity will be fully @@ -1066,6 +1083,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, ctx->sbi = sbi; ctx->enabled_steps = post_read_steps; ctx->fs_blkaddr = blkaddr; + ctx->decompression_attempted = false; bio->bi_private = ctx; } iostat_alloc_and_bind_ctx(sbi, bio, ctx); @@ -1092,7 +1110,6 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page, bio_put(bio); return -EFAULT; } - ClearPageError(page); inc_page_count(sbi, F2FS_RD_DATA); f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE); __submit_bio(sbi, bio, DATA); @@ -2157,7 +2174,6 @@ submit_and_realloc: inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA); f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO, F2FS_BLKSIZE); - ClearPageError(page); *last_block_in_bio = block_nr; goto out; confused: @@ -2184,7 +2200,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, sector_t last_block_in_file; const unsigned blocksize = blks_to_bytes(inode, 1); struct decompress_io_ctx *dic = NULL; - struct extent_info ei = {0, }; + struct extent_info ei = {}; bool from_dnode = true; int i; int ret = 0; @@ -2311,7 +2327,6 @@ submit_and_realloc: inc_page_count(sbi, F2FS_RD_DATA); f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE); - ClearPageError(page); *last_block_in_bio = blkaddr; } @@ -2328,7 +2343,6 @@ out: for (i = 0; i < cc->cluster_size; i++) { if (cc->rpages[i]) { ClearPageUptodate(cc->rpages[i]); - ClearPageError(cc->rpages[i]); unlock_page(cc->rpages[i]); } } @@ -2425,7 +2439,6 @@ read_single_page: #ifdef CONFIG_F2FS_FS_COMPRESSION set_error_page: #endif - SetPageError(page); zero_user_segment(page, 0, PAGE_SIZE); unlock_page(page); } diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index 1bd38a78ebba..342af24b2f8c 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -546,7 +546,8 @@ static bool __lookup_extent_tree(struct 
inode *inode, pgoff_t pgofs, struct extent_node *en; bool ret = false; - f2fs_bug_on(sbi, !et); + if (!et) + return false; trace_f2fs_lookup_extent_tree_start(inode, pgofs, type); @@ -881,12 +882,14 @@ static unsigned long long __calculate_block_age(unsigned long long new, } /* This returns a new age and allocated blocks in ei */ -static int __get_new_block_age(struct inode *inode, struct extent_info *ei) +static int __get_new_block_age(struct inode *inode, struct extent_info *ei, + block_t blkaddr) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); loff_t f_size = i_size_read(inode); unsigned long long cur_blocks = atomic64_read(&sbi->allocated_data_blocks); + struct extent_info tei = *ei; /* only fofs and len are valid */ /* * When I/O is not aligned to a PAGE_SIZE, update will happen to the last @@ -894,20 +897,20 @@ static int __get_new_block_age(struct inode *inode, struct extent_info *ei) * block here. */ if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) && - ei->blk == NEW_ADDR) + blkaddr == NEW_ADDR) return -EINVAL; - if (__lookup_extent_tree(inode, ei->fofs, ei, EX_BLOCK_AGE)) { + if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) { unsigned long long cur_age; - if (cur_blocks >= ei->last_blocks) - cur_age = cur_blocks - ei->last_blocks; + if (cur_blocks >= tei.last_blocks) + cur_age = cur_blocks - tei.last_blocks; else /* allocated_data_blocks overflow */ - cur_age = ULLONG_MAX - ei->last_blocks + cur_blocks; + cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks; - if (ei->age) - ei->age = __calculate_block_age(cur_age, ei->age); + if (tei.age) + ei->age = __calculate_block_age(cur_age, tei.age); else ei->age = cur_age; ei->last_blocks = cur_blocks; @@ -915,14 +918,14 @@ static int __get_new_block_age(struct inode *inode, struct extent_info *ei) return 0; } - f2fs_bug_on(sbi, ei->blk == NULL_ADDR); + f2fs_bug_on(sbi, blkaddr == NULL_ADDR); /* the data block was allocated for the first time */ - if (ei->blk == NEW_ADDR) + if (blkaddr == NEW_ADDR) goto out; - if (__is_valid_data_blkaddr(ei->blk) && - !f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC_ENHANCE)) { + if (__is_valid_data_blkaddr(blkaddr) && + !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) { f2fs_bug_on(sbi, 1); return -EINVAL; } @@ -938,7 +941,7 @@ out: static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type) { - struct extent_info ei; + struct extent_info ei = {}; if (!__may_extent_tree(dn->inode, type)) return; @@ -953,8 +956,7 @@ static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type typ else ei.blk = dn->data_blkaddr; } else if (type == EX_BLOCK_AGE) { - ei.blk = dn->data_blkaddr; - if (__get_new_block_age(dn->inode, &ei)) + if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr)) return; } __update_extent_tree_range(dn->inode, &ei, type); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 4f510b960123..cd971d580f61 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -2548,7 +2548,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, struct f2fs_map_blocks map = { .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE, .m_may_create = false }; - struct extent_info ei = {0, }; + struct extent_info ei = {}; pgoff_t pg_start, pg_end, next_pgofs; unsigned int blk_per_seg = sbi->blocks_per_seg; unsigned int total = 0, sec_num; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index a351e56fb477..6dbf8bfe512c 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -662,8 +662,7 @@ init_thread: if 
(IS_ERR(fcc->f2fs_issue_flush)) { int err = PTR_ERR(fcc->f2fs_issue_flush); - kfree(fcc); - SM_I(sbi)->fcc_info = NULL; + fcc->f2fs_issue_flush = NULL; return err; } @@ -3162,7 +3161,7 @@ static int __get_segment_type_4(struct f2fs_io_info *fio) static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct extent_info ei; + struct extent_info ei = {}; if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) { if (!ei.age) @@ -5139,11 +5138,9 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi) init_f2fs_rwsem(&sm_info->curseg_lock); - if (!f2fs_readonly(sbi->sb)) { - err = f2fs_create_flush_cmd_control(sbi); - if (err) - return err; - } + err = f2fs_create_flush_cmd_control(sbi); + if (err) + return err; err = create_discard_cmd_control(sbi); if (err) diff --git a/fs/verity/Kconfig b/fs/verity/Kconfig index 24d1b54de807..54598cd80145 100644 --- a/fs/verity/Kconfig +++ b/fs/verity/Kconfig @@ -3,6 +3,7 @@ config FS_VERITY bool "FS Verity (read-only file-based authenticity protection)" select CRYPTO + select CRYPTO_HASH_INFO # SHA-256 is implied as it's intended to be the default hash algorithm. # To avoid bloat, other wanted algorithms must be selected explicitly. # Note that CRYPTO_SHA256 denotes the generic C implementation, but diff --git a/fs/verity/enable.c b/fs/verity/enable.c index 60a4372aa4d7..d52872c808ff 100644 --- a/fs/verity/enable.c +++ b/fs/verity/enable.c @@ -202,7 +202,7 @@ static int enable_verity(struct file *filp, const struct fsverity_operations *vops = inode->i_sb->s_vop; struct merkle_tree_params params = { }; struct fsverity_descriptor *desc; - size_t desc_size = sizeof(*desc) + arg->sig_size; + size_t desc_size = struct_size(desc, signature, arg->sig_size); struct fsverity_info *vi; int err; @@ -281,7 +281,7 @@ static int enable_verity(struct file *filp, * from disk. This is simpler, and it serves as an extra check that the * metadata we're writing is valid before actually enabling verity. */ - vi = fsverity_create_info(inode, desc, desc_size); + vi = fsverity_create_info(inode, desc); if (IS_ERR(vi)) { err = PTR_ERR(vi); goto rollback; diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h index a7920434bae5..41890ecf068c 100644 --- a/fs/verity/fsverity_private.h +++ b/fs/verity/fsverity_private.h @@ -14,7 +14,6 @@ #define pr_fmt(fmt) "fs-verity: " fmt -#include #include #include @@ -26,12 +25,6 @@ struct ahash_request; */ #define FS_VERITY_MAX_LEVELS 8 -/* - * Largest digest size among all hash algorithms supported by fs-verity. - * Currently assumed to be <= size of fsverity_descriptor::root_hash. - */ -#define FS_VERITY_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE - /* A hash algorithm supported by fs-verity */ struct fsverity_hash_alg { struct crypto_ahash *tfm; /* hash tfm, allocated on demand */ @@ -39,6 +32,11 @@ struct fsverity_hash_alg { unsigned int digest_size; /* digest size in bytes, e.g. 32 for SHA-256 */ unsigned int block_size; /* block size in bytes, e.g. 64 for SHA-256 */ mempool_t req_pool; /* mempool with a preallocated hash request */ + /* + * The HASH_ALGO_* constant for this algorithm. This is different from + * FS_VERITY_HASH_ALG_*, which uses a different numbering scheme. 
+ */ + enum hash_algo algo_id; }; /* Merkle tree parameters: hash algorithm, initial hash state, and topology */ @@ -122,16 +120,14 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params, const u8 *salt, size_t salt_size); struct fsverity_info *fsverity_create_info(const struct inode *inode, - struct fsverity_descriptor *desc, - size_t desc_size); + struct fsverity_descriptor *desc); void fsverity_set_info(struct inode *inode, struct fsverity_info *vi); void fsverity_free_info(struct fsverity_info *vi); int fsverity_get_descriptor(struct inode *inode, - struct fsverity_descriptor **desc_ret, - size_t *desc_size_ret); + struct fsverity_descriptor **desc_ret); int __init fsverity_init_info_cache(void); void __init fsverity_exit_info_cache(void); diff --git a/fs/verity/hash_algs.c b/fs/verity/hash_algs.c index 71d0fccb6d4c..6f8170cf4ae7 100644 --- a/fs/verity/hash_algs.c +++ b/fs/verity/hash_algs.c @@ -16,11 +16,13 @@ struct fsverity_hash_alg fsverity_hash_algs[] = { .name = "sha256", .digest_size = SHA256_DIGEST_SIZE, .block_size = SHA256_BLOCK_SIZE, + .algo_id = HASH_ALGO_SHA256, }, [FS_VERITY_HASH_ALG_SHA512] = { .name = "sha512", .digest_size = SHA512_DIGEST_SIZE, .block_size = SHA512_BLOCK_SIZE, + .algo_id = HASH_ALGO_SHA512, }, }; @@ -324,5 +326,9 @@ void __init fsverity_check_hash_algs(void) */ BUG_ON(!is_power_of_2(alg->digest_size)); BUG_ON(!is_power_of_2(alg->block_size)); + + /* Verify that there is a valid mapping to HASH_ALGO_*. */ + BUG_ON(alg->algo_id == 0); + BUG_ON(alg->digest_size != hash_digest_size[alg->algo_id]); } } diff --git a/fs/verity/measure.c b/fs/verity/measure.c index f0d7b30c62db..5c79ea1b2468 100644 --- a/fs/verity/measure.c +++ b/fs/verity/measure.c @@ -57,3 +57,31 @@ int fsverity_ioctl_measure(struct file *filp, void __user *_uarg) return 0; } EXPORT_SYMBOL_GPL(fsverity_ioctl_measure); + +/** + * fsverity_get_digest() - get a verity file's digest + * @inode: inode to get digest of + * @digest: (out) pointer to the digest + * @alg: (out) pointer to the hash algorithm enumeration + * + * Return the file hash algorithm and digest of an fsverity protected file. + * Assumption: before calling this, the file must have been opened. + * + * Return: 0 on success, -errno on failure + */ +int fsverity_get_digest(struct inode *inode, + u8 digest[FS_VERITY_MAX_DIGEST_SIZE], + enum hash_algo *alg) +{ + const struct fsverity_info *vi; + const struct fsverity_hash_alg *hash_alg; + + vi = fsverity_get_info(inode); + if (!vi) + return -ENODATA; /* not a verity file */ + + hash_alg = vi->tree_params.hash_alg; + memcpy(digest, vi->file_digest, hash_alg->digest_size); + *alg = hash_alg->algo_id; + return 0; +} diff --git a/fs/verity/open.c b/fs/verity/open.c index 92df87f5fa38..81ff94442f7b 100644 --- a/fs/verity/open.c +++ b/fs/verity/open.c @@ -147,8 +147,7 @@ static int compute_file_digest(struct fsverity_hash_alg *hash_alg, * fsverity_descriptor must have already undergone basic validation. */ struct fsverity_info *fsverity_create_info(const struct inode *inode, - struct fsverity_descriptor *desc, - size_t desc_size) + struct fsverity_descriptor *desc) { struct fsverity_info *vi; int err; @@ -264,8 +263,7 @@ static bool validate_fsverity_descriptor(struct inode *inode, * the filesystem, and do basic validation of it. 
*/ int fsverity_get_descriptor(struct inode *inode, - struct fsverity_descriptor **desc_ret, - size_t *desc_size_ret) + struct fsverity_descriptor **desc_ret) { int res; struct fsverity_descriptor *desc; @@ -297,7 +295,6 @@ int fsverity_get_descriptor(struct inode *inode, } *desc_ret = desc; - *desc_size_ret = res; return 0; } @@ -306,17 +303,16 @@ static int ensure_verity_info(struct inode *inode) { struct fsverity_info *vi = fsverity_get_info(inode); struct fsverity_descriptor *desc; - size_t desc_size; int err; if (vi) return 0; - err = fsverity_get_descriptor(inode, &desc, &desc_size); + err = fsverity_get_descriptor(inode, &desc); if (err) return err; - vi = fsverity_create_info(inode, desc, desc_size); + vi = fsverity_create_info(inode, desc); if (IS_ERR(vi)) { err = PTR_ERR(vi); goto out_free_desc; diff --git a/fs/verity/read_metadata.c b/fs/verity/read_metadata.c index 7e2d0c7bdf0d..2aefc5565152 100644 --- a/fs/verity/read_metadata.c +++ b/fs/verity/read_metadata.c @@ -53,14 +53,14 @@ static int fsverity_read_merkle_tree(struct inode *inode, break; } - virt = kmap(page); + virt = kmap_local_page(page); if (copy_to_user(buf, virt + offs_in_page, bytes_to_copy)) { - kunmap(page); + kunmap_local(virt); put_page(page); err = -EFAULT; break; } - kunmap(page); + kunmap_local(virt); put_page(page); retval += bytes_to_copy; @@ -101,7 +101,7 @@ static int fsverity_read_descriptor(struct inode *inode, size_t desc_size; int res; - res = fsverity_get_descriptor(inode, &desc, &desc_size); + res = fsverity_get_descriptor(inode, &desc); if (res) return res; @@ -119,10 +119,9 @@ static int fsverity_read_signature(struct inode *inode, void __user *buf, u64 offset, int length) { struct fsverity_descriptor *desc; - size_t desc_size; int res; - res = fsverity_get_descriptor(inode, &desc, &desc_size); + res = fsverity_get_descriptor(inode, &desc); if (res) return res; diff --git a/fs/verity/verify.c b/fs/verity/verify.c index 0adb970f4e73..da185bd12cd5 100644 --- a/fs/verity/verify.c +++ b/fs/verity/verify.c @@ -39,16 +39,6 @@ static void hash_at_level(const struct merkle_tree_params *params, (params->log_blocksize - params->log_arity); } -/* Extract a hash from a hash page */ -static void extract_hash(struct page *hpage, unsigned int hoffset, - unsigned int hsize, u8 *out) -{ - void *virt = kmap_atomic(hpage); - - memcpy(out, virt + hoffset, hsize); - kunmap_atomic(virt); -} - static inline int cmp_hashes(const struct fsverity_info *vi, const u8 *want_hash, const u8 *real_hash, pgoff_t index, int level) @@ -129,7 +119,7 @@ static bool verify_page(struct inode *inode, const struct fsverity_info *vi, } if (PageChecked(hpage)) { - extract_hash(hpage, hoffset, hsize, _want_hash); + memcpy_from_page(_want_hash, hpage, hoffset, hsize); want_hash = _want_hash; put_page(hpage); pr_debug_ratelimited("Hash page already checked, want %s:%*phN\n", @@ -158,7 +148,7 @@ descend: if (err) goto out; SetPageChecked(hpage); - extract_hash(hpage, hoffset, hsize, _want_hash); + memcpy_from_page(_want_hash, hpage, hoffset, hsize); want_hash = _want_hash; put_page(hpage); pr_debug("Verified hash page at level %d, now want %s:%*phN\n", @@ -210,9 +200,8 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page); * @bio: the bio to verify * * Verify a set of pages that have just been read from a verity file. The pages - * must be pagecache pages that are still locked and not yet uptodate. Pages - * that fail verification are set to the Error state. Verification is skipped - * for pages already in the Error state, e.g. 
due to fscrypt decryption failure. + * must be pagecache pages that are still locked and not yet uptodate. If a + * page fails verification, then bio->bi_status is set to an error status. * * This is a helper function for use by the ->readpages() method of filesystems * that issue bios to read data directly into the page cache. Filesystems that @@ -254,9 +243,10 @@ void fsverity_verify_bio(struct bio *bio) unsigned long level0_ra_pages = min(max_ra_pages, params->level0_blocks - level0_index); - if (!PageError(page) && - !verify_page(inode, vi, req, page, level0_ra_pages)) - SetPageError(page); + if (!verify_page(inode, vi, req, page, level0_ra_pages)) { + bio->bi_status = BLK_STS_IOERR; + break; + } } fsverity_free_hash_request(params->hash_alg, req); diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h index 44d19e451b63..a8a7cd233b26 100644 --- a/include/linux/blk-crypto.h +++ b/include/linux/blk-crypto.h @@ -13,6 +13,7 @@ enum blk_crypto_mode_num { BLK_ENCRYPTION_MODE_AES_256_XTS, BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, BLK_ENCRYPTION_MODE_ADIANTUM, + BLK_ENCRYPTION_MODE_SM4_XTS, BLK_ENCRYPTION_MODE_MAX, }; diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 696768a5e387..6a95b750ab18 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -288,6 +288,7 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg); int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg); int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); int fscrypt_has_permitted_context(struct inode *parent, struct inode *child); +int fscrypt_context_for_new_inode(void *ctx, struct inode *inode); int fscrypt_set_context(struct inode *inode, void *fs_data); struct fscrypt_dummy_policy { @@ -331,6 +332,10 @@ void fscrypt_free_inode(struct inode *inode); int fscrypt_drop_inode(struct inode *inode); /* fname.c */ +int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, + u8 *out, unsigned int olen); +bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, + u32 max_len, u32 *encrypted_len_ret); int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname, int lookup, struct fscrypt_name *fname); diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index 19c118e5ad83..270ba08df011 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -12,8 +12,16 @@ #define _LINUX_FSVERITY_H #include +#include +#include #include +/* + * Largest digest size among all hash algorithms supported by fs-verity. + * Currently assumed to be <= size of fsverity_descriptor::root_hash. 
+ */ +#define FS_VERITY_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE + /* Verity operations for filesystems */ struct fsverity_operations { @@ -131,6 +139,9 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *arg); /* measure.c */ int fsverity_ioctl_measure(struct file *filp, void __user *arg); +int fsverity_get_digest(struct inode *inode, + u8 digest[FS_VERITY_MAX_DIGEST_SIZE], + enum hash_algo *alg); /* open.c */ @@ -170,6 +181,13 @@ static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg) return -EOPNOTSUPP; } +static inline int fsverity_get_digest(struct inode *inode, + u8 digest[FS_VERITY_MAX_DIGEST_SIZE], + enum hash_algo *alg) +{ + return -EOPNOTSUPP; +} + /* open.c */ static inline int fsverity_file_open(struct inode *inode, struct file *filp) diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h index 4aa1031d3e4c..7e6cfb1a2c2e 100644 --- a/include/linux/highmem-internal.h +++ b/include/linux/highmem-internal.h @@ -8,7 +8,7 @@ #ifdef CONFIG_KMAP_LOCAL void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot); void *__kmap_local_page_prot(struct page *page, pgprot_t prot); -void kunmap_local_indexed(void *vaddr); +void kunmap_local_indexed(const void *vaddr); void kmap_local_fork(struct task_struct *tsk); void __kmap_local_sched_out(void); void __kmap_local_sched_in(void); @@ -83,7 +83,7 @@ static inline void *kmap_local_pfn(unsigned long pfn) return __kmap_local_pfn_prot(pfn, kmap_prot); } -static inline void __kunmap_local(void *vaddr) +static inline void __kunmap_local(const void *vaddr) { kunmap_local_indexed(vaddr); } @@ -115,7 +115,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn) return __kmap_local_pfn_prot(pfn, kmap_prot); } -static inline void __kunmap_atomic(void *addr) +static inline void __kunmap_atomic(const void *addr) { kunmap_local_indexed(addr); pagefault_enable(); @@ -181,7 +181,7 @@ static inline void *kmap_local_pfn(unsigned long pfn) return kmap_local_page(pfn_to_page(pfn)); } -static inline void __kunmap_local(void *addr) +static inline void __kunmap_local(const void *addr) { #ifdef ARCH_HAS_FLUSH_ON_KUNMAP kunmap_flush_on_unmap(addr); @@ -208,7 +208,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn) return kmap_atomic(pfn_to_page(pfn)); } -static inline void __kunmap_atomic(void *addr) +static inline void __kunmap_atomic(const void *addr) { #ifdef ARCH_HAS_FLUSH_ON_KUNMAP kunmap_flush_on_unmap(addr); diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 5b0918f7f699..024def3ad43d 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -26,6 +26,8 @@ #define FSCRYPT_MODE_AES_256_CTS 4 #define FSCRYPT_MODE_AES_128_CBC 5 #define FSCRYPT_MODE_AES_128_CTS 6 +#define FSCRYPT_MODE_SM4_XTS 7 +#define FSCRYPT_MODE_SM4_CTS 8 #define FSCRYPT_MODE_ADIANTUM 9 #define FSCRYPT_MODE_AES_256_HCTR2 10 /* If adding a mode number > 10, update FSCRYPT_MODE_MAX in fscrypt_private.h */ @@ -188,8 +190,6 @@ struct fscrypt_get_key_status_arg { #define FS_ENCRYPTION_MODE_AES_256_CTS FSCRYPT_MODE_AES_256_CTS #define FS_ENCRYPTION_MODE_AES_128_CBC FSCRYPT_MODE_AES_128_CBC #define FS_ENCRYPTION_MODE_AES_128_CTS FSCRYPT_MODE_AES_128_CTS -#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* removed */ -#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* removed */ #define FS_ENCRYPTION_MODE_ADIANTUM FSCRYPT_MODE_ADIANTUM #define FS_KEY_DESC_PREFIX FSCRYPT_KEY_DESC_PREFIX #define FS_KEY_DESC_PREFIX_SIZE FSCRYPT_KEY_DESC_PREFIX_SIZE diff --git a/mm/highmem.c b/mm/highmem.c 
index 4f942678e9da..0695b972973c 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -564,7 +564,7 @@ void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
 }
 EXPORT_SYMBOL(__kmap_local_page_prot);
 
-void kunmap_local_indexed(void *vaddr)
+void kunmap_local_indexed(const void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
 	pte_t *kmap_pte;