ANDROID: incremental fs: Fix race between truncate and write last block

Also fix a race whereby multiple providers writing the same block would
each write out that block.

Note that multiple_providers_test started failing when incfs was ported
to 5.15; these fixes are needed to make the test reliable.

Bug: 264703896
Test: incfs-test passes, specifically multiple_providers_test. Ran 100
      times
Change-Id: I05ad5b2b2f62cf218256222cecb79bbe9953bd97
Signed-off-by: Paul Lawrence <paullawrence@google.com>
Author:    Paul Lawrence <paullawrence@google.com>
Date:      2023-01-06 13:36:29 -08:00
Committer: Treehugger Robot
Parent:    e6c9473f25
Commit:    9e6e86b3ff

3 changed files with 37 additions and 22 deletions
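
The first race is the one in the subject line: before this change, completeness was re-derived after the fill loop by reading df_data_blocks_written, so the truncate that removes preallocated space could run while another provider was still writing the final block. As an illustration only (a minimal user-space analog using C11 threads and atomics, with invented names, not incfs code), the sketch below shows the shape of the fix: the writer whose own increment brings the counter to the block count is the only one that performs the completion step, and it decides that under the same lock that covers the write.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BLOCK_COUNT 4

static atomic_int blocks_written;
static pthread_mutex_t backing_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-ins for the real backing-file operations. */
static void write_block(int index) { (void)index; }
static void truncate_preallocated_space(void) { puts("truncate once"); }

/*
 * Each provider thread fills one block. Completion is decided by the
 * increment itself, under the same lock as the write: exactly one
 * thread sees the counter reach BLOCK_COUNT, so the truncate cannot
 * run while a last-block write is still in flight.
 */
static void *fill_one_block(void *arg)
{
        int index = (int)(long)arg;
        bool complete = false;

        pthread_mutex_lock(&backing_lock);
        write_block(index);
        if (atomic_fetch_add(&blocks_written, 1) + 1 >= BLOCK_COUNT)
                complete = true;
        pthread_mutex_unlock(&backing_lock);

        if (complete)
                truncate_preallocated_space();
        return NULL;
}

int main(void)
{
        pthread_t threads[BLOCK_COUNT];

        for (long i = 0; i < BLOCK_COUNT; i++)
                pthread_create(&threads[i], NULL, fill_one_block, (void *)i);
        for (int i = 0; i < BLOCK_COUNT; i++)
                pthread_join(threads[i], NULL);
        return 0;
}

In the patch itself this is the atomic_inc_return() test that sets *complete inside bc_mutex, with the truncate deferred to handle_file_completed() in the caller.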


@@ -1381,7 +1381,8 @@ ssize_t incfs_read_merkle_tree_blocks(struct mem_range dst,
 }
 
 int incfs_process_new_data_block(struct data_file *df,
-                                 struct incfs_fill_block *block, u8 *data)
+                                 struct incfs_fill_block *block, u8 *data,
+                                 bool *complete)
 {
         struct mount_info *mi = NULL;
         struct backing_file_context *bfc = NULL;
@@ -1420,27 +1421,42 @@ int incfs_process_new_data_block(struct data_file *df,
         if (error)
                 return error;
 
-        if (is_data_block_present(&existing_block)) {
+        if (is_data_block_present(&existing_block))
                 /* Block is already present, nothing to do here */
                 return 0;
-        }
 
         error = down_write_killable(&segment->rwsem);
         if (error)
                 return error;
 
-        error = mutex_lock_interruptible(&bfc->bc_mutex);
-        if (!error) {
-                error = incfs_write_data_block_to_backing_file(
-                        bfc, range(data, block->data_len), block->block_index,
-                        df->df_blockmap_off, flags);
-                mutex_unlock(&bfc->bc_mutex);
-        }
-        if (!error) {
-                notify_pending_reads(mi, segment, block->block_index);
-                atomic_inc(&df->df_data_blocks_written);
-        }
+        /* Recheck inside write lock */
+        error = get_data_file_block(df, block->block_index, &existing_block);
+        if (error)
+                goto out_up_write;
+
+        if (is_data_block_present(&existing_block))
+                goto out_up_write;
+
+        error = mutex_lock_interruptible(&bfc->bc_mutex);
+        if (error)
+                goto out_up_write;
+
+        error = incfs_write_data_block_to_backing_file(bfc,
+                        range(data, block->data_len), block->block_index,
+                        df->df_blockmap_off, flags);
+        if (error)
+                goto out_mutex_unlock;
+
+        if (atomic_inc_return(&df->df_data_blocks_written)
+                        >= df->df_data_block_count)
+                *complete = true;
+
+out_mutex_unlock:
+        mutex_unlock(&bfc->bc_mutex);
+        if (!error)
+                notify_pending_reads(mi, segment, block->block_index);
 
+out_up_write:
         up_write(&segment->rwsem);
 
         if (error)

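The hunk above also closes the duplicate-write race: the first is_data_block_present() test runs before segment->rwsem is taken for writing, so two providers can both see the block as missing and both write it out (and both bump df_data_blocks_written). Rechecking the block map once the write lock is held lets the second writer bail out. Below is a minimal user-space sketch of that check/recheck pattern, assuming a pthread rwlock and made-up helpers in place of the incfs block map; it is an illustration, not the kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_COUNT 4
#define BLOCK_SIZE  4096

static pthread_rwlock_t segment_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool present[BLOCK_COUNT];               /* stand-in for the block map */
static char backing[BLOCK_COUNT][BLOCK_SIZE];   /* stand-in for the backing file */
static int writes_performed;                    /* only touched under the write lock */

static int process_new_block(int index, const void *data, size_t len)
{
        bool already;

        /* Cheap check under the read lock: filters most duplicate fills. */
        pthread_rwlock_rdlock(&segment_lock);
        already = present[index];
        pthread_rwlock_unlock(&segment_lock);
        if (already)
                return 0;

        pthread_rwlock_wrlock(&segment_lock);

        /*
         * Recheck under the write lock: another provider may have filled
         * this block between dropping the read lock and getting here.
         */
        if (present[index])
                goto out_unlock;

        memcpy(backing[index], data, len < BLOCK_SIZE ? len : BLOCK_SIZE);
        present[index] = true;
        writes_performed++;

out_unlock:
        pthread_rwlock_unlock(&segment_lock);
        return 0;
}

static void *provider(void *arg)
{
        (void)arg;
        process_new_block(0, "payload", 8);     /* both providers target block 0 */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, provider, NULL);
        pthread_create(&b, NULL, provider, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("block 0 written %d time(s)\n", writes_performed);      /* prints 1 */
        return 0;
}

With both threads targeting the same index, the write and the counter bump happen exactly once.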

@@ -441,7 +441,8 @@ int incfs_get_filled_blocks(struct data_file *df,
 int incfs_read_file_signature(struct data_file *df, struct mem_range dst);
 
 int incfs_process_new_data_block(struct data_file *df,
-                                 struct incfs_fill_block *block, u8 *data);
+                                 struct incfs_fill_block *block, u8 *data,
+                                 bool *complete);
 
 int incfs_process_new_hash_block(struct data_file *df,
                                  struct incfs_fill_block *block, u8 *data);


@@ -668,8 +668,7 @@ out:
         dput(file);
 }
 
-static void maybe_delete_incomplete_file(struct file *f,
-                                         struct data_file *df)
+static void handle_file_completed(struct file *f, struct data_file *df)
 {
         struct backing_file_context *bfc;
         struct mount_info *mi = df->df_mount_info;
@@ -678,9 +677,6 @@ static void maybe_delete_incomplete_file(struct file *f,
         const struct cred *old_cred = override_creds(mi->mi_owner);
         int error;
 
-        if (atomic_read(&df->df_data_blocks_written) < df->df_data_block_count)
-                goto out;
-
         /* Truncate file to remove any preallocated space */
         bfc = df->df_backing_file_context;
         if (bfc) {
@@ -739,6 +735,7 @@ static long ioctl_fill_blocks(struct file *f, void __user *arg)
         u8 *data_buf = NULL;
         ssize_t error = 0;
         int i = 0;
+        bool complete = false;
 
         if (!df)
                 return -EBADF;
@@ -780,7 +777,7 @@ static long ioctl_fill_blocks(struct file *f, void __user *arg)
                                                              data_buf);
                 } else {
                         error = incfs_process_new_data_block(df, &fill_block,
-                                                             data_buf);
+                                                             data_buf, &complete);
                 }
                 if (error)
                         break;
@@ -789,7 +786,8 @@ static long ioctl_fill_blocks(struct file *f, void __user *arg)
         if (data_buf)
                 free_pages((unsigned long)data_buf, get_order(data_buf_size));
 
-        maybe_delete_incomplete_file(f, df);
+        if (complete)
+                handle_file_completed(f, df);
 
         /*
          * Only report the error if no records were processed, otherwise
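
On the caller side, the last two hunks move the completion decision out of the post-loop helper and into the value reported by the per-block writer, so finalisation runs at most once per fill batch and only when that batch actually wrote the last missing block. The following is a condensed, stand-alone restatement of that calling convention with invented names and stub bodies; it is not the kernel code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct block { int index; const void *data; size_t len; };

/* Stubs standing in for the per-block writer and the completion handler;
 * the real functions do the locking shown in the hunks above. */
static int write_one_block(const struct block *b, bool *complete)
{
        printf("wrote block %d\n", b->index);
        if (b->index == 2)              /* pretend block 2 was the last missing one */
                *complete = true;
        return 0;
}

static void finalise_file(void)
{
        puts("file complete: truncate preallocated space");
}

static int fill_blocks(const struct block *blocks, size_t count)
{
        bool complete = false;
        int error = 0;

        for (size_t i = 0; i < count; i++) {
                error = write_one_block(&blocks[i], &complete);
                if (error)
                        break;
        }

        /* Finalise only if this batch actually completed the file. */
        if (complete)
                finalise_file();
        return error;
}

int main(void)
{
        struct block blocks[] = {
                { 0, "a", 1 }, { 1, "b", 1 }, { 2, "c", 1 },
        };

        return fill_blocks(blocks, 3);
}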