Commit 39a014f6 authored by Ondrej Kozina's avatar Ondrej Kozina

dm backend with support for multi-segment devices.

Support for multi-segment devices is a requirement for online
reencryption to work. Introducing a modified dm backend that
splits data structures describing active device and individual
dm target (or segment).
parent 1e22160e
......@@ -194,39 +194,19 @@ int INTEGRITY_create_dmd_device(struct crypt_device *cd,
return -EINVAL;
*dmd = (struct crypt_dm_active_device) {
.target = DM_INTEGRITY,
.data_device = crypt_data_device(cd),
.flags = flags,
.u.integrity = {
.offset = crypt_get_data_offset(cd),
.tag_size = crypt_get_integrity_tag_size(cd),
.sector_size = crypt_get_sector_size(cd),
.vk = vk,
.journal_crypt_key = journal_crypt_key,
.journal_integrity_key = journal_mac_key,
}
};
if (dmd->data_device != crypt_metadata_device(cd))
dmd->u.integrity.meta_device = crypt_metadata_device(cd);
r = INTEGRITY_data_sectors(cd, crypt_metadata_device(cd),
dmd->u.integrity.offset * SECTOR_SIZE, &dmd->size);
crypt_get_data_offset(cd) * SECTOR_SIZE, &dmd->size);
if (r < 0)
return r;
if (params) {
dmd->u.integrity.journal_size = params->journal_size;
dmd->u.integrity.journal_watermark = params->journal_watermark;
dmd->u.integrity.journal_commit_time = params->journal_commit_time;
dmd->u.integrity.interleave_sectors = params->interleave_sectors;
dmd->u.integrity.buffer_sectors = params->buffer_sectors;
dmd->u.integrity.integrity = params->integrity;
dmd->u.integrity.journal_integrity = params->journal_integrity;
dmd->u.integrity.journal_crypt = params->journal_crypt;
}
return r;
return dm_integrity_target_set(&dmd->segment, 0, dmd->size,
crypt_metadata_device(cd), crypt_data_device(cd),
crypt_get_integrity_tag_size(cd), crypt_get_data_offset(cd),
crypt_get_sector_size(cd), vk, journal_crypt_key,
journal_mac_key, params);
}
int INTEGRITY_activate_dmd_device(struct crypt_device *cd,
......@@ -235,17 +215,21 @@ int INTEGRITY_activate_dmd_device(struct crypt_device *cd,
{
int r;
uint32_t dmi_flags;
struct dm_target *tgt = &dmd->segment;
if (!single_segment(dmd) || tgt->type != DM_INTEGRITY)
return -EINVAL;
log_dbg(cd, "Trying to activate INTEGRITY device on top of %s, using name %s, tag size %d, provided sectors %" PRIu64".",
device_path(dmd->data_device), name, dmd->u.integrity.tag_size, dmd->size);
device_path(tgt->data_device), name, tgt->u.integrity.tag_size, dmd->size);
r = device_block_adjust(cd, dmd->data_device, DEV_EXCL,
dmd->u.integrity.offset, NULL, &dmd->flags);
r = device_block_adjust(cd, tgt->data_device, DEV_EXCL,
tgt->u.integrity.offset, NULL, &dmd->flags);
if (r)
return r;
if (dmd->u.integrity.meta_device) {
r = device_block_adjust(cd, dmd->u.integrity.meta_device, DEV_EXCL, 0, NULL, NULL);
if (tgt->u.integrity.meta_device) {
r = device_block_adjust(cd, tgt->u.integrity.meta_device, DEV_EXCL, 0, NULL, NULL);
if (r)
return r;
}
......@@ -273,7 +257,9 @@ int INTEGRITY_activate(struct crypt_device *cd,
if (r < 0)
return r;
return INTEGRITY_activate_dmd_device(cd, name, &dmd);
r = INTEGRITY_activate_dmd_device(cd, name, &dmd);
dm_targets_free(cd, &dmd);
return r;
}
int INTEGRITY_format(struct crypt_device *cd,
......@@ -284,64 +270,56 @@ int INTEGRITY_format(struct crypt_device *cd,
uint32_t dmi_flags;
char tmp_name[64], tmp_uuid[40];
struct crypt_dm_active_device dmdi = {
.target = DM_INTEGRITY,
.data_device = crypt_data_device(cd),
.size = 8,
.flags = CRYPT_ACTIVATE_PRIVATE, /* We always create journal but it can be unused later */
.u.integrity = {
.offset = crypt_get_data_offset(cd),
.tag_size = crypt_get_integrity_tag_size(cd),
.sector_size = crypt_get_sector_size(cd),
.journal_crypt_key = journal_crypt_key,
.journal_integrity_key = journal_mac_key,
}
};
struct dm_target *tgt = &dmdi.segment;
int r;
uuid_t tmp_uuid_bin;
if (dmdi.data_device != crypt_metadata_device(cd))
dmdi.u.integrity.meta_device = crypt_metadata_device(cd);
if (params) {
dmdi.u.integrity.journal_size = params->journal_size;
dmdi.u.integrity.journal_watermark = params->journal_watermark;
dmdi.u.integrity.journal_commit_time = params->journal_commit_time;
dmdi.u.integrity.interleave_sectors = params->interleave_sectors;
dmdi.u.integrity.buffer_sectors = params->buffer_sectors;
dmdi.u.integrity.journal_integrity = params->journal_integrity;
dmdi.u.integrity.journal_crypt = params->journal_crypt;
dmdi.u.integrity.integrity = params->integrity;
}
struct volume_key *vk = NULL;
uuid_generate(tmp_uuid_bin);
uuid_unparse(tmp_uuid_bin, tmp_uuid);
snprintf(tmp_name, sizeof(tmp_name), "temporary-cryptsetup-%s", tmp_uuid);
/* There is no data area, we can actually use fake zeroed key */
if (params && params->integrity_key_size)
vk = crypt_alloc_volume_key(params->integrity_key_size, NULL);
r = dm_integrity_target_set(tgt, 0, dmdi.size, crypt_metadata_device(cd),
crypt_data_device(cd), crypt_get_integrity_tag_size(cd),
crypt_get_data_offset(cd), crypt_get_sector_size(cd), vk,
journal_crypt_key, journal_mac_key, params);
if (r < 0) {
crypt_free_volume_key(vk);
return r;
}
log_dbg(cd, "Trying to format INTEGRITY device on top of %s, tmp name %s, tag size %d.",
device_path(dmdi.data_device), tmp_name, dmdi.u.integrity.tag_size);
device_path(tgt->data_device), tmp_name, tgt->u.integrity.tag_size);
r = device_block_adjust(cd, dmdi.data_device, DEV_EXCL, dmdi.u.integrity.offset, NULL, NULL);
r = device_block_adjust(cd, tgt->data_device, DEV_EXCL, tgt->u.integrity.offset, NULL, NULL);
if (r < 0 && (dm_flags(cd, DM_INTEGRITY, &dmi_flags) || !(dmi_flags & DM_INTEGRITY_SUPPORTED))) {
log_err(cd, _("Kernel doesn't support dm-integrity mapping."));
return -ENOTSUP;
r = -ENOTSUP;
}
if (r)
if (r) {
dm_targets_free(cd, &dmdi);
return r;
}
if (dmdi.u.integrity.meta_device) {
r = device_block_adjust(cd, dmdi.u.integrity.meta_device, DEV_EXCL, 0, NULL, NULL);
if (r)
if (tgt->u.integrity.meta_device) {
r = device_block_adjust(cd, tgt->u.integrity.meta_device, DEV_EXCL, 0, NULL, NULL);
if (r) {
dm_targets_free(cd, &dmdi);
return r;
}
}
/* There is no data area, we can actually use fake zeroed key */
if (params && params->integrity_key_size)
dmdi.u.integrity.vk = crypt_alloc_volume_key(params->integrity_key_size, NULL);
r = dm_create_device(cd, tmp_name, "INTEGRITY", &dmdi);
crypt_free_volume_key(dmdi.u.integrity.vk);
crypt_free_volume_key(vk);
dm_targets_free(cd, &dmdi);
if (r)
return r;
......
......@@ -65,6 +65,13 @@
# define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
#define MOVE_REF(x, y) \
do { \
typeof (x) *_px = &(x), *_py = &(y); \
*_px = *_py; \
*_py = NULL; \
} while (0)
struct crypt_device;
struct volume_key {
......
This diff is collapsed.
......@@ -203,25 +203,15 @@ int LOOPAES_activate(struct crypt_device *cd,
struct volume_key *vk,
uint32_t flags)
{
char *cipher = NULL;
uint32_t req_flags, dmc_flags;
int r;
uint32_t req_flags, dmc_flags;
char *cipher = NULL;
struct crypt_dm_active_device dmd = {
.target = DM_CRYPT,
.size = 0,
.flags = flags,
.data_device = crypt_data_device(cd),
.u.crypt = {
.cipher = NULL,
.vk = vk,
.offset = crypt_get_data_offset(cd),
.iv_offset = crypt_get_iv_offset(cd),
.sector_size = crypt_get_sector_size(cd),
}
.flags = flags,
};
r = device_block_adjust(cd, dmd.data_device, DEV_EXCL,
dmd.u.crypt.offset, &dmd.size, &dmd.flags);
r = device_block_adjust(cd, crypt_data_device(cd), DEV_EXCL,
crypt_get_data_offset(cd), &dmd.size, &dmd.flags);
if (r)
return r;
......@@ -235,9 +225,18 @@ int LOOPAES_activate(struct crypt_device *cd,
if (r < 0)
return -ENOMEM;
dmd.u.crypt.cipher = cipher;
r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
vk, cipher, crypt_get_iv_offset(cd),
crypt_get_data_offset(cd), crypt_get_integrity(cd),
crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
if (r) {
free(cipher);
return r;
}
log_dbg(cd, "Trying to activate loop-AES device %s using cipher %s.",
name, dmd.u.crypt.cipher);
name, cipher);
r = dm_create_device(cd, name, CRYPT_LOOPAES, &dmd);
......@@ -247,6 +246,8 @@ int LOOPAES_activate(struct crypt_device *cd,
r = -ENOTSUP;
}
dm_targets_free(cd, &dmd);
free(cipher);
return r;
}
......@@ -58,25 +58,15 @@ static int LUKS_endec_template(char *src, size_t srcLength,
char name[PATH_MAX], path[PATH_MAX];
char cipher_spec[MAX_CIPHER_LEN * 3];
struct crypt_dm_active_device dmd = {
.target = DM_CRYPT,
.uuid = NULL,
.flags = CRYPT_ACTIVATE_PRIVATE,
.data_device = crypt_metadata_device(ctx),
.u.crypt = {
.cipher = cipher_spec,
.vk = vk,
.offset = sector,
.iv_offset = 0,
.sector_size = SECTOR_SIZE,
}
.flags = CRYPT_ACTIVATE_PRIVATE,
};
int r, devfd = -1;
size_t bsize, keyslot_alignment, alignment;
log_dbg(ctx, "Using dmcrypt to access keyslot area.");
bsize = device_block_size(ctx, dmd.data_device);
alignment = device_alignment(dmd.data_device);
bsize = device_block_size(ctx, crypt_metadata_device(ctx));
alignment = device_alignment(crypt_metadata_device(ctx));
if (!bsize || !alignment)
return -EINVAL;
......@@ -96,26 +86,33 @@ static int LUKS_endec_template(char *src, size_t srcLength,
if (snprintf(cipher_spec, sizeof(cipher_spec), "%s-%s", cipher, cipher_mode) < 0)
return -ENOMEM;
r = device_block_adjust(ctx, dmd.data_device, DEV_OK,
dmd.u.crypt.offset, &dmd.size, &dmd.flags);
r = device_block_adjust(ctx, crypt_metadata_device(ctx), DEV_OK,
sector, &dmd.size, &dmd.flags);
if (r < 0) {
log_err(ctx, _("Device %s doesn't exist or access denied."),
device_path(dmd.data_device));
device_path(crypt_metadata_device(ctx)));
return -EIO;
}
if (mode != O_RDONLY && dmd.flags & CRYPT_ACTIVATE_READONLY) {
log_err(ctx, _("Cannot write to device %s, permission denied."),
device_path(dmd.data_device));
device_path(crypt_metadata_device(ctx)));
return -EACCES;
}
r = dm_crypt_target_set(&dmd.segment, 0, dmd.size,
crypt_metadata_device(ctx), vk, cipher_spec, 0, sector,
NULL, 0, SECTOR_SIZE);
if (r)
goto out;
r = dm_create_device(ctx, name, "TEMP", &dmd);
if (r < 0) {
if (r != -EACCES && r != -ENOTSUP)
_error_hint(ctx, device_path(dmd.data_device),
_error_hint(ctx, device_path(crypt_metadata_device(ctx)),
cipher, cipher_mode, vk->keylength * 8);
return -EIO;
r = -EIO;
goto out;
}
devfd = open(path, mode | O_DIRECT | O_SYNC);
......@@ -132,6 +129,7 @@ static int LUKS_endec_template(char *src, size_t srcLength,
} else
r = 0;
out:
dm_targets_free(ctx, &dmd);
if (devfd != -1)
close(devfd);
dm_remove_device(ctx, name, CRYPT_DEACTIVATE_FORCE);
......
......@@ -1156,22 +1156,22 @@ int LUKS1_activate(struct crypt_device *cd,
struct volume_key *vk,
uint32_t flags)
{
int r;
struct crypt_dm_active_device dmd = {
.target = DM_CRYPT,
.uuid = crypt_get_uuid(cd),
.flags = flags,
.size = 0,
.data_device = crypt_data_device(cd),
.u.crypt = {
.cipher = crypt_get_cipher_spec(cd),
.vk = vk,
.offset = crypt_get_data_offset(cd),
.iv_offset = 0,
.sector_size = crypt_get_sector_size(cd),
}
.flags = flags,
.uuid = crypt_get_uuid(cd),
};
return create_or_reload_device(cd, name, CRYPT_LUKS1, &dmd);
r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
crypt_get_data_offset(cd), crypt_get_integrity(cd),
crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
if (!r)
r = create_or_reload_device(cd, name, CRYPT_LUKS1, &dmd);
dm_targets_free(cd, &dmd);
return r;
}
int LUKS_wipe_header_areas(struct luks_phdr *hdr,
......
......@@ -1861,33 +1861,28 @@ int LUKS2_activate(struct crypt_device *cd,
{
int r;
struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
struct crypt_dm_active_device dmdi, dmd = {
.target = DM_CRYPT,
struct crypt_dm_active_device dmdi = {}, dmd = {
.uuid = crypt_get_uuid(cd),
.size = 0,
.data_device = crypt_data_device(cd),
.u.crypt = {
.vk = vk,
.offset = crypt_get_data_offset(cd),
.cipher = LUKS2_get_cipher(hdr, CRYPT_DEFAULT_SEGMENT),
.integrity = crypt_get_integrity(cd),
.iv_offset = 0,
.tag_size = crypt_get_integrity_tag_size(cd),
.sector_size = crypt_get_sector_size(cd)
}
};
/* do not allow activation when particular requirements detected */
if ((r = LUKS2_unmet_requirements(cd, hdr, 0, 0)))
return r;
r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
crypt_get_data_offset(cd), crypt_get_integrity(cd) ?: "none",
crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
if (r < 0)
return r;
/* Add persistent activation flags */
if (!(flags & CRYPT_ACTIVATE_IGNORE_PERSISTENT))
LUKS2_config_get_flags(cd, hdr, &dmd.flags);
dmd.flags |= flags;
if (dmd.u.crypt.tag_size) {
if (crypt_get_integrity_tag_size(cd)) {
if (!LUKS2_integrity_compatible(hdr)) {
log_err(cd, "Unsupported device integrity configuration.");
return -EINVAL;
......@@ -1897,17 +1892,17 @@ int LUKS2_activate(struct crypt_device *cd,
if (r)
return r;
/* Space for IV metadata only */
if (!dmd.u.crypt.integrity)
dmd.u.crypt.integrity = "none";
dmd.segment.u.crypt.offset = 0;
dmd.segment.size = dmdi.segment.size;
dmd.u.crypt.offset = 0;
dmd.size = dmdi.size;
r = create_or_reload_device_with_integrity(cd, name, CRYPT_LUKS2, &dmd, &dmdi);
} else
r = create_or_reload_device(cd, name, CRYPT_LUKS2, &dmd);
return create_or_reload_device_with_integrity(cd, name, CRYPT_LUKS2, &dmd, &dmdi);
}
dm_targets_free(cd, &dmd);
dm_targets_free(cd, &dmdi);
return create_or_reload_device(cd, name, CRYPT_LUKS2, &dmd);
return r;
}
int LUKS2_unmet_requirements(struct crypt_device *cd, struct luks2_hdr *hdr, uint32_t reqs_mask, int quiet)
......
This diff is collapsed.
......@@ -723,24 +723,18 @@ int TCRYPT_activate(struct crypt_device *cd,
struct crypt_params_tcrypt *params,
uint32_t flags)
{
char cipher[MAX_CIPHER_LEN], dm_name[PATH_MAX], dm_dev_name[PATH_MAX];
char dm_name[PATH_MAX], dm_dev_name[PATH_MAX], cipher_spec[MAX_CIPHER_LEN*2+1];
char *part_path;
struct device *device = NULL, *part_device = NULL;
unsigned int i;
int r;
uint32_t req_flags, dmc_flags;
struct tcrypt_algs *algs;
enum devcheck device_check;
uint64_t offset = crypt_get_data_offset(cd);
struct volume_key *vk = NULL;
struct device *ptr_dev = crypt_data_device(cd), *device = NULL, *part_device = NULL;
struct crypt_dm_active_device dmd = {
.target = DM_CRYPT,
.size = 0,
.data_device = crypt_data_device(cd),
.u.crypt = {
.cipher = cipher,
.offset = crypt_get_data_offset(cd),
.iv_offset = crypt_get_iv_offset(cd),
.sector_size = crypt_get_sector_size(cd),
}
.flags = flags
};
if (!hdr->d.version) {
......@@ -784,15 +778,15 @@ int TCRYPT_activate(struct crypt_device *cd,
device_check = DEV_EXCL;
if ((params->flags & CRYPT_TCRYPT_SYSTEM_HEADER) &&
!crypt_dev_is_partition(device_path(dmd.data_device))) {
part_path = crypt_get_partition_device(device_path(dmd.data_device),
dmd.u.crypt.offset, dmd.size);
!crypt_dev_is_partition(device_path(crypt_data_device(cd)))) {
part_path = crypt_get_partition_device(device_path(crypt_data_device(cd)),
crypt_get_data_offset(cd), dmd.size);
if (part_path) {
if (!device_alloc(cd, &part_device, part_path)) {
log_verbose(cd, _("Activating TCRYPT system encryption for partition %s."),
part_path);
dmd.data_device = part_device;
dmd.u.crypt.offset = 0;
ptr_dev = part_device;
offset = 0;
}
free(part_path);
} else
......@@ -803,19 +797,17 @@ int TCRYPT_activate(struct crypt_device *cd,
device_check = DEV_OK;
}
r = device_block_adjust(cd, dmd.data_device, device_check,
dmd.u.crypt.offset, &dmd.size, &dmd.flags);
if (r) {
device_free(cd, part_device);
return r;
}
r = device_block_adjust(cd, ptr_dev, device_check,
offset, &dmd.size, &dmd.flags);
if (r)
goto out;
/* From here, key size for every cipher must be the same */
dmd.u.crypt.vk = crypt_alloc_volume_key(algs->cipher[0].key_size +
algs->cipher[0].key_extra_size, NULL);
if (!dmd.u.crypt.vk) {
device_free(cd, part_device);
return -ENOMEM;
vk = crypt_alloc_volume_key(algs->cipher[0].key_size +
algs->cipher[0].key_extra_size, NULL);
if (!vk) {
r = -ENOMEM;
goto out;
}
for (i = algs->chain_count; i > 0; i--) {
......@@ -828,11 +820,8 @@ int TCRYPT_activate(struct crypt_device *cd,
dmd.flags = flags | CRYPT_ACTIVATE_PRIVATE;
}
snprintf(cipher, sizeof(cipher), "%s-%s",
algs->cipher[i-1].name, algs->mode);
TCRYPT_copy_key(&algs->cipher[i-1], algs->mode,
dmd.u.crypt.vk->key, hdr->d.keys);
vk->key, hdr->d.keys);
if (algs->chain_count != i) {
snprintf(dm_dev_name, sizeof(dm_dev_name), "%s/%s_%d",
......@@ -840,14 +829,29 @@ int TCRYPT_activate(struct crypt_device *cd,
r = device_alloc(cd, &device, dm_dev_name);
if (r)
break;
dmd.data_device = device;
dmd.u.crypt.offset = 0;
ptr_dev = device;
offset = 0;
}
r = snprintf(cipher_spec, sizeof(cipher_spec), "%s-%s", algs->cipher[i-1].name, algs->mode);
if (r < 0 || (size_t)r >= sizeof(cipher_spec)) {
r = -ENOMEM;
break;
}
r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, ptr_dev, vk,
cipher_spec, crypt_get_iv_offset(cd), offset,
crypt_get_integrity(cd),
crypt_get_integrity_tag_size(cd),
crypt_get_sector_size(cd));
if (r)
break;
log_dbg(cd, "Trying to activate TCRYPT device %s using cipher %s.",
dm_name, dmd.u.crypt.cipher);
dm_name, dmd.segment.u.crypt.cipher);
r = dm_create_device(cd, dm_name, CRYPT_TCRYPT, &dmd);
dm_targets_free(cd, &dmd);
device_free(cd, device);
device = NULL;
......@@ -861,15 +865,17 @@ int TCRYPT_activate(struct crypt_device *cd,
r = -ENOTSUP;
}
out:
crypt_free_volume_key(vk);
device_free(cd, device);
device_free(cd, part_device);
crypt_free_volume_key(dmd.u.crypt.vk);
return r;
}
static int TCRYPT_remove_one(struct crypt_device *cd, const char *name,
const char *base_uuid, int index, uint32_t flags)
{
struct crypt_dm_active_device dmd = {};
struct crypt_dm_active_device dmd;
char dm_name[PATH_MAX];
int r;
......@@ -890,7 +896,7 @@ static int TCRYPT_remove_one(struct crypt_device *cd, const char *name,
int TCRYPT_deactivate(struct crypt_device *cd, const char *name, uint32_t flags)
{
struct crypt_dm_active_device dmd = {};
struct crypt_dm_active_device dmd;
int r;
r = dm_query_device(cd, name, DM_ACTIVE_UUID, &dmd);
......@@ -908,8 +914,6 @@ int TCRYPT_deactivate(struct crypt_device *cd, const char *name, uint32_t flags)
goto out;
r = TCRYPT_remove_one(cd, name, dmd.uuid, 2, flags);
if (r < 0)
goto out;
out:
free(CONST_CAST(void*)dmd.uuid);
return (r == -ENODEV) ? 0 : r;
......@@ -920,7 +924,8 @@ static int TCRYPT_status_one(struct crypt_device *cd, const char *name,
size_t *key_size, char *cipher,
uint64_t *data_offset, struct device **device)
{
struct crypt_dm_active_device dmd = {};
struct crypt_dm_active_device dmd;
struct dm_target *tgt = &dmd.segment;
char dm_name[PATH_MAX], *c;
int r;
......@@ -935,30 +940,35 @@ static int TCRYPT_status_one(struct crypt_device *cd, const char *name,
DM_ACTIVE_UUID |
DM_ACTIVE_CRYPT_CIPHER |
DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
if (r > 0)
r = 0;
if (!r && !strncmp(dmd.uuid, base_uuid, strlen(base_uuid))) {
if ((c = strchr(dmd.u.crypt.cipher, '-')))
if (r < 0)
return r;
if (!single_segment(&dmd) || tgt->type != DM_CRYPT) {
r = -ENOTSUP;
goto out;
}
r = 0;
if (!strncmp(dmd.uuid, base_uuid, strlen(base_uuid))) {
if ((c = strchr(tgt->u.crypt.cipher, '-')))
*c = '\0';
strcat(cipher, "-");
strncat(cipher, dmd.u.crypt.cipher, MAX_CIPHER_LEN);
*key_size += dmd.u.crypt.vk->keylength;
*data_offset = dmd.u.crypt.offset * SECTOR_SIZE;
strncat(cipher, tgt->u.crypt.cipher, MAX_CIPHER_LEN);
*key_size += tgt->u.crypt.vk->keylength;
*data_offset = tgt->u.crypt.offset * SECTOR_SIZE;
device_free(cd, *device);
*device = dmd.data_device;
} else {
device_free(cd, dmd.data_device);
MOVE_REF(*device, tgt->data_device);
} else
r = -ENODEV;
}
out:
dm_targets_free(cd, &dmd);
free(CONST_CAST(void*)dmd.uuid);
free(CONST_CAST(void*)dmd.u.crypt.cipher);
crypt_free_volume_key(dmd.u.crypt.vk);
return r;
}
int TCRYPT_init_by_name(struct crypt_device *cd, const char *name,
const struct crypt_dm_active_device *dmd,
const char *uuid,
const struct dm_target *tgt,
struct device **device,
struct crypt_params_tcrypt *tcrypt_params,
struct tcrypt_phdr *tcrypt_hdr)
......@@ -971,9 +981,9 @@ int TCRYPT_init_by_name(struct crypt_device *cd, const char *name,
memset(tcrypt_params, 0, sizeof(*tcrypt_params));
memset(tcrypt_hdr, 0, sizeof(*tcrypt_hdr));
tcrypt_hdr->d.sector_size = SECTOR_SIZE;
tcrypt_hdr->d.mk_offset = dmd->u.crypt.offset * SECTOR_SIZE;
tcrypt_hdr->d.mk_offset = tgt->u.crypt.offset * SECTOR_SIZE;
strncpy(cipher, dmd->u.crypt.cipher, MAX_CIPHER_LEN);
strncpy(cipher, tgt->u.crypt.cipher, MAX_CIPHER_LEN);
tmp = strchr(cipher, '-');
if (!tmp)
return -EINVAL;
......@@ -981,11 +991,11 @@ int TCRYPT_init_by_name(struct crypt_device *cd, const char *name,
mode[MAX_CIPHER_LEN] = '\0';
strncpy(mode, ++tmp, MAX_CIPHER_LEN);
key_size = dmd->u.crypt.vk->keylength;
r = TCRYPT_status_one(cd, name, dmd->uuid, 1, &key_size,
key_size = tgt->u.crypt.vk->keylength;
r = TCRYPT_status_one(cd, name, uuid, 1, &key_size,
cipher, &tcrypt_hdr->d.mk_offset, device);
if (!r)
r = TCRYPT_status_one(cd, name, dmd->uuid, 2, &key_size,
r = TCRYPT_status_one(cd, name, uuid, 2, &key_size,
cipher, &tcrypt_hdr->d.mk_offset, device);
if (r < 0 && r != -ENODEV)
......
......@@ -75,6 +75,7 @@ struct tcrypt_phdr {
struct crypt_device;
struct crypt_params_tcrypt;
struct crypt_dm_active_device;
struct dm_target;
struct volume_key;
struct device;
......@@ -83,7 +84,8 @@ int TCRYPT_read_phdr(struct crypt_device *cd,
struct crypt_params_tcrypt *params);
int TCRYPT_init_by_name(struct crypt_device *cd, const char *name,
const struct crypt_dm_active_device *dmd,
const char *uuid,
const struct dm_target *tgt,
struct device **device,
struct crypt_params_tcrypt *tcrypt_params,
struct tcrypt_phdr *tcrypt_hdr);
......
......@@ -31,6 +31,7 @@ struct crypt_device;
struct volume_key;
struct crypt_params_verity;
struct device;
struct crypt_params_integrity;