Commit b7cc9f14 by Ondrej Kozina

dm backend with support for multi-segment devices

Support for multi-segment devices is a requirement for online
reencryption to work. Introduce a modified dm backend that
splits the data structures describing an active device and
an individual dm target (or segment).
parent 72f7712c
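For orientation, here is a minimal sketch of the data-structure split the hunks below rely on. Only the members actually visible in this diff (type, data_device, the u.crypt/u.integrity unions, segment, segment_count) come from the commit; every other field name, the enum values beyond DM_CRYPT/DM_INTEGRITY, and the array bound are assumptions:

    #include <stdint.h>

    struct device;          /* opaque handle (lib/utils_device.c) */
    struct volume_key;      /* opaque handle (lib/volume_key.c) */

    typedef enum { DM_CRYPT = 0, DM_INTEGRITY, DM_LINEAR } dm_target_type;

    #define DM_MAX_SEGMENTS 8       /* hypothetical bound */

    /* One dm table line ("segment"); before this commit these fields
     * lived directly in struct crypt_dm_active_device. */
    struct dm_target {
        dm_target_type type;
        struct device *data_device;     /* backing device of this segment */
        union {
            struct {                    /* filled by dm_crypt_target_set() */
                const char *cipher;
                struct volume_key *vk;
                uint64_t offset, iv_offset;
                const char *integrity;
                uint32_t tag_size, sector_size;
            } crypt;
            struct {                    /* filled by dm_integrity_target_set() */
                uint64_t offset;
                uint32_t tag_size, sector_size;
                struct volume_key *vk;
                /* journal keys and tuning parameters omitted */
            } integrity;
        } u;
    };

    /* Device-wide state keeps only what is common to all segments. */
    struct crypt_dm_active_device {
        uint64_t size;                  /* active device size, in sectors */
        uint32_t flags;                 /* CRYPT_ACTIVATE_* */
        const char *uuid;
        unsigned segment_count;         /* 1 everywhere in this commit */
        struct dm_target segment[DM_MAX_SEGMENTS];
    };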
......@@ -177,45 +177,41 @@ int INTEGRITY_activate(struct crypt_device *cd,
{
uint32_t dmi_flags;
struct crypt_dm_active_device dmdi = {
.target = DM_INTEGRITY,
.data_device = crypt_data_device(cd),
.flags = flags,
.u.integrity = {
.offset = crypt_get_data_offset(cd),
.tag_size = crypt_get_integrity_tag_size(cd),
.sector_size = crypt_get_sector_size(cd),
.vk = vk,
.journal_crypt_key = journal_crypt_key,
.journal_integrity_key = journal_mac_key,
}
.segment_count = 1
};
struct dm_target *tgt = dmdi.segment;
int r;
r = INTEGRITY_data_sectors(cd, dmdi.data_device,
dmdi.u.integrity.offset * SECTOR_SIZE, &dmdi.size);
r = INTEGRITY_data_sectors(cd, crypt_data_device(cd),
crypt_get_data_offset(cd) * SECTOR_SIZE, &dmdi.size);
if (r < 0)
return r;
if (params) {
dmdi.u.integrity.journal_size = params->journal_size;
dmdi.u.integrity.journal_watermark = params->journal_watermark;
dmdi.u.integrity.journal_commit_time = params->journal_commit_time;
dmdi.u.integrity.interleave_sectors = params->interleave_sectors;
dmdi.u.integrity.buffer_sectors = params->buffer_sectors;
dmdi.u.integrity.integrity = params->integrity;
dmdi.u.integrity.journal_integrity = params->journal_integrity;
dmdi.u.integrity.journal_crypt = params->journal_crypt;
}
r = dm_integrity_target_set(&dmdi.segment[0], 0, dmdi.size,
crypt_data_device(cd),
crypt_get_integrity_tag_size(cd),
crypt_get_data_offset(cd),
crypt_get_sector_size(cd),
vk,
journal_crypt_key,
journal_mac_key,
params);
if (r < 0)
return r;
log_dbg("Trying to activate INTEGRITY device on top of %s, using name %s, tag size %d, provided sectors %" PRIu64".",
device_path(dmdi.data_device), name, dmdi.u.integrity.tag_size, dmdi.size);
device_path(tgt->data_device), name, tgt->u.integrity.tag_size, dmdi.size);
r = device_block_adjust(cd, dmdi.data_device, DEV_EXCL,
dmdi.u.integrity.offset, NULL, &dmdi.flags);
if (r)
r = device_block_adjust(cd, tgt->data_device, DEV_EXCL,
tgt->u.integrity.offset, NULL, &dmdi.flags);
if (r) {
dm_targets_free(&dmdi);
return r;
}
r = dm_create_device(cd, name, "INTEGRITY", &dmdi, 0);
r = dm_create_device(cd, name, "INTEGRITY", &dmdi);
dm_targets_free(&dmdi);
if (r < 0 && (dm_flags(DM_INTEGRITY, &dmi_flags) || !(dmi_flags & DM_INTEGRITY_SUPPORTED))) {
log_err(cd, _("Kernel doesn't support dm-integrity mapping.\n"));
return -ENOTSUP;
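This hunk establishes the call pattern repeated through the rest of the commit: describe each segment with a dm_*_target_set() helper, activate with the slimmed-down dm_create_device() (the trailing reload argument is gone), then release per-target allocations with dm_targets_free() on every path. A condensed sketch; cd, name, vk and flags are assumed to exist in the caller, and the argument values mirror the LUKS1 hunk below:

    struct crypt_dm_active_device dmd = { .segment_count = 1, .flags = flags };
    int r;

    /* 1. fill segment 0 */
    r = dm_crypt_target_set(&dmd.segment[0], 0, dmd.size,
                            crypt_data_device(cd), vk,
                            crypt_get_cipher(cd), crypt_get_cipher_mode(cd),
                            crypt_get_iv_offset(cd), crypt_get_data_offset(cd),
                            crypt_get_integrity(cd),
                            crypt_get_integrity_tag_size(cd),
                            crypt_get_sector_size(cd));
    /* 2. load and resume the dm table */
    if (!r)
        r = dm_create_device(cd, name, CRYPT_LUKS1, &dmd);
    /* 3. free what the target helper allocated, success or not */
    dm_targets_free(&dmd);
    return r;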
......@@ -232,49 +228,46 @@ int INTEGRITY_format(struct crypt_device *cd,
uint32_t dmi_flags;
char tmp_name[64], tmp_uuid[40];
struct crypt_dm_active_device dmdi = {
.target = DM_INTEGRITY,
.data_device = crypt_data_device(cd),
.size = 8,
.flags = CRYPT_ACTIVATE_PRIVATE, /* We always create a journal, but it can be unused later */
.u.integrity = {
.offset = crypt_get_data_offset(cd),
.tag_size = crypt_get_integrity_tag_size(cd),
.sector_size = crypt_get_sector_size(cd),
.journal_crypt_key = journal_crypt_key,
.journal_integrity_key = journal_mac_key,
}
.segment_count = 1
};
struct dm_target *tgt = dmdi.segment;
int r;
uuid_t tmp_uuid_bin;
if (params) {
dmdi.u.integrity.journal_size = params->journal_size;
dmdi.u.integrity.journal_watermark = params->journal_watermark;
dmdi.u.integrity.journal_commit_time = params->journal_commit_time;
dmdi.u.integrity.interleave_sectors = params->interleave_sectors;
dmdi.u.integrity.buffer_sectors = params->buffer_sectors;
dmdi.u.integrity.journal_integrity = params->journal_integrity;
dmdi.u.integrity.journal_crypt = params->journal_crypt;
dmdi.u.integrity.integrity = params->integrity;
}
uuid_generate(tmp_uuid_bin);
uuid_unparse(tmp_uuid_bin, tmp_uuid);
snprintf(tmp_name, sizeof(tmp_name), "temporary-cryptsetup-%s", tmp_uuid);
r = dm_integrity_target_set(tgt, 0, dmdi.size,
crypt_data_device(cd),
crypt_get_integrity_tag_size(cd),
crypt_get_data_offset(cd),
crypt_get_sector_size(cd),
NULL,
journal_crypt_key,
journal_mac_key,
params);
if (r < 0)
return r;
log_dbg("Trying to format INTEGRITY device on top of %s, tmp name %s, tag size %d.",
device_path(dmdi.data_device), tmp_name, dmdi.u.integrity.tag_size);
device_path(tgt->data_device), tmp_name, tgt->u.integrity.tag_size);
r = device_block_adjust(cd, dmdi.data_device, DEV_EXCL, dmdi.u.integrity.offset, NULL, NULL);
r = device_block_adjust(cd, tgt->data_device, DEV_EXCL, tgt->u.integrity.offset, NULL, NULL);
if (r < 0 && (dm_flags(DM_INTEGRITY, &dmi_flags) || !(dmi_flags & DM_INTEGRITY_SUPPORTED))) {
log_err(cd, _("Kernel doesn't support dm-integrity mapping.\n"));
return -ENOTSUP;
r = -ENOTSUP;
}
if (r)
if (r) {
dm_targets_free(&dmdi);
return r;
}
r = dm_create_device(cd, tmp_name, "INTEGRITY", &dmdi, 0);
r = dm_create_device(cd, tmp_name, "INTEGRITY", &dmdi);
dm_targets_free(&dmdi);
if (r)
return r;
......
......@@ -57,6 +57,13 @@
#define at_least(a, b) ({ __typeof__(a) __at_least = (a); (__at_least >= (b))?__at_least:(b); })
#define move_ref(x, y) \
do { \
typeof (x) *_px = &(x), *_py = &(y); \
*_px = *_py; \
*_py = NULL; \
} while (0)
#define CRYPT_DEFAULT_SEGMENT 0
struct crypt_device;
......
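The move_ref() helper added above is an ownership-transfer idiom: copy the pointer, then NULL the source, so a subsequent dm_targets_free() cannot free what the caller now holds. The TCRYPT_status_one() hunk further down does this by hand and carries a "/* TODO: move macro */" note; with the macro, the hand-rolled pair would collapse to:

    /* caller steals the queried target's device reference */
    move_ref(*device, tgt->data_device);    /* tgt->data_device becomes NULL */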
......@@ -203,43 +203,54 @@ int LOOPAES_activate(struct crypt_device *cd,
struct volume_key *vk,
uint32_t flags)
{
const char *p_cipher, *mode;
char *cipher = NULL;
uint32_t req_flags, dmc_flags;
int r;
struct crypt_dm_active_device dmd = {
.target = DM_CRYPT,
.size = 0,
.flags = flags,
.data_device = crypt_data_device(cd),
.u.crypt = {
.cipher = NULL,
.vk = vk,
.offset = crypt_get_data_offset(cd),
.iv_offset = crypt_get_iv_offset(cd),
.sector_size = crypt_get_sector_size(cd),
}
.size = 0,
.flags = flags,
.segment_count = 1,
};
r = device_block_adjust(cd, dmd.data_device, DEV_EXCL,
dmd.u.crypt.offset, &dmd.size, &dmd.flags);
r = device_block_adjust(cd, crypt_data_device(cd), DEV_EXCL,
crypt_get_data_offset(cd), &dmd.size, &dmd.flags);
if (r)
return r;
if (keys_count == 1) {
req_flags = DM_PLAIN64_SUPPORTED;
r = asprintf(&cipher, "%s-%s", base_cipher, "cbc-plain64");
mode = "cbc-plain64";
p_cipher = base_cipher;
} else {
req_flags = DM_LMK_SUPPORTED;
r = asprintf(&cipher, "%s:%d-%s", base_cipher, 64, "cbc-lmk");
mode = "cbc-lmk";
r = asprintf(&cipher, "%s:%d", base_cipher, 64);
if (r < 0)
return -ENOMEM;
p_cipher = cipher;
}
r = dm_crypt_target_set(&dmd.segment[0], 0, dmd.size,
crypt_data_device(cd),
vk,
p_cipher,
mode,
crypt_get_iv_offset(cd),
crypt_get_data_offset(cd),
crypt_get_integrity(cd),
crypt_get_integrity_tag_size(cd),
crypt_get_sector_size(cd));
if (r) {
free(cipher);
return r;
}
if (r < 0)
return -ENOMEM;
dmd.u.crypt.cipher = cipher;
log_dbg("Trying to activate loop-AES device %s using cipher %s.",
name, dmd.u.crypt.cipher);
name, dmd.segment[0].u.crypt.cipher);
r = dm_create_device(cd, name, CRYPT_LOOPAES, &dmd, 0);
r = dm_create_device(cd, name, CRYPT_LOOPAES, &dmd);
if (r < 0 && !dm_flags(DM_CRYPT, &dmc_flags) &&
(dmc_flags & req_flags) != req_flags) {
......
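The loop-AES rewrite splits the old single dm cipher string into separate cipher and mode arguments for dm_crypt_target_set(), which presumably re-joins them when building the table line. A worked example for base_cipher "aes" with 64 keys, values inferred from the hunk rather than program output:

    /* keys_count == 1:  cipher = "aes",     mode = "cbc-plain64" */
    /* keys_count == 64: cipher = "aes:64",  mode = "cbc-lmk"     */
    if (asprintf(&cipher, "%s:%d", base_cipher, 64) < 0)  /* "aes:64" */
        return -ENOMEM;
    /* the spec the kernel sees is still the joined "aes:64-cbc-lmk" */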
......@@ -54,25 +54,16 @@ static int LUKS_endec_template(char *src, size_t srcLength,
char name[PATH_MAX], path[PATH_MAX];
char cipher_spec[MAX_CIPHER_LEN * 3];
struct crypt_dm_active_device dmd = {
.target = DM_CRYPT,
.uuid = NULL,
.flags = CRYPT_ACTIVATE_PRIVATE,
.data_device = crypt_metadata_device(ctx),
.u.crypt = {
.cipher = cipher_spec,
.vk = vk,
.offset = sector,
.iv_offset = 0,
.sector_size = SECTOR_SIZE,
}
.flags = CRYPT_ACTIVATE_PRIVATE,
.segment_count = 1
};
int r, devfd = -1;
size_t bsize, alignment;
log_dbg("Using dmcrypt to access keyslot area.");
bsize = device_block_size(dmd.data_device);
alignment = device_alignment(dmd.data_device);
bsize = device_block_size(crypt_metadata_device(ctx));
alignment = device_alignment(crypt_metadata_device(ctx));
if (!bsize || !alignment)
return -EINVAL;
......@@ -88,26 +79,40 @@ static int LUKS_endec_template(char *src, size_t srcLength,
if (snprintf(cipher_spec, sizeof(cipher_spec), "%s-%s", cipher, cipher_mode) < 0)
return -ENOMEM;
r = device_block_adjust(ctx, dmd.data_device, DEV_OK,
dmd.u.crypt.offset, &dmd.size, &dmd.flags);
r = device_block_adjust(ctx, crypt_metadata_device(ctx), DEV_OK,
sector, &dmd.size, &dmd.flags);
if (r < 0) {
log_err(ctx, _("Device %s doesn't exist or access denied.\n"),
device_path(dmd.data_device));
device_path(crypt_metadata_device(ctx)));
return -EIO;
}
if (mode != O_RDONLY && dmd.flags & CRYPT_ACTIVATE_READONLY) {
log_err(ctx, _("Cannot write to device %s, permission denied.\n"),
device_path(dmd.data_device));
device_path(crypt_metadata_device(ctx)));
return -EACCES;
}
r = dm_create_device(ctx, name, "TEMP", &dmd, 0);
r = dm_crypt_target_set(dmd.segment, 0, dmd.size,
crypt_metadata_device(ctx),
vk,
cipher,
cipher_mode,
0,
sector,
NULL,
0,
SECTOR_SIZE);
if (r)
goto out;
r = dm_create_device(ctx, name, "TEMP", &dmd);
if (r < 0) {
if (r != -EACCES && r != -ENOTSUP)
_error_hint(ctx, device_path(dmd.data_device),
_error_hint(ctx, device_path(crypt_metadata_device(ctx)),
cipher, cipher_mode, vk->keylength * 8);
return -EIO;
r = -EIO;
goto out;
}
devfd = open(path, mode | O_DIRECT | O_SYNC);
......@@ -124,6 +129,7 @@ static int LUKS_endec_template(char *src, size_t srcLength,
} else
r = 0;
out:
dm_targets_free(&dmd);
if (devfd != -1)
close(devfd);
dm_remove_device(ctx, name, CRYPT_DEACTIVATE_FORCE);
......
......@@ -1174,21 +1174,11 @@ int LUKS1_activate(struct crypt_device *cd,
uint32_t flags)
{
int r;
char *dm_cipher = NULL;
enum devcheck device_check;
struct crypt_dm_active_device dmd = {
.target = DM_CRYPT,
.uuid = crypt_get_uuid(cd),
.flags = flags,
.size = 0,
.data_device = crypt_data_device(cd),
.u.crypt = {
.cipher = NULL,
.vk = vk,
.offset = crypt_get_data_offset(cd),
.iv_offset = 0,
.sector_size = crypt_get_sector_size(cd),
}
.flags = flags,
.uuid = crypt_get_uuid(cd),
.segment_count = 1,
};
if (dmd.flags & CRYPT_ACTIVATE_SHARED)
......@@ -1196,18 +1186,25 @@ int LUKS1_activate(struct crypt_device *cd,
else
device_check = DEV_EXCL;
r = device_block_adjust(cd, dmd.data_device, device_check,
dmd.u.crypt.offset, &dmd.size, &dmd.flags);
r = device_block_adjust(cd, crypt_data_device(cd), device_check,
crypt_get_data_offset(cd), &dmd.size, &dmd.flags);
if (r)
return r;
r = asprintf(&dm_cipher, "%s-%s", crypt_get_cipher(cd), crypt_get_cipher_mode(cd));
if (r < 0)
return -ENOMEM;
r = dm_crypt_target_set(&dmd.segment[0], 0, dmd.size,
crypt_data_device(cd),
vk,
crypt_get_cipher(cd),
crypt_get_cipher_mode(cd),
crypt_get_iv_offset(cd),
crypt_get_data_offset(cd),
crypt_get_integrity(cd),
crypt_get_integrity_tag_size(cd),
crypt_get_sector_size(cd));
if (!r)
r = dm_create_device(cd, name, CRYPT_LUKS1, &dmd);
dmd.u.crypt.cipher = dm_cipher;
r = dm_create_device(cd, name, CRYPT_LUKS1, &dmd, 0);
dm_targets_free(&dmd);
free(dm_cipher);
return r;
}
......@@ -1727,23 +1727,12 @@ int LUKS2_activate(struct crypt_device *cd,
enum devcheck device_check;
struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
struct crypt_dm_active_device dmd = {
.target = DM_CRYPT,
.uuid = crypt_get_uuid(cd),
.flags = flags,
.size = 0,
.data_device = crypt_data_device(cd),
.u.crypt = {
.vk = vk,
.offset = crypt_get_data_offset(cd),
.cipher = LUKS2_get_cipher(hdr, 0),
.integrity = crypt_get_integrity(cd),
.iv_offset = 0,
.tag_size = crypt_get_integrity_tag_size(cd),
.sector_size = crypt_get_sector_size(cd)
}
.segment_count = 1
};
char dm_int_name[PATH_MAX], dm_int_dev_name[PATH_MAX];
struct device *device = NULL;
struct device *idevice = NULL;
/* do not allow activation when particular requirements detected */
if ((r = LUKS2_unmet_requirements(cd, hdr, 0, 0)))
......@@ -1758,7 +1747,7 @@ int LUKS2_activate(struct crypt_device *cd,
else
device_check = DEV_EXCL;
if (dmd.u.crypt.tag_size) {
if (crypt_get_integrity_tag_size(cd)) {
if (!LUKS2_integrity_compatible(hdr)) {
log_err(cd, "Unsupported device integrity configuration.\n");
return -EINVAL;
......@@ -1770,39 +1759,47 @@ int LUKS2_activate(struct crypt_device *cd,
return r;
snprintf(dm_int_dev_name, sizeof(dm_int_dev_name), "%s/%s", dm_get_dir(), dm_int_name);
r = device_alloc(&device, dm_int_dev_name);
r = device_alloc(&idevice, dm_int_dev_name);
if (r) {
dm_remove_device(cd, dm_int_name, 0);
return r;
}
/* Space for IV metadata only */
if (!dmd.u.crypt.integrity)
dmd.u.crypt.integrity = "none";
dmd.data_device = device;
dmd.u.crypt.offset = 0;
r = INTEGRITY_data_sectors(cd, crypt_data_device(cd),
crypt_get_data_offset(cd) * SECTOR_SIZE,
&dmd.size);
if (r < 0) {
log_err(cd, "Cannot detect integrity device size.\n");
device_free(device);
device_free(idevice);
dm_remove_device(cd, dm_int_name, 0);
return r;
}
}
r = device_block_adjust(cd, dmd.data_device, device_check,
dmd.u.crypt.offset, &dmd.size, &dmd.flags);
r = device_block_adjust(cd, idevice ?: crypt_data_device(cd), device_check,
idevice ? 0 : crypt_get_data_offset(cd), &dmd.size,
&dmd.flags);
if (!r)
r = dm_crypt_target_set(dmd.segment, 0, dmd.size,
idevice ?: crypt_data_device(cd),
vk,
crypt_get_cipher(cd),
crypt_get_cipher_mode(cd),
crypt_get_iv_offset(cd),
idevice ? 0 : crypt_get_data_offset(cd),
crypt_get_integrity(cd) ?: "none",
crypt_get_integrity_tag_size(cd),
crypt_get_sector_size(cd));
if (!r)
r = dm_create_device(cd, name, CRYPT_LUKS2, &dmd, 0);
r = dm_create_device(cd, name, CRYPT_LUKS2, &dmd);
if (r < 0 && dmd.u.crypt.integrity)
if (r < 0 && idevice)
dm_remove_device(cd, dm_int_name, 0);
device_free(device);
device_free(idevice);
dm_targets_free(&dmd);
return r;
}
......
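The `idevice ?: crypt_data_device(cd)` expressions above use the GNU C conditional with omitted middle operand: `a ?: b` means `a ? a : b` with `a` evaluated once. The stacked dm-integrity device is used when one was created, the raw data device otherwise. A portable equivalent:

    struct device *dev = idevice ? idevice : crypt_data_device(cd);
    uint64_t offset = idevice ? 0 : crypt_get_data_offset(cd);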
......@@ -688,24 +688,19 @@ int TCRYPT_activate(struct crypt_device *cd,
struct crypt_params_tcrypt *params,
uint32_t flags)
{
char cipher[MAX_CIPHER_LEN], dm_name[PATH_MAX], dm_dev_name[PATH_MAX];
char dm_name[PATH_MAX], dm_dev_name[PATH_MAX];
char *part_path;
struct device *device = NULL, *part_device = NULL;
unsigned int i;
int r;
uint32_t req_flags, dmc_flags;
struct tcrypt_algs *algs;
enum devcheck device_check;
uint64_t offset = crypt_get_data_offset(cd);
struct volume_key *vk = NULL;
struct device *ptr_dev = crypt_data_device(cd), *device = NULL, *part_device = NULL;
struct crypt_dm_active_device dmd = {
.target = DM_CRYPT,
.size = 0,
.data_device = crypt_data_device(cd),
.u.crypt = {
.cipher = cipher,
.offset = crypt_get_data_offset(cd),
.iv_offset = crypt_get_iv_offset(cd),
.sector_size = crypt_get_sector_size(cd),
}
.segment_count = 1,
.flags = flags
};
if (!hdr->d.version) {
......@@ -749,15 +744,15 @@ int TCRYPT_activate(struct crypt_device *cd,
device_check = DEV_EXCL;
if ((params->flags & CRYPT_TCRYPT_SYSTEM_HEADER) &&
!crypt_dev_is_partition(device_path(dmd.data_device))) {
part_path = crypt_get_partition_device(device_path(dmd.data_device),
dmd.u.crypt.offset, dmd.size);
!crypt_dev_is_partition(device_path(crypt_data_device(cd)))) {
part_path = crypt_get_partition_device(device_path(crypt_data_device(cd)),
crypt_get_data_offset(cd), dmd.size);
if (part_path) {
if (!device_alloc(&part_device, part_path)) {
log_verbose(cd, _("Activating TCRYPT system encryption for partition %s.\n"),
part_path);
dmd.data_device = part_device;
dmd.u.crypt.offset = 0;
ptr_dev = part_device;
offset = 0;
}
free(part_path);
} else
......@@ -768,19 +763,19 @@ int TCRYPT_activate(struct crypt_device *cd,
device_check = DEV_SHARED;
}
r = device_block_adjust(cd, dmd.data_device, device_check,
dmd.u.crypt.offset, &dmd.size, &dmd.flags);
r = device_block_adjust(cd, ptr_dev, device_check,
offset, &dmd.size, &dmd.flags);
if (r) {
device_free(part_device);
return r;
r = -ENOMEM;
goto out;
}
/* From here, key size for every cipher must be the same */
dmd.u.crypt.vk = crypt_alloc_volume_key(algs->cipher[0].key_size +
algs->cipher[0].key_extra_size, NULL);
if (!dmd.u.crypt.vk) {
device_free(part_device);
return -ENOMEM;
/* From here, key size for every cipher must be the same */
vk = crypt_alloc_volume_key(algs->cipher[0].key_size +
algs->cipher[0].key_extra_size, NULL);
if (!vk) {
r = -ENOMEM;
goto out;
}
for (i = algs->chain_count; i > 0; i--) {
......@@ -793,11 +788,8 @@ int TCRYPT_activate(struct crypt_device *cd,
dmd.flags = flags | CRYPT_ACTIVATE_PRIVATE;
}
snprintf(cipher, sizeof(cipher), "%s-%s",
algs->cipher[i-1].name, algs->mode);
TCRYPT_copy_key(&algs->cipher[i-1], algs->mode,
dmd.u.crypt.vk->key, hdr->d.keys);
vk->key, hdr->d.keys);
if (algs->chain_count != i) {
snprintf(dm_dev_name, sizeof(dm_dev_name), "%s/%s_%d",
......@@ -805,14 +797,28 @@ int TCRYPT_activate(struct crypt_device *cd,
r = device_alloc(&device, dm_dev_name);
if (r)
break;
dmd.data_device = device;
dmd.u.crypt.offset = 0;
ptr_dev = device;
offset = 0;
}
r = dm_crypt_target_set(&dmd.segment[0], 0, dmd.size,
ptr_dev,
vk,
algs->cipher[i-1].name,
algs->mode,
crypt_get_iv_offset(cd),
offset,
crypt_get_integrity(cd),
crypt_get_integrity_tag_size(cd),
crypt_get_sector_size(cd));
if (r)
break;
log_dbg("Trying to activate TCRYPT device %s using cipher %s.",
dm_name, dmd.u.crypt.cipher);
r = dm_create_device(cd, dm_name, CRYPT_TCRYPT, &dmd, 0);
dm_name, dmd.segment[0].u.crypt.cipher);
r = dm_create_device(cd, dm_name, CRYPT_TCRYPT, &dmd);
dm_targets_free(&dmd);
device_free(device);
device = NULL;
......@@ -826,15 +832,17 @@ int TCRYPT_activate(struct crypt_device *cd,
r = -ENOTSUP;
}
out:
crypt_free_volume_key(vk);
device_free(device);
device_free(part_device);
crypt_free_volume_key(dmd.u.crypt.vk);
return r;
}
static int TCRYPT_remove_one(struct crypt_device *cd, const char *name,
const char *base_uuid, int index, uint32_t flags)
{
struct crypt_dm_active_device dmd = {};
struct crypt_dm_active_device dmd;
char dm_name[PATH_MAX];
int r;
......@@ -855,7 +863,7 @@ static int TCRYPT_remove_one(struct crypt_device *cd, const char *name,
int TCRYPT_deactivate(struct crypt_device *cd, const char *name, uint32_t flags)
{
struct crypt_dm_active_device dmd = {};
struct crypt_dm_active_device dmd;
int r;
r = dm_query_device(cd, name, DM_ACTIVE_UUID, &dmd);
......@@ -873,8 +881,6 @@ int TCRYPT_deactivate(struct crypt_device *cd, const char *name, uint32_t flags)
goto out;
r = TCRYPT_remove_one(cd, name, dmd.uuid, 2, flags);
if (r < 0)
goto out;
out:
free(CONST_CAST(void*)dmd.uuid);
return (r == -ENODEV) ? 0 : r;
......@@ -885,7 +891,8 @@ static int TCRYPT_status_one(struct crypt_device *cd, const char *name,
size_t *key_size, char *cipher,
uint64_t *data_offset, struct device **device)
{
struct crypt_dm_active_device dmd = {};
struct crypt_dm_active_device dmd;
struct dm_target *tgt = dmd.segment;
char dm_name[PATH_MAX], *c;
int r;
......@@ -900,30 +907,37 @@ static int TCRYPT_status_one(struct crypt_device *cd, const char *name,
DM_ACTIVE_UUID |
DM_ACTIVE_CRYPT_CIPHER |
DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
if (r > 0)
r = 0;
if (!r && !strncmp(dmd.uuid, base_uuid, strlen(base_uuid))) {
if ((c = strchr(dmd.u.crypt.cipher, '-')))
if (r < 0)
return r;
if (dmd.segment_count != 1 || tgt->type != DM_CRYPT) {
r = -ENOTSUP;
goto out;
}
r = 0;
if (!strncmp(dmd.uuid, base_uuid, strlen(base_uuid))) {
if ((c = strchr(tgt->u.crypt.cipher, '-')))
*c = '\0';
strcat(cipher, "-");
strncat(cipher, dmd.u.crypt.cipher, MAX_CIPHER_LEN);
*key_size += dmd.u.crypt.vk->keylength;
*data_offset = dmd.u.crypt.offset * SECTOR_SIZE;
strncat(cipher, tgt->u.crypt.cipher, MAX_CIPHER_LEN);
*key_size += tgt->u.crypt.vk->keylength;
*data_offset = tgt->u.crypt.offset * SECTOR_SIZE;
device_free(*device);
*device = dmd.data_device;
} else {
device_free(dmd.data_device);
/* TODO: move macro */
*device = tgt->data_device;
tgt->data_device = NULL;
} else
r = -ENODEV;
}
out:
dm_targets_free(&dmd);
free(CONST_CAST(void*)dmd.uuid);
free(CONST_CAST(void*)dmd.u.crypt.cipher);
crypt_free_volume_key(dmd.u.crypt.vk);
return r;
}
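On the query side the new contract is: dm_query_device() fills the segment array, the consumer validates segment_count and the target type before touching the union, and dm_targets_free() releases whatever the query allocated. A minimal consumer, sketched under the same assumptions as above:

    struct crypt_dm_active_device dmd;
    struct dm_target *tgt = dmd.segment;
    int r;

    r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_CIPHER, &dmd);
    if (r < 0)
        return r;
    if (dmd.segment_count != 1 || tgt->type != DM_CRYPT)
        r = -ENOTSUP;   /* refuse to interpret unexpected tables */
    else
        log_dbg("cipher: %s", tgt->u.crypt.cipher);
    dm_targets_free(&dmd);
    return r;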
int TCRYPT_init_by_name(struct crypt_device *cd, const char *name,
const struct crypt_dm_active_device *dmd,
const char *uuid,
const struct dm_target *tgt,
struct device **device,
struct crypt_params_tcrypt *tcrypt_params,
struct tcrypt_phdr *tcrypt_hdr)
......@@ -936,9 +950,9 @@ int TCRYPT_init_by_name(struct crypt_device *cd, const char *name,
memset(tcrypt_params, 0, sizeof(*tcrypt_params));
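TCRYPT_init_by_name() now takes the queried uuid and a single dm target instead of the whole active-device struct. A hypothetical call site matching the new prototype (variable names invented for illustration):

    r = TCRYPT_init_by_name(cd, name, dmd.uuid, &dmd.segment[0],
                            &device, &tcrypt_params, &tcrypt_hdr);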