/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <[email protected]>
 * Author: Gary R Hook <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
40
		unsigned int offset = rctx->nbytes - rctx->hash_rem;
41

42 43
		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
44
		rctx->buf_count = rctx->hash_rem;
45
	} else {
46
		rctx->buf_count = 0;
47
	}
48

49
	/* Update result area if supplied */
50
	if (req->result && rctx->final)
51
		memcpy(req->result, rctx->ctx, digest_size);
52 53 54 55 56 57 58 59 60 61 62

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

/* Core SHA update path shared by update/final/finup/digest.
 *
 * @req:    the ahash request (source data in req->src)
 * @nbytes: number of new input bytes to consume from req->src
 * @final:  non-zero when this is the last (finalizing) operation
 *
 * Input that does not yet fill a hash block is buffered in the request
 * context; otherwise a CCP SHA command is built (possibly chaining the
 * buffered bytes ahead of req->src via a freshly allocated sg table)
 * and queued to the hardware.  Returns 0 / -EINPROGRESS-style codes
 * from ccp_crypto_enqueue_request(), or a negative errno on setup
 * failure.
 */
static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
			     unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	/* Total data available: previously buffered bytes + new input */
	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		/* Not enough for a full block yet - just buffer the input */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	/* Remember the source so the completion callback can save any
	 * unhashed remainder back into rctx->buf.
	 */
	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	/* Hash only whole blocks unless finalizing; keep the remainder */
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}

	/* Initialize the context scatterlist */
	sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));

	sg = NULL;
	if (rctx->buf_count && nbytes) {
		/* Build the data scatterlist table - allocate enough entries
		 * for both data pieces (buffer and input data)
		 */
		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
		sg_count = sg_nents(req->src) + 1;
		ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
		if (ret)
			return ret;

		/* Buffered bytes go first, then the caller's data */
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
		sg_mark_end(sg);

		sg = rctx->data_sg.sgl;
	} else if (rctx->buf_count) {
		/* Only buffered bytes to hash */
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);

		sg = &rctx->buf_sg;
	} else if (nbytes) {
		/* Only the caller's data to hash */
		sg = req->src;
	}

	rctx->msg_bits += (rctx->hash_cnt << 3);	/* Total in bits */

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_SHA;
	rctx->cmd.u.sha.type = rctx->type;
	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;

	/* The hardware context length depends on the SHA variant */
	switch (rctx->type) {
	case CCP_SHA_TYPE_1:
		rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
		break;
	default:
		/* Should never get here */
		break;
	}

	rctx->cmd.u.sha.src = sg;
	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
	/* An opad scatterlist is only supplied in HMAC mode (key set) */
	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
		&ctx->u.sha.opad_sg : NULL;
	rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
		ctx->u.sha.opad_count : 0;
	rctx->cmd.u.sha.first = rctx->first;
	rctx->cmd.u.sha.final = rctx->final;
	rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

	rctx->first = 0;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

static int ccp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
186
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
187 188 189
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
190 191
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
192 193 194 195 196 197

	memset(rctx, 0, sizeof(*rctx));

	rctx->type = alg->type;
	rctx->first = 1;

198 199 200 201 202 203
	if (ctx->u.sha.key_len) {
		/* Buffer the HMAC key for first update */
		memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
		rctx->buf_count = block_size;
	}

204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223
	return 0;
}

/* ahash .update entry point: hash req->nbytes more input, not final */
static int ccp_sha_update(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 0);
}

/* ahash .final entry point: no new input, finalize the digest */
static int ccp_sha_final(struct ahash_request *req)
{
	return ccp_do_sha_update(req, 0, 1);
}

/* ahash .finup entry point: hash the last req->nbytes and finalize */
static int ccp_sha_finup(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 1);
}

/* ahash .digest entry point: one-shot init + finup over req->nbytes */
static int ccp_sha_digest(struct ahash_request *req)
{
	int ret = ccp_sha_init(req);

	return ret ? ret : ccp_sha_finup(req);
}

233 234 235
static int ccp_sha_export(struct ahash_request *req, void *out)
{
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
236
	struct ccp_sha_exp_ctx state;
237

238 239 240
	/* Don't let anything leak to 'out' */
	memset(&state, 0, sizeof(state));

241 242 243 244 245 246 247 248 249
	state.type = rctx->type;
	state.msg_bits = rctx->msg_bits;
	state.first = rctx->first;
	memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
	state.buf_count = rctx->buf_count;
	memcpy(state.buf, rctx->buf, sizeof(state.buf));

	/* 'out' may not be aligned so memcpy from local variable */
	memcpy(out, &state, sizeof(state));
250 251 252 253 254 255 256

	return 0;
}

static int ccp_sha_import(struct ahash_request *req, const void *in)
{
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
257 258 259 260 261
	struct ccp_sha_exp_ctx state;

	/* 'in' may not be aligned so memcpy to local variable */
	memcpy(&state, in, sizeof(state));

262
	memset(rctx, 0, sizeof(*rctx));
263 264 265 266 267 268
	rctx->type = state.type;
	rctx->msg_bits = state.msg_bits;
	rctx->first = state.first;
	memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
	rctx->buf_count = state.buf_count;
	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
269 270 271 272

	return 0;
}

/* ahash .setkey entry point for the HMAC variants.
 *
 * Derives the HMAC ipad/opad blocks from @key per RFC 2104: keys longer
 * than the block size are first reduced with a software shash digest,
 * shorter keys are zero-padded, then each byte is XORed with the
 * HMAC_IPAD_VALUE / HMAC_OPAD_VALUE constants.  The opad scatterlist is
 * pre-built here for use by the SHA command in ccp_do_sha_update().
 *
 * Returns 0 on success or -EINVAL (with CRYPTO_TFM_RES_BAD_KEY_LEN set)
 * if hashing an overlong key fails.
 */
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct crypto_shash *shash = ctx->u.sha.hmac_tfm;

	SHASH_DESC_ON_STACK(sdesc, shash);

	unsigned int block_size = crypto_shash_blocksize(shash);
	unsigned int digest_size = crypto_shash_digestsize(shash);
	int i, ret;

	/* Set to zero until complete */
	ctx->u.sha.key_len = 0;

	/* Clear key area to provide zero padding for keys smaller
	 * than the block size
	 */
	memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

	if (key_len > block_size) {
		/* Must hash the input key */
		sdesc->tfm = shash;
		sdesc->flags = crypto_ahash_get_flags(tfm) &
			CRYPTO_TFM_REQ_MAY_SLEEP;

		ret = crypto_shash_digest(sdesc, key, key_len,
					  ctx->u.sha.key);
		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		key_len = digest_size;
	} else {
		memcpy(ctx->u.sha.key, key, key_len);
	}

	/* XOR the (possibly reduced, zero-padded) key into ipad/opad */
	for (i = 0; i < block_size; i++) {
		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ HMAC_IPAD_VALUE;
		ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ HMAC_OPAD_VALUE;
	}

	/* Pre-build the opad scatterlist for the final HMAC pass */
	sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
	ctx->u.sha.opad_count = block_size;

	/* Non-zero key_len marks the transform as HMAC mode */
	ctx->u.sha.key_len = key_len;

	return 0;
}

/* Common transform init for all SHA variants: hook up the completion
 * callback, start with no HMAC key, and size the per-request context.
 */
static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->u.sha.key_len = 0;
	ctx->complete = ccp_sha_complete;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

	return 0;
}

/* Common transform exit for plain SHA: nothing to clean up */
static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
345
	struct crypto_shash *hmac_tfm;
346

347
	hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363
	if (IS_ERR(hmac_tfm)) {
		pr_warn("could not load driver %s need for HMAC support\n",
			alg->child_alg);
		return PTR_ERR(hmac_tfm);
	}

	ctx->u.sha.hmac_tfm = hmac_tfm;

	return ccp_sha_cra_init(tfm);
}

/* Transform exit for the HMAC variants: release the software shash
 * allocated at init time, then run the common SHA teardown.
 */
static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hmac_tfm = ctx->u.sha.hmac_tfm;

	if (hmac_tfm)
		crypto_free_shash(hmac_tfm);

	ccp_sha_cra_exit(tfm);
}

/* Table-driven description of one CCP SHA algorithm variant */
struct ccp_sha_def {
	unsigned int version;	/* minimum CCP hardware version required */
	const char *name;	/* crypto API algorithm name (cra_name) */
	const char *drv_name;	/* driver-specific name (cra_driver_name) */
	enum ccp_sha_type type;	/* CCP engine SHA type selector */
	u32 digest_size;	/* digest length in bytes */
	u32 block_size;		/* hash block length in bytes */
};

/* SHA variants the driver can offer.  Entries whose .version exceeds
 * the detected hardware version are skipped at registration time
 * (SHA-384/512 require a version 5 CCP).
 */
static struct ccp_sha_def sha_algs[] = {
	{
		.version	= CCP_VERSION(3, 0),
		.name		= "sha1",
		.drv_name	= "sha1-ccp",
		.type		= CCP_SHA_TYPE_1,
		.digest_size	= SHA1_DIGEST_SIZE,
		.block_size	= SHA1_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(3, 0),
		.name		= "sha224",
		.drv_name	= "sha224-ccp",
		.type		= CCP_SHA_TYPE_224,
		.digest_size	= SHA224_DIGEST_SIZE,
		.block_size	= SHA224_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(3, 0),
		.name		= "sha256",
		.drv_name	= "sha256-ccp",
		.type		= CCP_SHA_TYPE_256,
		.digest_size	= SHA256_DIGEST_SIZE,
		.block_size	= SHA256_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(5, 0),
		.name		= "sha384",
		.drv_name	= "sha384-ccp",
		.type		= CCP_SHA_TYPE_384,
		.digest_size	= SHA384_DIGEST_SIZE,
		.block_size	= SHA384_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(5, 0),
		.name		= "sha512",
		.drv_name	= "sha512-ccp",
		.type		= CCP_SHA_TYPE_512,
		.digest_size	= SHA512_DIGEST_SIZE,
		.block_size	= SHA512_BLOCK_SIZE,
	},
};

/* Register the HMAC wrapper for an already-registered base SHA alg.
 *
 * @head:     list the new allocation is tracked on for module unload
 * @def:      table entry describing the SHA variant
 * @base_alg: the registered plain-SHA algorithm to clone
 *
 * The base algorithm is copied wholesale and only the HMAC-specific
 * pieces (names, setkey, init/exit hooks) are overridden.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ccp_register_hmac_alg(struct list_head *head,
				 const struct ccp_sha_def *def,
				 const struct ccp_crypto_ahash_alg *base_alg)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	/* Copy the base algorithm and only change what's necessary */
	*ccp_alg = *base_alg;
	INIT_LIST_HEAD(&ccp_alg->entry);

	/* snprintf always NUL-terminates; strncpy could leave child_alg
	 * unterminated when the name exactly fills the buffer.
	 */
	snprintf(ccp_alg->child_alg, CRYPTO_MAX_ALG_NAME, "%s", def->name);

	alg = &ccp_alg->alg;
	alg->setkey = ccp_sha_setkey;

	halg = &alg->halg;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
		 def->drv_name);
	base->cra_init = ccp_hmac_sha_cra_init;
	base->cra_exit = ccp_hmac_sha_cra_exit;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return ret;
}

/* Allocate and register one plain (non-HMAC) SHA ahash algorithm
 * described by @def, then register its HMAC wrapper on top of it.
 * The allocation is tracked on @head so it can be freed at unload.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ccp_register_sha_alg(struct list_head *head,
				const struct ccp_sha_def *def)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->type = def->type;

	/* Wire up the ahash entry points */
	alg = &ccp_alg->alg;
	alg->init = ccp_sha_init;
	alg->update = ccp_sha_update;
	alg->final = ccp_sha_final;
	alg->finup = ccp_sha_finup;
	alg->digest = ccp_sha_digest;
	alg->export = ccp_sha_export;
	alg->import = ccp_sha_import;

	halg = &alg->halg;
	halg->digestsize = def->digest_size;
	halg->statesize = sizeof(struct ccp_sha_exp_ctx);

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = def->block_size;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_type = &crypto_ahash_type;
	base->cra_init = ccp_sha_cra_init;
	base->cra_exit = ccp_sha_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	/* The HMAC variant is cloned from the algorithm just registered */
	ret = ccp_register_hmac_alg(head, def, ccp_alg);

	return ret;
}

int ccp_register_sha_algs(struct list_head *head)
{
	int i, ret;
529
	unsigned int ccpversion = ccp_version();
530 531

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
532 533
		if (sha_algs[i].version > ccpversion)
			continue;
534 535 536 537 538 539 540
		ret = ccp_register_sha_alg(head, &sha_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}