1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Asynchronous Compression operations
*
* Copyright (c) 2016, Intel Corporation
* Authors: Weigang Li <weigang.li@intel.com>
* Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
#ifndef _CRYPTO_ACOMP_INT_H
#define _CRYPTO_ACOMP_INT_H
#include <crypto/acompress.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/compiler_types.h>
#include <linux/cpumask_types.h>
#include <linux/spinlock.h>
#include <linux/workqueue_types.h>
/*
 * Declare an on-stack fallback acomp request named @name, with storage
 * sized for a synchronous fallback (struct acomp_req plus
 * MAX_SYNC_COMP_REQSIZE of request context, CRYPTO_MINALIGN aligned),
 * and initialise it from @req via acomp_fbreq_on_stack_init().
 */
#define ACOMP_FBREQ_ON_STACK(name, req) \
	char __##name##_req[sizeof(struct acomp_req) + \
			    MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
	struct acomp_req *name = acomp_fbreq_on_stack_init( \
		__##name##_req, (req))
/**
 * struct acomp_alg - asynchronous compression algorithm
 *
 * @compress:	Function performs a compress operation
 * @decompress:	Function performs a de-compress operation
 * @init:	Initialize the cryptographic transformation object.
 *		This function is used to initialize the cryptographic
 *		transformation object. This function is called only once at
 *		the instantiation time, right after the transformation context
 *		was allocated. In case the cryptographic hardware has some
 *		special requirements which need to be handled by software, this
 *		function shall check for the precise requirement of the
 *		transformation and put any software fallbacks in place.
 * @exit:	Deinitialize the cryptographic transformation object. This is a
 *		counterpart to @init, used to remove various changes set in
 *		@init.
 *
 * @base:	Common crypto API algorithm data structure
 * @calg:	Common algorithm data structure shared with scomp
 */
struct acomp_alg {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	int (*init)(struct crypto_acomp *tfm);
	void (*exit)(struct crypto_acomp *tfm);

	/* The union lets @base/@calg be addressed both through the
	 * expanded COMP_ALG_COMMON members and as a whole. */
	union {
		struct COMP_ALG_COMMON;
		struct comp_alg_common calg;
	};
};
/*
 * One algorithm stream: @ctx is the algorithm's per-stream context,
 * guarded by @lock (taken bh-disabled, see crypto_acomp_lock_stream_bh()).
 */
struct crypto_acomp_stream {
	spinlock_t lock;
	void *ctx;
};
/*
 * A set of per-CPU algorithm streams, managed via
 * crypto_acomp_alloc_streams()/crypto_acomp_free_streams().
 */
struct crypto_acomp_streams {
	/* These must come first because of struct scomp_alg. */
	void *(*alloc_ctx)(void);
	void (*free_ctx)(void *);

	/* Per-CPU stream slots. */
	struct crypto_acomp_stream __percpu *streams;
	/* NOTE(review): presumably backfills stream contexts for the CPUs
	 * flagged in @stream_want — confirm against the .c implementation. */
	struct work_struct stream_work;
	cpumask_t stream_want;
};
/*
 * Walk state over an acomp request's source and destination buffers.
 * Each direction is either a caller-supplied virtual address or a
 * scatterlist walk; the unions overlap because only one representation
 * is in use per request (selected by the CRYPTO_ACOMP_REQ_*_VIRT flags).
 */
struct acomp_walk {
	union {
		/* Virtual address of the source. */
		struct {
			struct {
				const void *const addr;
			} virt;
		} src;

		/* Private field for the API, do not use. */
		struct scatter_walk in;
	};

	union {
		/* Virtual address of the destination. */
		struct {
			struct {
				void *const addr;
			} virt;
		} dst;

		/* Private field for the API, do not use. */
		struct scatter_walk out;
	};

	/* Source and destination lengths in bytes. */
	unsigned int slen;
	unsigned int dlen;

	int flags;
};
/*
* Transform internal helpers.
*/
/* Return the driver-private context area that trails @req. */
static inline void *acomp_request_ctx(struct acomp_req *req)
{
	return req->__ctx;
}
/* Return the driver-private context of the transformation object @tfm. */
static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
{
	return tfm->base.__crt_ctx;
}
/* Complete @req, reporting status @err to its completion callback. */
static inline void acomp_request_complete(struct acomp_req *req,
					  int err)
{
	crypto_request_complete(&req->base, err);
}
/**
* crypto_register_acomp() -- Register asynchronous compression algorithm
*
* Function registers an implementation of an asynchronous
* compression algorithm
*
* @alg: algorithm definition
*
* Return: zero on success; error code in case of error
*/
int crypto_register_acomp(struct acomp_alg *alg);
/**
* crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
*
* Function unregisters an implementation of an asynchronous
* compression algorithm
*
* @alg: algorithm definition
*/
void crypto_unregister_acomp(struct acomp_alg *alg);
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
static inline bool acomp_request_issg(struct acomp_req *req)
{
return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
CRYPTO_ACOMP_REQ_DST_VIRT));
}
/* True iff the source of @req was supplied as a virtual address. */
static inline bool acomp_request_src_isvirt(struct acomp_req *req)
{
	return (req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT) != 0;
}
/* True iff the destination of @req was supplied as a virtual address. */
static inline bool acomp_request_dst_isvirt(struct acomp_req *req)
{
	return (req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT) != 0;
}
static inline bool acomp_request_isvirt(struct acomp_req *req)
{
return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
CRYPTO_ACOMP_REQ_DST_VIRT);
}
/* True iff the source buffer of @req is not DMA-capable. */
static inline bool acomp_request_src_isnondma(struct acomp_req *req)
{
	return (req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA) != 0;
}
/* True iff the destination buffer of @req is not DMA-capable. */
static inline bool acomp_request_dst_isnondma(struct acomp_req *req)
{
	return (req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA) != 0;
}
static inline bool acomp_request_isnondma(struct acomp_req *req)
{
return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA |
CRYPTO_ACOMP_REQ_DST_NONDMA);
}
/* True iff the tfm advertises support for virtual-address requests
 * (delegates to the generic crypto_tfm_req_virt() test). */
static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
{
	return crypto_tfm_req_virt(&tfm->base);
}
void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
struct crypto_acomp_streams *s) __acquires(stream);
/* Release a stream taken with crypto_acomp_lock_stream_bh(); drops the
 * spinlock and re-enables bottom halves. */
static inline void crypto_acomp_unlock_stream_bh(
	struct crypto_acomp_stream *stream) __releases(stream)
{
	spin_unlock_bh(&stream->lock);
}
void acomp_walk_done_src(struct acomp_walk *walk, int used);
void acomp_walk_done_dst(struct acomp_walk *walk, int used);
int acomp_walk_next_src(struct acomp_walk *walk);
int acomp_walk_next_dst(struct acomp_walk *walk);
int acomp_walk_virt(struct acomp_walk *__restrict walk,
struct acomp_req *__restrict req, bool atomic);
/* True while the source walk has not yet reached @cur == total source
 * length, i.e. there is more source data to process. */
static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
{
	return cur != walk->slen;
}
/* Return the public request flags of @req, with the private API bits
 * (CRYPTO_ACOMP_REQ_PRIVATE) masked out. */
static inline u32 acomp_request_flags(struct acomp_req *req)
{
	u32 flags = crypto_request_flags(&req->base);

	return flags & ~CRYPTO_ACOMP_REQ_PRIVATE;
}
/* Return the fallback acomp transform attached to @tfm. */
static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
{
	struct crypto_tfm *fb = crypto_acomp_tfm(tfm)->fb;

	return __crypto_acomp_tfm(fb);
}
/*
 * Initialise the on-stack request buffer @buf (declared by
 * ACOMP_FBREQ_ON_STACK) as a request for the fallback tfm of @old's tfm,
 * mirroring @old's buffers and lengths.  Returns the initialised request.
 */
static inline struct acomp_req *acomp_fbreq_on_stack_init(
	char *buf, struct acomp_req *old)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
	struct acomp_req *req = (void *)buf;

	crypto_stack_request_init(&req->base,
				  crypto_acomp_tfm(crypto_acomp_fb(tfm)));
	/* acomp_request_flags() strips CRYPTO_ACOMP_REQ_PRIVATE, so the
	 * private bits are carried over from @old explicitly below. */
	acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
	req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
	req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
	req->src = old->src;
	req->dst = old->dst;
	req->slen = old->slen;
	req->dlen = old->dlen;

	return req;
}
#endif
|