Commit b507146b authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

   - bug in ahash SG list walking that may lead to crashes

   - resource leak in qat

   - missing RSA dependency that causes it to fail"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: rsa - select crypto mgr dependency
  crypto: hash - Fix page length clamping in hash walk
  crypto: qat - fix adf_ctl_drv.c:undefined reference to adf_init_pf_wq
  crypto: qat - fix invalid pf2vf_resp_wq logic
parents 26acc792 58446fef
@@ -96,6 +96,7 @@ config CRYPTO_AKCIPHER
 config CRYPTO_RSA
 	tristate "RSA algorithm"
 	select CRYPTO_AKCIPHER
+	select CRYPTO_MANAGER
 	select MPILIB
 	select ASN1
 	help
...
@@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 	struct scatterlist *sg;
 
 	sg = walk->sg;
-	walk->pg = sg_page(sg);
 	walk->offset = sg->offset;
+	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+	walk->offset = offset_in_page(walk->offset);
 	walk->entrylen = sg->length;
 
 	if (walk->entrylen > walk->total)
...
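The rewritten lines allow for scatterlist entries whose offset is larger than a page: the high bits of the offset say how many pages to advance past sg_page(), and the low bits become the in-page offset. A minimal user-space sketch of that arithmetic (the 4 KiB page size and the sample offset are illustrative, not taken from the kernel sources):

/*
 * Stand-alone illustration of the page/offset split done in
 * hash_walk_new_entry() above.  PAGE_SHIFT and the sample offset are
 * made up for the example; the kernel's offset_in_page() corresponds
 * to the "& (PAGE_SIZE - 1)" step below.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long sg_offset = 9000;	/* offset reaching past the first two pages */
	unsigned long page_idx = sg_offset >> PAGE_SHIFT;	/* pages to advance sg_page() by */
	unsigned long in_page  = sg_offset & (PAGE_SIZE - 1);	/* offset within that page */

	printf("page index %lu, in-page offset %lu\n", page_idx, in_page);
	/* The removed line kept page 0 together with the full offset,
	 * i.e. an address beyond the end of the page being mapped. */
	return 0;
}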
@@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				  uint32_t vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
 #else
 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 {
@@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 }
+
+static inline int adf_init_pf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
 #endif
 #endif
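The two hunks above pair real prototypes for adf_init_pf_wq()/adf_exit_pf_wq() with empty static inline stubs in the #else branch, so callers such as adf_ctl_drv.c still compile and link when the SR-IOV side of the driver is not built; that is the "undefined reference to adf_init_pf_wq" named in the shortlog. A generic sketch of the same pattern, with made-up FEATURE_WQ/feature_wq_* names:

/*
 * Sketch of the stub pattern used in the header above: when the
 * feature is compiled out, the same symbols still exist as empty
 * static inline functions, so callers need no #ifdefs of their own.
 * FEATURE_WQ, feature_wq_init() and feature_wq_exit() are invented
 * names for illustration only.
 */
#include <stdio.h>

#ifdef FEATURE_WQ
int feature_wq_init(void);	/* real implementation built elsewhere */
void feature_wq_exit(void);
#else
static inline int feature_wq_init(void) { return 0; }	/* always "succeeds" */
static inline void feature_wq_exit(void) { }
#endif

int main(void)
{
	if (feature_wq_init())
		return 1;
	puts("initialised");
	feature_wq_exit();
	return 0;
}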
@@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
 	if (adf_init_aer())
 		goto err_aer;
 
+	if (adf_init_pf_wq())
+		goto err_pf_wq;
+
 	if (qat_crypto_register())
 		goto err_crypto_register;
 
 	return 0;
 
 err_crypto_register:
+	adf_exit_pf_wq();
+err_pf_wq:
 	adf_exit_aer();
 err_aer:
 	adf_chr_drv_destroy();
@@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
 {
 	adf_chr_drv_destroy();
 	adf_exit_aer();
+	adf_exit_pf_wq();
 	qat_crypto_unregister();
 	adf_clean_vf_map(false);
 	mutex_destroy(&adf_ctl_lock);
...
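adf_register_ctl_device_driver() unwinds with goto labels: each label undoes only the steps that had already succeeded, in reverse order, which is why the new adf_exit_pf_wq() call sits under err_crypto_register and above the new err_pf_wq label. A compact sketch of that error-unwinding pattern, with hypothetical init_a/init_b/init_c steps standing in for the real ones:

/*
 * Sketch of the goto-unwind pattern used above.  init_a/init_b/init_c
 * and the exit_* counterparts are hypothetical; the point is that
 * teardown runs in reverse order of initialisation, and each label
 * tears down only what was already set up.
 */
#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return -1; }	/* pretend the last step fails */
static void exit_a(void) { puts("exit_a"); }
static void exit_b(void) { puts("exit_b"); }

static int register_driver(void)
{
	if (init_a())
		goto err_a;
	if (init_b())
		goto err_b;
	if (init_c())
		goto err_c;
	return 0;

err_c:
	exit_b();	/* undo init_b */
err_b:
	exit_a();	/* undo init_a */
err_a:
	return -1;
}

int main(void)
{
	return register_driver() ? 1 : 0;
}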
@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
 	int i;
 	u32 reg;
 
-	/* Workqueue for PF2VF responses */
-	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
-	if (!pf2vf_resp_wq)
-		return -ENOMEM;
-
 	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
 	     i++, vf_info++) {
 		/* This ptr will be populated when VFs will be created */
@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 
 	kfree(accel_dev->pf.vf_info);
 	accel_dev->pf.vf_info = NULL;
 
-	if (pf2vf_resp_wq) {
-		destroy_workqueue(pf2vf_resp_wq);
-		pf2vf_resp_wq = NULL;
-	}
 }
 EXPORT_SYMBOL_GPL(adf_disable_sriov);
@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 	return numvfs;
 }
 EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+	/* Workqueue for PF2VF responses */
+	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+
+	return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+	if (pf2vf_resp_wq) {
+		destroy_workqueue(pf2vf_resp_wq);
+		pf2vf_resp_wq = NULL;
+	}
+}