Commit d4c2456a1888d7914502f4237b2ca4e262dd4c4d
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Benjamin Herrenschmidt:
 "Some more P8 related bits, a bunch of fixes for our P7+/P8 HW crypto
  drivers, some added workarounds for those radeons that don't do proper
  64-bit MSIs and a couple of other trivialities by myself."

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/pseries: Make 32-bit MSI quirk work on systems lacking firmware support
  powerpc/powernv: Build a zImage.epapr
  powerpc: Make radeon 32-bit MSI quirk work on powernv
  powerpc: Context switch more PMU related SPRs
  powerpc/powernv: Fix condition for when to invalidate the TCE cache
  powerpc/pci: Fix bogus message at boot about empty memory resources
  powerpc: Fix TLB cleanup at boot on POWER8
  drivers/crypto/nx: Fixes for multiple races and issues
Showing 18 changed files Side-by-side Diff
- arch/powerpc/include/asm/pci-bridge.h
- arch/powerpc/include/asm/processor.h
- arch/powerpc/kernel/asm-offsets.c
- arch/powerpc/kernel/cpu_setup_power.S
- arch/powerpc/kernel/entry_64.S
- arch/powerpc/kernel/pci-common.c
- arch/powerpc/kernel/pci_64.c
- arch/powerpc/kernel/pci_dn.c
- arch/powerpc/platforms/powernv/Kconfig
- arch/powerpc/platforms/powernv/pci-ioda.c
- arch/powerpc/platforms/powernv/pci.c
- arch/powerpc/platforms/pseries/msi.c
- drivers/crypto/nx/nx-aes-cbc.c
- drivers/crypto/nx/nx-aes-ecb.c
- drivers/crypto/nx/nx-aes-gcm.c
- drivers/crypto/nx/nx-sha256.c
- drivers/crypto/nx/nx-sha512.c
- drivers/crypto/nx/nx.c
arch/powerpc/include/asm/pci-bridge.h
... | ... | @@ -174,6 +174,8 @@ |
174 | 174 | /* Get the pointer to a device_node's pci_dn */ |
175 | 175 | #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) |
176 | 176 | |
177 | +extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev); | |
178 | + | |
177 | 179 | extern void * update_dn_pci_info(struct device_node *dn, void *data); |
178 | 180 | |
179 | 181 | static inline int pci_device_from_OF_node(struct device_node *np, |
arch/powerpc/include/asm/processor.h
... | ... | @@ -284,6 +284,12 @@ |
284 | 284 | unsigned long ebbrr; |
285 | 285 | unsigned long ebbhr; |
286 | 286 | unsigned long bescr; |
287 | + unsigned long siar; | |
288 | + unsigned long sdar; | |
289 | + unsigned long sier; | |
290 | + unsigned long mmcr0; | |
291 | + unsigned long mmcr2; | |
292 | + unsigned long mmcra; | |
287 | 293 | #endif |
288 | 294 | }; |
289 | 295 |
arch/powerpc/kernel/asm-offsets.c
... | ... | @@ -127,6 +127,12 @@ |
127 | 127 | DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr)); |
128 | 128 | DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr)); |
129 | 129 | DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr)); |
130 | + DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar)); | |
131 | + DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar)); | |
132 | + DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier)); | |
133 | + DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0)); | |
134 | + DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2)); | |
135 | + DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra)); | |
130 | 136 | #endif |
131 | 137 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
132 | 138 | DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch)); |
arch/powerpc/kernel/cpu_setup_power.S
... | ... | @@ -135,8 +135,12 @@ |
135 | 135 | blr |
136 | 136 | |
137 | 137 | __init_TLB: |
138 | - /* Clear the TLB */ | |
139 | - li r6,128 | |
138 | + /* | |
139 | + * Clear the TLB using the "IS 3" form of tlbiel instruction | |
140 | + * (invalidate by congruence class). P7 has 128 CCs, P8 has 512 | |
141 | + * so we just always do 512 | |
142 | + */ | |
143 | + li r6,512 | |
140 | 144 | mtctr r6 |
141 | 145 | li r7,0xc00 /* IS field = 0b11 */ |
142 | 146 | ptesync |
arch/powerpc/kernel/entry_64.S
... | ... | @@ -465,6 +465,20 @@ |
465 | 465 | std r0, THREAD_EBBHR(r3) |
466 | 466 | mfspr r0, SPRN_EBBRR |
467 | 467 | std r0, THREAD_EBBRR(r3) |
468 | + | |
469 | + /* PMU registers made user read/(write) by EBB */ | |
470 | + mfspr r0, SPRN_SIAR | |
471 | + std r0, THREAD_SIAR(r3) | |
472 | + mfspr r0, SPRN_SDAR | |
473 | + std r0, THREAD_SDAR(r3) | |
474 | + mfspr r0, SPRN_SIER | |
475 | + std r0, THREAD_SIER(r3) | |
476 | + mfspr r0, SPRN_MMCR0 | |
477 | + std r0, THREAD_MMCR0(r3) | |
478 | + mfspr r0, SPRN_MMCR2 | |
479 | + std r0, THREAD_MMCR2(r3) | |
480 | + mfspr r0, SPRN_MMCRA | |
481 | + std r0, THREAD_MMCRA(r3) | |
468 | 482 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
469 | 483 | #endif |
470 | 484 | |
... | ... | @@ -559,6 +573,20 @@ |
559 | 573 | mtspr SPRN_EBBHR, r0 |
560 | 574 | ld r0, THREAD_EBBRR(r4) |
561 | 575 | mtspr SPRN_EBBRR, r0 |
576 | + | |
577 | + /* PMU registers made user read/(write) by EBB */ | |
578 | + ld r0, THREAD_SIAR(r4) | |
579 | + mtspr SPRN_SIAR, r0 | |
580 | + ld r0, THREAD_SDAR(r4) | |
581 | + mtspr SPRN_SDAR, r0 | |
582 | + ld r0, THREAD_SIER(r4) | |
583 | + mtspr SPRN_SIER, r0 | |
584 | + ld r0, THREAD_MMCR0(r4) | |
585 | + mtspr SPRN_MMCR0, r0 | |
586 | + ld r0, THREAD_MMCR2(r4) | |
587 | + mtspr SPRN_MMCR2, r0 | |
588 | + ld r0, THREAD_MMCRA(r4) | |
589 | + mtspr SPRN_MMCRA, r0 | |
562 | 590 | |
563 | 591 | ld r0,THREAD_TAR(r4) |
564 | 592 | mtspr SPRN_TAR,r0 |
arch/powerpc/kernel/pci-common.c
... | ... | @@ -1520,9 +1520,10 @@ |
1520 | 1520 | for (i = 0; i < 3; ++i) { |
1521 | 1521 | res = &hose->mem_resources[i]; |
1522 | 1522 | if (!res->flags) { |
1523 | - printk(KERN_ERR "PCI: Memory resource 0 not set for " | |
1524 | - "host bridge %s (domain %d)\n", | |
1525 | - hose->dn->full_name, hose->global_number); | |
1523 | + if (i == 0) | |
1524 | + printk(KERN_ERR "PCI: Memory resource 0 not set for " | |
1525 | + "host bridge %s (domain %d)\n", | |
1526 | + hose->dn->full_name, hose->global_number); | |
1526 | 1527 | continue; |
1527 | 1528 | } |
1528 | 1529 | offset = hose->mem_offset[i]; |
arch/powerpc/kernel/pci_64.c
... | ... | @@ -266,4 +266,14 @@ |
266 | 266 | } |
267 | 267 | EXPORT_SYMBOL(pcibus_to_node); |
268 | 268 | #endif |
269 | + | |
270 | +static void quirk_radeon_32bit_msi(struct pci_dev *dev) | |
271 | +{ | |
272 | + struct pci_dn *pdn = pci_get_pdn(dev); | |
273 | + | |
274 | + if (pdn) | |
275 | + pdn->force_32bit_msi = 1; | |
276 | +} | |
277 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi); | |
278 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi); |
arch/powerpc/kernel/pci_dn.c
... | ... | @@ -32,6 +32,14 @@ |
32 | 32 | #include <asm/ppc-pci.h> |
33 | 33 | #include <asm/firmware.h> |
34 | 34 | |
35 | +struct pci_dn *pci_get_pdn(struct pci_dev *pdev) | |
36 | +{ | |
37 | + struct device_node *dn = pci_device_to_OF_node(pdev); | |
38 | + if (!dn) | |
39 | + return NULL; | |
40 | + return PCI_DN(dn); | |
41 | +} | |
42 | + | |
35 | 43 | /* |
36 | 44 | * Traverse_func that inits the PCI fields of the device node. |
37 | 45 | * NOTE: this *must* be done before read/write config to the device. |
arch/powerpc/platforms/powernv/Kconfig
arch/powerpc/platforms/powernv/pci-ioda.c
... | ... | @@ -68,16 +68,6 @@ |
68 | 68 | define_pe_printk_level(pe_warn, KERN_WARNING); |
69 | 69 | define_pe_printk_level(pe_info, KERN_INFO); |
70 | 70 | |
71 | -static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev) | |
72 | -{ | |
73 | - struct device_node *np; | |
74 | - | |
75 | - np = pci_device_to_OF_node(dev); | |
76 | - if (!np) | |
77 | - return NULL; | |
78 | - return PCI_DN(np); | |
79 | -} | |
80 | - | |
81 | 71 | static int pnv_ioda_alloc_pe(struct pnv_phb *phb) |
82 | 72 | { |
83 | 73 | unsigned long pe; |
... | ... | @@ -110,7 +100,7 @@ |
110 | 100 | { |
111 | 101 | struct pci_controller *hose = pci_bus_to_host(dev->bus); |
112 | 102 | struct pnv_phb *phb = hose->private_data; |
113 | - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); | |
103 | + struct pci_dn *pdn = pci_get_pdn(dev); | |
114 | 104 | |
115 | 105 | if (!pdn) |
116 | 106 | return NULL; |
... | ... | @@ -173,7 +163,7 @@ |
173 | 163 | |
174 | 164 | /* Add to all parents PELT-V */ |
175 | 165 | while (parent) { |
176 | - struct pci_dn *pdn = pnv_ioda_get_pdn(parent); | |
166 | + struct pci_dn *pdn = pci_get_pdn(parent); | |
177 | 167 | if (pdn && pdn->pe_number != IODA_INVALID_PE) { |
178 | 168 | rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number, |
179 | 169 | pe->pe_number, OPAL_ADD_PE_TO_DOMAIN); |
... | ... | @@ -252,7 +242,7 @@ |
252 | 242 | { |
253 | 243 | struct pci_controller *hose = pci_bus_to_host(dev->bus); |
254 | 244 | struct pnv_phb *phb = hose->private_data; |
255 | - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); | |
245 | + struct pci_dn *pdn = pci_get_pdn(dev); | |
256 | 246 | struct pnv_ioda_pe *pe; |
257 | 247 | int pe_num; |
258 | 248 | |
... | ... | @@ -323,7 +313,7 @@ |
323 | 313 | struct pci_dev *dev; |
324 | 314 | |
325 | 315 | list_for_each_entry(dev, &bus->devices, bus_list) { |
326 | - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); | |
316 | + struct pci_dn *pdn = pci_get_pdn(dev); | |
327 | 317 | |
328 | 318 | if (pdn == NULL) { |
329 | 319 | pr_warn("%s: No device node associated with device !\n", |
... | ... | @@ -436,7 +426,7 @@ |
436 | 426 | |
437 | 427 | static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev) |
438 | 428 | { |
439 | - struct pci_dn *pdn = pnv_ioda_get_pdn(pdev); | |
429 | + struct pci_dn *pdn = pci_get_pdn(pdev); | |
440 | 430 | struct pnv_ioda_pe *pe; |
441 | 431 | |
442 | 432 | /* |
... | ... | @@ -768,6 +758,7 @@ |
768 | 758 | unsigned int is_64, struct msi_msg *msg) |
769 | 759 | { |
770 | 760 | struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); |
761 | + struct pci_dn *pdn = pci_get_pdn(dev); | |
771 | 762 | struct irq_data *idata; |
772 | 763 | struct irq_chip *ichip; |
773 | 764 | unsigned int xive_num = hwirq - phb->msi_base; |
... | ... | @@ -783,6 +774,10 @@ |
783 | 774 | if (pe->mve_number < 0) |
784 | 775 | return -ENXIO; |
785 | 776 | |
777 | + /* Force 32-bit MSI on some broken devices */ | |
778 | + if (pdn && pdn->force_32bit_msi) | |
779 | + is_64 = 0; | |
780 | + | |
786 | 781 | /* Assign XIVE to PE */ |
787 | 782 | rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); |
788 | 783 | if (rc) { |
... | ... | @@ -1035,7 +1030,7 @@ |
1035 | 1030 | if (!phb->initialized) |
1036 | 1031 | return 0; |
1037 | 1032 | |
1038 | - pdn = pnv_ioda_get_pdn(dev); | |
1033 | + pdn = pci_get_pdn(dev); | |
1039 | 1034 | if (!pdn || pdn->pe_number == IODA_INVALID_PE) |
1040 | 1035 | return -EINVAL; |
1041 | 1036 |
arch/powerpc/platforms/powernv/pci.c
... | ... | @@ -47,7 +47,11 @@ |
47 | 47 | { |
48 | 48 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); |
49 | 49 | struct pnv_phb *phb = hose->private_data; |
50 | + struct pci_dn *pdn = pci_get_pdn(pdev); | |
50 | 51 | |
52 | + if (pdn && pdn->force_32bit_msi && !phb->msi32_support) | |
53 | + return -ENODEV; | |
54 | + | |
51 | 55 | return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV; |
52 | 56 | } |
53 | 57 | |
... | ... | @@ -367,7 +371,7 @@ |
367 | 371 | while (npages--) |
368 | 372 | *(tcep++) = 0; |
369 | 373 | |
370 | - if (tbl->it_type & TCE_PCI_SWINV_CREATE) | |
374 | + if (tbl->it_type & TCE_PCI_SWINV_FREE) | |
371 | 375 | pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1); |
372 | 376 | } |
373 | 377 |
arch/powerpc/platforms/pseries/msi.c
... | ... | @@ -26,26 +26,6 @@ |
26 | 26 | #define RTAS_CHANGE_MSIX_FN 4 |
27 | 27 | #define RTAS_CHANGE_32MSI_FN 5 |
28 | 28 | |
29 | -static struct pci_dn *get_pdn(struct pci_dev *pdev) | |
30 | -{ | |
31 | - struct device_node *dn; | |
32 | - struct pci_dn *pdn; | |
33 | - | |
34 | - dn = pci_device_to_OF_node(pdev); | |
35 | - if (!dn) { | |
36 | - dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n"); | |
37 | - return NULL; | |
38 | - } | |
39 | - | |
40 | - pdn = PCI_DN(dn); | |
41 | - if (!pdn) { | |
42 | - dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n"); | |
43 | - return NULL; | |
44 | - } | |
45 | - | |
46 | - return pdn; | |
47 | -} | |
48 | - | |
49 | 29 | /* RTAS Helpers */ |
50 | 30 | |
51 | 31 | static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs) |
... | ... | @@ -91,7 +71,7 @@ |
91 | 71 | { |
92 | 72 | struct pci_dn *pdn; |
93 | 73 | |
94 | - pdn = get_pdn(pdev); | |
74 | + pdn = pci_get_pdn(pdev); | |
95 | 75 | if (!pdn) |
96 | 76 | return; |
97 | 77 | |
... | ... | @@ -152,7 +132,7 @@ |
152 | 132 | struct pci_dn *pdn; |
153 | 133 | const u32 *req_msi; |
154 | 134 | |
155 | - pdn = get_pdn(pdev); | |
135 | + pdn = pci_get_pdn(pdev); | |
156 | 136 | if (!pdn) |
157 | 137 | return -ENODEV; |
158 | 138 | |
... | ... | @@ -394,6 +374,23 @@ |
394 | 374 | return 0; |
395 | 375 | } |
396 | 376 | |
377 | +static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev) | |
378 | +{ | |
379 | + u32 addr_hi, addr_lo; | |
380 | + | |
381 | + /* | |
382 | + * We should only get in here for IODA1 configs. This is based on the | |
383 | + * fact that we using RTAS for MSIs, we don't have the 32 bit MSI RTAS | |
384 | + * support, and we are in a PCIe Gen2 slot. | |
385 | + */ | |
386 | + dev_info(&pdev->dev, | |
387 | + "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n"); | |
388 | + pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi); | |
389 | + addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4); | |
390 | + pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo); | |
391 | + pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0); | |
392 | +} | |
393 | + | |
397 | 394 | static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) |
398 | 395 | { |
399 | 396 | struct pci_dn *pdn; |
400 | 397 | |
... | ... | @@ -401,8 +398,9 @@ |
401 | 398 | struct msi_desc *entry; |
402 | 399 | struct msi_msg msg; |
403 | 400 | int nvec = nvec_in; |
401 | + int use_32bit_msi_hack = 0; | |
404 | 402 | |
405 | - pdn = get_pdn(pdev); | |
403 | + pdn = pci_get_pdn(pdev); | |
406 | 404 | if (!pdn) |
407 | 405 | return -ENODEV; |
408 | 406 | |
409 | 407 | |
410 | 408 | |
411 | 409 | |
... | ... | @@ -428,15 +426,31 @@ |
428 | 426 | */ |
429 | 427 | again: |
430 | 428 | if (type == PCI_CAP_ID_MSI) { |
431 | - if (pdn->force_32bit_msi) | |
429 | + if (pdn->force_32bit_msi) { | |
432 | 430 | rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); |
433 | - else | |
431 | + if (rc < 0) { | |
432 | + /* | |
433 | + * We only want to run the 32 bit MSI hack below if | |
434 | + * the max bus speed is Gen2 speed | |
435 | + */ | |
436 | + if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) | |
437 | + return rc; | |
438 | + | |
439 | + use_32bit_msi_hack = 1; | |
440 | + } | |
441 | + } else | |
442 | + rc = -1; | |
443 | + | |
444 | + if (rc < 0) | |
434 | 445 | rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec); |
435 | 446 | |
436 | - if (rc < 0 && !pdn->force_32bit_msi) { | |
447 | + if (rc < 0) { | |
437 | 448 | pr_debug("rtas_msi: trying the old firmware call.\n"); |
438 | 449 | rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec); |
439 | 450 | } |
451 | + | |
452 | + if (use_32bit_msi_hack && rc > 0) | |
453 | + rtas_hack_32bit_msi_gen2(pdev); | |
440 | 454 | } else |
441 | 455 | rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); |
442 | 456 | |
... | ... | @@ -517,14 +531,4 @@ |
517 | 531 | return 0; |
518 | 532 | } |
519 | 533 | arch_initcall(rtas_msi_init); |
520 | - | |
521 | -static void quirk_radeon(struct pci_dev *dev) | |
522 | -{ | |
523 | - struct pci_dn *pdn = get_pdn(dev); | |
524 | - | |
525 | - if (pdn) | |
526 | - pdn->force_32bit_msi = 1; | |
527 | -} | |
528 | -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon); | |
529 | -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon); |
drivers/crypto/nx/nx-aes-cbc.c
... | ... | @@ -126,6 +126,7 @@ |
126 | 126 | .cra_blocksize = AES_BLOCK_SIZE, |
127 | 127 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
128 | 128 | .cra_type = &crypto_blkcipher_type, |
129 | + .cra_alignmask = 0xf, | |
129 | 130 | .cra_module = THIS_MODULE, |
130 | 131 | .cra_init = nx_crypto_ctx_aes_cbc_init, |
131 | 132 | .cra_exit = nx_crypto_ctx_exit, |
drivers/crypto/nx/nx-aes-ecb.c
... | ... | @@ -123,6 +123,7 @@ |
123 | 123 | .cra_priority = 300, |
124 | 124 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, |
125 | 125 | .cra_blocksize = AES_BLOCK_SIZE, |
126 | + .cra_alignmask = 0xf, | |
126 | 127 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
127 | 128 | .cra_type = &crypto_blkcipher_type, |
128 | 129 | .cra_module = THIS_MODULE, |
drivers/crypto/nx/nx-aes-gcm.c
drivers/crypto/nx/nx-sha256.c
... | ... | @@ -69,7 +69,7 @@ |
69 | 69 | * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0 |
70 | 70 | * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover |
71 | 71 | */ |
72 | - if (len + sctx->count <= SHA256_BLOCK_SIZE) { | |
72 | + if (len + sctx->count < SHA256_BLOCK_SIZE) { | |
73 | 73 | memcpy(sctx->buf + sctx->count, data, len); |
74 | 74 | sctx->count += len; |
75 | 75 | goto out; |
... | ... | @@ -110,7 +110,8 @@ |
110 | 110 | atomic_inc(&(nx_ctx->stats->sha256_ops)); |
111 | 111 | |
112 | 112 | /* copy the leftover back into the state struct */ |
113 | - memcpy(sctx->buf, data + len - leftover, leftover); | |
113 | + if (leftover) | |
114 | + memcpy(sctx->buf, data + len - leftover, leftover); | |
114 | 115 | sctx->count = leftover; |
115 | 116 | |
116 | 117 | csbcpb->cpb.sha256.message_bit_length += (u64) |
... | ... | @@ -130,6 +131,7 @@ |
130 | 131 | struct nx_sg *in_sg, *out_sg; |
131 | 132 | int rc; |
132 | 133 | |
134 | + | |
133 | 135 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { |
134 | 136 | /* we've hit the nx chip previously, now we're finalizing, |
135 | 137 | * so copy over the partial digest */ |
... | ... | @@ -162,7 +164,7 @@ |
162 | 164 | |
163 | 165 | atomic_inc(&(nx_ctx->stats->sha256_ops)); |
164 | 166 | |
165 | - atomic64_add(csbcpb->cpb.sha256.message_bit_length, | |
167 | + atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8, | |
166 | 168 | &(nx_ctx->stats->sha256_bytes)); |
167 | 169 | memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); |
168 | 170 | out: |
drivers/crypto/nx/nx-sha512.c
... | ... | @@ -69,7 +69,7 @@ |
69 | 69 | * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0 |
70 | 70 | * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover |
71 | 71 | */ |
72 | - if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) { | |
72 | + if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) { | |
73 | 73 | memcpy(sctx->buf + sctx->count[0], data, len); |
74 | 74 | sctx->count[0] += len; |
75 | 75 | goto out; |
... | ... | @@ -110,7 +110,8 @@ |
110 | 110 | atomic_inc(&(nx_ctx->stats->sha512_ops)); |
111 | 111 | |
112 | 112 | /* copy the leftover back into the state struct */ |
113 | - memcpy(sctx->buf, data + len - leftover, leftover); | |
113 | + if (leftover) | |
114 | + memcpy(sctx->buf, data + len - leftover, leftover); | |
114 | 115 | sctx->count[0] = leftover; |
115 | 116 | |
116 | 117 | spbc_bits = csbcpb->cpb.sha512.spbc * 8; |
... | ... | @@ -168,7 +169,7 @@ |
168 | 169 | goto out; |
169 | 170 | |
170 | 171 | atomic_inc(&(nx_ctx->stats->sha512_ops)); |
171 | - atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo, | |
172 | + atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8, | |
172 | 173 | &(nx_ctx->stats->sha512_bytes)); |
173 | 174 | |
174 | 175 | memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); |
drivers/crypto/nx/nx.c
... | ... | @@ -211,44 +211,20 @@ |
211 | 211 | { |
212 | 212 | struct nx_sg *nx_insg = nx_ctx->in_sg; |
213 | 213 | struct nx_sg *nx_outsg = nx_ctx->out_sg; |
214 | - struct blkcipher_walk walk; | |
215 | - int rc; | |
216 | 214 | |
217 | - blkcipher_walk_init(&walk, dst, src, nbytes); | |
218 | - rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); | |
219 | - if (rc) | |
220 | - goto out; | |
221 | - | |
222 | 215 | if (iv) |
223 | - memcpy(iv, walk.iv, AES_BLOCK_SIZE); | |
216 | + memcpy(iv, desc->info, AES_BLOCK_SIZE); | |
224 | 217 | |
225 | - while (walk.nbytes) { | |
226 | - nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr, | |
227 | - walk.nbytes, nx_ctx->ap->sglen); | |
228 | - nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr, | |
229 | - walk.nbytes, nx_ctx->ap->sglen); | |
218 | + nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes); | |
219 | + nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes); | |
230 | 220 | |
231 | - rc = blkcipher_walk_done(desc, &walk, 0); | |
232 | - if (rc) | |
233 | - break; | |
234 | - } | |
235 | - | |
236 | - if (walk.nbytes) { | |
237 | - nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr, | |
238 | - walk.nbytes, nx_ctx->ap->sglen); | |
239 | - nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr, | |
240 | - walk.nbytes, nx_ctx->ap->sglen); | |
241 | - | |
242 | - rc = 0; | |
243 | - } | |
244 | - | |
245 | 221 | /* these lengths should be negative, which will indicate to phyp that |
246 | 222 | * the input and output parameters are scatterlists, not linear |
247 | 223 | * buffers */ |
248 | 224 | nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg); |
249 | 225 | nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg); |
250 | -out: | |
251 | - return rc; | |
226 | + | |
227 | + return 0; | |
252 | 228 | } |
253 | 229 | |
254 | 230 | /** |
... | ... | @@ -454,6 +430,8 @@ |
454 | 430 | if (rc) |
455 | 431 | goto out; |
456 | 432 | |
433 | + nx_driver.of.status = NX_OKAY; | |
434 | + | |
457 | 435 | rc = crypto_register_alg(&nx_ecb_aes_alg); |
458 | 436 | if (rc) |
459 | 437 | goto out; |
... | ... | @@ -497,8 +475,6 @@ |
497 | 475 | rc = crypto_register_shash(&nx_shash_aes_xcbc_alg); |
498 | 476 | if (rc) |
499 | 477 | goto out_unreg_s512; |
500 | - | |
501 | - nx_driver.of.status = NX_OKAY; | |
502 | 478 | |
503 | 479 | goto out; |
504 | 480 |