Commit 390944439f746824faec51b576f50cb5ef18745b
1 parent 360b7f3c60
Exists in master and in 7 other branches
EDAC: Fixup scrubrate manipulation
Make the ->{get|set}_sdram_scrub_rate callbacks return the actual scrub rate bandwidth they succeed in setting, and remove the superfluous pointer argument previously used to pass it back. A negative return value still means that an error occurred while setting the scrub rate. Document this for future reference.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Showing 7 changed files with 52 additions and 63 deletions
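To make the new convention concrete, here is a minimal, hypothetical driver-side sketch (not part of this commit): foo_rates[], foo_write_reg() and FOO_SCRUB_CTL are invented names, and the usual kernel/EDAC headers are assumed; only the return-value semantics follow what the patch documents.

/* Hypothetical scrub-rate table: hardware bit pattern -> bytes/sec, highest first. */
static const struct {
	u32 regval;	/* hardware bit pattern */
	u32 bandwidth;	/* scrub bandwidth in bytes/sec */
} foo_rates[] = {
	{ 0x2, 1600000000UL },
	{ 0x1,  800000000UL },
};

static int foo_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	int i;

	if (bw == 0) {
		foo_write_reg(mci, FOO_SCRUB_CTL, 0);	/* turn scrubbing off */
		return 0;				/* 0 == scrubbing disabled */
	}

	/* pick the highest supported rate that does not exceed the request */
	for (i = 0; i < ARRAY_SIZE(foo_rates); i++)
		if (foo_rates[i].bandwidth <= bw)
			break;

	if (i == ARRAY_SIZE(foo_rates))
		return -EINVAL;		/* request below the lowest supported rate */

	foo_write_reg(mci, FOO_SCRUB_CTL, foo_rates[i].regval);

	return foo_rates[i].bandwidth;	/* actual bandwidth programmed */
}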
drivers/edac/amd64_edac.c
@@ -77,7 +77,11 @@
  *FIXME: Produce a better mapping/linearisation.
  */
 
-struct scrubrate scrubrates[] = {
+
+struct scrubrate {
+	u32 scrubval;		/* bit pattern for scrub rate */
+	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
+} scrubrates[] = {
 	{ 0x01, 1600000000UL},
 	{ 0x02, 800000000UL},
 	{ 0x03, 400000000UL},
@@ -151,14 +155,12 @@
 	}
 
 	scrubval = scrubrates[i].scrubval;
-	if (scrubval)
-		amd64_info("Setting scrub rate bandwidth: %u\n",
-			   scrubrates[i].bandwidth);
-	else
-		amd64_info("Turning scrubbing off.\n");
 
 	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
 
+	if (scrubval)
+		return scrubrates[i].bandwidth;
+
 	return 0;
 }
 
@@ -169,11 +171,11 @@
 	return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
 }
 
-static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 scrubval = 0;
-	int status = -1, i;
+	int i, retval = -EINVAL;
 
 	amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
 
@@ -183,13 +185,11 @@
 
 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
 		if (scrubrates[i].scrubval == scrubval) {
-			*bw = scrubrates[i].bandwidth;
-			status = 0;
+			retval = scrubrates[i].bandwidth;
 			break;
 		}
 	}
-
-	return status;
+	return retval;
 }
 
 /* Map from a CSROW entry to the mask entry that operates on it */
drivers/edac/amd64_edac.h
@@ -482,12 +482,6 @@
 	} flags;
 };
 
-struct scrubrate {
-	u32 scrubval;		/* bit pattern for scrub rate */
-	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
-};
-
-extern struct scrubrate scrubrates[23];
 extern const char *tt_msgs[4];
 extern const char *ll_msgs[4];
 extern const char *rrrr_msgs[16];
drivers/edac/cpc925_edac.c
@@ -818,9 +818,10 @@
 }
 
 /* Convert current back-ground scrub rate into byte/sec bandwith */
-static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct cpc925_mc_pdata *pdata = mci->pvt_info;
+	int bw;
 	u32 mscr;
 	u8 si;
 
@@ -832,11 +833,11 @@
 	if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
 	    (si == 0)) {
 		cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
-		*bw = 0;
+		bw = 0;
 	} else
-		*bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
+		bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
 
-	return 0;
+	return bw;
 }
 
 /* Return 0 for single channel; 1 for dual channel */
drivers/edac/e752x_edac.c
@@ -983,11 +983,11 @@
 
 	pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
 
-	return 0;
+	return scrubrates[i].bandwidth;
 }
 
 /* Convert current scrub rate value into byte/sec bandwidth */
-static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
 {
 	const struct scrubrate *scrubrates;
 	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
@@ -1013,10 +1013,8 @@
 			"Invalid sdram scrub control value: 0x%x\n", scrubval);
 		return -1;
 	}
+	return scrubrates[i].bandwidth;
 
-	*bw = scrubrates[i].bandwidth;
-
-	return 0;
 }
 
 /* Return 1 if dual channel mode is active. Else return 0. */
drivers/edac/edac_core.h
@@ -387,7 +387,7 @@
 	   representation and converts it to the closest matching
 	   bandwith in bytes/sec.
 	 */
-	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);
 
 
 	/* pointer to edac checking routine */
drivers/edac/edac_mc_sysfs.c
@@ -436,56 +436,55 @@
 	return count;
 }
 
-/* memory scrubbing */
+/* Memory scrubbing interface:
+ *
+ * A MC driver can limit the scrubbing bandwidth based on the CPU type.
+ * Therefore, ->set_sdram_scrub_rate should be made to return the actual
+ * bandwidth that is accepted or 0 when scrubbing is to be disabled.
+ *
+ * Negative value still means that an error has occurred while setting
+ * the scrub rate.
+ */
 static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
 					  const char *data, size_t count)
 {
 	unsigned long bandwidth = 0;
-	int err;
+	int new_bw = 0;
 
-	if (!mci->set_sdram_scrub_rate) {
-		edac_printk(KERN_WARNING, EDAC_MC,
-			    "Memory scrub rate setting not implemented!\n");
+	if (!mci->set_sdram_scrub_rate)
 		return -EINVAL;
-	}
 
 	if (strict_strtoul(data, 10, &bandwidth) < 0)
 		return -EINVAL;
 
-	err = mci->set_sdram_scrub_rate(mci, (u32)bandwidth);
-	if (err) {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Failed setting scrub rate to %lu\n", bandwidth);
-		return -EINVAL;
-	}
-	else {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Scrub rate set to: %lu\n", bandwidth);
+	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
+	if (new_bw >= 0) {
+		edac_printk(KERN_DEBUG, EDAC_MC, "Scrub rate set to %d\n", new_bw);
 		return count;
 	}
+
+	edac_printk(KERN_DEBUG, EDAC_MC, "Error setting scrub rate to: %lu\n", bandwidth);
+	return -EINVAL;
 }
 
+/*
+ * ->get_sdram_scrub_rate() return value semantics same as above.
+ */
 static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
 {
-	u32 bandwidth = 0;
-	int err;
+	int bandwidth = 0;
 
-	if (!mci->get_sdram_scrub_rate) {
-		edac_printk(KERN_WARNING, EDAC_MC,
-			    "Memory scrub rate reading not implemented\n");
+	if (!mci->get_sdram_scrub_rate)
 		return -EINVAL;
-	}
 
-	err = mci->get_sdram_scrub_rate(mci, &bandwidth);
-	if (err) {
+	bandwidth = mci->get_sdram_scrub_rate(mci);
+	if (bandwidth < 0) {
 		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
-		return err;
+		return bandwidth;
 	}
-	else {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Read scrub rate: %d\n", bandwidth);
-		return sprintf(data, "%d\n", bandwidth);
-	}
+
+	edac_printk(KERN_DEBUG, EDAC_MC, "Read scrub rate: %d\n", bandwidth);
+	return sprintf(data, "%d\n", bandwidth);
 }
 
 /* default attribute files for the MCI object */
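For completeness, the read side of the same hypothetical driver, following the return-value semantics that the comment block added above documents. This is a sketch only; foo_read_reg(), FOO_SCRUB_CTL and foo_rates[] are the invented names from the earlier example, not code from this commit.

static int foo_get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
	u32 regval = foo_read_reg(mci, FOO_SCRUB_CTL);
	int i;

	if (regval == 0)
		return 0;				/* scrubbing is off */

	/* map the hardware bit pattern back to bytes/sec */
	for (i = 0; i < ARRAY_SIZE(foo_rates); i++)
		if (foo_rates[i].regval == regval)
			return foo_rates[i].bandwidth;

	return -EINVAL;					/* unrecognized hardware setting */
}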
drivers/edac/i5100_edac.c
@@ -611,20 +611,17 @@
 
 	bandwidth = 5900000 * i5100_mc_scrben(dw);
 
-	return 0;
+	return bandwidth;
 }
 
-static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
-				u32 *bandwidth)
+static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct i5100_priv *priv = mci->pvt_info;
 	u32 dw;
 
 	pci_read_config_dword(priv->mc, I5100_MC, &dw);
 
-	*bandwidth = 5900000 * i5100_mc_scrben(dw);
-
-	return 0;
+	return 5900000 * i5100_mc_scrben(dw);
 }
 
 static struct pci_dev *pci_get_device_func(unsigned vendor,