Commit 1318952514d5651c453d89989595a9df3b37267b
Committed by Linus Torvalds
1 parent: 2d7bbb91c8
Exists in master and in 7 other branches
[PATCH] EDAC: probe1 cleanup 1-of-2
- Add lower-level functions that handle various parts of the initialization
  done by the xxx_probe1() functions. Some of the xxx_probe1() functions are
  much too long and complicated (see "Chapter 5: Functions" in
  Documentation/CodingStyle).

- Cleanup of probe1() functions in EDAC

Signed-off-by: Doug Thompson <norsk5@xmission.com>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 6 changed files with 518 additions and 444 deletions
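The shape of the patch is the same in all six drivers: the per-csrow setup loop and other device-specific initialization are moved out of the long xxx_probe1() function into small static helpers, and the shared "rc = -ENODEV; goto fail" error path is replaced with direct returns where no cleanup is needed. The following is a minimal standalone sketch of that pattern, not kernel code; every name in it (init_csrows, probe, struct mem_ctl) is illustrative and does not belong to the real EDAC API.

/*
 * Standalone sketch of the probe1() cleanup pattern: a short probe()
 * that allocates the controller state, delegates row setup to a helper,
 * and returns errors directly instead of through a shared rc/goto path.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define NR_CSROWS 8

struct csrow {
	unsigned long first_page;
	unsigned long nr_pages;
};

struct mem_ctl {
	int nr_csrows;
	struct csrow csrows[NR_CSROWS];
};

/* Helper factored out of probe(): initialize each chip-select row from
 * cumulative row-boundary values, skipping unpopulated rows. */
static void init_csrows(struct mem_ctl *mci, const unsigned long *row_limits)
{
	unsigned long last = 0;
	int i;

	for (i = 0; i < mci->nr_csrows; i++) {
		if (row_limits[i] == last)
			continue;	/* row not populated */
		mci->csrows[i].first_page = last;
		mci->csrows[i].nr_pages = row_limits[i] - last;
		last = row_limits[i];
	}
}

/* probe() stays short: allocate, delegate, return errors directly. */
static int probe(const unsigned long *row_limits, struct mem_ctl **out)
{
	struct mem_ctl *mci = calloc(1, sizeof(*mci));

	if (mci == NULL)
		return -ENOMEM;

	mci->nr_csrows = NR_CSROWS;
	init_csrows(mci, row_limits);
	*out = mci;
	return 0;
}

int main(void)
{
	static const unsigned long limits[NR_CSROWS] = {
		64, 64, 128, 128, 256, 256, 256, 256
	};
	struct mem_ctl *mci;

	if (probe(limits, &mci))
		return 1;

	printf("row 2: first_page=%lu nr_pages=%lu\n",
	       mci->csrows[2].first_page, mci->csrows[2].nr_pages);
	free(mci);
	return 0;
}

In the actual diff below, the same split shows up as amd76x_init_csrows(), e752x_init_csrows()/e752x_init_mem_map_table()/e752x_get_devs(), e7xxx_init_csrows(), i82860_init_csrows(), i82875p_setup_overfl_dev()/i82875p_init_csrows(), and r82600_init_csrows().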
drivers/edac/amd76x_edac.c
... | ... | @@ -182,6 +182,38 @@ |
182 | 182 | amd76x_process_error_info(mci, &info, 1); |
183 | 183 | } |
184 | 184 | |
185 | +static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |
186 | + enum edac_type edac_mode) | |
187 | +{ | |
188 | + struct csrow_info *csrow; | |
189 | + u32 mba, mba_base, mba_mask, dms; | |
190 | + int index; | |
191 | + | |
192 | + for (index = 0; index < mci->nr_csrows; index++) { | |
193 | + csrow = &mci->csrows[index]; | |
194 | + | |
195 | + /* find the DRAM Chip Select Base address and mask */ | |
196 | + pci_read_config_dword(pdev, | |
197 | + AMD76X_MEM_BASE_ADDR + (index * 4), | |
198 | + &mba); | |
199 | + | |
200 | + if (!(mba & BIT(0))) | |
201 | + continue; | |
202 | + | |
203 | + mba_base = mba & 0xff800000UL; | |
204 | + mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; | |
205 | + pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms); | |
206 | + csrow->first_page = mba_base >> PAGE_SHIFT; | |
207 | + csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; | |
208 | + csrow->last_page = csrow->first_page + csrow->nr_pages - 1; | |
209 | + csrow->page_mask = mba_mask >> PAGE_SHIFT; | |
210 | + csrow->grain = csrow->nr_pages << PAGE_SHIFT; | |
211 | + csrow->mtype = MEM_RDDR; | |
212 | + csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN; | |
213 | + csrow->edac_mode = edac_mode; | |
214 | + } | |
215 | +} | |
216 | + | |
185 | 217 | /** |
186 | 218 | * amd76x_probe1 - Perform set up for detected device |
187 | 219 | * @pdev; PCI device detected |
188 | 220 | |
... | ... | @@ -193,15 +225,13 @@ |
193 | 225 | */ |
194 | 226 | static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) |
195 | 227 | { |
196 | - int rc = -ENODEV; | |
197 | - int index; | |
198 | - struct mem_ctl_info *mci = NULL; | |
199 | - enum edac_type ems_modes[] = { | |
228 | + static const enum edac_type ems_modes[] = { | |
200 | 229 | EDAC_NONE, |
201 | 230 | EDAC_EC, |
202 | 231 | EDAC_SECDED, |
203 | 232 | EDAC_SECDED |
204 | 233 | }; |
234 | + struct mem_ctl_info *mci = NULL; | |
205 | 235 | u32 ems; |
206 | 236 | u32 ems_mode; |
207 | 237 | struct amd76x_error_info discard; |
... | ... | @@ -212,8 +242,7 @@ |
212 | 242 | mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); |
213 | 243 | |
214 | 244 | if (mci == NULL) { |
215 | - rc = -ENOMEM; | |
216 | - goto fail; | |
245 | + return -ENOMEM; | |
217 | 246 | } |
218 | 247 | |
219 | 248 | debugf0("%s(): mci = %p\n", __func__, mci); |
... | ... | @@ -228,33 +257,7 @@ |
228 | 257 | mci->edac_check = amd76x_check; |
229 | 258 | mci->ctl_page_to_phys = NULL; |
230 | 259 | |
231 | - for (index = 0; index < mci->nr_csrows; index++) { | |
232 | - struct csrow_info *csrow = &mci->csrows[index]; | |
233 | - u32 mba; | |
234 | - u32 mba_base; | |
235 | - u32 mba_mask; | |
236 | - u32 dms; | |
237 | - | |
238 | - /* find the DRAM Chip Select Base address and mask */ | |
239 | - pci_read_config_dword(pdev, | |
240 | - AMD76X_MEM_BASE_ADDR + (index * 4), &mba); | |
241 | - | |
242 | - if (!(mba & BIT(0))) | |
243 | - continue; | |
244 | - | |
245 | - mba_base = mba & 0xff800000UL; | |
246 | - mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; | |
247 | - pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms); | |
248 | - csrow->first_page = mba_base >> PAGE_SHIFT; | |
249 | - csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; | |
250 | - csrow->last_page = csrow->first_page + csrow->nr_pages - 1; | |
251 | - csrow->page_mask = mba_mask >> PAGE_SHIFT; | |
252 | - csrow->grain = csrow->nr_pages << PAGE_SHIFT; | |
253 | - csrow->mtype = MEM_RDDR; | |
254 | - csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN; | |
255 | - csrow->edac_mode = ems_modes[ems_mode]; | |
256 | - } | |
257 | - | |
260 | + amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]); | |
258 | 261 | amd76x_get_error_info(mci, &discard); /* clear counters */ |
259 | 262 | |
260 | 263 | /* Here we assume that we will never see multiple instances of this |
... | ... | @@ -270,9 +273,8 @@ |
270 | 273 | return 0; |
271 | 274 | |
272 | 275 | fail: |
273 | - if (mci != NULL) | |
274 | - edac_mc_free(mci); | |
275 | - return rc; | |
276 | + edac_mc_free(mci); | |
277 | + return -ENODEV; | |
276 | 278 | } |
277 | 279 | |
278 | 280 | /* returns count (>= 0), or negative on error */ |
drivers/edac/e752x_edac.c
... | ... | @@ -765,22 +765,174 @@ |
765 | 765 | e752x_process_error_info(mci, &info, 1); |
766 | 766 | } |
767 | 767 | |
768 | -static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |
768 | +/* Return 1 if dual channel mode is active. Else return 0. */ | |
769 | +static inline int dual_channel_active(u16 ddrcsr) | |
769 | 770 | { |
770 | - int rc = -ENODEV; | |
771 | + return (((ddrcsr >> 12) & 3) == 3); | |
772 | +} | |
773 | + | |
774 | +static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |
775 | + u16 ddrcsr) | |
776 | +{ | |
777 | + struct csrow_info *csrow; | |
778 | + unsigned long last_cumul_size; | |
779 | + int index, mem_dev, drc_chan; | |
780 | + int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ | |
781 | + int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ | |
782 | + u8 value; | |
783 | + u32 dra, drc, cumul_size; | |
784 | + | |
785 | + pci_read_config_dword(pdev, E752X_DRA, &dra); | |
786 | + pci_read_config_dword(pdev, E752X_DRC, &drc); | |
787 | + drc_chan = dual_channel_active(ddrcsr); | |
788 | + drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ | |
789 | + drc_ddim = (drc >> 20) & 0x3; | |
790 | + | |
791 | + /* The dram row boundary (DRB) reg values are boundary address for | |
792 | + * each DRAM row with a granularity of 64 or 128MB (single/dual | |
793 | + * channel operation). DRB regs are cumulative; therefore DRB7 will | |
794 | + * contain the total memory contained in all eight rows. | |
795 | + */ | |
796 | + for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | |
797 | + /* mem_dev 0=x8, 1=x4 */ | |
798 | + mem_dev = (dra >> (index * 4 + 2)) & 0x3; | |
799 | + csrow = &mci->csrows[index]; | |
800 | + | |
801 | + mem_dev = (mem_dev == 2); | |
802 | + pci_read_config_byte(pdev, E752X_DRB + index, &value); | |
803 | + /* convert a 128 or 64 MiB DRB to a page size. */ | |
804 | + cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | |
805 | + debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | |
806 | + cumul_size); | |
807 | + if (cumul_size == last_cumul_size) | |
808 | + continue; /* not populated */ | |
809 | + | |
810 | + csrow->first_page = last_cumul_size; | |
811 | + csrow->last_page = cumul_size - 1; | |
812 | + csrow->nr_pages = cumul_size - last_cumul_size; | |
813 | + last_cumul_size = cumul_size; | |
814 | + csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | |
815 | + csrow->mtype = MEM_RDDR; /* only one type supported */ | |
816 | + csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | |
817 | + | |
818 | + /* | |
819 | + * if single channel or x8 devices then SECDED | |
820 | + * if dual channel and x4 then S4ECD4ED | |
821 | + */ | |
822 | + if (drc_ddim) { | |
823 | + if (drc_chan && mem_dev) { | |
824 | + csrow->edac_mode = EDAC_S4ECD4ED; | |
825 | + mci->edac_cap |= EDAC_FLAG_S4ECD4ED; | |
826 | + } else { | |
827 | + csrow->edac_mode = EDAC_SECDED; | |
828 | + mci->edac_cap |= EDAC_FLAG_SECDED; | |
829 | + } | |
830 | + } else | |
831 | + csrow->edac_mode = EDAC_NONE; | |
832 | + } | |
833 | +} | |
834 | + | |
835 | +static void e752x_init_mem_map_table(struct pci_dev *pdev, | |
836 | + struct e752x_pvt *pvt) | |
837 | +{ | |
771 | 838 | int index; |
839 | + u8 value, last, row, stat8; | |
840 | + | |
841 | + last = 0; | |
842 | + row = 0; | |
843 | + | |
844 | + for (index = 0; index < 8; index += 2) { | |
845 | + pci_read_config_byte(pdev, E752X_DRB + index, &value); | |
846 | + /* test if there is a dimm in this slot */ | |
847 | + if (value == last) { | |
848 | + /* no dimm in the slot, so flag it as empty */ | |
849 | + pvt->map[index] = 0xff; | |
850 | + pvt->map[index + 1] = 0xff; | |
851 | + } else { /* there is a dimm in the slot */ | |
852 | + pvt->map[index] = row; | |
853 | + row++; | |
854 | + last = value; | |
855 | + /* test the next value to see if the dimm is double | |
856 | + * sided | |
857 | + */ | |
858 | + pci_read_config_byte(pdev, E752X_DRB + index + 1, | |
859 | + &value); | |
860 | + pvt->map[index + 1] = (value == last) ? | |
861 | + 0xff : /* the dimm is single sided, | |
862 | + so flag as empty */ | |
863 | + row; /* this is a double sided dimm | |
864 | + to save the next row # */ | |
865 | + row++; | |
866 | + last = value; | |
867 | + } | |
868 | + } | |
869 | + | |
870 | + /* set the map type. 1 = normal, 0 = reversed */ | |
871 | + pci_read_config_byte(pdev, E752X_DRM, &stat8); | |
872 | + pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); | |
873 | +} | |
874 | + | |
875 | +/* Return 0 on success or 1 on failure. */ | |
876 | +static int e752x_get_devs(struct pci_dev *pdev, int dev_idx, | |
877 | + struct e752x_pvt *pvt) | |
878 | +{ | |
879 | + struct pci_dev *dev; | |
880 | + | |
881 | + pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | |
882 | + pvt->dev_info->err_dev, | |
883 | + pvt->bridge_ck); | |
884 | + | |
885 | + if (pvt->bridge_ck == NULL) | |
886 | + pvt->bridge_ck = pci_scan_single_device(pdev->bus, | |
887 | + PCI_DEVFN(0, 1)); | |
888 | + | |
889 | + if (pvt->bridge_ck == NULL) { | |
890 | + e752x_printk(KERN_ERR, "error reporting device not found:" | |
891 | + "vendor %x device 0x%x (broken BIOS?)\n", | |
892 | + PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); | |
893 | + return 1; | |
894 | + } | |
895 | + | |
896 | + dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, | |
897 | + NULL); | |
898 | + | |
899 | + if (dev == NULL) | |
900 | + goto fail; | |
901 | + | |
902 | + pvt->dev_d0f0 = dev; | |
903 | + pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); | |
904 | + | |
905 | + return 0; | |
906 | + | |
907 | +fail: | |
908 | + pci_dev_put(pvt->bridge_ck); | |
909 | + return 1; | |
910 | +} | |
911 | + | |
912 | +static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) | |
913 | +{ | |
914 | + struct pci_dev *dev; | |
915 | + | |
916 | + dev = pvt->dev_d0f1; | |
917 | + /* Turn off error disable & SMI in case the BIOS turned it on */ | |
918 | + pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); | |
919 | + pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); | |
920 | + pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00); | |
921 | + pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); | |
922 | + pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); | |
923 | + pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); | |
924 | + pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); | |
925 | + pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); | |
926 | +} | |
927 | + | |
928 | +static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |
929 | +{ | |
772 | 930 | u16 pci_data; |
773 | 931 | u8 stat8; |
774 | - struct mem_ctl_info *mci = NULL; | |
775 | - struct e752x_pvt *pvt = NULL; | |
932 | + struct mem_ctl_info *mci; | |
933 | + struct e752x_pvt *pvt; | |
776 | 934 | u16 ddrcsr; |
777 | - u32 drc; | |
778 | 935 | int drc_chan; /* Number of channels 0=1chan,1=2chan */ |
779 | - int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ | |
780 | - int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | |
781 | - u32 dra; | |
782 | - unsigned long last_cumul_size; | |
783 | - struct pci_dev *dev = NULL; | |
784 | 936 | struct e752x_error_info discard; |
785 | 937 | |
786 | 938 | debugf0("%s(): mci\n", __func__); |
787 | 939 | |
788 | 940 | |
789 | 941 | |
... | ... | @@ -794,25 +946,20 @@ |
794 | 946 | if (!force_function_unhide && !(stat8 & (1 << 5))) { |
795 | 947 | printk(KERN_INFO "Contact your BIOS vendor to see if the " |
796 | 948 | "E752x error registers can be safely un-hidden\n"); |
797 | - goto fail; | |
949 | + return -ENOMEM; | |
798 | 950 | } |
799 | 951 | stat8 |= (1 << 5); |
800 | 952 | pci_write_config_byte(pdev, E752X_DEVPRES1, stat8); |
801 | 953 | |
802 | - /* need to find out the number of channels */ | |
803 | - pci_read_config_dword(pdev, E752X_DRC, &drc); | |
804 | 954 | pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr); |
805 | 955 | /* FIXME: should check >>12 or 0xf, true for all? */ |
806 | 956 | /* Dual channel = 1, Single channel = 0 */ |
807 | - drc_chan = (((ddrcsr >> 12) & 3) == 3); | |
808 | - drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ | |
809 | - drc_ddim = (drc >> 20) & 0x3; | |
957 | + drc_chan = dual_channel_active(ddrcsr); | |
810 | 958 | |
811 | 959 | mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1); |
812 | 960 | |
813 | 961 | if (mci == NULL) { |
814 | - rc = -ENOMEM; | |
815 | - goto fail; | |
962 | + return -ENOMEM; | |
816 | 963 | } |
817 | 964 | |
818 | 965 | debugf3("%s(): init mci\n", __func__); |
819 | 966 | |
820 | 967 | |
821 | 968 | |
822 | 969 | |
... | ... | @@ -827,114 +974,21 @@ |
827 | 974 | debugf3("%s(): init pvt\n", __func__); |
828 | 975 | pvt = (struct e752x_pvt *) mci->pvt_info; |
829 | 976 | pvt->dev_info = &e752x_devs[dev_idx]; |
830 | - pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | |
831 | - pvt->dev_info->err_dev, | |
832 | - pvt->bridge_ck); | |
977 | + pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); | |
833 | 978 | |
834 | - if (pvt->bridge_ck == NULL) | |
835 | - pvt->bridge_ck = pci_scan_single_device(pdev->bus, | |
836 | - PCI_DEVFN(0, 1)); | |
837 | - | |
838 | - if (pvt->bridge_ck == NULL) { | |
839 | - e752x_printk(KERN_ERR, "error reporting device not found:" | |
840 | - "vendor %x device 0x%x (broken BIOS?)\n", | |
841 | - PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); | |
842 | - goto fail; | |
979 | + if (e752x_get_devs(pdev, dev_idx, pvt)) { | |
980 | + edac_mc_free(mci); | |
981 | + return -ENODEV; | |
843 | 982 | } |
844 | 983 | |
845 | - pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); | |
846 | 984 | debugf3("%s(): more mci init\n", __func__); |
847 | 985 | mci->ctl_name = pvt->dev_info->ctl_name; |
848 | 986 | mci->edac_check = e752x_check; |
849 | 987 | mci->ctl_page_to_phys = ctl_page_to_phys; |
850 | 988 | |
851 | - /* find out the device types */ | |
852 | - pci_read_config_dword(pdev, E752X_DRA, &dra); | |
989 | + e752x_init_csrows(mci, pdev, ddrcsr); | |
990 | + e752x_init_mem_map_table(pdev, pvt); | |
853 | 991 | |
854 | - /* | |
855 | - * The dram row boundary (DRB) reg values are boundary address for | |
856 | - * each DRAM row with a granularity of 64 or 128MB (single/dual | |
857 | - * channel operation). DRB regs are cumulative; therefore DRB7 will | |
858 | - * contain the total memory contained in all eight rows. | |
859 | - */ | |
860 | - for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | |
861 | - u8 value; | |
862 | - u32 cumul_size; | |
863 | - | |
864 | - /* mem_dev 0=x8, 1=x4 */ | |
865 | - int mem_dev = (dra >> (index * 4 + 2)) & 0x3; | |
866 | - struct csrow_info *csrow = &mci->csrows[index]; | |
867 | - | |
868 | - mem_dev = (mem_dev == 2); | |
869 | - pci_read_config_byte(pdev, E752X_DRB + index, &value); | |
870 | - /* convert a 128 or 64 MiB DRB to a page size. */ | |
871 | - cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | |
872 | - debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | |
873 | - cumul_size); | |
874 | - | |
875 | - if (cumul_size == last_cumul_size) | |
876 | - continue; /* not populated */ | |
877 | - | |
878 | - csrow->first_page = last_cumul_size; | |
879 | - csrow->last_page = cumul_size - 1; | |
880 | - csrow->nr_pages = cumul_size - last_cumul_size; | |
881 | - last_cumul_size = cumul_size; | |
882 | - csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | |
883 | - csrow->mtype = MEM_RDDR; /* only one type supported */ | |
884 | - csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | |
885 | - | |
886 | - /* | |
887 | - * if single channel or x8 devices then SECDED | |
888 | - * if dual channel and x4 then S4ECD4ED | |
889 | - */ | |
890 | - if (drc_ddim) { | |
891 | - if (drc_chan && mem_dev) { | |
892 | - csrow->edac_mode = EDAC_S4ECD4ED; | |
893 | - mci->edac_cap |= EDAC_FLAG_S4ECD4ED; | |
894 | - } else { | |
895 | - csrow->edac_mode = EDAC_SECDED; | |
896 | - mci->edac_cap |= EDAC_FLAG_SECDED; | |
897 | - } | |
898 | - } else | |
899 | - csrow->edac_mode = EDAC_NONE; | |
900 | - } | |
901 | - | |
902 | - /* Fill in the memory map table */ | |
903 | - { | |
904 | - u8 value; | |
905 | - u8 last = 0; | |
906 | - u8 row = 0; | |
907 | - | |
908 | - for (index = 0; index < 8; index += 2) { | |
909 | - pci_read_config_byte(pdev, E752X_DRB + index, &value); | |
910 | - | |
911 | - /* test if there is a dimm in this slot */ | |
912 | - if (value == last) { | |
913 | - /* no dimm in the slot, so flag it as empty */ | |
914 | - pvt->map[index] = 0xff; | |
915 | - pvt->map[index + 1] = 0xff; | |
916 | - } else { /* there is a dimm in the slot */ | |
917 | - pvt->map[index] = row; | |
918 | - row++; | |
919 | - last = value; | |
920 | - /* test the next value to see if the dimm is | |
921 | - double sided */ | |
922 | - pci_read_config_byte(pdev, | |
923 | - E752X_DRB + index + 1, | |
924 | - &value); | |
925 | - pvt->map[index + 1] = (value == last) ? | |
926 | - 0xff : /* the dimm is single sided, | |
927 | - * so flag as empty | |
928 | - */ | |
929 | - row; /* this is a double sided dimm | |
930 | - * to save the next row # | |
931 | - */ | |
932 | - row++; | |
933 | - last = value; | |
934 | - } | |
935 | - } | |
936 | - } | |
937 | - | |
938 | 992 | /* set the map type. 1 = normal, 0 = reversed */ |
939 | 993 | pci_read_config_byte(pdev, E752X_DRM, &stat8); |
940 | 994 | pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); |
... | ... | @@ -961,21 +1015,7 @@ |
961 | 1015 | goto fail; |
962 | 1016 | } |
963 | 1017 | |
964 | - dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, | |
965 | - NULL); | |
966 | - pvt->dev_d0f0 = dev; | |
967 | - /* find the error reporting device and clear errors */ | |
968 | - dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); | |
969 | - /* Turn off error disable & SMI in case the BIOS turned it on */ | |
970 | - pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); | |
971 | - pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); | |
972 | - pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00); | |
973 | - pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); | |
974 | - pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); | |
975 | - pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); | |
976 | - pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); | |
977 | - pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); | |
978 | - | |
1018 | + e752x_init_error_reporting_regs(pvt); | |
979 | 1019 | e752x_get_error_info(mci, &discard); /* clear other MCH errors */ |
980 | 1020 | |
981 | 1021 | /* get this far and it's successful */ |
982 | 1022 | |
... | ... | @@ -983,20 +1023,12 @@ |
983 | 1023 | return 0; |
984 | 1024 | |
985 | 1025 | fail: |
986 | - if (mci) { | |
987 | - if (pvt->dev_d0f0) | |
988 | - pci_dev_put(pvt->dev_d0f0); | |
1026 | + pci_dev_put(pvt->dev_d0f0); | |
1027 | + pci_dev_put(pvt->dev_d0f1); | |
1028 | + pci_dev_put(pvt->bridge_ck); | |
1029 | + edac_mc_free(mci); | |
989 | 1030 | |
990 | - if (pvt->dev_d0f1) | |
991 | - pci_dev_put(pvt->dev_d0f1); | |
992 | - | |
993 | - if (pvt->bridge_ck) | |
994 | - pci_dev_put(pvt->bridge_ck); | |
995 | - | |
996 | - edac_mc_free(mci); | |
997 | - } | |
998 | - | |
999 | - return rc; | |
1031 | + return -ENODEV; | |
1000 | 1032 | } |
1001 | 1033 | |
1002 | 1034 | /* returns count (>= 0), or negative on error */ |
drivers/edac/e7xxx_edac.c
... | ... | @@ -335,99 +335,61 @@ |
335 | 335 | e7xxx_process_error_info(mci, &info, 1); |
336 | 336 | } |
337 | 337 | |
338 | -static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |
338 | +/* Return 1 if dual channel mode is active. Else return 0. */ | |
339 | +static inline int dual_channel_active(u32 drc, int dev_idx) | |
339 | 340 | { |
340 | - int rc = -ENODEV; | |
341 | - int index; | |
342 | - u16 pci_data; | |
343 | - struct mem_ctl_info *mci = NULL; | |
344 | - struct e7xxx_pvt *pvt = NULL; | |
345 | - u32 drc; | |
346 | - int drc_chan = 1; /* Number of channels 0=1chan,1=2chan */ | |
347 | - int drc_drbg = 1; /* DRB granularity 0=32mb,1=64mb */ | |
348 | - int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | |
349 | - u32 dra; | |
350 | - unsigned long last_cumul_size; | |
351 | - struct e7xxx_error_info discard; | |
341 | + return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1; | |
342 | +} | |
352 | 343 | |
353 | - debugf0("%s(): mci\n", __func__); | |
354 | 344 | |
355 | - /* need to find out the number of channels */ | |
356 | - pci_read_config_dword(pdev, E7XXX_DRC, &drc); | |
357 | - | |
345 | +/* Return DRB granularity (0=32mb, 1=64mb). */ | |
346 | +static inline int drb_granularity(u32 drc, int dev_idx) | |
347 | +{ | |
358 | 348 | /* only e7501 can be single channel */ |
359 | - if (dev_idx == E7501) { | |
360 | - drc_chan = ((drc >> 22) & 0x1); | |
361 | - drc_drbg = (drc >> 18) & 0x3; | |
362 | - } | |
349 | + return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1; | |
350 | +} | |
363 | 351 | |
364 | - drc_ddim = (drc >> 20) & 0x3; | |
365 | - mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); | |
366 | 352 | |
367 | - if (mci == NULL) { | |
368 | - rc = -ENOMEM; | |
369 | - goto fail; | |
370 | - } | |
353 | +static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |
354 | + int dev_idx, u32 drc) | |
355 | +{ | |
356 | + unsigned long last_cumul_size; | |
357 | + int index; | |
358 | + u8 value; | |
359 | + u32 dra, cumul_size; | |
360 | + int drc_chan, drc_drbg, drc_ddim, mem_dev; | |
361 | + struct csrow_info *csrow; | |
371 | 362 | |
372 | - debugf3("%s(): init mci\n", __func__); | |
373 | - mci->mtype_cap = MEM_FLAG_RDDR; | |
374 | - mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | |
375 | - EDAC_FLAG_S4ECD4ED; | |
376 | - /* FIXME - what if different memory types are in different csrows? */ | |
377 | - mci->mod_name = EDAC_MOD_STR; | |
378 | - mci->mod_ver = E7XXX_REVISION; | |
379 | - mci->dev = &pdev->dev; | |
380 | - | |
381 | - debugf3("%s(): init pvt\n", __func__); | |
382 | - pvt = (struct e7xxx_pvt *) mci->pvt_info; | |
383 | - pvt->dev_info = &e7xxx_devs[dev_idx]; | |
384 | - pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | |
385 | - pvt->dev_info->err_dev, | |
386 | - pvt->bridge_ck); | |
387 | - | |
388 | - if (!pvt->bridge_ck) { | |
389 | - e7xxx_printk(KERN_ERR, "error reporting device not found:" | |
390 | - "vendor %x device 0x%x (broken BIOS?)\n", | |
391 | - PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); | |
392 | - goto fail; | |
393 | - } | |
394 | - | |
395 | - debugf3("%s(): more mci init\n", __func__); | |
396 | - mci->ctl_name = pvt->dev_info->ctl_name; | |
397 | - mci->edac_check = e7xxx_check; | |
398 | - mci->ctl_page_to_phys = ctl_page_to_phys; | |
399 | - | |
400 | - /* find out the device types */ | |
401 | 363 | pci_read_config_dword(pdev, E7XXX_DRA, &dra); |
364 | + drc_chan = dual_channel_active(drc, dev_idx); | |
365 | + drc_drbg = drb_granularity(drc, dev_idx); | |
366 | + drc_ddim = (drc >> 20) & 0x3; | |
367 | + last_cumul_size = 0; | |
402 | 368 | |
403 | - /* | |
404 | - * The dram row boundary (DRB) reg values are boundary address | |
369 | + /* The dram row boundary (DRB) reg values are boundary address | |
405 | 370 | * for each DRAM row with a granularity of 32 or 64MB (single/dual |
406 | 371 | * channel operation). DRB regs are cumulative; therefore DRB7 will |
407 | 372 | * contain the total memory contained in all eight rows. |
408 | 373 | */ |
409 | - for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | |
410 | - u8 value; | |
411 | - u32 cumul_size; | |
374 | + for (index = 0; index < mci->nr_csrows; index++) { | |
412 | 375 | /* mem_dev 0=x8, 1=x4 */ |
413 | - int mem_dev = (dra >> (index * 4 + 3)) & 0x1; | |
414 | - struct csrow_info *csrow = &mci->csrows[index]; | |
376 | + mem_dev = (dra >> (index * 4 + 3)) & 0x1; | |
377 | + csrow = &mci->csrows[index]; | |
415 | 378 | |
416 | 379 | pci_read_config_byte(pdev, E7XXX_DRB + index, &value); |
417 | 380 | /* convert a 64 or 32 MiB DRB to a page size. */ |
418 | 381 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); |
419 | 382 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
420 | 383 | cumul_size); |
421 | - | |
422 | 384 | if (cumul_size == last_cumul_size) |
423 | - continue; /* not populated */ | |
385 | + continue; /* not populated */ | |
424 | 386 | |
425 | 387 | csrow->first_page = last_cumul_size; |
426 | 388 | csrow->last_page = cumul_size - 1; |
427 | 389 | csrow->nr_pages = cumul_size - last_cumul_size; |
428 | 390 | last_cumul_size = cumul_size; |
429 | - csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | |
430 | - csrow->mtype = MEM_RDDR; /* only one type supported */ | |
391 | + csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | |
392 | + csrow->mtype = MEM_RDDR; /* only one type supported */ | |
431 | 393 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; |
432 | 394 | |
433 | 395 | /* |
434 | 396 | |
435 | 397 | |
... | ... | @@ -445,9 +407,54 @@ |
445 | 407 | } else |
446 | 408 | csrow->edac_mode = EDAC_NONE; |
447 | 409 | } |
410 | +} | |
448 | 411 | |
449 | - mci->edac_cap |= EDAC_FLAG_NONE; | |
412 | +static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |
413 | +{ | |
414 | + u16 pci_data; | |
415 | + struct mem_ctl_info *mci = NULL; | |
416 | + struct e7xxx_pvt *pvt = NULL; | |
417 | + u32 drc; | |
418 | + int drc_chan; | |
419 | + struct e7xxx_error_info discard; | |
450 | 420 | |
421 | + debugf0("%s(): mci\n", __func__); | |
422 | + pci_read_config_dword(pdev, E7XXX_DRC, &drc); | |
423 | + | |
424 | + drc_chan = dual_channel_active(drc, dev_idx); | |
425 | + mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); | |
426 | + | |
427 | + if (mci == NULL) | |
428 | + return -ENOMEM; | |
429 | + | |
430 | + debugf3("%s(): init mci\n", __func__); | |
431 | + mci->mtype_cap = MEM_FLAG_RDDR; | |
432 | + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | |
433 | + EDAC_FLAG_S4ECD4ED; | |
434 | + /* FIXME - what if different memory types are in different csrows? */ | |
435 | + mci->mod_name = EDAC_MOD_STR; | |
436 | + mci->mod_ver = E7XXX_REVISION; | |
437 | + mci->dev = &pdev->dev; | |
438 | + debugf3("%s(): init pvt\n", __func__); | |
439 | + pvt = (struct e7xxx_pvt *) mci->pvt_info; | |
440 | + pvt->dev_info = &e7xxx_devs[dev_idx]; | |
441 | + pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | |
442 | + pvt->dev_info->err_dev, | |
443 | + pvt->bridge_ck); | |
444 | + | |
445 | + if (!pvt->bridge_ck) { | |
446 | + e7xxx_printk(KERN_ERR, "error reporting device not found:" | |
447 | + "vendor %x device 0x%x (broken BIOS?)\n", | |
448 | + PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); | |
449 | + goto fail0; | |
450 | + } | |
451 | + | |
452 | + debugf3("%s(): more mci init\n", __func__); | |
453 | + mci->ctl_name = pvt->dev_info->ctl_name; | |
454 | + mci->edac_check = e7xxx_check; | |
455 | + mci->ctl_page_to_phys = ctl_page_to_phys; | |
456 | + e7xxx_init_csrows(mci, pdev, dev_idx, drc); | |
457 | + mci->edac_cap |= EDAC_FLAG_NONE; | |
451 | 458 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); |
452 | 459 | /* load the top of low memory, remap base, and remap limit vars */ |
453 | 460 | pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); |
454 | 461 | |
455 | 462 | |
... | ... | @@ -468,21 +475,20 @@ |
468 | 475 | */ |
469 | 476 | if (edac_mc_add_mc(mci,0)) { |
470 | 477 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
471 | - goto fail; | |
478 | + goto fail1; | |
472 | 479 | } |
473 | 480 | |
474 | 481 | /* get this far and it's successful */ |
475 | 482 | debugf3("%s(): success\n", __func__); |
476 | 483 | return 0; |
477 | 484 | |
478 | -fail: | |
479 | - if (mci != NULL) { | |
480 | - if(pvt != NULL && pvt->bridge_ck) | |
481 | - pci_dev_put(pvt->bridge_ck); | |
482 | - edac_mc_free(mci); | |
483 | - } | |
485 | +fail1: | |
486 | + pci_dev_put(pvt->bridge_ck); | |
484 | 487 | |
485 | - return rc; | |
488 | +fail0: | |
489 | + edac_mc_free(mci); | |
490 | + | |
491 | + return -ENODEV; | |
486 | 492 | } |
487 | 493 | |
488 | 494 | /* returns count (>= 0), or negative on error */ |
drivers/edac/i82860_edac.c
... | ... | @@ -133,16 +133,51 @@ |
133 | 133 | i82860_process_error_info(mci, &info, 1); |
134 | 134 | } |
135 | 135 | |
136 | -static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | |
136 | +static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev) | |
137 | 137 | { |
138 | - int rc = -ENODEV; | |
139 | - int index; | |
140 | - struct mem_ctl_info *mci = NULL; | |
141 | 138 | unsigned long last_cumul_size; |
142 | - struct i82860_error_info discard; | |
139 | + u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ | |
140 | + u16 value; | |
141 | + u32 cumul_size; | |
142 | + struct csrow_info *csrow; | |
143 | + int index; | |
143 | 144 | |
144 | - u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | |
145 | + pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); | |
146 | + mchcfg_ddim = mchcfg_ddim & 0x180; | |
147 | + last_cumul_size = 0; | |
145 | 148 | |
149 | + /* The group row boundary (GRA) reg values are boundary address | |
150 | + * for each DRAM row with a granularity of 16MB. GRA regs are | |
151 | + * cumulative; therefore GRA15 will contain the total memory contained | |
152 | + * in all eight rows. | |
153 | + */ | |
154 | + for (index = 0; index < mci->nr_csrows; index++) { | |
155 | + csrow = &mci->csrows[index]; | |
156 | + pci_read_config_word(pdev, I82860_GBA + index * 2, &value); | |
157 | + cumul_size = (value & I82860_GBA_MASK) << | |
158 | + (I82860_GBA_SHIFT - PAGE_SHIFT); | |
159 | + debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | |
160 | + cumul_size); | |
161 | + | |
162 | + if (cumul_size == last_cumul_size) | |
163 | + continue; /* not populated */ | |
164 | + | |
165 | + csrow->first_page = last_cumul_size; | |
166 | + csrow->last_page = cumul_size - 1; | |
167 | + csrow->nr_pages = cumul_size - last_cumul_size; | |
168 | + last_cumul_size = cumul_size; | |
169 | + csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ | |
170 | + csrow->mtype = MEM_RMBS; | |
171 | + csrow->dtype = DEV_UNKNOWN; | |
172 | + csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; | |
173 | + } | |
174 | +} | |
175 | + | |
176 | +static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | |
177 | +{ | |
178 | + struct mem_ctl_info *mci; | |
179 | + struct i82860_error_info discard; | |
180 | + | |
146 | 181 | /* RDRAM has channels but these don't map onto the abstractions that |
147 | 182 | edac uses. |
148 | 183 | The device groups from the GRA registers seem to map reasonably |
149 | 184 | |
150 | 185 | |
... | ... | @@ -159,53 +194,15 @@ |
159 | 194 | debugf3("%s(): init mci\n", __func__); |
160 | 195 | mci->dev = &pdev->dev; |
161 | 196 | mci->mtype_cap = MEM_FLAG_DDR; |
162 | - | |
163 | 197 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
164 | 198 | /* I"m not sure about this but I think that all RDRAM is SECDED */ |
165 | 199 | mci->edac_cap = EDAC_FLAG_SECDED; |
166 | - /* adjust FLAGS */ | |
167 | - | |
168 | 200 | mci->mod_name = EDAC_MOD_STR; |
169 | 201 | mci->mod_ver = I82860_REVISION; |
170 | 202 | mci->ctl_name = i82860_devs[dev_idx].ctl_name; |
171 | 203 | mci->edac_check = i82860_check; |
172 | 204 | mci->ctl_page_to_phys = NULL; |
173 | - | |
174 | - pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); | |
175 | - mchcfg_ddim = mchcfg_ddim & 0x180; | |
176 | - | |
177 | - /* | |
178 | - * The group row boundary (GRA) reg values are boundary address | |
179 | - * for each DRAM row with a granularity of 16MB. GRA regs are | |
180 | - * cumulative; therefore GRA15 will contain the total memory contained | |
181 | - * in all eight rows. | |
182 | - */ | |
183 | - for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | |
184 | - u16 value; | |
185 | - u32 cumul_size; | |
186 | - struct csrow_info *csrow = &mci->csrows[index]; | |
187 | - | |
188 | - pci_read_config_word(pdev, I82860_GBA + index * 2, | |
189 | - &value); | |
190 | - | |
191 | - cumul_size = (value & I82860_GBA_MASK) << | |
192 | - (I82860_GBA_SHIFT - PAGE_SHIFT); | |
193 | - debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | |
194 | - cumul_size); | |
195 | - | |
196 | - if (cumul_size == last_cumul_size) | |
197 | - continue; /* not populated */ | |
198 | - | |
199 | - csrow->first_page = last_cumul_size; | |
200 | - csrow->last_page = cumul_size - 1; | |
201 | - csrow->nr_pages = cumul_size - last_cumul_size; | |
202 | - last_cumul_size = cumul_size; | |
203 | - csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ | |
204 | - csrow->mtype = MEM_RMBS; | |
205 | - csrow->dtype = DEV_UNKNOWN; | |
206 | - csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; | |
207 | - } | |
208 | - | |
205 | + i82860_init_csrows(mci, pdev); | |
209 | 206 | i82860_get_error_info(mci, &discard); /* clear counters */ |
210 | 207 | |
211 | 208 | /* Here we assume that we will never see multiple instances of this |
212 | 209 | |
... | ... | @@ -213,14 +210,17 @@ |
213 | 210 | */ |
214 | 211 | if (edac_mc_add_mc(mci,0)) { |
215 | 212 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
216 | - edac_mc_free(mci); | |
217 | - } else { | |
218 | - /* get this far and it's successful */ | |
219 | - debugf3("%s(): success\n", __func__); | |
220 | - rc = 0; | |
213 | + goto fail; | |
221 | 214 | } |
222 | 215 | |
223 | - return rc; | |
216 | + /* get this far and it's successful */ | |
217 | + debugf3("%s(): success\n", __func__); | |
218 | + | |
219 | + return 0; | |
220 | + | |
221 | +fail: | |
222 | + edac_mc_free(mci); | |
223 | + return -ENODEV; | |
224 | 224 | } |
225 | 225 | |
226 | 226 | /* returns count (>= 0), or negative on error */ |
drivers/edac/i82875p_edac.c
... | ... | @@ -265,116 +265,109 @@ |
265 | 265 | extern int pci_proc_attach_device(struct pci_dev *); |
266 | 266 | #endif |
267 | 267 | |
268 | -static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |
268 | +/* Return 0 on success or 1 on failure. */ | |
269 | +static int i82875p_setup_overfl_dev(struct pci_dev *pdev, | |
270 | + struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window) | |
269 | 271 | { |
270 | - int rc = -ENODEV; | |
271 | - int index; | |
272 | - struct mem_ctl_info *mci = NULL; | |
273 | - struct i82875p_pvt *pvt = NULL; | |
274 | - unsigned long last_cumul_size; | |
275 | - struct pci_dev *ovrfl_pdev; | |
276 | - void __iomem *ovrfl_window = NULL; | |
277 | - u32 drc; | |
278 | - u32 drc_chan; /* Number of channels 0=1chan,1=2chan */ | |
279 | - u32 nr_chans; | |
280 | - u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | |
281 | - struct i82875p_error_info discard; | |
272 | + struct pci_dev *dev; | |
273 | + void __iomem *window; | |
282 | 274 | |
283 | - debugf0("%s()\n", __func__); | |
284 | - ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | |
275 | + *ovrfl_pdev = NULL; | |
276 | + *ovrfl_window = NULL; | |
277 | + dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | |
285 | 278 | |
286 | - if (!ovrfl_pdev) { | |
287 | - /* | |
288 | - * Intel tells BIOS developers to hide device 6 which | |
279 | + if (dev == NULL) { | |
280 | + /* Intel tells BIOS developers to hide device 6 which | |
289 | 281 | * configures the overflow device access containing |
290 | 282 | * the DRBs - this is where we expose device 6. |
291 | 283 | * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm |
292 | 284 | */ |
293 | 285 | pci_write_bits8(pdev, 0xf4, 0x2, 0x2); |
294 | - ovrfl_pdev = | |
295 | - pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); | |
286 | + dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); | |
296 | 287 | |
297 | - if (!ovrfl_pdev) | |
298 | - return -ENODEV; | |
288 | + if (dev == NULL) | |
289 | + return 1; | |
299 | 290 | } |
300 | 291 | |
292 | + *ovrfl_pdev = dev; | |
293 | + | |
301 | 294 | #ifdef CONFIG_PROC_FS |
302 | - if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) { | |
303 | - i82875p_printk(KERN_ERR, | |
304 | - "%s(): Failed to attach overflow device\n", __func__); | |
305 | - return -ENODEV; | |
295 | + if ((dev->procent == NULL) && pci_proc_attach_device(dev)) { | |
296 | + i82875p_printk(KERN_ERR, "%s(): Failed to attach overflow " | |
297 | + "device\n", __func__); | |
298 | + return 1; | |
306 | 299 | } |
307 | -#endif | |
308 | - /* CONFIG_PROC_FS */ | |
309 | - if (pci_enable_device(ovrfl_pdev)) { | |
310 | - i82875p_printk(KERN_ERR, | |
311 | - "%s(): Failed to enable overflow device\n", __func__); | |
312 | - return -ENODEV; | |
300 | +#endif /* CONFIG_PROC_FS */ | |
301 | + if (pci_enable_device(dev)) { | |
302 | + i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow " | |
303 | + "device\n", __func__); | |
304 | + return 1; | |
313 | 305 | } |
314 | 306 | |
315 | - if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) { | |
307 | + if (pci_request_regions(dev, pci_name(dev))) { | |
316 | 308 | #ifdef CORRECT_BIOS |
317 | 309 | goto fail0; |
318 | 310 | #endif |
319 | 311 | } |
320 | 312 | |
321 | 313 | /* cache is irrelevant for PCI bus reads/writes */ |
322 | - ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0), | |
323 | - pci_resource_len(ovrfl_pdev, 0)); | |
314 | + window = ioremap_nocache(pci_resource_start(dev, 0), | |
315 | + pci_resource_len(dev, 0)); | |
324 | 316 | |
325 | - if (!ovrfl_window) { | |
317 | + if (window == NULL) { | |
326 | 318 | i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", |
327 | - __func__); | |
319 | + __func__); | |
328 | 320 | goto fail1; |
329 | 321 | } |
330 | 322 | |
331 | - /* need to find out the number of channels */ | |
332 | - drc = readl(ovrfl_window + I82875P_DRC); | |
333 | - drc_chan = ((drc >> 21) & 0x1); | |
334 | - nr_chans = drc_chan + 1; | |
323 | + *ovrfl_window = window; | |
324 | + return 0; | |
335 | 325 | |
336 | - drc_ddim = (drc >> 18) & 0x1; | |
337 | - mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), | |
338 | - nr_chans); | |
326 | +fail1: | |
327 | + pci_release_regions(dev); | |
339 | 328 | |
340 | - if (!mci) { | |
341 | - rc = -ENOMEM; | |
342 | - goto fail2; | |
343 | - } | |
329 | +#ifdef CORRECT_BIOS | |
330 | +fail0: | |
331 | + pci_disable_device(dev); | |
332 | +#endif | |
333 | + /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ | |
334 | + return 1; | |
335 | +} | |
344 | 336 | |
345 | - debugf3("%s(): init mci\n", __func__); | |
346 | - mci->dev = &pdev->dev; | |
347 | - mci->mtype_cap = MEM_FLAG_DDR; | |
348 | - mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | |
349 | - mci->edac_cap = EDAC_FLAG_UNKNOWN; | |
350 | - /* adjust FLAGS */ | |
351 | 337 | |
352 | - mci->mod_name = EDAC_MOD_STR; | |
353 | - mci->mod_ver = I82875P_REVISION; | |
354 | - mci->ctl_name = i82875p_devs[dev_idx].ctl_name; | |
355 | - mci->edac_check = i82875p_check; | |
356 | - mci->ctl_page_to_phys = NULL; | |
357 | - debugf3("%s(): init pvt\n", __func__); | |
358 | - pvt = (struct i82875p_pvt *) mci->pvt_info; | |
359 | - pvt->ovrfl_pdev = ovrfl_pdev; | |
360 | - pvt->ovrfl_window = ovrfl_window; | |
338 | +/* Return 1 if dual channel mode is active. Else return 0. */ | |
339 | +static inline int dual_channel_active(u32 drc) | |
340 | +{ | |
341 | + return (drc >> 21) & 0x1; | |
342 | +} | |
361 | 343 | |
362 | - /* | |
363 | - * The dram row boundary (DRB) reg values are boundary address | |
344 | + | |
345 | +static void i82875p_init_csrows(struct mem_ctl_info *mci, | |
346 | + struct pci_dev *pdev, void __iomem *ovrfl_window, u32 drc) | |
347 | +{ | |
348 | + struct csrow_info *csrow; | |
349 | + unsigned long last_cumul_size; | |
350 | + u8 value; | |
351 | + u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | |
352 | + u32 cumul_size; | |
353 | + int index; | |
354 | + | |
355 | + drc_ddim = (drc >> 18) & 0x1; | |
356 | + last_cumul_size = 0; | |
357 | + | |
358 | + /* The dram row boundary (DRB) reg values are boundary address | |
364 | 359 | * for each DRAM row with a granularity of 32 or 64MB (single/dual |
365 | 360 | * channel operation). DRB regs are cumulative; therefore DRB7 will |
366 | 361 | * contain the total memory contained in all eight rows. |
367 | 362 | */ |
368 | - for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | |
369 | - u8 value; | |
370 | - u32 cumul_size; | |
371 | - struct csrow_info *csrow = &mci->csrows[index]; | |
372 | 363 | |
364 | + for (index = 0; index < mci->nr_csrows; index++) { | |
365 | + csrow = &mci->csrows[index]; | |
366 | + | |
373 | 367 | value = readb(ovrfl_window + I82875P_DRB + index); |
374 | 368 | cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); |
375 | 369 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
376 | 370 | cumul_size); |
377 | - | |
378 | 371 | if (cumul_size == last_cumul_size) |
379 | 372 | continue; /* not populated */ |
380 | 373 | |
381 | 374 | |
382 | 375 | |
... | ... | @@ -382,12 +375,54 @@ |
382 | 375 | csrow->last_page = cumul_size - 1; |
383 | 376 | csrow->nr_pages = cumul_size - last_cumul_size; |
384 | 377 | last_cumul_size = cumul_size; |
385 | - csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ | |
378 | + csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ | |
386 | 379 | csrow->mtype = MEM_DDR; |
387 | 380 | csrow->dtype = DEV_UNKNOWN; |
388 | 381 | csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; |
389 | 382 | } |
383 | +} | |
390 | 384 | |
385 | +static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |
386 | +{ | |
387 | + int rc = -ENODEV; | |
388 | + struct mem_ctl_info *mci; | |
389 | + struct i82875p_pvt *pvt; | |
390 | + struct pci_dev *ovrfl_pdev; | |
391 | + void __iomem *ovrfl_window; | |
392 | + u32 drc; | |
393 | + u32 nr_chans; | |
394 | + struct i82875p_error_info discard; | |
395 | + | |
396 | + debugf0("%s()\n", __func__); | |
397 | + ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | |
398 | + | |
399 | + if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) | |
400 | + return -ENODEV; | |
401 | + drc = readl(ovrfl_window + I82875P_DRC); | |
402 | + nr_chans = dual_channel_active(drc) + 1; | |
403 | + mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), | |
404 | + nr_chans); | |
405 | + | |
406 | + if (!mci) { | |
407 | + rc = -ENOMEM; | |
408 | + goto fail0; | |
409 | + } | |
410 | + | |
411 | + debugf3("%s(): init mci\n", __func__); | |
412 | + mci->dev = &pdev->dev; | |
413 | + mci->mtype_cap = MEM_FLAG_DDR; | |
414 | + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | |
415 | + mci->edac_cap = EDAC_FLAG_UNKNOWN; | |
416 | + mci->mod_name = EDAC_MOD_STR; | |
417 | + mci->mod_ver = I82875P_REVISION; | |
418 | + mci->ctl_name = i82875p_devs[dev_idx].ctl_name; | |
419 | + mci->edac_check = i82875p_check; | |
420 | + mci->ctl_page_to_phys = NULL; | |
421 | + debugf3("%s(): init pvt\n", __func__); | |
422 | + pvt = (struct i82875p_pvt *) mci->pvt_info; | |
423 | + pvt->ovrfl_pdev = ovrfl_pdev; | |
424 | + pvt->ovrfl_window = ovrfl_window; | |
425 | + i82875p_init_csrows(mci, pdev, ovrfl_window, drc); | |
391 | 426 | i82875p_get_error_info(mci, &discard); /* clear counters */ |
392 | 427 | |
393 | 428 | /* Here we assume that we will never see multiple instances of this |
394 | 429 | |
395 | 430 | |
396 | 431 | |
397 | 432 | |
... | ... | @@ -395,25 +430,20 @@ |
395 | 430 | */ |
396 | 431 | if (edac_mc_add_mc(mci,0)) { |
397 | 432 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
398 | - goto fail3; | |
433 | + goto fail1; | |
399 | 434 | } |
400 | 435 | |
401 | 436 | /* get this far and it's successful */ |
402 | 437 | debugf3("%s(): success\n", __func__); |
403 | 438 | return 0; |
404 | 439 | |
405 | -fail3: | |
440 | +fail1: | |
406 | 441 | edac_mc_free(mci); |
407 | 442 | |
408 | -fail2: | |
443 | +fail0: | |
409 | 444 | iounmap(ovrfl_window); |
410 | - | |
411 | -fail1: | |
412 | 445 | pci_release_regions(ovrfl_pdev); |
413 | 446 | |
414 | -#ifdef CORRECT_BIOS | |
415 | -fail0: | |
416 | -#endif | |
417 | 447 | pci_disable_device(ovrfl_pdev); |
418 | 448 | /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ |
419 | 449 | return rc; |
drivers/edac/r82600_edac.c
... | ... | @@ -205,25 +205,72 @@ |
205 | 205 | r82600_process_error_info(mci, &info, 1); |
206 | 206 | } |
207 | 207 | |
208 | -static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | |
208 | +static inline int ecc_enabled(u8 dramcr) | |
209 | 209 | { |
210 | - int rc = -ENODEV; | |
210 | + return dramcr & BIT(5); | |
211 | +} | |
212 | + | |
213 | +static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |
214 | + u8 dramcr) | |
215 | +{ | |
216 | + struct csrow_info *csrow; | |
211 | 217 | int index; |
212 | - struct mem_ctl_info *mci = NULL; | |
218 | + u8 drbar; /* SDRAM Row Boundry Address Register */ | |
219 | + u32 row_high_limit, row_high_limit_last; | |
220 | + u32 reg_sdram, ecc_on, row_base; | |
221 | + | |
222 | + ecc_on = ecc_enabled(dramcr); | |
223 | + reg_sdram = dramcr & BIT(4); | |
224 | + row_high_limit_last = 0; | |
225 | + | |
226 | + for (index = 0; index < mci->nr_csrows; index++) { | |
227 | + csrow = &mci->csrows[index]; | |
228 | + | |
229 | + /* find the DRAM Chip Select Base address and mask */ | |
230 | + pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); | |
231 | + | |
232 | + debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar); | |
233 | + | |
234 | + row_high_limit = ((u32) drbar << 24); | |
235 | +/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ | |
236 | + | |
237 | + debugf1("%s() Row=%d, Boundry Address=%#0x, Last = %#0x\n", | |
238 | + __func__, index, row_high_limit, row_high_limit_last); | |
239 | + | |
240 | + /* Empty row [p.57] */ | |
241 | + if (row_high_limit == row_high_limit_last) | |
242 | + continue; | |
243 | + | |
244 | + row_base = row_high_limit_last; | |
245 | + | |
246 | + csrow->first_page = row_base >> PAGE_SHIFT; | |
247 | + csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; | |
248 | + csrow->nr_pages = csrow->last_page - csrow->first_page + 1; | |
249 | + /* Error address is top 19 bits - so granularity is * | |
250 | + * 14 bits */ | |
251 | + csrow->grain = 1 << 14; | |
252 | + csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; | |
253 | + /* FIXME - check that this is unknowable with this chipset */ | |
254 | + csrow->dtype = DEV_UNKNOWN; | |
255 | + | |
256 | + /* Mode is global on 82600 */ | |
257 | + csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; | |
258 | + row_high_limit_last = row_high_limit; | |
259 | + } | |
260 | +} | |
261 | + | |
262 | +static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | |
263 | +{ | |
264 | + struct mem_ctl_info *mci; | |
213 | 265 | u8 dramcr; |
214 | - u32 ecc_on; | |
215 | - u32 reg_sdram; | |
216 | 266 | u32 eapr; |
217 | 267 | u32 scrub_disabled; |
218 | 268 | u32 sdram_refresh_rate; |
219 | - u32 row_high_limit_last = 0; | |
220 | 269 | struct r82600_error_info discard; |
221 | 270 | |
222 | 271 | debugf0("%s()\n", __func__); |
223 | 272 | pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); |
224 | 273 | pci_read_config_dword(pdev, R82600_EAP, &eapr); |
225 | - ecc_on = dramcr & BIT(5); | |
226 | - reg_sdram = dramcr & BIT(4); | |
227 | 274 | scrub_disabled = eapr & BIT(31); |
228 | 275 | sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); |
229 | 276 | debugf2("%s(): sdram refresh rate = %#0x\n", __func__, |
... | ... | @@ -231,10 +278,8 @@ |
231 | 278 | debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); |
232 | 279 | mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); |
233 | 280 | |
234 | - if (mci == NULL) { | |
235 | - rc = -ENOMEM; | |
236 | - goto fail; | |
237 | - } | |
281 | + if (mci == NULL) | |
282 | + return -ENOMEM; | |
238 | 283 | |
239 | 284 | debugf0("%s(): mci = %p\n", __func__, mci); |
240 | 285 | mci->dev = &pdev->dev; |
... | ... | @@ -250,7 +295,7 @@ |
250 | 295 | * is possible. */ |
251 | 296 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
252 | 297 | |
253 | - if (ecc_on) { | |
298 | + if (ecc_enabled(dramcr)) { | |
254 | 299 | if (scrub_disabled) |
255 | 300 | debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " |
256 | 301 | "%#0x\n", __func__, mci, eapr); |
... | ... | @@ -262,46 +307,7 @@ |
262 | 307 | mci->ctl_name = "R82600"; |
263 | 308 | mci->edac_check = r82600_check; |
264 | 309 | mci->ctl_page_to_phys = NULL; |
265 | - | |
266 | - for (index = 0; index < mci->nr_csrows; index++) { | |
267 | - struct csrow_info *csrow = &mci->csrows[index]; | |
268 | - u8 drbar; /* sDram Row Boundry Address Register */ | |
269 | - u32 row_high_limit; | |
270 | - u32 row_base; | |
271 | - | |
272 | - /* find the DRAM Chip Select Base address and mask */ | |
273 | - pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); | |
274 | - | |
275 | - debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx, | |
276 | - __func__, index, drbar); | |
277 | - | |
278 | - row_high_limit = ((u32) drbar << 24); | |
279 | -/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ | |
280 | - | |
281 | - debugf1("MC%d: %s() Row=%d, Boundry Address=%#0x, Last = " | |
282 | - "%#0x \n", mci->mc_idx, __func__, index, | |
283 | - row_high_limit, row_high_limit_last); | |
284 | - | |
285 | - /* Empty row [p.57] */ | |
286 | - if (row_high_limit == row_high_limit_last) | |
287 | - continue; | |
288 | - | |
289 | - row_base = row_high_limit_last; | |
290 | - csrow->first_page = row_base >> PAGE_SHIFT; | |
291 | - csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; | |
292 | - csrow->nr_pages = csrow->last_page - csrow->first_page + 1; | |
293 | - /* Error address is top 19 bits - so granularity is * | |
294 | - * 14 bits */ | |
295 | - csrow->grain = 1 << 14; | |
296 | - csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; | |
297 | - /* FIXME - check that this is unknowable with this chipset */ | |
298 | - csrow->dtype = DEV_UNKNOWN; | |
299 | - | |
300 | - /* Mode is global on 82600 */ | |
301 | - csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; | |
302 | - row_high_limit_last = row_high_limit; | |
303 | - } | |
304 | - | |
310 | + r82600_init_csrows(mci, pdev, dramcr); | |
305 | 311 | r82600_get_error_info(mci, &discard); /* clear counters */ |
306 | 312 | |
307 | 313 | /* Here we assume that we will never see multiple instances of this |
... | ... | @@ -324,10 +330,8 @@ |
324 | 330 | return 0; |
325 | 331 | |
326 | 332 | fail: |
327 | - if (mci) | |
328 | - edac_mc_free(mci); | |
329 | - | |
330 | - return rc; | |
333 | + edac_mc_free(mci); | |
334 | + return -ENODEV; | |
331 | 335 | } |
332 | 336 | |
333 | 337 | /* returns count (>= 0), or negative on error */ |