Commit cdec9cb5167ab1113ba9c58e395f664d9d3f9acb

Authored by Dmitry Kasatkin
1 parent 1ea6b8f489

crypto: GnuPG based MPI lib - source files (part 1)

Adds the multi-precision-integer maths library which was originally taken
from GnuPG and ported to the kernel by (among others) David Howells.
This version is taken from Fedora kernel 2.6.32-71.14.1.el6.
The difference is that checkpatch reported errors and warnings have been fixed.

This library is used to implement RSA digital signature verification
used in IMA/EVM integrity protection subsystem.

Due to patch size limitation, the patch is divided into 4 parts.

Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@intel.com>

Showing 14 changed files with 2681 additions and 0 deletions Side-by-side Diff

lib/mpi/generic_mpih-add1.c
  1 +/* mpihelp-add_1.c - MPI helper functions
  2 + * Copyright (C) 1994, 1996, 1997, 1998,
  3 + * 2000 Free Software Foundation, Inc.
  4 + *
  5 + * This file is part of GnuPG.
  6 + *
  7 + * GnuPG is free software; you can redistribute it and/or modify
  8 + * it under the terms of the GNU General Public License as published by
  9 + * the Free Software Foundation; either version 2 of the License, or
  10 + * (at your option) any later version.
  11 + *
  12 + * GnuPG is distributed in the hope that it will be useful,
  13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15 + * GNU General Public License for more details.
  16 + *
  17 + * You should have received a copy of the GNU General Public License
  18 + * along with this program; if not, write to the Free Software
  19 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  20 + *
  21 + * Note: This code is heavily based on the GNU MP Library.
  22 + * Actually it's the same code with only minor changes in the
  23 + * way the data is stored; this is to support the abstraction
  24 + * of an optional secure memory allocation which may be used
  25 + * to avoid revealing of sensitive data due to paging etc.
  26 + * The GNU MP Library itself is published under the LGPL;
  27 + * however I decided to publish this code under the plain GPL.
  28 + */
  29 +
  30 +#include "mpi-internal.h"
  31 +#include "longlong.h"
  32 +
  33 +mpi_limb_t
  34 +mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
  35 + mpi_ptr_t s2_ptr, mpi_size_t size)
  36 +{
  37 + mpi_limb_t x, y, cy;
  38 + mpi_size_t j;
  39 +
  40 + /* The loop counter and index J goes from -SIZE to -1. This way
  41 + the loop becomes faster. */
  42 + j = -size;
  43 +
  44 + /* Offset the base pointers to compensate for the negative indices. */
  45 + s1_ptr -= j;
  46 + s2_ptr -= j;
  47 + res_ptr -= j;
  48 +
  49 + cy = 0;
  50 + do {
  51 + y = s2_ptr[j];
  52 + x = s1_ptr[j];
  53 + y += cy; /* add previous carry to one addend */
  54 + cy = y < cy; /* get out carry from that addition */
  55 + y += x; /* add other addend */
  56 + cy += y < x; /* get out carry from that add, combine */
  57 + res_ptr[j] = y;
  58 + } while (++j);
  59 +
  60 + return cy;
  61 +}
lib/mpi/generic_mpih-lshift.c
  1 +/* mpihelp-lshift.c - MPI helper functions
  2 + * Copyright (C) 1994, 1996, 1998, 2001 Free Software Foundation, Inc.
  3 + *
  4 + * This file is part of GnuPG.
  5 + *
  6 + * GnuPG is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License as published by
  8 + * the Free Software Foundation; either version 2 of the License, or
  9 + * (at your option) any later version.
  10 + *
  11 + * GnuPG is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 + * GNU General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU General Public License
  17 + * along with this program; if not, write to the Free Software
  18 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  19 + *
  20 + * Note: This code is heavily based on the GNU MP Library.
  21 + * Actually it's the same code with only minor changes in the
  22 + * way the data is stored; this is to support the abstraction
  23 + * of an optional secure memory allocation which may be used
  24 + * to avoid revealing of sensitive data due to paging etc.
  25 + * The GNU MP Library itself is published under the LGPL;
  26 + * however I decided to publish this code under the plain GPL.
  27 + */
  28 +
  29 +#include "mpi-internal.h"
  30 +
  31 +/* Shift U (pointed to by UP and USIZE digits long) CNT bits to the left
  32 + * and store the USIZE least significant digits of the result at WP.
  33 + * Return the bits shifted out from the most significant digit.
  34 + *
  35 + * Argument constraints:
  36 + * 1. 0 < CNT < BITS_PER_MP_LIMB
  37 + * 2. If the result is to be written over the input, WP must be >= UP.
  38 + */
  39 +
  40 +mpi_limb_t
  41 +mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned int cnt)
  42 +{
  43 + mpi_limb_t high_limb, low_limb;
  44 + unsigned sh_1, sh_2;
  45 + mpi_size_t i;
  46 + mpi_limb_t retval;
  47 +
  48 + sh_1 = cnt;
  49 + wp += 1;
  50 + sh_2 = BITS_PER_MPI_LIMB - sh_1;
  51 + i = usize - 1;
  52 + low_limb = up[i];
  53 + retval = low_limb >> sh_2;
  54 + high_limb = low_limb;
  55 + while (--i >= 0) {
  56 + low_limb = up[i];
  57 + wp[i] = (high_limb << sh_1) | (low_limb >> sh_2);
  58 + high_limb = low_limb;
  59 + }
  60 + wp[i] = high_limb << sh_1;
  61 +
  62 + return retval;
  63 +}
lib/mpi/generic_mpih-mul1.c
  1 +/* mpihelp-mul_1.c - MPI helper functions
  2 + * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
  3 + *
  4 + * This file is part of GnuPG.
  5 + *
  6 + * GnuPG is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License as published by
  8 + * the Free Software Foundation; either version 2 of the License, or
  9 + * (at your option) any later version.
  10 + *
  11 + * GnuPG is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 + * GNU General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU General Public License
  17 + * along with this program; if not, write to the Free Software
  18 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  19 + *
  20 + * Note: This code is heavily based on the GNU MP Library.
  21 + * Actually it's the same code with only minor changes in the
  22 + * way the data is stored; this is to support the abstraction
  23 + * of an optional secure memory allocation which may be used
  24 + * to avoid revealing of sensitive data due to paging etc.
  25 + * The GNU MP Library itself is published under the LGPL;
  26 + * however I decided to publish this code under the plain GPL.
  27 + */
  28 +
  29 +#include "mpi-internal.h"
  30 +#include "longlong.h"
  31 +
  32 +mpi_limb_t
  33 +mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
  34 + mpi_limb_t s2_limb)
  35 +{
  36 + mpi_limb_t cy_limb;
  37 + mpi_size_t j;
  38 + mpi_limb_t prod_high, prod_low;
  39 +
  40 + /* The loop counter and index J goes from -S1_SIZE to -1. This way
  41 + * the loop becomes faster. */
  42 + j = -s1_size;
  43 +
  44 + /* Offset the base pointers to compensate for the negative indices. */
  45 + s1_ptr -= j;
  46 + res_ptr -= j;
  47 +
  48 + cy_limb = 0;
  49 + do {
  50 + umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);
  51 + prod_low += cy_limb;
  52 + cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;
  53 + res_ptr[j] = prod_low;
  54 + } while (++j);
  55 +
  56 + return cy_limb;
  57 +}
lib/mpi/generic_mpih-mul2.c
  1 +/* mpihelp-mul_2.c - MPI helper functions
  2 + * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
  3 + *
  4 + * This file is part of GnuPG.
  5 + *
  6 + * GnuPG is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License as published by
  8 + * the Free Software Foundation; either version 2 of the License, or
  9 + * (at your option) any later version.
  10 + *
  11 + * GnuPG is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 + * GNU General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU General Public License
  17 + * along with this program; if not, write to the Free Software
  18 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  19 + *
  20 + * Note: This code is heavily based on the GNU MP Library.
  21 + * Actually it's the same code with only minor changes in the
  22 + * way the data is stored; this is to support the abstraction
  23 + * of an optional secure memory allocation which may be used
  24 + * to avoid revealing of sensitive data due to paging etc.
  25 + * The GNU MP Library itself is published under the LGPL;
  26 + * however I decided to publish this code under the plain GPL.
  27 + */
  28 +
  29 +#include "mpi-internal.h"
  30 +#include "longlong.h"
  31 +
  32 +mpi_limb_t
  33 +mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
  34 + mpi_size_t s1_size, mpi_limb_t s2_limb)
  35 +{
  36 + mpi_limb_t cy_limb;
  37 + mpi_size_t j;
  38 + mpi_limb_t prod_high, prod_low;
  39 + mpi_limb_t x;
  40 +
  41 + /* The loop counter and index J goes from -SIZE to -1. This way
  42 + * the loop becomes faster. */
  43 + j = -s1_size;
  44 + res_ptr -= j;
  45 + s1_ptr -= j;
  46 +
  47 + cy_limb = 0;
  48 + do {
  49 + umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);
  50 +
  51 + prod_low += cy_limb;
  52 + cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;
  53 +
  54 + x = res_ptr[j];
  55 + prod_low = x + prod_low;
  56 + cy_limb += prod_low < x ? 1 : 0;
  57 + res_ptr[j] = prod_low;
  58 + } while (++j);
  59 + return cy_limb;
  60 +}
lib/mpi/generic_mpih-mul3.c
  1 +/* mpihelp-mul_3.c - MPI helper functions
  2 + * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
  3 + *
  4 + * This file is part of GnuPG.
  5 + *
  6 + * GnuPG is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License as published by
  8 + * the Free Software Foundation; either version 2 of the License, or
  9 + * (at your option) any later version.
  10 + *
  11 + * GnuPG is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 + * GNU General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU General Public License
  17 + * along with this program; if not, write to the Free Software
  18 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  19 + *
  20 + * Note: This code is heavily based on the GNU MP Library.
  21 + * Actually it's the same code with only minor changes in the
  22 + * way the data is stored; this is to support the abstraction
  23 + * of an optional secure memory allocation which may be used
  24 + * to avoid revealing of sensitive data due to paging etc.
  25 + * The GNU MP Library itself is published under the LGPL;
  26 + * however I decided to publish this code under the plain GPL.
  27 + */
  28 +
  29 +#include "mpi-internal.h"
  30 +#include "longlong.h"
  31 +
  32 +mpi_limb_t
  33 +mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
  34 + mpi_size_t s1_size, mpi_limb_t s2_limb)
  35 +{
  36 + mpi_limb_t cy_limb;
  37 + mpi_size_t j;
  38 + mpi_limb_t prod_high, prod_low;
  39 + mpi_limb_t x;
  40 +
  41 + /* The loop counter and index J goes from -SIZE to -1. This way
  42 + * the loop becomes faster. */
  43 + j = -s1_size;
  44 + res_ptr -= j;
  45 + s1_ptr -= j;
  46 +
  47 + cy_limb = 0;
  48 + do {
  49 + umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);
  50 +
  51 + prod_low += cy_limb;
  52 + cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;
  53 +
  54 + x = res_ptr[j];
  55 + prod_low = x - prod_low;
  56 + cy_limb += prod_low > x ? 1 : 0;
  57 + res_ptr[j] = prod_low;
  58 + } while (++j);
  59 +
  60 + return cy_limb;
  61 +}
lib/mpi/generic_mpih-rshift.c
  1 +/* mpih-rshift.c - MPI helper functions
  2 + * Copyright (C) 1994, 1996, 1998, 1999,
  3 + * 2000, 2001 Free Software Foundation, Inc.
  4 + *
  5 + * This file is part of GNUPG
  6 + *
  7 + * GNUPG is free software; you can redistribute it and/or modify
  8 + * it under the terms of the GNU General Public License as published by
  9 + * the Free Software Foundation; either version 2 of the License, or
  10 + * (at your option) any later version.
  11 + *
  12 + * GNUPG is distributed in the hope that it will be useful,
  13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15 + * GNU General Public License for more details.
  16 + *
  17 + * You should have received a copy of the GNU General Public License
  18 + * along with this program; if not, write to the Free Software
  19 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  20 + *
  21 + * Note: This code is heavily based on the GNU MP Library.
  22 + * Actually it's the same code with only minor changes in the
  23 + * way the data is stored; this is to support the abstraction
  24 + * of an optional secure memory allocation which may be used
  25 + * to avoid revealing of sensitive data due to paging etc.
  26 + * The GNU MP Library itself is published under the LGPL;
  27 + * however I decided to publish this code under the plain GPL.
  28 + */
  29 +
  30 +#include "mpi-internal.h"
  31 +
  32 +/* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the right
  33 + * and store the USIZE least significant limbs of the result at WP.
  34 + * The bits shifted out to the right are returned.
  35 + *
  36 + * Argument constraints:
  37 + * 1. 0 < CNT < BITS_PER_MP_LIMB
  38 + * 2. If the result is to be written over the input, WP must be <= UP.
  39 + */
  40 +
  41 +mpi_limb_t
  42 +mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned cnt)
  43 +{
  44 + mpi_limb_t high_limb, low_limb;
  45 + unsigned sh_1, sh_2;
  46 + mpi_size_t i;
  47 + mpi_limb_t retval;
  48 +
  49 + sh_1 = cnt;
  50 + wp -= 1;
  51 + sh_2 = BITS_PER_MPI_LIMB - sh_1;
  52 + high_limb = up[0];
  53 + retval = high_limb << sh_2;
  54 + low_limb = high_limb;
  55 + for (i = 1; i < usize; i++) {
  56 + high_limb = up[i];
  57 + wp[i] = (low_limb >> sh_1) | (high_limb << sh_2);
  58 + low_limb = high_limb;
  59 + }
  60 + wp[i] = low_limb >> sh_1;
  61 +
  62 + return retval;
  63 +}
lib/mpi/generic_mpih-sub1.c
  1 +/* mpihelp-sub_1.c - MPI helper functions
  2 + * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
  3 + *
  4 + * This file is part of GnuPG.
  5 + *
  6 + * GnuPG is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License as published by
  8 + * the Free Software Foundation; either version 2 of the License, or
  9 + * (at your option) any later version.
  10 + *
  11 + * GnuPG is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 + * GNU General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU General Public License
  17 + * along with this program; if not, write to the Free Software
  18 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  19 + *
  20 + * Note: This code is heavily based on the GNU MP Library.
  21 + * Actually it's the same code with only minor changes in the
  22 + * way the data is stored; this is to support the abstraction
  23 + * of an optional secure memory allocation which may be used
  24 + * to avoid revealing of sensitive data due to paging etc.
  25 + * The GNU MP Library itself is published under the LGPL;
  26 + * however I decided to publish this code under the plain GPL.
  27 + */
  28 +
  29 +#include "mpi-internal.h"
  30 +#include "longlong.h"
  31 +
  32 +mpi_limb_t
  33 +mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
  34 + mpi_ptr_t s2_ptr, mpi_size_t size)
  35 +{
  36 + mpi_limb_t x, y, cy;
  37 + mpi_size_t j;
  38 +
  39 + /* The loop counter and index J goes from -SIZE to -1. This way
  40 + the loop becomes faster. */
  41 + j = -size;
  42 +
  43 + /* Offset the base pointers to compensate for the negative indices. */
  44 + s1_ptr -= j;
  45 + s2_ptr -= j;
  46 + res_ptr -= j;
  47 +
  48 + cy = 0;
  49 + do {
  50 + y = s2_ptr[j];
  51 + x = s1_ptr[j];
  52 + y += cy; /* add previous carry to subtrahend */
  53 + cy = y < cy; /* get out carry from that addition */
  54 + y = x - y; /* main subtract */
  55 + cy += y > x; /* get out carry from the subtract, combine */
  56 + res_ptr[j] = y;
  57 + } while (++j);
  58 +
  59 + return cy;
  60 +}
  1 +/* mpi-bit.c - MPI bit level functions
  2 + * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
  3 + *
  4 + * This file is part of GnuPG.
  5 + *
  6 + * GnuPG is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License as published by
  8 + * the Free Software Foundation; either version 2 of the License, or
  9 + * (at your option) any later version.
  10 + *
  11 + * GnuPG is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 + * GNU General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU General Public License
  17 + * along with this program; if not, write to the Free Software
  18 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  19 + */
  20 +
  21 +#include "mpi-internal.h"
  22 +#include "longlong.h"
  23 +
/*
 * Lookup table for the generic count_leading_zeros() fallback (see
 * "longlong.h", included above): __clz_tab[x] is the bit length of x,
 * i.e. floor(log2(x)) + 1, for x in 1..255, and 0 for x == 0.
 */
const unsigned char __clz_tab[] = {
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8,
};
  42 +
  43 +#define A_LIMB_1 ((mpi_limb_t) 1)
  44 +
  45 +/****************
  46 + * Sometimes we have MSL (most significant limbs) which are 0;
  47 + * this is for some reasons not good, so this function removes them.
  48 + */
  49 +void mpi_normalize(MPI a)
  50 +{
  51 + for (; a->nlimbs && !a->d[a->nlimbs - 1]; a->nlimbs--)
  52 + ;
  53 +}
  54 +
  55 +/****************
  56 + * Return the number of bits in A.
  57 + */
  58 +unsigned mpi_get_nbits(MPI a)
  59 +{
  60 + unsigned n;
  61 +
  62 + mpi_normalize(a);
  63 +
  64 + if (a->nlimbs) {
  65 + mpi_limb_t alimb = a->d[a->nlimbs - 1];
  66 + if (alimb)
  67 + count_leading_zeros(n, alimb);
  68 + else
  69 + n = BITS_PER_MPI_LIMB;
  70 + n = BITS_PER_MPI_LIMB - n + (a->nlimbs - 1) * BITS_PER_MPI_LIMB;
  71 + } else
  72 + n = 0;
  73 + return n;
  74 +}
  75 +EXPORT_SYMBOL_GPL(mpi_get_nbits);
  76 +
  77 +/****************
  78 + * Test whether bit N is set.
  79 + */
  80 +int mpi_test_bit(MPI a, unsigned n)
  81 +{
  82 + unsigned limbno, bitno;
  83 + mpi_limb_t limb;
  84 +
  85 + limbno = n / BITS_PER_MPI_LIMB;
  86 + bitno = n % BITS_PER_MPI_LIMB;
  87 +
  88 + if (limbno >= a->nlimbs)
  89 + return 0; /* too far left: this is a 0 */
  90 + limb = a->d[limbno];
  91 + return (limb & (A_LIMB_1 << bitno)) ? 1 : 0;
  92 +}
  93 +
  94 +/****************
  95 + * Set bit N of A.
  96 + */
  97 +int mpi_set_bit(MPI a, unsigned n)
  98 +{
  99 + unsigned limbno, bitno;
  100 +
  101 + limbno = n / BITS_PER_MPI_LIMB;
  102 + bitno = n % BITS_PER_MPI_LIMB;
  103 +
  104 + if (limbno >= a->nlimbs) { /* resize */
  105 + if (a->alloced >= limbno)
  106 + if (mpi_resize(a, limbno + 1) < 0)
  107 + return -ENOMEM;
  108 + a->nlimbs = limbno + 1;
  109 + }
  110 + a->d[limbno] |= (A_LIMB_1 << bitno);
  111 + return 0;
  112 +}
  113 +
  114 +/****************
  115 + * Set bit N of A. and clear all bits above
  116 + */
  117 +int mpi_set_highbit(MPI a, unsigned n)
  118 +{
  119 + unsigned limbno, bitno;
  120 +
  121 + limbno = n / BITS_PER_MPI_LIMB;
  122 + bitno = n % BITS_PER_MPI_LIMB;
  123 +
  124 + if (limbno >= a->nlimbs) { /* resize */
  125 + if (a->alloced >= limbno)
  126 + if (mpi_resize(a, limbno + 1) < 0)
  127 + return -ENOMEM;
  128 + a->nlimbs = limbno + 1;
  129 + }
  130 + a->d[limbno] |= (A_LIMB_1 << bitno);
  131 + for (bitno++; bitno < BITS_PER_MPI_LIMB; bitno++)
  132 + a->d[limbno] &= ~(A_LIMB_1 << bitno);
  133 + a->nlimbs = limbno + 1;
  134 + return 0;
  135 +}
  136 +
  137 +/****************
  138 + * clear bit N of A and all bits above
  139 + */
  140 +void mpi_clear_highbit(MPI a, unsigned n)
  141 +{
  142 + unsigned limbno, bitno;
  143 +
  144 + limbno = n / BITS_PER_MPI_LIMB;
  145 + bitno = n % BITS_PER_MPI_LIMB;
  146 +
  147 + if (limbno >= a->nlimbs)
  148 + return; /* not allocated, so need to clear bits :-) */
  149 +
  150 + for (; bitno < BITS_PER_MPI_LIMB; bitno++)
  151 + a->d[limbno] &= ~(A_LIMB_1 << bitno);
  152 + a->nlimbs = limbno + 1;
  153 +}
  154 +
  155 +/****************
  156 + * Clear bit N of A.
  157 + */
  158 +void mpi_clear_bit(MPI a, unsigned n)
  159 +{
  160 + unsigned limbno, bitno;
  161 +
  162 + limbno = n / BITS_PER_MPI_LIMB;
  163 + bitno = n % BITS_PER_MPI_LIMB;
  164 +
  165 + if (limbno >= a->nlimbs)
  166 + return; /* don't need to clear this bit, it's to far to left */
  167 + a->d[limbno] &= ~(A_LIMB_1 << bitno);
  168 +}
  169 +
  170 +/****************
  171 + * Shift A by N bits to the right
  172 + * FIXME: should use alloc_limb if X and A are same.
  173 + */
  174 +int mpi_rshift(MPI x, MPI a, unsigned n)
  175 +{
  176 + mpi_ptr_t xp;
  177 + mpi_size_t xsize;
  178 +
  179 + xsize = a->nlimbs;
  180 + x->sign = a->sign;
  181 + if (RESIZE_IF_NEEDED(x, (size_t) xsize) < 0)
  182 + return -ENOMEM;
  183 + xp = x->d;
  184 +
  185 + if (xsize) {
  186 + mpihelp_rshift(xp, a->d, xsize, n);
  187 + MPN_NORMALIZE(xp, xsize);
  188 + }
  189 + x->nlimbs = xsize;
  190 + return 0;
  191 +}
  192 +
  193 +/****************
  194 + * Shift A by COUNT limbs to the left
  195 + * This is used only within the MPI library
  196 + */
  197 +int mpi_lshift_limbs(MPI a, unsigned int count)
  198 +{
  199 + mpi_ptr_t ap = a->d;
  200 + int n = a->nlimbs;
  201 + int i;
  202 +
  203 + if (!count || !n)
  204 + return 0;
  205 +
  206 + if (RESIZE_IF_NEEDED(a, n + count) < 0)
  207 + return -ENOMEM;
  208 +
  209 + for (i = n - 1; i >= 0; i--)
  210 + ap[i + count] = ap[i];
  211 + for (i = 0; i < count; i++)
  212 + ap[i] = 0;
  213 + a->nlimbs += count;
  214 + return 0;
  215 +}
  216 +
  217 +/****************
  218 + * Shift A by COUNT limbs to the right
  219 + * This is used only within the MPI library
  220 + */
  221 +void mpi_rshift_limbs(MPI a, unsigned int count)
  222 +{
  223 + mpi_ptr_t ap = a->d;
  224 + mpi_size_t n = a->nlimbs;
  225 + unsigned int i;
  226 +
  227 + if (count >= n) {
  228 + a->nlimbs = 0;
  229 + return;
  230 + }
  231 +
  232 + for (i = 0; i < n - count; i++)
  233 + ap[i] = ap[i + count];
  234 + ap[i] = 0;
  235 + a->nlimbs -= count;
  236 +}
  1 +/* mpi-pow.c - MPI functions
  2 + * Copyright (C) 1994, 1996, 1998, 2000 Free Software Foundation, Inc.
  3 + *
  4 + * This file is part of GnuPG.
  5 + *
  6 + * GnuPG is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License as published by
  8 + * the Free Software Foundation; either version 2 of the License, or
  9 + * (at your option) any later version.
  10 + *
  11 + * GnuPG is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 + * GNU General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU General Public License
  17 + * along with this program; if not, write to the Free Software
  18 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  19 + *
  20 + * Note: This code is heavily based on the GNU MP Library.
  21 + * Actually it's the same code with only minor changes in the
  22 + * way the data is stored; this is to support the abstraction
  23 + * of an optional secure memory allocation which may be used
  24 + * to avoid revealing of sensitive data due to paging etc.
  25 + * The GNU MP Library itself is published under the LGPL;
  26 + * however I decided to publish this code under the plain GPL.
  27 + */
  28 +
  29 +#include <linux/string.h>
  30 +#include "mpi-internal.h"
  31 +#include "longlong.h"
  32 +
  33 +/****************
  34 + * RES = BASE ^ EXP mod MOD
  35 + */
  36 +int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
  37 +{
  38 + mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL;
  39 + mpi_ptr_t xp_marker = NULL;
  40 + mpi_ptr_t tspace = NULL;
  41 + mpi_ptr_t rp, ep, mp, bp;
  42 + mpi_size_t esize, msize, bsize, rsize;
  43 + int esign, msign, bsign, rsign;
  44 + mpi_size_t size;
  45 + int mod_shift_cnt;
  46 + int negative_result;
  47 + int assign_rp = 0;
  48 + mpi_size_t tsize = 0; /* to avoid compiler warning */
  49 + /* fixme: we should check that the warning is void */
  50 + int rc = -ENOMEM;
  51 +
  52 + esize = exp->nlimbs;
  53 + msize = mod->nlimbs;
  54 + size = 2 * msize;
  55 + esign = exp->sign;
  56 + msign = mod->sign;
  57 +
  58 + rp = res->d;
  59 + ep = exp->d;
  60 +
  61 + if (!msize)
  62 + msize = 1 / msize; /* provoke a signal */
  63 +
  64 + if (!esize) {
  65 + /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
  66 + * depending on if MOD equals 1. */
  67 + rp[0] = 1;
  68 + res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
  69 + res->sign = 0;
  70 + goto leave;
  71 + }
  72 +
  73 + /* Normalize MOD (i.e. make its most significant bit set) as required by
  74 + * mpn_divrem. This will make the intermediate values in the calculation
  75 + * slightly larger, but the correct result is obtained after a final
  76 + * reduction using the original MOD value. */
  77 + mp = mp_marker = mpi_alloc_limb_space(msize);
  78 + if (!mp)
  79 + goto enomem;
  80 + count_leading_zeros(mod_shift_cnt, mod->d[msize - 1]);
  81 + if (mod_shift_cnt)
  82 + mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt);
  83 + else
  84 + MPN_COPY(mp, mod->d, msize);
  85 +
  86 + bsize = base->nlimbs;
  87 + bsign = base->sign;
  88 + if (bsize > msize) { /* The base is larger than the module. Reduce it. */
  89 + /* Allocate (BSIZE + 1) with space for remainder and quotient.
  90 + * (The quotient is (bsize - msize + 1) limbs.) */
  91 + bp = bp_marker = mpi_alloc_limb_space(bsize + 1);
  92 + if (!bp)
  93 + goto enomem;
  94 + MPN_COPY(bp, base->d, bsize);
  95 + /* We don't care about the quotient, store it above the remainder,
  96 + * at BP + MSIZE. */
  97 + mpihelp_divrem(bp + msize, 0, bp, bsize, mp, msize);
  98 + bsize = msize;
  99 + /* Canonicalize the base, since we are going to multiply with it
  100 + * quite a few times. */
  101 + MPN_NORMALIZE(bp, bsize);
  102 + } else
  103 + bp = base->d;
  104 +
  105 + if (!bsize) {
  106 + res->nlimbs = 0;
  107 + res->sign = 0;
  108 + goto leave;
  109 + }
  110 +
  111 + if (res->alloced < size) {
  112 + /* We have to allocate more space for RES. If any of the input
  113 + * parameters are identical to RES, defer deallocation of the old
  114 + * space. */
  115 + if (rp == ep || rp == mp || rp == bp) {
  116 + rp = mpi_alloc_limb_space(size);
  117 + if (!rp)
  118 + goto enomem;
  119 + assign_rp = 1;
  120 + } else {
  121 + if (mpi_resize(res, size) < 0)
  122 + goto enomem;
  123 + rp = res->d;
  124 + }
  125 + } else { /* Make BASE, EXP and MOD not overlap with RES. */
  126 + if (rp == bp) {
  127 + /* RES and BASE are identical. Allocate temp. space for BASE. */
  128 + BUG_ON(bp_marker);
  129 + bp = bp_marker = mpi_alloc_limb_space(bsize);
  130 + if (!bp)
  131 + goto enomem;
  132 + MPN_COPY(bp, rp, bsize);
  133 + }
  134 + if (rp == ep) {
  135 + /* RES and EXP are identical. Allocate temp. space for EXP. */
  136 + ep = ep_marker = mpi_alloc_limb_space(esize);
  137 + if (!ep)
  138 + goto enomem;
  139 + MPN_COPY(ep, rp, esize);
  140 + }
  141 + if (rp == mp) {
  142 + /* RES and MOD are identical. Allocate temporary space for MOD. */
  143 + BUG_ON(mp_marker);
  144 + mp = mp_marker = mpi_alloc_limb_space(msize);
  145 + if (!mp)
  146 + goto enomem;
  147 + MPN_COPY(mp, rp, msize);
  148 + }
  149 + }
  150 +
  151 + MPN_COPY(rp, bp, bsize);
  152 + rsize = bsize;
  153 + rsign = bsign;
  154 +
  155 + {
  156 + mpi_size_t i;
  157 + mpi_ptr_t xp;
  158 + int c;
  159 + mpi_limb_t e;
  160 + mpi_limb_t carry_limb;
  161 + struct karatsuba_ctx karactx;
  162 +
  163 + xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1));
  164 + if (!xp)
  165 + goto enomem;
  166 +
  167 + memset(&karactx, 0, sizeof karactx);
  168 + negative_result = (ep[0] & 1) && base->sign;
  169 +
  170 + i = esize - 1;
  171 + e = ep[i];
  172 + count_leading_zeros(c, e);
  173 + e = (e << c) << 1; /* shift the exp bits to the left, lose msb */
  174 + c = BITS_PER_MPI_LIMB - 1 - c;
  175 +
  176 + /* Main loop.
  177 + *
  178 + * Make the result be pointed to alternately by XP and RP. This
  179 + * helps us avoid block copying, which would otherwise be necessary
  180 + * with the overlap restrictions of mpihelp_divmod. With 50% probability
  181 + * the result after this loop will be in the area originally pointed
  182 + * by RP (==RES->d), and with 50% probability in the area originally
  183 + * pointed to by XP.
  184 + */
  185 +
  186 + for (;;) {
  187 + while (c) {
  188 + mpi_ptr_t tp;
  189 + mpi_size_t xsize;
  190 +
  191 + /*if (mpihelp_mul_n(xp, rp, rp, rsize) < 0) goto enomem */
  192 + if (rsize < KARATSUBA_THRESHOLD)
  193 + mpih_sqr_n_basecase(xp, rp, rsize);
  194 + else {
  195 + if (!tspace) {
  196 + tsize = 2 * rsize;
  197 + tspace =
  198 + mpi_alloc_limb_space(tsize);
  199 + if (!tspace)
  200 + goto enomem;
  201 + } else if (tsize < (2 * rsize)) {
  202 + mpi_free_limb_space(tspace);
  203 + tsize = 2 * rsize;
  204 + tspace =
  205 + mpi_alloc_limb_space(tsize);
  206 + if (!tspace)
  207 + goto enomem;
  208 + }
  209 + mpih_sqr_n(xp, rp, rsize, tspace);
  210 + }
  211 +
  212 + xsize = 2 * rsize;
  213 + if (xsize > msize) {
  214 + mpihelp_divrem(xp + msize, 0, xp, xsize,
  215 + mp, msize);
  216 + xsize = msize;
  217 + }
  218 +
  219 + tp = rp;
  220 + rp = xp;
  221 + xp = tp;
  222 + rsize = xsize;
  223 +
  224 + if ((mpi_limb_signed_t) e < 0) {
  225 + /*mpihelp_mul( xp, rp, rsize, bp, bsize ); */
  226 + if (bsize < KARATSUBA_THRESHOLD) {
  227 + mpi_limb_t tmp;
  228 + if (mpihelp_mul
  229 + (xp, rp, rsize, bp, bsize,
  230 + &tmp) < 0)
  231 + goto enomem;
  232 + } else {
  233 + if (mpihelp_mul_karatsuba_case
  234 + (xp, rp, rsize, bp, bsize,
  235 + &karactx) < 0)
  236 + goto enomem;
  237 + }
  238 +
  239 + xsize = rsize + bsize;
  240 + if (xsize > msize) {
  241 + mpihelp_divrem(xp + msize, 0,
  242 + xp, xsize, mp,
  243 + msize);
  244 + xsize = msize;
  245 + }
  246 +
  247 + tp = rp;
  248 + rp = xp;
  249 + xp = tp;
  250 + rsize = xsize;
  251 + }
  252 + e <<= 1;
  253 + c--;
  254 + }
  255 +
  256 + i--;
  257 + if (i < 0)
  258 + break;
  259 + e = ep[i];
  260 + c = BITS_PER_MPI_LIMB;
  261 + }
  262 +
  263 + /* We shifted MOD, the modulo reduction argument, left MOD_SHIFT_CNT
  264 + * steps. Adjust the result by reducing it with the original MOD.
  265 + *
  266 + * Also make sure the result is put in RES->d (where it already
  267 + * might be, see above).
  268 + */
  269 + if (mod_shift_cnt) {
  270 + carry_limb =
  271 + mpihelp_lshift(res->d, rp, rsize, mod_shift_cnt);
  272 + rp = res->d;
  273 + if (carry_limb) {
  274 + rp[rsize] = carry_limb;
  275 + rsize++;
  276 + }
  277 + } else {
  278 + MPN_COPY(res->d, rp, rsize);
  279 + rp = res->d;
  280 + }
  281 +
  282 + if (rsize >= msize) {
  283 + mpihelp_divrem(rp + msize, 0, rp, rsize, mp, msize);
  284 + rsize = msize;
  285 + }
  286 +
  287 + /* Remove any leading zero words from the result. */
  288 + if (mod_shift_cnt)
  289 + mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
  290 + MPN_NORMALIZE(rp, rsize);
  291 +
  292 + mpihelp_release_karatsuba_ctx(&karactx);
  293 + }
  294 +
  295 + if (negative_result && rsize) {
  296 + if (mod_shift_cnt)
  297 + mpihelp_rshift(mp, mp, msize, mod_shift_cnt);
  298 + mpihelp_sub(rp, mp, msize, rp, rsize);
  299 + rsize = msize;
  300 + rsign = msign;
  301 + MPN_NORMALIZE(rp, rsize);
  302 + }
  303 + res->nlimbs = rsize;
  304 + res->sign = rsign;
  305 +
  306 +leave:
  307 + rc = 0;
  308 +enomem:
  309 + if (assign_rp)
  310 + mpi_assign_limb_space(res, rp, size);
  311 + if (mp_marker)
  312 + mpi_free_limb_space(mp_marker);
  313 + if (bp_marker)
  314 + mpi_free_limb_space(bp_marker);
  315 + if (ep_marker)
  316 + mpi_free_limb_space(ep_marker);
  317 + if (xp_marker)
  318 + mpi_free_limb_space(xp_marker);
  319 + if (tspace)
  320 + mpi_free_limb_space(tspace);
  321 + return rc;
  322 +}
  323 +EXPORT_SYMBOL_GPL(mpi_powm);
  1 +/* mpicoder.c - Coder for the external representation of MPIs
  2 + * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
  3 + *
  4 + * This file is part of GnuPG.
  5 + *
  6 + * GnuPG is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License as published by
  8 + * the Free Software Foundation; either version 2 of the License, or
  9 + * (at your option) any later version.
  10 + *
  11 + * GnuPG is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 + * GNU General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU General Public License
  17 + * along with this program; if not, write to the Free Software
  18 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  19 + */
  20 +
  21 +#include "mpi-internal.h"
  22 +
  23 +#define DIM(v) (sizeof(v)/sizeof((v)[0]))
  24 +#define MAX_EXTERN_MPI_BITS 16384
  25 +
/* DER-encoded ASN.1 DigestInfo header for SHA-1 (OID 1.3.14.3.2.26);
 * do_encode_md() copies these 15 bytes immediately before the 20-byte
 * digest when building the PKCS#1-style frame. */
static uint8_t asn[15] =	/* Object ID is 1.3.14.3.2.26 */
{ 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03,
  0x02, 0x1a, 0x05, 0x00, 0x04, 0x14
};
  30 +
  31 +MPI do_encode_md(const void *sha_buffer, unsigned nbits)
  32 +{
  33 + int nframe = (nbits + 7) / 8;
  34 + uint8_t *frame, *fr_pt;
  35 + int i = 0, n;
  36 + size_t asnlen = DIM(asn);
  37 + MPI a = MPI_NULL;
  38 +
  39 + if (SHA1_DIGEST_LENGTH + asnlen + 4 > nframe)
  40 + pr_info("MPI: can't encode a %d bit MD into a %d bits frame\n",
  41 + (int)(SHA1_DIGEST_LENGTH * 8), (int)nbits);
  42 +
  43 + /* We encode the MD in this way:
  44 + *
  45 + * 0 A PAD(n bytes) 0 ASN(asnlen bytes) MD(len bytes)
  46 + *
  47 + * PAD consists of FF bytes.
  48 + */
  49 + frame = kmalloc(nframe, GFP_KERNEL);
  50 + if (!frame)
  51 + return MPI_NULL;
  52 + n = 0;
  53 + frame[n++] = 0;
  54 + frame[n++] = 1; /* block type */
  55 + i = nframe - SHA1_DIGEST_LENGTH - asnlen - 3;
  56 +
  57 + if (i <= 1) {
  58 + pr_info("MPI: message digest encoding failed\n");
  59 + kfree(frame);
  60 + return a;
  61 + }
  62 +
  63 + memset(frame + n, 0xff, i);
  64 + n += i;
  65 + frame[n++] = 0;
  66 + memcpy(frame + n, &asn, asnlen);
  67 + n += asnlen;
  68 + memcpy(frame + n, sha_buffer, SHA1_DIGEST_LENGTH);
  69 + n += SHA1_DIGEST_LENGTH;
  70 +
  71 + i = nframe;
  72 + fr_pt = frame;
  73 +
  74 + if (n != nframe) {
  75 + printk
  76 + ("MPI: message digest encoding failed, frame length is wrong\n");
  77 + kfree(frame);
  78 + return a;
  79 + }
  80 +
  81 + a = mpi_alloc((nframe + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB);
  82 + mpi_set_buffer(a, frame, nframe, 0);
  83 + kfree(frame);
  84 +
  85 + return a;
  86 +}
  87 +
/**
 * mpi_read_from_buffer - parse an MPI from its external representation
 * @xbuffer: source bytes: a 2-byte big-endian bit count followed by the
 *           magnitude, most significant byte first
 * @ret_nread: on entry, the number of bytes available in @xbuffer;
 *             on exit, the number of bytes actually consumed
 *
 * Returns the parsed MPI, or MPI_NULL if the buffer is too short, the
 * bit count exceeds MAX_EXTERN_MPI_BITS, or allocation fails.
 *
 * NOTE(review): if the payload is shorter than the header claims, the
 * partially-filled MPI is still returned (not freed) — callers should
 * compare *ret_nread against their expectation; confirm this is intended.
 */
MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
{
	const uint8_t *buffer = xbuffer;
	int i, j;
	unsigned nbits, nbytes, nlimbs, nread = 0;
	mpi_limb_t a;
	MPI val = MPI_NULL;

	if (*ret_nread < 2)
		goto leave;
	/* Two-byte big-endian bit-length header. */
	nbits = buffer[0] << 8 | buffer[1];

	if (nbits > MAX_EXTERN_MPI_BITS) {
		pr_info("MPI: mpi too large (%u bits)\n", nbits);
		goto leave;
	}
	buffer += 2;
	nread = 2;

	nbytes = (nbits + 7) / 8;
	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
	val = mpi_alloc(nlimbs);
	if (!val)
		return MPI_NULL;
	/* Number of pad bytes in the most significant (first) limb when
	 * nbytes is not a multiple of the limb size. */
	i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
	i %= BYTES_PER_MPI_LIMB;
	val->nbits = nbits;
	j = val->nlimbs = nlimbs;
	val->sign = 0;
	/* Fill limbs from most significant (d[nlimbs-1]) downwards, one
	 * big-endian byte at a time. */
	for (; j > 0; j--) {
		a = 0;
		for (; i < BYTES_PER_MPI_LIMB; i++) {
			if (++nread > *ret_nread) {
				printk
				    ("MPI: mpi larger than buffer nread=%d ret_nread=%d\n",
				     nread, *ret_nread);
				goto leave;
			}
			a <<= 8;
			a |= *buffer++;
		}
		i = 0;
		val->d[j - 1] = a;
	}

leave:
	*ret_nread = nread;
	return val;
}
  137 +EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
  138 +
  139 +/****************
  140 + * Make an mpi from a character string.
  141 + */
  142 +int mpi_fromstr(MPI val, const char *str)
  143 +{
  144 + int hexmode = 0, sign = 0, prepend_zero = 0, i, j, c, c1, c2;
  145 + unsigned nbits, nbytes, nlimbs;
  146 + mpi_limb_t a;
  147 +
  148 + if (*str == '-') {
  149 + sign = 1;
  150 + str++;
  151 + }
  152 + if (*str == '0' && str[1] == 'x')
  153 + hexmode = 1;
  154 + else
  155 + return -EINVAL; /* other bases are not yet supported */
  156 + str += 2;
  157 +
  158 + nbits = strlen(str) * 4;
  159 + if (nbits % 8)
  160 + prepend_zero = 1;
  161 + nbytes = (nbits + 7) / 8;
  162 + nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
  163 + if (val->alloced < nlimbs)
  164 + if (!mpi_resize(val, nlimbs))
  165 + return -ENOMEM;
  166 + i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
  167 + i %= BYTES_PER_MPI_LIMB;
  168 + j = val->nlimbs = nlimbs;
  169 + val->sign = sign;
  170 + for (; j > 0; j--) {
  171 + a = 0;
  172 + for (; i < BYTES_PER_MPI_LIMB; i++) {
  173 + if (prepend_zero) {
  174 + c1 = '0';
  175 + prepend_zero = 0;
  176 + } else
  177 + c1 = *str++;
  178 + assert(c1);
  179 + c2 = *str++;
  180 + assert(c2);
  181 + if (c1 >= '0' && c1 <= '9')
  182 + c = c1 - '0';
  183 + else if (c1 >= 'a' && c1 <= 'f')
  184 + c = c1 - 'a' + 10;
  185 + else if (c1 >= 'A' && c1 <= 'F')
  186 + c = c1 - 'A' + 10;
  187 + else {
  188 + mpi_clear(val);
  189 + return 1;
  190 + }
  191 + c <<= 4;
  192 + if (c2 >= '0' && c2 <= '9')
  193 + c |= c2 - '0';
  194 + else if (c2 >= 'a' && c2 <= 'f')
  195 + c |= c2 - 'a' + 10;
  196 + else if (c2 >= 'A' && c2 <= 'F')
  197 + c |= c2 - 'A' + 10;
  198 + else {
  199 + mpi_clear(val);
  200 + return 1;
  201 + }
  202 + a <<= 8;
  203 + a |= c;
  204 + }
  205 + i = 0;
  206 +
  207 + val->d[j - 1] = a;
  208 + }
  209 +
  210 + return 0;
  211 +}
  212 +EXPORT_SYMBOL_GPL(mpi_fromstr);
  213 +
  214 +/****************
  215 + * Special function to get the low 8 bytes from an mpi.
  216 + * This can be used as a keyid; KEYID is an 2 element array.
  217 + * Return the low 4 bytes.
  218 + */
  219 +u32 mpi_get_keyid(const MPI a, u32 *keyid)
  220 +{
  221 +#if BYTES_PER_MPI_LIMB == 4
  222 + if (keyid) {
  223 + keyid[0] = a->nlimbs >= 2 ? a->d[1] : 0;
  224 + keyid[1] = a->nlimbs >= 1 ? a->d[0] : 0;
  225 + }
  226 + return a->nlimbs >= 1 ? a->d[0] : 0;
  227 +#elif BYTES_PER_MPI_LIMB == 8
  228 + if (keyid) {
  229 + keyid[0] = a->nlimbs ? (u32) (a->d[0] >> 32) : 0;
  230 + keyid[1] = a->nlimbs ? (u32) (a->d[0] & 0xffffffff) : 0;
  231 + }
  232 + return a->nlimbs ? (u32) (a->d[0] & 0xffffffff) : 0;
  233 +#else
  234 +#error Make this function work with other LIMB sizes
  235 +#endif
  236 +}
  237 +
  238 +/****************
  239 + * Return an allocated buffer with the MPI (msb first).
  240 + * NBYTES receives the length of this buffer. Caller must free the
  241 + * return string (This function does return a 0 byte buffer with NBYTES
  242 + * set to zero if the value of A is zero. If sign is not NULL, it will
  243 + * be set to the sign of the A.
  244 + */
  245 +void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
  246 +{
  247 + uint8_t *p, *buffer;
  248 + mpi_limb_t alimb;
  249 + int i;
  250 + unsigned int n;
  251 +
  252 + if (sign)
  253 + *sign = a->sign;
  254 + *nbytes = n = a->nlimbs * BYTES_PER_MPI_LIMB;
  255 + if (!n)
  256 + n++; /* avoid zero length allocation */
  257 + p = buffer = kmalloc(n, GFP_KERNEL);
  258 +
  259 + for (i = a->nlimbs - 1; i >= 0; i--) {
  260 + alimb = a->d[i];
  261 +#if BYTES_PER_MPI_LIMB == 4
  262 + *p++ = alimb >> 24;
  263 + *p++ = alimb >> 16;
  264 + *p++ = alimb >> 8;
  265 + *p++ = alimb;
  266 +#elif BYTES_PER_MPI_LIMB == 8
  267 + *p++ = alimb >> 56;
  268 + *p++ = alimb >> 48;
  269 + *p++ = alimb >> 40;
  270 + *p++ = alimb >> 32;
  271 + *p++ = alimb >> 24;
  272 + *p++ = alimb >> 16;
  273 + *p++ = alimb >> 8;
  274 + *p++ = alimb;
  275 +#else
  276 +#error please implement for this limb size.
  277 +#endif
  278 + }
  279 +
  280 + /* this is sub-optimal but we need to do the shift operation
  281 + * because the caller has to free the returned buffer */
  282 + for (p = buffer; !*p && *nbytes; p++, --*nbytes)
  283 + ;
  284 + if (p != buffer)
  285 + memmove(buffer, p, *nbytes);
  286 +
  287 + return buffer;
  288 +}
  289 +EXPORT_SYMBOL_GPL(mpi_get_buffer);
  290 +
  291 +/****************
  292 + * Use BUFFER to update MPI.
  293 + */
/* Rebuild A from NBYTES big-endian bytes at XBUFFER with the given SIGN.
 * Returns 0 on success, -ENOMEM if A cannot be resized. */
int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign)
{
	const uint8_t *buffer = xbuffer, *p;
	mpi_limb_t alimb;
	int nlimbs;
	int i;

	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
	if (RESIZE_IF_NEEDED(a, nlimbs) < 0)
		return -ENOMEM;
	a->sign = sign;

	/* Walk backwards from the least significant (last) byte and
	 * assemble one full limb per iteration. */
	for (i = 0, p = buffer + nbytes - 1; p >= buffer + BYTES_PER_MPI_LIMB;) {
#if BYTES_PER_MPI_LIMB == 4
		alimb = (mpi_limb_t) *p--;
		alimb |= (mpi_limb_t) *p-- << 8;
		alimb |= (mpi_limb_t) *p-- << 16;
		alimb |= (mpi_limb_t) *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
		alimb = (mpi_limb_t) *p--;
		alimb |= (mpi_limb_t) *p-- << 8;
		alimb |= (mpi_limb_t) *p-- << 16;
		alimb |= (mpi_limb_t) *p-- << 24;
		alimb |= (mpi_limb_t) *p-- << 32;
		alimb |= (mpi_limb_t) *p-- << 40;
		alimb |= (mpi_limb_t) *p-- << 48;
		alimb |= (mpi_limb_t) *p-- << 56;
#else
#error please implement for this limb size.
#endif
		a->d[i++] = alimb;
	}
	/* Partial most-significant limb when NBYTES is not a multiple of
	 * the limb size: guard every remaining byte individually. */
	if (p >= buffer) {
#if BYTES_PER_MPI_LIMB == 4
		alimb = *p--;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 8;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 16;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
		alimb = (mpi_limb_t) *p--;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 8;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 16;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 24;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 32;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 40;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 48;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 56;
#else
#error please implement for this limb size.
#endif
		a->d[i++] = alimb;
	}
	a->nlimbs = i;

	/* Sanity check: the byte walk must have produced exactly the
	 * limb count computed up front. */
	if (i != nlimbs) {
		pr_emerg("MPI: mpi_set_buffer: Assertion failed (%d != %d)", i,
			 nlimbs);
		BUG();
	}
	return 0;
}
  365 +EXPORT_SYMBOL_GPL(mpi_set_buffer);
  1 +/* mpihelp-sub.c - MPI helper functions
  2 + * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
  3 + * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
  4 + *
  5 + * This file is part of GnuPG.
  6 + *
  7 + * GnuPG is free software; you can redistribute it and/or modify
  8 + * it under the terms of the GNU General Public License as published by
  9 + * the Free Software Foundation; either version 2 of the License, or
  10 + * (at your option) any later version.
  11 + *
  12 + * GnuPG is distributed in the hope that it will be useful,
  13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15 + * GNU General Public License for more details.
  16 + *
  17 + * You should have received a copy of the GNU General Public License
  18 + * along with this program; if not, write to the Free Software
  19 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  20 + *
  21 + * Note: This code is heavily based on the GNU MP Library.
  22 + * Actually it's the same code with only minor changes in the
  23 + * way the data is stored; this is to support the abstraction
  24 + * of an optional secure memory allocation which may be used
  25 + * to avoid revealing of sensitive data due to paging etc.
  26 + * The GNU MP Library itself is published under the LGPL;
  27 + * however I decided to publish this code under the plain GPL.
  28 + */
  29 +
  30 +#include "mpi-internal.h"
  31 +
  32 +/****************
  33 + * Compare OP1_PTR/OP1_SIZE with OP2_PTR/OP2_SIZE.
  34 + * There are no restrictions on the relative sizes of
  35 + * the two arguments.
  36 + * Return 1 if OP1 > OP2, 0 if they are equal, and -1 if OP1 < OP2.
  37 + */
  38 +int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size)
  39 +{
  40 + mpi_size_t i;
  41 + mpi_limb_t op1_word, op2_word;
  42 +
  43 + for (i = size - 1; i >= 0; i--) {
  44 + op1_word = op1_ptr[i];
  45 + op2_word = op2_ptr[i];
  46 + if (op1_word != op2_word)
  47 + goto diff;
  48 + }
  49 + return 0;
  50 +
  51 +diff:
  52 + /* This can *not* be simplified to
  53 + * op2_word - op2_word
  54 + * since that expression might give signed overflow. */
  55 + return (op1_word > op2_word) ? 1 : -1;
  56 +}
  1 +/* mpihelp-div.c - MPI helper functions
  2 + * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
  3 + * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
  4 + *
  5 + * This file is part of GnuPG.
  6 + *
  7 + * GnuPG is free software; you can redistribute it and/or modify
  8 + * it under the terms of the GNU General Public License as published by
  9 + * the Free Software Foundation; either version 2 of the License, or
  10 + * (at your option) any later version.
  11 + *
  12 + * GnuPG is distributed in the hope that it will be useful,
  13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15 + * GNU General Public License for more details.
  16 + *
  17 + * You should have received a copy of the GNU General Public License
  18 + * along with this program; if not, write to the Free Software
  19 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  20 + *
  21 + * Note: This code is heavily based on the GNU MP Library.
  22 + * Actually it's the same code with only minor changes in the
  23 + * way the data is stored; this is to support the abstraction
  24 + * of an optional secure memory allocation which may be used
  25 + * to avoid revealing of sensitive data due to paging etc.
  26 + * The GNU MP Library itself is published under the LGPL;
  27 + * however I decided to publish this code under the plain GPL.
  28 + */
  29 +
  30 +#include "mpi-internal.h"
  31 +#include "longlong.h"
  32 +
  33 +#ifndef UMUL_TIME
  34 +#define UMUL_TIME 1
  35 +#endif
  36 +#ifndef UDIV_TIME
  37 +#define UDIV_TIME UMUL_TIME
  38 +#endif
  39 +
  40 +/* FIXME: We should be using invert_limb (or invert_normalized_limb)
  41 + * here (not udiv_qrnnd).
  42 + */
  43 +
/* Return (DIVIDEND_PTR,DIVIDEND_SIZE) mod DIVISOR_LIMB as a single limb.
 * The quotient is discarded.  There are no constraints on the divisor
 * other than it being non-zero (division by zero is the caller's
 * problem). */
mpi_limb_t
mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
	      mpi_limb_t divisor_limb)
{
	mpi_size_t i;
	mpi_limb_t n1, n0, r;
	int dummy;

	/* Botch: Should this be handled at all?  Rely on callers? */
	if (!dividend_size)
		return 0;

	/* If multiplication is much faster than division, and the
	 * dividend is large, pre-invert the divisor, and use
	 * only multiplications in the inner loop.
	 *
	 * This test should be read:
	 *   Does it ever help to use udiv_qrnnd_preinv?
	 *   && Does what we save compensate for the inversion overhead?
	 */
	if (UDIV_TIME > (2 * UMUL_TIME + 6)
	    && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
		int normalization_steps;

		count_leading_zeros(normalization_steps, divisor_limb);
		if (normalization_steps) {
			/* Divisor not yet normalized: shift it (and,
			 * implicitly, the dividend) left first. */
			mpi_limb_t divisor_limb_inverted;

			divisor_limb <<= normalization_steps;

			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
			 * most significant bit (with weight 2**N) implicit.
			 *
			 * Special case for DIVISOR_LIMB == 100...000.
			 */
			if (!(divisor_limb << 1))
				divisor_limb_inverted = ~(mpi_limb_t) 0;
			else
				udiv_qrnnd(divisor_limb_inverted, dummy,
					   -divisor_limb, 0, divisor_limb);

			n1 = dividend_ptr[dividend_size - 1];
			r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);

			/* Possible optimization:
			 * if (r == 0
			 * && divisor_limb > ((n1 << normalization_steps)
			 * | (dividend_ptr[dividend_size - 2] >> ...)))
			 * ...one division less...
			 */
			for (i = dividend_size - 2; i >= 0; i--) {
				n0 = dividend_ptr[i];
				UDIV_QRNND_PREINV(dummy, r, r,
						  ((n1 << normalization_steps)
						   | (n0 >>
						      (BITS_PER_MPI_LIMB -
						       normalization_steps))),
						  divisor_limb,
						  divisor_limb_inverted);
				n1 = n0;
			}
			UDIV_QRNND_PREINV(dummy, r, r,
					  n1 << normalization_steps,
					  divisor_limb, divisor_limb_inverted);
			return r >> normalization_steps;
		} else {
			/* Divisor already normalized (top bit set). */
			mpi_limb_t divisor_limb_inverted;

			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
			 * most significant bit (with weight 2**N) implicit.
			 *
			 * Special case for DIVISOR_LIMB == 100...000.
			 */
			if (!(divisor_limb << 1))
				divisor_limb_inverted = ~(mpi_limb_t) 0;
			else
				udiv_qrnnd(divisor_limb_inverted, dummy,
					   -divisor_limb, 0, divisor_limb);

			i = dividend_size - 1;
			r = dividend_ptr[i];

			if (r >= divisor_limb)
				r = 0;
			else
				i--;

			for (; i >= 0; i--) {
				n0 = dividend_ptr[i];
				UDIV_QRNND_PREINV(dummy, r, r,
						  n0, divisor_limb,
						  divisor_limb_inverted);
			}
			return r;
		}
	} else {
		/* Plain udiv_qrnnd path; normalize first if required. */
		if (UDIV_NEEDS_NORMALIZATION) {
			int normalization_steps;

			count_leading_zeros(normalization_steps, divisor_limb);
			if (normalization_steps) {
				divisor_limb <<= normalization_steps;

				n1 = dividend_ptr[dividend_size - 1];
				r = n1 >> (BITS_PER_MPI_LIMB -
					   normalization_steps);

				/* Possible optimization:
				 * if (r == 0
				 * && divisor_limb > ((n1 << normalization_steps)
				 * | (dividend_ptr[dividend_size - 2] >> ...)))
				 * ...one division less...
				 */
				for (i = dividend_size - 2; i >= 0; i--) {
					n0 = dividend_ptr[i];
					udiv_qrnnd(dummy, r, r,
						   ((n1 << normalization_steps)
						    | (n0 >>
						       (BITS_PER_MPI_LIMB -
							normalization_steps))),
						   divisor_limb);
					n1 = n0;
				}
				udiv_qrnnd(dummy, r, r,
					   n1 << normalization_steps,
					   divisor_limb);
				return r >> normalization_steps;
			}
		}
		/* No normalization needed, either because udiv_qrnnd doesn't require
		 * it, or because DIVISOR_LIMB is already normalized. */
		i = dividend_size - 1;
		r = dividend_ptr[i];

		if (r >= divisor_limb)
			r = 0;
		else
			i--;

		for (; i >= 0; i--) {
			n0 = dividend_ptr[i];
			udiv_qrnnd(dummy, r, r, n0, divisor_limb);
		}
		return r;
	}
}
  192 +
  193 +/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
  194 + * the NSIZE-DSIZE least significant quotient limbs at QP
  195 + * and the DSIZE long remainder at NP. If QEXTRA_LIMBS is
  196 + * non-zero, generate that many fraction bits and append them after the
  197 + * other quotient limbs.
  198 + * Return the most significant limb of the quotient, this is always 0 or 1.
  199 + *
  200 + * Preconditions:
  201 + * 0. NSIZE >= DSIZE.
  202 + * 1. The most significant bit of the divisor must be set.
  203 + * 2. QP must either not overlap with the input operands at all, or
  204 + * QP + DSIZE >= NP must hold true. (This means that it's
  205 + * possible to put the quotient in the high part of NUM, right after the
  206 + * remainder in NUM.
  207 + * 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero.
  208 + */
  209 +
/* See the contract in the comment block above; preconditions include
 * NSIZE >= DSIZE and a normalized divisor (top bit of dp[dsize-1] set). */
mpi_limb_t
mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
	       mpi_ptr_t np, mpi_size_t nsize, mpi_ptr_t dp, mpi_size_t dsize)
{
	mpi_limb_t most_significant_q_limb = 0;

	switch (dsize) {
	case 0:
		/* We are asked to divide by zero, so go ahead and do it!  (To make
		   the compiler not remove this statement, return the value.) */
		return 1 / dsize;

	case 1:
		/* Single-limb divisor: plain chained udiv_qrnnd. */
		{
			mpi_size_t i;
			mpi_limb_t n1;
			mpi_limb_t d;

			d = dp[0];
			n1 = np[nsize - 1];

			if (n1 >= d) {
				n1 -= d;
				most_significant_q_limb = 1;
			}

			qp += qextra_limbs;
			for (i = nsize - 2; i >= 0; i--)
				udiv_qrnnd(qp[i], n1, n1, np[i], d);
			qp -= qextra_limbs;

			/* Fraction limbs: continue dividing with zero
			 * low limbs appended. */
			for (i = qextra_limbs - 1; i >= 0; i--)
				udiv_qrnnd(qp[i], n1, n1, 0, d);

			np[0] = n1;
		}
		break;

	case 2:
		/* Two-limb divisor: specialised schoolbook step with the
		 * quotient estimated from the high divisor limb. */
		{
			mpi_size_t i;
			mpi_limb_t n1, n0, n2;
			mpi_limb_t d1, d0;

			np += nsize - 2;
			d1 = dp[1];
			d0 = dp[0];
			n1 = np[1];
			n0 = np[0];

			if (n1 >= d1 && (n1 > d1 || n0 >= d0)) {
				sub_ddmmss(n1, n0, n1, n0, d1, d0);
				most_significant_q_limb = 1;
			}

			for (i = qextra_limbs + nsize - 2 - 1; i >= 0; i--) {
				mpi_limb_t q;
				mpi_limb_t r;

				if (i >= qextra_limbs)
					np--;
				else
					np[0] = 0;

				if (n1 == d1) {
					/* Q should be either 111..111 or 111..110.  Need special
					 * treatment of this rare case as normal division would
					 * give overflow. */
					q = ~(mpi_limb_t) 0;

					r = n0 + d1;
					if (r < d1) {	/* Carry in the addition? */
						add_ssaaaa(n1, n0, r - d0,
							   np[0], 0, d0);
						qp[i] = q;
						continue;
					}
					n1 = d0 - (d0 != 0 ? 1 : 0);
					n0 = -d0;
				} else {
					udiv_qrnnd(q, r, n1, n0, d1);
					umul_ppmm(n1, n0, d0, q);
				}

				n2 = np[0];
q_test:
				if (n1 > r || (n1 == r && n0 > n2)) {
					/* The estimated Q was too large. */
					q--;
					sub_ddmmss(n1, n0, n1, n0, 0, d0);
					r += d1;
					if (r >= d1)	/* If not carry, test Q again. */
						goto q_test;
				}

				qp[i] = q;
				sub_ddmmss(n1, n0, r, n2, n1, n0);
			}
			np[1] = n1;
			np[0] = n0;
		}
		break;

	default:
		/* General case: classic schoolbook long division with a
		 * two-limb quotient estimate refined by submul/add-back. */
		{
			mpi_size_t i;
			mpi_limb_t dX, d1, n0;

			np += nsize - dsize;
			dX = dp[dsize - 1];
			d1 = dp[dsize - 2];
			n0 = np[dsize - 1];

			if (n0 >= dX) {
				if (n0 > dX
				    || mpihelp_cmp(np, dp, dsize - 1) >= 0) {
					mpihelp_sub_n(np, np, dp, dsize);
					n0 = np[dsize - 1];
					most_significant_q_limb = 1;
				}
			}

			for (i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) {
				mpi_limb_t q;
				mpi_limb_t n1, n2;
				mpi_limb_t cy_limb;

				if (i >= qextra_limbs) {
					np--;
					n2 = np[dsize];
				} else {
					/* Fraction limb: shift the window
					 * up and feed in a zero. */
					n2 = np[dsize - 1];
					MPN_COPY_DECR(np + 1, np, dsize - 1);
					np[0] = 0;
				}

				if (n0 == dX) {
					/* This might over-estimate q, but it's probably not worth
					 * the extra code here to find out. */
					q = ~(mpi_limb_t) 0;
				} else {
					mpi_limb_t r;

					udiv_qrnnd(q, r, n0, np[dsize - 1], dX);
					umul_ppmm(n1, n0, d1, q);

					while (n1 > r
					       || (n1 == r
						   && n0 > np[dsize - 2])) {
						q--;
						r += dX;
						if (r < dX)	/* I.e. "carry in previous addition?" */
							break;
						n1 -= n0 < d1;
						n0 -= d1;
					}
				}

				/* Possible optimization: We already have (q * n0) and (1 * n1)
				 * after the calculation of q.  Taking advantage of that, we
				 * could make this loop make two iterations less. */
				cy_limb = mpihelp_submul_1(np, dp, dsize, q);

				if (n2 != cy_limb) {
					/* q was one too large: add back. */
					mpihelp_add_n(np, np, dp, dsize);
					q--;
				}

				qp[i] = q;
				n0 = np[dsize - 1];
			}
		}
	}

	return most_significant_q_limb;
}
  386 +
  387 +/****************
  388 + * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
  389 + * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
  390 + * Return the single-limb remainder.
  391 + * There are no constraints on the value of the divisor.
  392 + *
  393 + * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
  394 + */
  395 +
/* Structure mirrors mpihelp_mod_1 above, but also stores the quotient
 * limbs into QUOT_PTR; see the contract in the comment block above. */
mpi_limb_t
mpihelp_divmod_1(mpi_ptr_t quot_ptr,
		 mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
		 mpi_limb_t divisor_limb)
{
	mpi_size_t i;
	mpi_limb_t n1, n0, r;
	int dummy;

	if (!dividend_size)
		return 0;

	/* If multiplication is much faster than division, and the
	 * dividend is large, pre-invert the divisor, and use
	 * only multiplications in the inner loop.
	 *
	 * This test should be read:
	 *   Does it ever help to use udiv_qrnnd_preinv?
	 *   && Does what we save compensate for the inversion overhead?
	 */
	if (UDIV_TIME > (2 * UMUL_TIME + 6)
	    && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
		int normalization_steps;

		count_leading_zeros(normalization_steps, divisor_limb);
		if (normalization_steps) {
			/* Divisor not normalized: shift left first. */
			mpi_limb_t divisor_limb_inverted;

			divisor_limb <<= normalization_steps;

			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
			 * most significant bit (with weight 2**N) implicit.
			 */
			/* Special case for DIVISOR_LIMB == 100...000. */
			if (!(divisor_limb << 1))
				divisor_limb_inverted = ~(mpi_limb_t) 0;
			else
				udiv_qrnnd(divisor_limb_inverted, dummy,
					   -divisor_limb, 0, divisor_limb);

			n1 = dividend_ptr[dividend_size - 1];
			r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);

			/* Possible optimization:
			 * if (r == 0
			 * && divisor_limb > ((n1 << normalization_steps)
			 * | (dividend_ptr[dividend_size - 2] >> ...)))
			 * ...one division less...
			 */
			for (i = dividend_size - 2; i >= 0; i--) {
				n0 = dividend_ptr[i];
				UDIV_QRNND_PREINV(quot_ptr[i + 1], r, r,
						  ((n1 << normalization_steps)
						   | (n0 >>
						      (BITS_PER_MPI_LIMB -
						       normalization_steps))),
						  divisor_limb,
						  divisor_limb_inverted);
				n1 = n0;
			}
			UDIV_QRNND_PREINV(quot_ptr[0], r, r,
					  n1 << normalization_steps,
					  divisor_limb, divisor_limb_inverted);
			return r >> normalization_steps;
		} else {
			/* Divisor already normalized. */
			mpi_limb_t divisor_limb_inverted;

			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
			 * most significant bit (with weight 2**N) implicit.
			 */
			/* Special case for DIVISOR_LIMB == 100...000. */
			if (!(divisor_limb << 1))
				divisor_limb_inverted = ~(mpi_limb_t) 0;
			else
				udiv_qrnnd(divisor_limb_inverted, dummy,
					   -divisor_limb, 0, divisor_limb);

			i = dividend_size - 1;
			r = dividend_ptr[i];

			if (r >= divisor_limb)
				r = 0;
			else
				quot_ptr[i--] = 0;

			for (; i >= 0; i--) {
				n0 = dividend_ptr[i];
				UDIV_QRNND_PREINV(quot_ptr[i], r, r,
						  n0, divisor_limb,
						  divisor_limb_inverted);
			}
			return r;
		}
	} else {
		/* Plain udiv_qrnnd path; normalize first if required. */
		if (UDIV_NEEDS_NORMALIZATION) {
			int normalization_steps;

			count_leading_zeros(normalization_steps, divisor_limb);
			if (normalization_steps) {
				divisor_limb <<= normalization_steps;

				n1 = dividend_ptr[dividend_size - 1];
				r = n1 >> (BITS_PER_MPI_LIMB -
					   normalization_steps);

				/* Possible optimization:
				 * if (r == 0
				 * && divisor_limb > ((n1 << normalization_steps)
				 * | (dividend_ptr[dividend_size - 2] >> ...)))
				 * ...one division less...
				 */
				for (i = dividend_size - 2; i >= 0; i--) {
					n0 = dividend_ptr[i];
					udiv_qrnnd(quot_ptr[i + 1], r, r,
						   ((n1 << normalization_steps)
						    | (n0 >>
						       (BITS_PER_MPI_LIMB -
							normalization_steps))),
						   divisor_limb);
					n1 = n0;
				}
				udiv_qrnnd(quot_ptr[0], r, r,
					   n1 << normalization_steps,
					   divisor_limb);
				return r >> normalization_steps;
			}
		}
		/* No normalization needed, either because udiv_qrnnd doesn't require
		 * it, or because DIVISOR_LIMB is already normalized. */
		i = dividend_size - 1;
		r = dividend_ptr[i];

		if (r >= divisor_limb)
			r = 0;
		else
			quot_ptr[i--] = 0;

		for (; i >= 0; i--) {
			n0 = dividend_ptr[i];
			udiv_qrnnd(quot_ptr[i], r, r, n0, divisor_limb);
		}
		return r;
	}
}
  1 +/* mpihelp-mul.c - MPI helper functions
  2 + * Copyright (C) 1994, 1996, 1998, 1999,
  3 + * 2000 Free Software Foundation, Inc.
  4 + *
  5 + * This file is part of GnuPG.
  6 + *
  7 + * GnuPG is free software; you can redistribute it and/or modify
  8 + * it under the terms of the GNU General Public License as published by
  9 + * the Free Software Foundation; either version 2 of the License, or
  10 + * (at your option) any later version.
  11 + *
  12 + * GnuPG is distributed in the hope that it will be useful,
  13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15 + * GNU General Public License for more details.
  16 + *
  17 + * You should have received a copy of the GNU General Public License
  18 + * along with this program; if not, write to the Free Software
  19 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  20 + *
  21 + * Note: This code is heavily based on the GNU MP Library.
  22 + * Actually it's the same code with only minor changes in the
  23 + * way the data is stored; this is to support the abstraction
  24 + * of an optional secure memory allocation which may be used
  25 + * to avoid revealing of sensitive data due to paging etc.
  26 + * The GNU MP Library itself is published under the LGPL;
  27 + * however I decided to publish this code under the plain GPL.
  28 + */
  29 +
  30 +#include <linux/string.h>
  31 +#include "mpi-internal.h"
  32 +#include "longlong.h"
  33 +
/* Recursion helper for mul_n(): use the schoolbook routine below the
 * Karatsuba cut-off, otherwise recurse with scratch space TSPACE.
 * Deliberately NOT semicolon-terminated, so that the caller's
 * "MPN_MUL_N_RECURSE(...);" expands to exactly one statement and the
 * macro stays valid inside an unbraced if/else.
 */
#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace)		\
	do {							\
		if ((size) < KARATSUBA_THRESHOLD)		\
			mul_n_basecase(prodp, up, vp, size);	\
		else						\
			mul_n(prodp, up, vp, size, tspace);	\
	} while (0)
  41 +
/* Recursion helper for mpih_sqr_n(): schoolbook squaring below the
 * Karatsuba cut-off, recursive squaring above it.  As with
 * MPN_MUL_N_RECURSE, no trailing semicolon -- the invocation supplies
 * it, keeping the expansion a single statement.
 */
#define MPN_SQR_N_RECURSE(prodp, up, size, tspace)		\
	do {							\
		if ((size) < KARATSUBA_THRESHOLD)		\
			mpih_sqr_n_basecase(prodp, up, size);	\
		else						\
			mpih_sqr_n(prodp, up, size, tspace);	\
	} while (0)
  49 +
  50 +/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
  51 + * both with SIZE limbs, and store the result at PRODP. 2 * SIZE limbs are
  52 + * always stored. Return the most significant limb.
  53 + *
  54 + * Argument constraints:
  55 + * 1. PRODP != UP and PRODP != VP, i.e. the destination
  56 + * must be distinct from the multiplier and the multiplicand.
  57 + *
  58 + *
  59 + * Handle simple cases with traditional multiplication.
  60 + *
  61 + * This is the most critical code of multiplication. All multiplies rely
  62 + * on this, both small and huge. Small ones arrive here immediately. Huge
  63 + * ones arrive here as this is the base case for Karatsuba's recursive
  64 + * algorithm below.
  65 + */
  66 +
  67 +static mpi_limb_t
  68 +mul_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
  69 +{
  70 + mpi_size_t i;
  71 + mpi_limb_t cy;
  72 + mpi_limb_t v_limb;
  73 +
  74 + /* Multiply by the first limb in V separately, as the result can be
  75 + * stored (not added) to PROD. We also avoid a loop for zeroing. */
  76 + v_limb = vp[0];
  77 + if (v_limb <= 1) {
  78 + if (v_limb == 1)
  79 + MPN_COPY(prodp, up, size);
  80 + else
  81 + MPN_ZERO(prodp, size);
  82 + cy = 0;
  83 + } else
  84 + cy = mpihelp_mul_1(prodp, up, size, v_limb);
  85 +
  86 + prodp[size] = cy;
  87 + prodp++;
  88 +
  89 + /* For each iteration in the outer loop, multiply one limb from
  90 + * U with one limb from V, and add it to PROD. */
  91 + for (i = 1; i < size; i++) {
  92 + v_limb = vp[i];
  93 + if (v_limb <= 1) {
  94 + cy = 0;
  95 + if (v_limb == 1)
  96 + cy = mpihelp_add_n(prodp, prodp, up, size);
  97 + } else
  98 + cy = mpihelp_addmul_1(prodp, up, size, v_limb);
  99 +
  100 + prodp[size] = cy;
  101 + prodp++;
  102 + }
  103 +
  104 + return cy;
  105 +}
  106 +
/* Karatsuba product: PRODP = UP * VP, both operands SIZE limbs long,
 * with 2 * SIZE limbs always written to PRODP.  TSPACE must provide
 * 2 * SIZE limbs of scratch space (half is passed down each recursion
 * level).  PRODP must not overlap UP or VP.
 */
static void
mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
      mpi_size_t size, mpi_ptr_t tspace)
{
	if (size & 1) {
		/* The size is odd, and the code below doesn't handle that.
		 * Multiply the least significant (size - 1) limbs with a recursive
		 * call, and handle the most significant limb of S1 and S2
		 * separately.
		 * A slightly faster way to do this would be to make the Karatsuba
		 * code below behave as if the size were even, and let it check for
		 * odd size in the end. I.e., in essence move this code to the end.
		 * Doing so would save us a recursive call, and potentially make the
		 * stack grow a lot less.
		 */
		mpi_size_t esize = size - 1;	/* even size */
		mpi_limb_t cy_limb;

		MPN_MUL_N_RECURSE(prodp, up, vp, esize, tspace);
		/* Fold the top limb of each operand in with two
		 * multiply-accumulate passes over the partial product. */
		cy_limb = mpihelp_addmul_1(prodp + esize, up, esize, vp[esize]);
		prodp[esize + esize] = cy_limb;
		cy_limb = mpihelp_addmul_1(prodp + esize, vp, size, up[esize]);
		prodp[esize + size] = cy_limb;
	} else {
		/* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
		 *
		 * Split U in two pieces, U1 and U0, such that
		 * U = U0 + U1*(B**n),
		 * and V in V1 and V0, such that
		 * V = V0 + V1*(B**n).
		 *
		 * UV is then computed recursively using the identity
		 *
		 *        2n   n          n                     n
		 * UV = (B  + B )U V  +  B (U -U )(V -V )  +  (B + 1)U V
		 *                1 1        1  0   0  1              0 0
		 *
		 * Where B = 2**BITS_PER_MP_LIMB.
		 */
		mpi_size_t hsize = size >> 1;
		mpi_limb_t cy;
		int negflg;

		/* Product H.      ________________  ________________
		 *                |_____U1 x V1____||____U0 x V0_____|
		 * Put result in upper part of PROD and pass low part of TSPACE
		 * as new TSPACE.
		 */
		MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize,
				  tspace);

		/* Product M.      ________________
		 *                |_(U1-U0)(V0-V1)_|
		 */
		/* |U1 - U0| is formed in the low half of PROD; NEGFLG tracks
		 * the sign of the full middle product (U1-U0)(V0-V1). */
		if (mpihelp_cmp(up + hsize, up, hsize) >= 0) {
			mpihelp_sub_n(prodp, up + hsize, up, hsize);
			negflg = 0;
		} else {
			mpihelp_sub_n(prodp, up, up + hsize, hsize);
			negflg = 1;
		}
		if (mpihelp_cmp(vp + hsize, vp, hsize) >= 0) {
			mpihelp_sub_n(prodp + hsize, vp + hsize, vp, hsize);
			negflg ^= 1;
		} else {
			mpihelp_sub_n(prodp + hsize, vp, vp + hsize, hsize);
			/* No change of NEGFLG. */
		}
		/* Read temporary operands from low part of PROD.
		 * Put result in low part of TSPACE using upper part of TSPACE
		 * as new TSPACE.
		 */
		MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize,
				  tspace + size);

		/* Add/copy product H. */
		MPN_COPY(prodp + hsize, prodp + size, hsize);
		cy = mpihelp_add_n(prodp + size, prodp + size,
				   prodp + size + hsize, hsize);

		/* Add product M (if NEGFLG M is a negative number) */
		if (negflg)
			cy -=
			    mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace,
					  size);
		else
			cy +=
			    mpihelp_add_n(prodp + hsize, prodp + hsize, tspace,
					  size);

		/* Product L.      ________________  ________________
		 *                |________________||____U0 x V0_____|
		 * Read temporary operands from low part of PROD.
		 * Put result in low part of TSPACE using upper part of TSPACE
		 * as new TSPACE.
		 */
		MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size);

		/* Add/copy Product L (twice) */

		cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size);
		/* Propagate the accumulated carry into the top hsize limbs. */
		if (cy)
			mpihelp_add_1(prodp + hsize + size,
				      prodp + hsize + size, hsize, cy);

		MPN_COPY(prodp, tspace, hsize);
		cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize,
				   hsize);
		if (cy)
			mpihelp_add_1(prodp + size, prodp + size, size, 1);
	}
}
  219 +
  220 +void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size)
  221 +{
  222 + mpi_size_t i;
  223 + mpi_limb_t cy_limb;
  224 + mpi_limb_t v_limb;
  225 +
  226 + /* Multiply by the first limb in V separately, as the result can be
  227 + * stored (not added) to PROD. We also avoid a loop for zeroing. */
  228 + v_limb = up[0];
  229 + if (v_limb <= 1) {
  230 + if (v_limb == 1)
  231 + MPN_COPY(prodp, up, size);
  232 + else
  233 + MPN_ZERO(prodp, size);
  234 + cy_limb = 0;
  235 + } else
  236 + cy_limb = mpihelp_mul_1(prodp, up, size, v_limb);
  237 +
  238 + prodp[size] = cy_limb;
  239 + prodp++;
  240 +
  241 + /* For each iteration in the outer loop, multiply one limb from
  242 + * U with one limb from V, and add it to PROD. */
  243 + for (i = 1; i < size; i++) {
  244 + v_limb = up[i];
  245 + if (v_limb <= 1) {
  246 + cy_limb = 0;
  247 + if (v_limb == 1)
  248 + cy_limb = mpihelp_add_n(prodp, prodp, up, size);
  249 + } else
  250 + cy_limb = mpihelp_addmul_1(prodp, up, size, v_limb);
  251 +
  252 + prodp[size] = cy_limb;
  253 + prodp++;
  254 + }
  255 +}
  256 +
/* Karatsuba squaring: PRODP = UP^2 with UP of SIZE limbs; 2 * SIZE
 * limbs are written to PRODP.  Same shape as mul_n(), but the middle
 * term is always -(|U1-U0|)^2 <= 0, so it is subtracted
 * unconditionally and no sign flag is needed.  TSPACE must provide
 * 2 * SIZE limbs of scratch space.
 */
void
mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
{
	if (size & 1) {
		/* The size is odd, and the code below doesn't handle that.
		 * Multiply the least significant (size - 1) limbs with a recursive
		 * call, and handle the most significant limb of S1 and S2
		 * separately.
		 * A slightly faster way to do this would be to make the Karatsuba
		 * code below behave as if the size were even, and let it check for
		 * odd size in the end. I.e., in essence move this code to the end.
		 * Doing so would save us a recursive call, and potentially make the
		 * stack grow a lot less.
		 */
		mpi_size_t esize = size - 1;	/* even size */
		mpi_limb_t cy_limb;

		MPN_SQR_N_RECURSE(prodp, up, esize, tspace);
		/* Fold in the top limb with two multiply-accumulate passes. */
		cy_limb = mpihelp_addmul_1(prodp + esize, up, esize, up[esize]);
		prodp[esize + esize] = cy_limb;
		cy_limb = mpihelp_addmul_1(prodp + esize, up, size, up[esize]);

		prodp[esize + size] = cy_limb;
	} else {
		mpi_size_t hsize = size >> 1;
		mpi_limb_t cy;

		/* Product H.      ________________  ________________
		 *                |_____U1 x U1____||____U0 x U0_____|
		 * Put result in upper part of PROD and pass low part of TSPACE
		 * as new TSPACE.
		 */
		MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace);

		/* Product M.      ________________
		 *                |_(U1-U0)(U0-U1)_|
		 */
		/* |U1 - U0| goes in the low half of PROD; its square is
		 * subtracted below, so no sign tracking is required. */
		if (mpihelp_cmp(up + hsize, up, hsize) >= 0)
			mpihelp_sub_n(prodp, up + hsize, up, hsize);
		else
			mpihelp_sub_n(prodp, up, up + hsize, hsize);

		/* Read temporary operands from low part of PROD.
		 * Put result in low part of TSPACE using upper part of TSPACE
		 * as new TSPACE. */
		MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size);

		/* Add/copy product H */
		MPN_COPY(prodp + hsize, prodp + size, hsize);
		cy = mpihelp_add_n(prodp + size, prodp + size,
				   prodp + size + hsize, hsize);

		/* Add product M (if NEGFLG M is a negative number). */
		cy -= mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace, size);

		/* Product L.      ________________  ________________
		 *                |________________||____U0 x U0_____|
		 * Read temporary operands from low part of PROD.
		 * Put result in low part of TSPACE using upper part of TSPACE
		 * as new TSPACE. */
		MPN_SQR_N_RECURSE(tspace, up, hsize, tspace + size);

		/* Add/copy Product L (twice). */
		cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size);
		/* Propagate the accumulated carry into the top hsize limbs. */
		if (cy)
			mpihelp_add_1(prodp + hsize + size,
				      prodp + hsize + size, hsize, cy);

		MPN_COPY(prodp, tspace, hsize);
		cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize,
				   hsize);
		if (cy)
			mpihelp_add_1(prodp + size, prodp + size, size, 1);
	}
}
  332 +
  333 +/* This should be made into an inline function in gmp.h. */
  334 +int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
  335 +{
  336 + if (up == vp) {
  337 + if (size < KARATSUBA_THRESHOLD)
  338 + mpih_sqr_n_basecase(prodp, up, size);
  339 + else {
  340 + mpi_ptr_t tspace;
  341 + tspace = mpi_alloc_limb_space(2 * size);
  342 + if (!tspace)
  343 + return -ENOMEM;
  344 + mpih_sqr_n(prodp, up, size, tspace);
  345 + mpi_free_limb_space(tspace);
  346 + }
  347 + } else {
  348 + if (size < KARATSUBA_THRESHOLD)
  349 + mul_n_basecase(prodp, up, vp, size);
  350 + else {
  351 + mpi_ptr_t tspace;
  352 + tspace = mpi_alloc_limb_space(2 * size);
  353 + if (!tspace)
  354 + return -ENOMEM;
  355 + mul_n(prodp, up, vp, size, tspace);
  356 + mpi_free_limb_space(tspace);
  357 + }
  358 + }
  359 +
  360 + return 0;
  361 +}
  362 +
/* Multiply U (UP, USIZE limbs) by V (VP, VSIZE limbs) into PRODP,
 * processing U in VSIZE-limb chunks.  Scratch buffers are cached in
 * CTX (and in CTX->next for the recursive tail call) so that repeated
 * calls can reuse them; release with mpihelp_release_karatsuba_ctx().
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): callers appear to guarantee USIZE >= VSIZE (see the
 * constraint documented on mpihelp_mul()) -- confirm before reusing
 * this entry point elsewhere.
 */
int
mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
			   mpi_ptr_t up, mpi_size_t usize,
			   mpi_ptr_t vp, mpi_size_t vsize,
			   struct karatsuba_ctx *ctx)
{
	mpi_limb_t cy;

	/* (Re)allocate the 2*VSIZE-limb scratch buffer if it is absent
	 * or too small for this operand size. */
	if (!ctx->tspace || ctx->tspace_size < vsize) {
		if (ctx->tspace)
			mpi_free_limb_space(ctx->tspace);
		ctx->tspace = mpi_alloc_limb_space(2 * vsize);
		if (!ctx->tspace)
			return -ENOMEM;
		ctx->tspace_size = vsize;
	}

	/* Lowest VSIZE x VSIZE partial product goes straight to PRODP. */
	MPN_MUL_N_RECURSE(prodp, up, vp, vsize, ctx->tspace);

	prodp += vsize;
	up += vsize;
	usize -= vsize;
	if (usize >= vsize) {
		/* Second scratch buffer for the chunk products; the
		 * chunks must be accumulated, not stored. */
		if (!ctx->tp || ctx->tp_size < vsize) {
			if (ctx->tp)
				mpi_free_limb_space(ctx->tp);
			ctx->tp = mpi_alloc_limb_space(2 * vsize);
			if (!ctx->tp) {
				/* Drop tspace too so the ctx is left
				 * in a consistent (empty) state. */
				if (ctx->tspace)
					mpi_free_limb_space(ctx->tspace);
				ctx->tspace = NULL;
				return -ENOMEM;
			}
			ctx->tp_size = vsize;
		}

		/* Process each full VSIZE-limb chunk of the remaining U:
		 * form the chunk product in tp, add its low half into
		 * PRODP and copy-with-carry its high half above. */
		do {
			MPN_MUL_N_RECURSE(ctx->tp, up, vp, vsize, ctx->tspace);
			cy = mpihelp_add_n(prodp, prodp, ctx->tp, vsize);
			mpihelp_add_1(prodp + vsize, ctx->tp + vsize, vsize,
				      cy);
			prodp += vsize;
			up += vsize;
			usize -= vsize;
		} while (usize >= vsize);
	}

	/* Final partial chunk of fewer than VSIZE limbs, if any.  The
	 * operands are swapped so the larger one comes first, matching
	 * mpihelp_mul()'s USIZE >= VSIZE constraint. */
	if (usize) {
		if (usize < KARATSUBA_THRESHOLD) {
			mpi_limb_t tmp;
			if (mpihelp_mul(ctx->tspace, vp, vsize, up, usize, &tmp)
			    < 0)
				return -ENOMEM;
		} else {
			/* Deeper recursion gets its own cached context. */
			if (!ctx->next) {
				ctx->next = kzalloc(sizeof *ctx, GFP_KERNEL);
				if (!ctx->next)
					return -ENOMEM;
			}
			if (mpihelp_mul_karatsuba_case(ctx->tspace,
						       vp, vsize,
						       up, usize,
						       ctx->next) < 0)
				return -ENOMEM;
		}

		cy = mpihelp_add_n(prodp, prodp, ctx->tspace, vsize);
		mpihelp_add_1(prodp + vsize, ctx->tspace + vsize, usize, cy);
	}

	return 0;
}
  435 +
  436 +void mpihelp_release_karatsuba_ctx(struct karatsuba_ctx *ctx)
  437 +{
  438 + struct karatsuba_ctx *ctx2;
  439 +
  440 + if (ctx->tp)
  441 + mpi_free_limb_space(ctx->tp);
  442 + if (ctx->tspace)
  443 + mpi_free_limb_space(ctx->tspace);
  444 + for (ctx = ctx->next; ctx; ctx = ctx2) {
  445 + ctx2 = ctx->next;
  446 + if (ctx->tp)
  447 + mpi_free_limb_space(ctx->tp);
  448 + if (ctx->tspace)
  449 + mpi_free_limb_space(ctx->tspace);
  450 + kfree(ctx);
  451 + }
  452 +}
  453 +
  454 +/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
  455 + * and v (pointed to by VP, with VSIZE limbs), and store the result at
  456 + * PRODP. USIZE + VSIZE limbs are always stored, but if the input
  457 + * operands are normalized. Return the most significant limb of the
  458 + * result.
  459 + *
  460 + * NOTE: The space pointed to by PRODP is overwritten before finished
  461 + * with U and V, so overlap is an error.
  462 + *
  463 + * Argument constraints:
  464 + * 1. USIZE >= VSIZE.
  465 + * 2. PRODP != UP and PRODP != VP, i.e. the destination
  466 + * must be distinct from the multiplier and the multiplicand.
  467 + */
  468 +
  469 +int
  470 +mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
  471 + mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result)
  472 +{
  473 + mpi_ptr_t prod_endp = prodp + usize + vsize - 1;
  474 + mpi_limb_t cy;
  475 + struct karatsuba_ctx ctx;
  476 +
  477 + if (vsize < KARATSUBA_THRESHOLD) {
  478 + mpi_size_t i;
  479 + mpi_limb_t v_limb;
  480 +
  481 + if (!vsize) {
  482 + *_result = 0;
  483 + return 0;
  484 + }
  485 +
  486 + /* Multiply by the first limb in V separately, as the result can be
  487 + * stored (not added) to PROD. We also avoid a loop for zeroing. */
  488 + v_limb = vp[0];
  489 + if (v_limb <= 1) {
  490 + if (v_limb == 1)
  491 + MPN_COPY(prodp, up, usize);
  492 + else
  493 + MPN_ZERO(prodp, usize);
  494 + cy = 0;
  495 + } else
  496 + cy = mpihelp_mul_1(prodp, up, usize, v_limb);
  497 +
  498 + prodp[usize] = cy;
  499 + prodp++;
  500 +
  501 + /* For each iteration in the outer loop, multiply one limb from
  502 + * U with one limb from V, and add it to PROD. */
  503 + for (i = 1; i < vsize; i++) {
  504 + v_limb = vp[i];
  505 + if (v_limb <= 1) {
  506 + cy = 0;
  507 + if (v_limb == 1)
  508 + cy = mpihelp_add_n(prodp, prodp, up,
  509 + usize);
  510 + } else
  511 + cy = mpihelp_addmul_1(prodp, up, usize, v_limb);
  512 +
  513 + prodp[usize] = cy;
  514 + prodp++;
  515 + }
  516 +
  517 + *_result = cy;
  518 + return 0;
  519 + }
  520 +
  521 + memset(&ctx, 0, sizeof ctx);
  522 + if (mpihelp_mul_karatsuba_case(prodp, up, usize, vp, vsize, &ctx) < 0)
  523 + return -ENOMEM;
  524 + mpihelp_release_karatsuba_ctx(&ctx);
  525 + *_result = *prod_endp;
  526 + return 0;
  527 +}
/* mpiutil.c - Utility functions for MPI
  2 + * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
  3 + *
  4 + * This file is part of GnuPG.
  5 + *
  6 + * GnuPG is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License as published by
  8 + * the Free Software Foundation; either version 2 of the License, or
  9 + * (at your option) any later version.
  10 + *
  11 + * GnuPG is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 + * GNU General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU General Public License
  17 + * along with this program; if not, write to the Free Software
  18 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
  19 + */
  20 +
  21 +#include "mpi-internal.h"
  22 +
  23 +/****************
  24 + * Note: It was a bad idea to use the number of limbs to allocate
 * because on an Alpha the limbs are large but we normally need
 * integers of n bits - so we should change this to bits (or bytes).
  27 + *
  28 + * But mpi_alloc is used in a lot of places :-)
  29 + */
  30 +MPI mpi_alloc(unsigned nlimbs)
  31 +{
  32 + MPI a;
  33 +
  34 + a = kmalloc(sizeof *a, GFP_KERNEL);
  35 + if (!a)
  36 + return a;
  37 +
  38 + if (nlimbs) {
  39 + a->d = mpi_alloc_limb_space(nlimbs);
  40 + if (!a->d) {
  41 + kfree(a);
  42 + return NULL;
  43 + }
  44 + } else {
  45 + a->d = NULL;
  46 + }
  47 +
  48 + a->alloced = nlimbs;
  49 + a->nlimbs = 0;
  50 + a->sign = 0;
  51 + a->flags = 0;
  52 + a->nbits = 0;
  53 + return a;
  54 +}
  55 +EXPORT_SYMBOL_GPL(mpi_alloc);
  56 +
  57 +mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs)
  58 +{
  59 + size_t len = nlimbs * sizeof(mpi_limb_t);
  60 +
  61 + return kmalloc(len, GFP_KERNEL);
  62 +}
  63 +
  64 +void mpi_free_limb_space(mpi_ptr_t a)
  65 +{
  66 + if (!a)
  67 + return;
  68 +
  69 + kfree(a);
  70 +}
  71 +
  72 +void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs)
  73 +{
  74 + mpi_free_limb_space(a->d);
  75 + a->d = ap;
  76 + a->alloced = nlimbs;
  77 +}
  78 +
  79 +/****************
  80 + * Resize the array of A to NLIMBS. the additional space is cleared
  81 + * (set to 0) [done by m_realloc()]
  82 + */
  83 +int mpi_resize(MPI a, unsigned nlimbs)
  84 +{
  85 + void *p;
  86 +
  87 + if (nlimbs <= a->alloced)
  88 + return 0; /* no need to do it */
  89 +
  90 + if (a->d) {
  91 + p = kmalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL);
  92 + if (!p)
  93 + return -ENOMEM;
  94 + memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
  95 + kfree(a->d);
  96 + a->d = p;
  97 + } else {
  98 + a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL);
  99 + if (!a->d)
  100 + return -ENOMEM;
  101 + }
  102 + a->alloced = nlimbs;
  103 + return 0;
  104 +}
  105 +
  106 +void mpi_clear(MPI a)
  107 +{
  108 + a->nlimbs = 0;
  109 + a->nbits = 0;
  110 + a->flags = 0;
  111 +}
  112 +
/* Release an MPI and the limb storage it owns.  A NULL argument is a
 * no-op.
 */
void mpi_free(MPI a)
{
	if (!a)
		return;

	/* Flag bit 2 (value 4) selects a plain kfree() of the limb
	 * buffer instead of mpi_free_limb_space() -- presumably it
	 * marks buffers obtained from a different allocator; TODO
	 * confirm against the flag definitions in the mpi headers. */
	if (a->flags & 4)
		kfree(a->d);
	else
		mpi_free_limb_space(a->d);

	/* Only the low three flag bits are known here; anything above
	 * them indicates corruption or an unsupported flag. */
	if (a->flags & ~7)
		pr_info("invalid flag value in mpi\n");
	kfree(a);
}
EXPORT_SYMBOL_GPL(mpi_free);
  128 +
  129 +/****************
  130 + * Note: This copy function should not interpret the MPI
  131 + * but copy it transparently.
  132 + */
  133 +int mpi_copy(MPI *copied, const MPI a)
  134 +{
  135 + size_t i;
  136 + MPI b;
  137 +
  138 + *copied = MPI_NULL;
  139 +
  140 + if (a) {
  141 + b = mpi_alloc(a->nlimbs);
  142 + if (!b)
  143 + return -ENOMEM;
  144 +
  145 + b->nlimbs = a->nlimbs;
  146 + b->sign = a->sign;
  147 + b->flags = a->flags;
  148 + b->nbits = a->nbits;
  149 +
  150 + for (i = 0; i < b->nlimbs; i++)
  151 + b->d[i] = a->d[i];
  152 +
  153 + *copied = b;
  154 + }
  155 +
  156 + return 0;
  157 +}
  158 +
  159 +int mpi_set(MPI w, const MPI u)
  160 +{
  161 + mpi_ptr_t wp, up;
  162 + mpi_size_t usize = u->nlimbs;
  163 + int usign = u->sign;
  164 +
  165 + if (RESIZE_IF_NEEDED(w, (size_t) usize) < 0)
  166 + return -ENOMEM;
  167 +
  168 + wp = w->d;
  169 + up = u->d;
  170 + MPN_COPY(wp, up, usize);
  171 + w->nlimbs = usize;
  172 + w->nbits = u->nbits;
  173 + w->flags = u->flags;
  174 + w->sign = usign;
  175 + return 0;
  176 +}
  177 +
  178 +int mpi_set_ui(MPI w, unsigned long u)
  179 +{
  180 + if (RESIZE_IF_NEEDED(w, 1) < 0)
  181 + return -ENOMEM;
  182 + w->d[0] = u;
  183 + w->nlimbs = u ? 1 : 0;
  184 + w->sign = 0;
  185 + w->nbits = 0;
  186 + w->flags = 0;
  187 + return 0;
  188 +}
  189 +
  190 +MPI mpi_alloc_set_ui(unsigned long u)
  191 +{
  192 + MPI w = mpi_alloc(1);
  193 + if (!w)
  194 + return w;
  195 + w->d[0] = u;
  196 + w->nlimbs = u ? 1 : 0;
  197 + w->sign = 0;
  198 + return w;
  199 +}
  200 +
  201 +void mpi_swap(MPI a, MPI b)
  202 +{
  203 + struct gcry_mpi tmp;
  204 +
  205 + tmp = *a;
  206 + *a = *b;
  207 + *b = tmp;
  208 +}