#include <NTL/LLL.h>
#include <NTL/HNF.h>
#include <NTL/vec_vec_ZZ.h>

//////////////////////////////////////////////////////////////////////////
//begin old stuff

NTL_START_IMPL

static void ExactDiv(ZZ& qq, const ZZ& a, const ZZ& b)
{
   static ZZ q, r;

   DivRem(q, r, a, b);
   if (!IsZero(r)) {
      cerr << "a = " << a << "\n";
      cerr << "b = " << b << "\n";
      Error("ExactDiv: nonzero remainder");
   }
   qq = q;
}

static void BalDiv(ZZ& q, const ZZ& a, const ZZ& d)
// rounds a/d to nearest integer, breaking ties
// by rounding towards zero.  Assumes d > 0.
{
   static ZZ r;

   DivRem(q, r, a, d);
   add(r, r, r);

   long cmp = compare(r, d);
   if (cmp > 0 || (cmp == 0 && q < 0))
      add(q, q, 1);
}

static void MulAddDiv(ZZ& c, const ZZ& c1, const ZZ& c2,
                      const ZZ& x, const ZZ& y, const ZZ& z)
// c = (x*c1 + y*c2)/z
{
   static ZZ t1, t2;

   mul(t1, x, c1);
   mul(t2, y, c2);
   add(t1, t1, t2);
   ExactDiv(c, t1, z);
}

static void MulSubDiv(ZZ& c, const ZZ& c1, const ZZ& c2,
                      const ZZ& x, const ZZ& y, const ZZ& z)
// c = (x*c1 - y*c2)/z
{
   static ZZ t1, t2;

   mul(t1, x, c1);
   mul(t2, y, c2);
   sub(t1, t1, t2);
   ExactDiv(c, t1, z);
}

#if 0
static void MulSubDiv(vec_ZZ& c, const vec_ZZ& c1, const vec_ZZ& c2,
                      const ZZ& x, const ZZ& y, const ZZ& z)
// c = (x*c1 - y*c2)/z
{
   long n = c1.length();
   if (c2.length() != n) Error("MulSubDiv: length mismatch");

   c.SetLength(n);
   long i;
   for (i = 1; i <= n; i++)
      MulSubDiv(c(i), c1(i), c2(i), x, y, z);
}
#endif

static void RowTransform(vec_ZZ& c1, vec_ZZ& c2,
                         const ZZ& x, const ZZ& y, const ZZ& u, const ZZ& v)
// (c1, c2) = (x*c1 + y*c2, u*c1 + v*c2)
{
   long n = c1.length();
   if (c2.length() != n) Error("RowTransform: length mismatch");

   static ZZ t1, t2, t3, t4;
   long i;

   for (i = 1; i <= n; i++) {
      mul(t1, x, c1(i));
      mul(t2, y, c2(i));
      add(t1, t1, t2);

      mul(t3, u, c1(i));
      mul(t4, v, c2(i));
      add(t3, t3, t4);

      c1(i) = t1;
      c2(i) = t3;
   }
}

static void RowTransform(ZZ& c1, ZZ& c2,
                         const ZZ& x, const ZZ& y, const ZZ& u, const ZZ& v)
// (c1, c2) = (x*c1 + y*c2, u*c1 + v*c2)
{
   static ZZ t1, t2, t3, t4;

   mul(t1, x, c1);
   mul(t2, y, c2);
   add(t1, t1, t2);

   mul(t3, u, c1);
   mul(t4, v, c2);
   add(t3, t3, t4);

   c1 = t1;
   c2 = t3;
}

static void MulSub(ZZ& c, const ZZ& c1, const ZZ& c2, const ZZ& x)
// c = c1 - x*c2
{
   static ZZ t1;

   mul(t1, x, c2);
   sub(c, c1, t1);
}

static void MulSub(vec_ZZ& c, const vec_ZZ& c1, const vec_ZZ& c2, const ZZ& x)
// c = c1 - x*c2
{
   long n = c1.length();
   if (c2.length() != n) Error("MulSub: length mismatch");

   c.SetLength(n);
   long i;
   for (i = 1; i <= n; i++)
      MulSub(c(i), c1(i), c2(i), x);
}

static long SwapTest(const ZZ& d0, const ZZ& d1, const ZZ& d2, const ZZ& lam,
                     long a, long b)
// test if a*d1^2 > b*(d0*d2 + lam^2)
{
   static ZZ t1, t2;

   mul(t1, d0, d2);
   sqr(t2, lam);
   add(t1, t1, t2);
   mul(t1, t1, b);

   sqr(t2, d1);
   mul(t2, t2, a);

   return t2 > t1;
}

static void reduce(long k, long l,
                   mat_ZZ& B, vec_long& P, vec_ZZ& D,
                   vec_vec_ZZ& lam, mat_ZZ* U)
{
   static ZZ t1;
   static ZZ r;

   if (P(l) == 0) return;
   add(t1, lam(k)(P(l)), lam(k)(P(l)));
   abs(t1, t1);
   if (t1 <= D[P(l)]) return;

   long j;

   BalDiv(r, lam(k)(P(l)), D[P(l)]);
   MulSub(B(k), B(k), B(l), r);

   if (U) MulSub((*U)(k), (*U)(k), (*U)(l), r);

   for (j = 1; j <= l-1; j++)
      if (P(j) != 0)
         MulSub(lam(k)(P(j)), lam(k)(P(j)), lam(l)(P(j)), r);

   MulSub(lam(k)(P(l)), lam(k)(P(l)), D[P(l)], r);
}

static long swap(long k, mat_ZZ& B, vec_long& P, vec_ZZ& D,
                 vec_vec_ZZ& lam, mat_ZZ* U, long m, long verbose)
// swaps vectors k-1 and k; assumes P(k-1) != 0
// returns 1 if vector k-1 needs to be reduced after the swap...
//    this only occurs in 'case 2' when there are linear dependencies
{
   long i, j;
   static ZZ t1, t2, t3, e, x, y;

   if (P(k) != 0) {
      if (verbose) cerr << "swap case 1: " << k << "\n";

      swap(B(k-1), B(k));
      if (U) swap((*U)(k-1), (*U)(k));

      for (j = 1; j <= k-2; j++)
         if (P(j) != 0)
            swap(lam(k-1)(P(j)), lam(k)(P(j)));

      for (i = k+1; i <= m; i++) {
         MulAddDiv(t1, lam(i)(P(k)-1), lam(i)(P(k)),
                   lam(k)(P(k)-1), D[P(k)-2], D[P(k)-1]);
         MulSubDiv(t2, lam(i)(P(k)-1), lam(i)(P(k)),
                   D[P(k)], lam(k)(P(k)-1), D[P(k)-1]);
         lam(i)(P(k)-1) = t1;
         lam(i)(P(k)) = t2;
      }

      MulAddDiv(D[P(k)-1], D[P(k)], lam(k)(P(k)-1),
                D[P(k)-2], lam(k)(P(k)-1), D[P(k)-1]);

      return 0;
   }
   else if (!IsZero(lam(k)(P(k-1)))) {
      if (verbose) cerr << "swap case 2: " << k << "\n";

      XGCD(e, x, y, lam(k)(P(k-1)), D[P(k-1)]);

      ExactDiv(t1, lam(k)(P(k-1)), e);
      ExactDiv(t2, D[P(k-1)], e);

      t3 = t2;
      negate(t2, t2);

      RowTransform(B(k-1), B(k), t1, t2, y, x);
      if (U) RowTransform((*U)(k-1), (*U)(k), t1, t2, y, x);

      for (j = 1; j <= k-2; j++)
         if (P(j) != 0)
            RowTransform(lam(k-1)(P(j)), lam(k)(P(j)), t1, t2, y, x);

      sqr(t2, t2);
      ExactDiv(D[P(k-1)], D[P(k-1)], t2);

      for (i = k+1; i <= m; i++)
         if (P(i) != 0) {
            ExactDiv(D[P(i)], D[P(i)], t2);
            for (j = i+1; j <= m; j++) {
               ExactDiv(lam(j)(P(i)), lam(j)(P(i)), t2);
            }
         }

      for (i = k+1; i <= m; i++) {
         ExactDiv(lam(i)(P(k-1)), lam(i)(P(k-1)), t3);
      }

      swap(P(k-1), P(k));

      return 1;
   }
   else {
      if (verbose) cerr << "swap case 3: " << k << "\n";

      swap(B(k-1), B(k));
      if (U) swap((*U)(k-1), (*U)(k));

      for (j = 1; j <= k-2; j++)
         if (P(j) != 0)
            swap(lam(k-1)(P(j)), lam(k)(P(j)));

      swap(P(k-1), P(k));

      return 0;
   }
}

static void IncrementalGS(mat_ZZ& B, vec_long& P, vec_ZZ& D,
                          vec_vec_ZZ& lam, long& s, long k)
{
   long n = B.NumCols();
   long m = B.NumRows();

   static ZZ u, t1, t2;

   long i, j;

   for (j = 1; j <= k-1; j++) {
      long posj = P(j);
      if (posj == 0) continue;

      InnerProduct(u, B(k), B(j));
      for (i = 1; i <= posj-1; i++) {
         mul(t1, D[i], u);
         mul(t2, lam(k)(i), lam(j)(i));
         sub(t1, t1, t2);
         div(t1, t1, D[i-1]);
         u = t1;
      }

      lam(k)(posj) = u;
   }

   InnerProduct(u, B(k), B(k));
   for (i = 1; i <= s; i++) {
      mul(t1, D[i], u);
      mul(t2, lam(k)(i), lam(k)(i));
      sub(t1, t1, t2);
      div(t1, t1, D[i-1]);
      u = t1;
   }

   if (u == 0) {
      P(k) = 0;
   }
   else {
      s++;
      P(k) = s;
      D[s] = u;
   }
}

static long LLL(vec_ZZ& D, mat_ZZ& B, mat_ZZ* U, long a, long b, long verbose)
{
   long m = B.NumRows();
   long n = B.NumCols();

   long force_reduce = 1;

   vec_long P;
   P.SetLength(m);

   D.SetLength(m+1);
   D[0] = 1;

   vec_vec_ZZ lam;
   lam.SetLength(m);

   long j;
   for (j = 1; j <= m; j++) lam(j).SetLength(m);

   if (U) ident(*U, m);

   long s = 0;

   long k = 1;
   long max_k = 0;

   while (k <= m) {
      if (k > max_k) {
         IncrementalGS(B, P, D, lam, s, k);
         max_k = k;
      }

      if (k == 1) {
         force_reduce = 1;
         k++;
         continue;
      }

      if (force_reduce)
         for (j = k-1; j >= 1; j--)
            reduce(k, j, B, P, D, lam, U);

      if (P(k-1) != 0 &&
          (P(k) == 0 ||
           SwapTest(D[P(k)], D[P(k)-1], D[P(k)-2], lam(k)(P(k)-1), a, b))) {
         force_reduce = swap(k, B, P, D, lam, U, max_k, verbose);
         k--;
      }
      else {
         force_reduce = 1;
         k++;
      }
   }

   D.SetLength(s+1);
   return s;
}

static long image(ZZ& det, mat_ZZ& B, mat_ZZ* U, long verbose)
{
   long m = B.NumRows();
   long n = B.NumCols();

   long force_reduce = 1;

   vec_long P;
   P.SetLength(m);

   vec_ZZ D;
   D.SetLength(m+1);
   D[0] = 1;

   vec_vec_ZZ lam;
   lam.SetLength(m);

   long j;
   for (j = 1; j <= m; j++) lam(j).SetLength(m);

   if (U) ident(*U, m);

   long s = 0;

   long k = 1;
   long max_k = 0;

   while (k <= m) {
      if (k > max_k) {
         IncrementalGS(B, P, D, lam, s, k);
         max_k = k;
      }

      if (k == 1) {
         force_reduce = 1;
         k++;
         continue;
      }

      if (force_reduce)
         for (j = k-1; j >= 1; j--)
            reduce(k, j, B, P, D, lam, U);
      if (P(k-1) != 0 && P(k) == 0) {
         force_reduce = swap(k, B, P, D, lam, U, max_k, verbose);
         k--;
      }
      else {
         force_reduce = 1;
         k++;
      }
   }

   det = D[s];
   return s;
}

long LLL(ZZ& det, mat_ZZ& B, mat_ZZ& U, long verbose)
{
   vec_ZZ D;
   long s;
   s = LLL(D, B, &U, 3, 4, verbose);
   det = D[s];
   return s;
}

long LLL(ZZ& det, mat_ZZ& B, long verbose)
{
   vec_ZZ D;
   long s;
   s = LLL(D, B, 0, 3, 4, verbose);
   det = D[s];
   return s;
}

long LLL(ZZ& det, mat_ZZ& B, mat_ZZ& U, long a, long b, long verbose)
{
   if (a <= 0 || b <= 0 || a > b || b/4 >= a)
      Error("LLL: bad args");

   vec_ZZ D;
   long s;
   s = LLL(D, B, &U, a, b, verbose);
   det = D[s];
   return s;
}

long LLL(ZZ& det, mat_ZZ& B, long a, long b, long verbose)
{
   if (a <= 0 || b <= 0 || a > b || b/4 >= a)
      Error("LLL: bad args");

   vec_ZZ D;
   long s;
   s = LLL(D, B, 0, a, b, verbose);
   det = D[s];
   return s;
}

long LLL_plus(vec_ZZ& D_out, mat_ZZ& B, mat_ZZ& U, long verbose)
{
   vec_ZZ D;
   long s;
   s = LLL(D, B, &U, 3, 4, verbose);
   D_out = D;
   return s;
}

long LLL_plus(vec_ZZ& D_out, mat_ZZ& B, long verbose)
{
   vec_ZZ D;
   long s;
   s = LLL(D, B, 0, 3, 4, verbose);
   D_out = D;
   return s;
}

long LLL_plus(vec_ZZ& D_out, mat_ZZ& B, mat_ZZ& U, long a, long b, long verbose)
{
   if (a <= 0 || b <= 0 || a > b || b/4 >= a)
      Error("LLL_plus: bad args");

   vec_ZZ D;
   long s;
   s = LLL(D, B, &U, a, b, verbose);
   D_out = D;
   return s;
}

long LLL_plus(vec_ZZ& D_out, mat_ZZ& B, long a, long b, long verbose)
{
   if (a <= 0 || b <= 0 || a > b || b/4 >= a)
      Error("LLL_plus: bad args");

   vec_ZZ D;
   long s;
   s = LLL(D, B, 0, a, b, verbose);
   D_out = D;
   return s;
}

long image(ZZ& det, mat_ZZ& B, mat_ZZ& U, long verbose)
{
   return image(det, B, &U, verbose);
}

long image(ZZ& det, mat_ZZ& B, long verbose)
{
   return image(det, B, 0, verbose);
}

long LatticeSolve(vec_ZZ& x, const mat_ZZ& A, const vec_ZZ& y, long reduce)
{
   long n = A.NumRows();
   long m = A.NumCols();

   if (y.length() != m)
      Error("LatticeSolve: dimension mismatch");

   if (reduce < 0 || reduce > 2)
      Error("LatticeSolve: bad reduce parameter");

   if (IsZero(y)) {
      x.SetLength(n);
      clear(x);
      return 1;
   }

   mat_ZZ A1, U1;
   ZZ det2;
   long im_rank, ker_rank;

   A1 = A;

   im_rank = image(det2, A1, U1);
   ker_rank = n - im_rank;

   mat_ZZ A2, U2;
   long new_rank;
   long i;

   A2.SetDims(im_rank + 1, m);
   for (i = 1; i <= im_rank; i++)
      A2(i) = A1(ker_rank + i);

   A2(im_rank + 1) = y;

   new_rank = image(det2, A2, U2);

   if (new_rank != im_rank ||
      (U2(1)(im_rank+1) != 1 && U2(1)(im_rank+1) != -1))
      return 0;

   vec_ZZ x1;
   x1.SetLength(im_rank);

   for (i = 1; i <= im_rank; i++)
      x1(i) = U2(1)(i);

   if (U2(1)(im_rank+1) == 1)
      negate(x1, x1);

   vec_ZZ x2, tmp;
   x2.SetLength(n);
   clear(x2);
   tmp.SetLength(n);

   for (i = 1; i <= im_rank; i++) {
      mul(tmp, U1(ker_rank+i), x1(i));
      add(x2, x2, tmp);
   }

   if (reduce == 0) {
      x = x2;
      return 1;
   }
   else if (reduce == 1) {
      U1.SetDims(ker_rank+1, n);
      U1(ker_rank+1) = x2;
      image(det2, U1);

      x = U1(ker_rank + 1);
      return 1;
   }
   else if (reduce == 2) {
      U1.SetDims(ker_rank, n);
      LLL(det2, U1);
      U1.SetDims(ker_rank+1, n);
      U1(ker_rank+1) = x2;
      image(det2, U1);

      x = U1(ker_rank + 1);
      return 1;
   }

   return 0;
}
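// A minimal usage sketch of the routines above (illustrative only, hence
// kept inside #if 0; the matrix entries and the name old_api_demo are made
// up).  For a full rank B, LLL returns the rank, replaces B by a reduced
// basis of the lattice spanned by its rows, sets det2 to the square of the
// determinant of that lattice, and U satisfies U * old_B = new_B.
// LatticeSolve looks for an integer vector x with x*A = y.
#if 0
static void old_api_demo()
{
   mat_ZZ B;
   B.SetDims(3, 3);
   B(1,1) = 1; B(1,2) = 2; B(1,3) = 3;
   B(2,1) = 4; B(2,2) = 5; B(2,3) = 6;
   B(3,1) = 7; B(3,2) = 8; B(3,3) = 10;

   mat_ZZ U;
   ZZ det2;
   long rank = LLL(det2, B, U, 0);
   cerr << "rank = " << rank << ", det^2 = " << det2 << "\n";

   vec_ZZ y, x;
   y.SetLength(3);
   y(1) = 1; y(2) = 1; y(3) = 1;
   if (LatticeSolve(x, B, y, 0))
      cerr << "x = " << x << "\n";
}
#endif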
//end old stuff
//////////////////////////////////////////////////////////////////////////

static void GS(mat_ZZ& U, vec_ZZ& D, vec_vec_ZZ& lam, long low, long up)
// Computes lam(up,low) or D[up].
// It uses no P.  Our P remembers pivots.
{
   static ZZ u, t1, t2;
   long i;

   InnerProduct(u, U(low), U(up));
   for (i = 1; i < low; i++) {
      mul(t1, D(i+1), u);
      mul(t2, lam(up)(i), lam(low)(i));
      sub(t1, t1, t2);
      div(u, t1, D(i));
   };
   if (low < up)
      lam(up)(low) = u;
   else
      D(up+1) = u;
}

static void IncrementalGS(mat_ZZ& U, vec_ZZ& D, vec_vec_ZZ& lam, long k)
// Computes lam(k,l) for l < k, and D[k], by repeated calls to GS.
// It uses no P.
{
   long l;
   for (l = 1; l <= k; l++) GS(U, D, lam, l, k);
}

static long LLL(vec_vec_ZZ& lam, vec_ZZ& D, mat_ZZ& B, mat_ZZ* U,
                long a, long b, long verbose)
// As the static LLL above, except that D and lam are supplied by the
// caller (already sized, with D(1) = 1), so that the Gram-Schmidt data
// remain available after the call.  Used on the kernel rows in XXLLL
// and HNF_LLL.
{
   long m = B.NumRows();

   long force_reduce = 1;

   vec_long P;
   P.SetLength(m);

   if (U) ident(*U, m);

   long s = 0;

   long k = 1;
   long max_k = 0;

   long j;

   while (k <= m) {
      if (k > max_k) {
         IncrementalGS(B, P, D, lam, s, k);
         max_k = k;
      }

      if (k == 1) {
         force_reduce = 1;
         k++;
         continue;
      }

      if (force_reduce)
         for (j = k-1; j >= 1; j--)
            reduce(k, j, B, P, D, lam, U);

      if (P(k-1) != 0 &&
          (P(k) == 0 ||
           SwapTest(D[P(k)], D[P(k)-1], D[P(k)-2], lam(k)(P(k)-1), a, b))) {
         force_reduce = swap(k, B, P, D, lam, U, max_k, verbose);
         k--;
      }
      else {
         force_reduce = 1;
         k++;
      }
   }

   return s;
}

static void reduce(long k, long l, vec_ZZ& D, vec_vec_ZZ& lam, mat_ZZ* U)
// This version is adapted to HNF_LLL ...
{
   static ZZ t1, t2;

   t2 = D(l+1);
   add(t1, lam(k)(l), lam(k)(l));
   abs(t1, t1);
   if (t1 <= t2) return;

   static ZZ u;
   long i;

   BalDiv(u, lam(k)(l), t2);
   MulSub((*U)(k), (*U)(k), (*U)(l), u);
   MulSub(lam(k)(l), lam(k)(l), t2, u);
   for (i = 1; i < l; i++) {
      MulSub(lam(k)(i), lam(k)(i), lam(l)(i), u);
   };
}

// The extendedimage part, which may be switched off.
#if 0

static void IncrementalGS(mat_ZZ& U, mat_ZZ& gram, vec_ZZ& D, vec_vec_ZZ& lam,
                          long st, long k)
// This version is needed in extendedimage.
{
   static ZZ u, t1, t2;
   long i, j;

   for (j = st; j <= k; j++) {
      InnerProduct(u, U(j), gram(k));
      for (i = st; i < j; i++) {
         mul(t1, D(i+1), u);
         mul(t2, lam(k)(i), lam(j)(i));
         sub(t1, t1, t2);
         div(u, t1, D(i));
      };
      if (j < k)
         lam(k)(j) = u;
      else
         D(k+1) = u;
   };
}

static long extendedimage(ZZ& det, mat_ZZ& B, mat_ZZ* U, long verbose)
{
   // ... set-up of u, t1, t2, e, x, y, i, j, l, m, k, max_k, rank, dim_ker,
   //     gram, D, D_ker, R and lam ...

   while (k <= m) {
      if (k > max_k) {
         max_k = k;
         if (verbose) cerr << "max_k=" << max_k << "\n";
         if (verbose) cerr << "D =" << D << "\n";
         IncrementalGS(*U, gram, D, lam, dim_ker + 1, k);
         if (IsZero(D(k+1))) {
            if (verbose) cerr << "New relation expected; k=" << k << "\n";
            for (i = 1; i <= max_k; i++) { R(i) = 1; };
            // Push generator down into subspaces,
            // as in "swap case 2".
            // We do not distinguish a "swap case 3",
            // and thus do not use transformations
            // of determinant -1.
            for (i = 1; i <= rank; i++) {
               k--;
               XGCD(e, x, y, D(k+1), lam(k+1)(k));
               div(t1, lam(k+1)(k), e);
               div(t2, D(k+1), e);
               R(k+1) = t2;
               negate(t2, t2);
               RowTransform((*U)(k), (*U)(k+1), t1, t2, x, y);
               for (j = 1; j < k; j++)
                  RowTransform(lam(k)(j), lam(k+1)(j), t1, t2, x, y);
            };
            dim_ker++;
            if (verbose) cerr << "dim_ker =" << dim_ker << "\n";
            // Cumulative volume ratios.
            for (i = 2; i <= max_k; i++) mul(R(i), R(i), R(i-1));
            // Only now do we update D and lam entirely.
            // This is presumably an unimportant
            // difference with "image".
            for (k = max_k; dim_ker < k; k--) {
               div(D(k+1), D(k), R(k));
               div(D(k+1), D(k+1), R(k));
               mul(u, R(k), R(k-1));
               for (j = k+1; j <= max_k; j++) {
                  div(lam(j)(k), lam(j)(k-1), u);
               }
            };
            D(dim_ker + 1) = 1;
            IncrementalGS(*U, D_ker, lam, dim_ker);
            k = dim_ker;
            for (l = k-1; l > 0; l--) reduce(k, l, D_ker, lam, U);
            IncrementalGS(*U, D_ker, lam, dim_ker, max_k);
            if (verbose) cerr << " k=" << k << "\n";
         }
         else {
            if (verbose) cerr << "rank goes up; k=" << k << "\n";
            rank++;
         };
      };
      if (k == dim_ker) { k++; continue; };
      for (l = k-1; l > dim_ker; l--) reduce(k, l, D, lam, U);
      for (l = dim_ker; l > 0; l--) reduce(k, l, D_ker, lam, U);
      k++;
   };
   mul(B, *U, B);
   det = D(m + 1);
   return rank;
}

long extendedimage(ZZ& det, mat_ZZ& B, mat_ZZ& U, long verbose)
{
   return extendedimage(det, B, &U, verbose);
}

#endif
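// For reference, a naive cross-check of the integral Gram-Schmidt data that
// GS and IncrementalGS above maintain (illustrative only, hence inside
// #if 0; the name CheckGSData is made up).  Assuming the rows of B are
// linearly independent and D, lam have been filled in for all indices,
// D(j+1) is the determinant of the Gram matrix of the first j rows
// (with D(1) = 1), and lam(i)(j) = D(j+1)*mu_{i,j} equals, by Cramer's rule,
// the determinant of that Gram matrix with its last column replaced by
// (<b_1,b_i>, ..., <b_j,b_i>).
#if 0
static void CheckGSData(const mat_ZZ& B, const vec_ZZ& D, const vec_vec_ZZ& lam)
{
   long m = B.NumRows();
   long i, j, r, c;

   // Gram matrix of the rows of B.
   mat_ZZ G;
   G.SetDims(m, m);
   for (i = 1; i <= m; i++)
      for (j = 1; j <= m; j++)
         InnerProduct(G(i, j), B(i), B(j));

   for (j = 1; j <= m; j++) {
      // leading j x j block of G
      mat_ZZ Gj;
      Gj.SetDims(j, j);
      for (r = 1; r <= j; r++)
         for (c = 1; c <= j; c++)
            Gj(r, c) = G(r, c);

      if (determinant(Gj) != D(j+1))
         Error("CheckGSData: D mismatch");

      for (i = j+1; i <= m; i++) {
         // replace the last column by (<b_1,b_i>, ..., <b_j,b_i>)
         for (r = 1; r <= j; r++) Gj(r, j) = G(r, i);
         if (determinant(Gj) != lam(i)(j))
            Error("CheckGSData: lam mismatch");
      }
   }
}
#endif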
static long XXLLL(ZZ& det, mat_ZZ& B, mat_ZZ* U, long a, long b, long verbose)
// XXLLL first does "HNF_image", then LLL reduces kernel and image,
// then reduces U further.
// Note that we can give good estimates of U all through the computation,
// albeit not as good as for extendedimage.  (The output estimates for
// the HNF_image stage are the starting point for the estimates in the next
// stages.)
{
   ZZ u;
   long m = B.NumRows();
   long j, k, l;

   vec_ZZ D;
   D.SetLength(m+1);
   D(1) = 1;

   vec_vec_ZZ lam;
   lam.SetLength(m);
   for (j = 1; j <= m; j++) lam(j).SetLength(m);

   long rank = HNF_image(B, *U, verbose);
   long dim_ker = m - rank;

   mat_ZZ U0;

   // LLL reduce the kernel.
   (*U).SetDims(dim_ker, m);
   LLL(lam, D, *U, 0, a, b, verbose);
   (*U).SetDims(m, m);   // restore rows.

   // LLL reduce the image.
   LLL(det, B, U0, verbose);
   mul(*U, U0, *U);

   for (k = dim_ker + 1; k <= m; k++) {
      for (l = 1; l <= dim_ker; l++) GS(*U, D, lam, l, k);
      for (l = dim_ker; l > 0; l--) reduce(k, l, D, lam, U);
   };

   return rank;
}

long XXLLL(ZZ& det, mat_ZZ& B, mat_ZZ& U, long a, long b, long verbose)
{
   return XXLLL(det, B, &U, a, b, verbose);
}

long XXLLL(ZZ& det, mat_ZZ& B, mat_ZZ& U, long verbose)
{
   return XXLLL(det, B, &U, 3, 4, verbose);
}
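// A usage sketch for XXLLL (illustrative only, hence inside #if 0; the
// matrix entries and the name XXLLL_demo are made up).  XXLLL returns the
// rank r; afterwards the first m-r rows of B are zero, the last r rows are
// an LLL reduced basis of the image, and U is a unimodular matrix with
// U * old_B = new_B whose first m-r rows are an LLL reduced basis of the
// kernel.
#if 0
static void XXLLL_demo()
{
   mat_ZZ B, U, B0;
   B.SetDims(3, 2);
   B(1,1) = 2; B(1,2) = 4;
   B(2,1) = 3; B(2,2) = 6;
   B(3,1) = 1; B(3,2) = 1;
   B0 = B;   // keep a copy of the input

   ZZ det2;
   long rank = XXLLL(det2, B, U, 0);   // rank is 2 for this matrix
   cerr << "rank = " << rank << "\n";

   mat_ZZ check;
   mul(check, U, B0);
   if (check != B) Error("XXLLL_demo: U * old_B != new_B");
}
#endif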
long HNF_old(mat_ZZ& W, const mat_ZZ& A_in)
// Just for comparison.  To be removed.
// Works only when rank is number of columns.  W is lower triangular and square.
{
   static ZZ det;
   mat_ZZ B = A_in;
   long rank = image(det, B, 0);
   SqrRoot(det, det);
   HNF(W, B, det);
   return rank;
}

static void swap(long k, mat_ZZ& B, vec_long& P, long dim_ker, long verbose)
// This version is needed in HNF_image.
{
   long i, j, l;
   static ZZ t1, t2, e, x, y;

   if (P(k) < P(k-1)) {
      if (verbose) cerr << "swap\n";
      swap(B(k), B(k-1));
      swap(P(k), P(k-1));
      return;
   };

   j = P(k);

   if (verbose) cerr << "Euclid \n";
   if (verbose) cerr << "k= " << k << "\n";

   XGCD(e, x, y, B(k-1,j), B(k,j));
   div(t1, B(k,j), e);
   div(t2, B(k-1,j), e);
   negate(t2, t2);

   if (verbose) cerr << "dim_ker =" << dim_ker << "\n";
   // But note that dim_ker stays zero in the swap stages when HNF_image
   // has been called with U.

   RowTransform(B(k-1), B(k), t1, t2, x, y);

   // Recall j = P(k);
   for (i = 0; (1); i++) {
      if ( (j) && (B(k,j) < 0) ) {
         if (verbose) cerr << "minus. \n i= " << i << "\n";
         negate(B(k), B(k));
      };
      if (i) break;
      k--;
      while ( (j) && IsZero( B(k,j) )) j--;
      P(k) = j;
   };
}

static long HNF_image(mat_ZZ& B, mat_ZZ* U, long verbose)
// HNF_image is implemented as a mix of "image" and the Havas-Majewski-Matthews
// HNF algorithm, Experimental Mathematics 7:2 (1998), 125-137.
// It is actually equivalent to the Chou-Collins algorithm,
// SIAM Journal on Computing 11, 4 (1982), 687-708.
// It is superior to HNF when no small multiple of the determinant of
// the lattice is known.  (In particular when that determinant is not small.)
// B is an m x n matrix, viewed as m rows of n-vectors.  m may be less
// than, equal to, or greater than n, and the rows need not be
// linearly independent.  B is transformed into our kind of
// Hermite Normal Form, and the return value is the rank r of B.
// The first m-r rows of new_B are zero.
// The rows of new_B span the same lattice as those of old_B.
// If new_B is square of full rank, then it is lower triangular, contrary
// to the convention of Havas-Majewski-Matthews.
// Let a pivot be an entry that is the last nonzero entry in its row and
// the first in its column.  Then new_B has r positive pivots.
// A pivot that is in a lower row is also further to the right.  If a column
// contains a pivot, then the other entries in the column are nonnegative
// and strictly smaller than the pivot.  All this determines new_B uniquely
// and defines what kind of Hermite Normal Form we use.
//
// U is an m x m invertible matrix with U * old_B = new_B.
// Its determinant may be +1 or -1.  The first m-r rows of U make up a matrix
// that is in Hermite Normal Form.
// There is also a version with U deleted.
// As in "extendedimage" there is a good estimate for the entries of U
// all through the computation.  But HNF_image is usually much faster.
//
// Wilberd
{
   static ZZ u;
   long m = B.NumRows();
   long n = B.NumCols();
   long i, j, l;

   vec_long P;
   P.SetLength(m);

   mat_ZZ B0;

   if (U) {
      B0.SetDims(m, m + n);   // matrix of zeroes.
      for (i = 1; i <= m; i++) {
         B0(i, i) = 1;
         for (j = 1; j <= n; j++) B0(i, j + m) = B(i, j);
      };
      n = n + m;
   }
   else {
      B0 = B;
   };

   long rank = 0;
   long dim_ker = 0;
   long k = 1;
   long max_k = 0;

   // P will store the pivots.
   for (i = 1; i <= m; i++) {
      j = n;
      while ((j) && IsZero(B0(i,j))) j--;
      P(i) = j;
   };

   while (k <= m) {
      max_k = k;
      if (verbose) cerr << "max_k=" << max_k << "\n";
      if (verbose) cerr << "dim_ker =" << dim_ker << "\n";

      while ( (k > dim_ker + 1) && (P(k) <= P(k-1)) ) {
         swap(k, B0, P, dim_ker, verbose);
         k--;
      };

      if (verbose) cerr << "k=" << k << "\n";

      if ( P(k) == 0 ) {
         if (verbose) cerr << "It is a relation." << "\n";
         dim_ker++;
      }
      else {
         rank++;
      };

      // (no initialization: k runs on from its current value up to max_k)
      for ((1); k <= max_k; k++) {
         for (l = k-1; l > dim_ker; l--) {
            j = P(l);
            div(u, B0(k,j), B0(l,j));
            MulSub(B0(k), B0(k), B0(l), u);
         };
      };
      // If it is expected that one row is way out of balance,
      // then one may wish to repeat this MulSub reduction now for row
      // max_k + 1, if max_k + 1 <= m.

      k = max_k + 1;
   };

   if (U) {
      ident(*U, m);
      n = n - m;
      for (i = 1; i <= m; i++) {
         for (j = 1; j <= m; j++) (*U)(i, j) = B0(i, j);
         for (j = 1; j <= n; j++) B(i, j) = B0(i, j + m);
      };
      rank = 0;
      for (i = 1; i <= m; i++) { if (P(i) > m) rank++; };
   }
   else {
      B = B0;
   };

   return rank;
}

long HNF_image(mat_ZZ& B, mat_ZZ& U, long verbose)
{
   return HNF_image(B, &U, verbose);
}

long HNF_image(mat_ZZ& B, long verbose)
{
   return HNF_image(B, 0, verbose);
}

static long HNF_LLL(mat_ZZ& B, mat_ZZ* U, long a, long b, long verbose)
// HNF_LLL first does HNF_image, then LLL reduces kernel to quality a/b,
// then reduces U.
// Thus one gets as nice an answer as in the Havas-Majewski-Matthews algorithm.
// The code is more complicated than in their pseudo-code, but we expect
// it often performs better, as HNF_image is so fast.
{
   static ZZ u;
   long m = B.NumRows();
   long j, k, l;

   vec_ZZ D;
   D.SetLength(m+1);
   D(1) = 1;

   vec_vec_ZZ lam;
   lam.SetLength(m);
   for (j = 1; j <= m; j++) lam(j).SetLength(m);

   long rank = HNF_image(B, *U, verbose);
   long dim_ker = m - rank;

   // LLL reduce the kernel.
   (*U).SetDims(dim_ker, m);
   LLL(lam, D, *U, 0, a, b, verbose);
   (*U).SetDims(m, m);   // restore rows.

   for (k = dim_ker + 1; k <= m; k++) {
      for (l = 1; l <= dim_ker; l++) GS(*U, D, lam, l, k);
      for (l = dim_ker; l > 0; l--) reduce(k, l, D, lam, U);
   };

   return rank;
}

static void straighten(mat_ZZ& U, long dim_ker)
// Size reduces U by subtracting multiples of row i from row j,
// whenever i < j and i is at most dim_ker.  Make this external?
{
   static ZZ u;
   long m = U.NumRows();
   long j, k, l;

   vec_ZZ D;
   D.SetLength(m+1);
   D(1) = 1;

   vec_vec_ZZ lam;
   lam.SetLength(m);
   for (j = 1; j <= m; j++) lam(j).SetLength(m);

   for (k = 1; k <= dim_ker; k++) {
      IncrementalGS(U, D, lam, k);
      for (l = k - 1; l > 0; l--) reduce(k, l, D, lam, &U);
   };

   for (k = dim_ker + 1; k <= m; k++) {
      for (l = 1; l <= dim_ker; l++) GS(U, D, lam, l, k);
      for (l = dim_ker; l > 0; l--) reduce(k, l, D, lam, &U);
   };
}
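// A usage sketch for HNF_image and HNF_LLL (illustrative only, hence inside
// #if 0; the matrix entries and the name HNF_image_demo are made up).
// After HNF_image the first m-r rows of B are zero, the last r rows are in
// the Hermite Normal Form described above, and U * old_B = new_B with
// det(U) = +1 or -1.  HNF_LLL additionally LLL reduces the kernel rows of U
// and size reduces the remaining rows of U against them.
#if 0
static void HNF_image_demo()
{
   mat_ZZ B, U, B0;
   B.SetDims(3, 3);
   B(1,1) = 2; B(1,2) = 3; B(1,3) = 6;
   B(2,1) = 4; B(2,2) = 0; B(2,3) = 2;
   B(3,1) = 6; B(3,2) = 3; B(3,3) = 8;   // row 3 = row 1 + row 2, so rank 2
   B0 = B;

   long rank = HNF_image(B, U, 0);
   cerr << "rank = " << rank << "\n";

   mat_ZZ check;
   mul(check, U, B0);
   if (check != B) Error("HNF_image_demo: U * old_B != new_B");

   // Same input, but with the kernel rows of U LLL reduced as well.
   B = B0;
   rank = HNF_LLL(B, U, 0);
}
#endif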
static long Ximage(ZZ& det, mat_ZZ& B, mat_ZZ* U, long verbose)
// "Ximage" first does "image", then improves U further.
// Up to signs the end result agrees with "extendedimage".
{
   long rank = image(det, B, *U, verbose);
   straighten(*U, B.NumRows() - rank);
   return rank;
}

long XLLL(ZZ& det, long hnf, mat_ZZ& B, mat_ZZ& U, long a, long b, long verbose)
// XLLL first does "HNF_image" or "image", then LLL reduces the image.
// Consider the default case, where one chooses "HNF_image".
// Then it is faster than
//    LLL(ZZ& det, mat_ZZ& B, mat_ZZ& U, long a, long b, long verbose)
// in many cases where the kernel is significant.
// The quality of the U also gets better than in LLL.
// And one can give better estimates of U all through the computation.
// But choosing HNF_image is a bad idea when the rank is very low.
{
   long rank;

   if (hnf) {
      rank = HNF_image(B, U, verbose);
   }
   else {
      rank = image(det, B, U, verbose);
   };

   mat_ZZ U0;
   LLL(det, B, U0, a, b, verbose);
   mul(U, U0, U);

   return rank;
}

static long XLLL(ZZ& det, long hnf, mat_ZZ& B, mat_ZZ& U, long verbose)
{
   return XLLL(det, hnf, B, U, 3, 4, verbose);
}

long XLLL(ZZ& det, mat_ZZ& B, mat_ZZ& U, long verbose)
{
   return XLLL(det, 1, B, U, 3, 4, verbose);
}

long XLLL(ZZ& det, mat_ZZ& B, long verbose)
{
   long rank = HNF_image(B, verbose);
   LLL(det, B, verbose);
   return rank;
}

long Ximage(ZZ& det, mat_ZZ& B, mat_ZZ& U, long verbose)
{
   return Ximage(det, B, &U, verbose);
}

long HNF_LLL(mat_ZZ& B, mat_ZZ& U, long a, long b, long verbose)
{
   return HNF_LLL(B, &U, a, b, verbose);
}

long HNF_LLL(mat_ZZ& B, mat_ZZ& U, long verbose)
{
   return HNF_LLL(B, &U, 3, 4, verbose);
}

NTL_END_IMPL
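// A client-side sketch of how XLLL might be called (illustrative only,
// hence inside #if 0; the matrix entries are made up).  The default entry
// points use hnf = 1, i.e. HNF_image followed by LLL reduction of the
// image; per the comments above this is usually preferable when the kernel
// is significant, while hnf = 0 (plain image) is the better choice when the
// rank is very low.
#if 0
NTL_CLIENT

int main()
{
   mat_ZZ B, U;
   B.SetDims(4, 2);
   B(1,1) = 1; B(1,2) = 1;
   B(2,1) = 2; B(2,2) = 2;
   B(3,1) = 3; B(3,2) = 3;
   B(4,1) = 1; B(4,2) = 2;

   ZZ det2;
   long rank = XLLL(det2, B, U, 0);   // rank is 2 for this matrix;
                                      // det2 is the square of the lattice determinant

   cout << "rank = " << rank << "\n";
   cout << B << "\n";
   cout << U << "\n";
   return 0;
}
#endif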