From ee34da69f58214229fc64aac21a54ee964021ad4 Mon Sep 17 00:00:00 2001 From: Christian daheim Date: Tue, 17 Jun 2014 21:44:53 +0200 Subject: [PATCH] Add (real&complex) Schur decomposition --- configure | 2 +- configure.in | 2 +- generic/clapack_cutdown.c | 16260 +++++++++++++++++++-------------- generic/clapack_cutdown.h | 2 + generic/nacomplex.c | 2 - generic/schur.c | 182 + generic/schur.h | 12 + generic/vectcl.c | 2 + tools/cherrypick_clapack.tcl | 7 +- 9 files changed, 9629 insertions(+), 6842 deletions(-) create mode 100644 generic/schur.c create mode 100644 generic/schur.h diff --git a/configure b/configure index 959b9f4..8f429c8 100755 --- a/configure +++ b/configure @@ -5389,7 +5389,7 @@ fi vars="vectclapi.c vectcl.c linalg.c arrayshape.c nacomplex.c hsfft.c fft.c - svd.c eig.c + svd.c eig.c schur.c bcexecute.c vmparser.c clapack_cutdown.c tcl_xerbla.c" for i in $vars; do diff --git a/configure.in b/configure.in index 02f6ed0..db65c53 100755 --- a/configure.in +++ b/configure.in @@ -75,7 +75,7 @@ AC_TYPE_INTPTR_T #----------------------------------------------------------------------- TEA_ADD_SOURCES([vectclapi.c vectcl.c linalg.c arrayshape.c nacomplex.c hsfft.c fft.c - svd.c eig.c + svd.c eig.c schur.c bcexecute.c vmparser.c clapack_cutdown.c tcl_xerbla.c]) TEA_ADD_HEADERS([generic/vectcl.h generic/nacomplex.h generic/hsfft.h]) diff --git a/generic/clapack_cutdown.c b/generic/clapack_cutdown.c index ab65155..bb2f42b 100644 --- a/generic/clapack_cutdown.c +++ b/generic/clapack_cutdown.c @@ -1,7 +1,7 @@ /* Generated code. Do not edit. 
See cherrypick_lapack.tcl */ /* This file contains a subset of LAPACK for use with Tcl/VecTcl */ -/* available subroutines: dgesdd_, zgesdd_, dgemm_, zgemm_, dsyevr_, zheevr_, dgeev_, zgeev_, dgelss_, zgelss_, dgelsy_, zgelsy_, dgesv_, zgesv_, dgesvx_, zgesvx_ */ -#include "clapack_cutdown.h" +/* available subroutines: dgesdd_ zgesdd_ dgemm_ zgemm_ dsyevr_ zheevr_ dgeev_ zgeev_ dgelss_ zgelss_ dgelsy_ zgelsy_ dgesv_ zgesv_ dgesvx_ zgesvx_ dgees_ zgees_ */ +#include "clapack_cutdown.h" #include "f2c_mathlib.h" /* Declaring the Tcl replacement for xerbla */ MODULE_SCOPE int vectcl_xerbla(Tcl_Interp *interp, char* func, integer *info); @@ -109,6 +109,8 @@ static /* Subroutine */ int zgecon_ (Tcl_Interp *interp, char *norm, integer *n, static /* Subroutine */ int zgeequ_ (Tcl_Interp *interp, integer *m, integer *n, doublecomplex *a, integer *lda, doublereal *r__, doublereal *c__, doublereal *rowcnd, doublereal *colcnd, doublereal *amax, integer *info); static /* Subroutine */ int zgerfs_ (Tcl_Interp *interp, char *trans, integer *n, integer *nrhs, doublecomplex *a, integer *lda, doublecomplex *af, integer *ldaf, integer *ipiv, doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx, doublereal *ferr, doublereal *berr, doublecomplex *work, doublereal *rwork, integer *info); static doublereal zlantr_ (char *norm, char *uplo, char *diag, integer *m, integer *n, doublecomplex *a, integer *lda, doublereal *work); +static /* Subroutine */ int dtrsen_ (Tcl_Interp *interp, char *job, char *compq, logical *select, integer *n, doublereal *t, integer *ldt, doublereal *q, integer *ldq, doublereal *wr, doublereal *wi, integer *m, doublereal *s, doublereal *sep, doublereal *work, integer *lwork, integer *iwork, integer * liwork, integer *info); +static /* Subroutine */ int ztrsen_ (Tcl_Interp *interp, char *job, char *compq, logical *select, integer *n, doublecomplex *t, integer *ldt, doublecomplex *q, integer *ldq, doublecomplex *w, 
integer *m, doublereal *s, doublereal *sep, doublecomplex *work, integer *lwork, integer *info); static /* Subroutine */ int dlasr_ (Tcl_Interp *interp, char *side, char *pivot, char *direct, integer *m, integer *n, doublereal *c__, doublereal *s, doublereal *a, integer * lda); static /* Subroutine */ int dlasd0_ (Tcl_Interp *interp, integer *n, integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer * ldvt, integer *smlsiz, integer *iwork, doublereal *work, integer * info); static /* Subroutine */ int dlasda_ (Tcl_Interp *interp, integer *icompq, integer *smlsiz, integer *n, integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *k, doublereal *difl, doublereal *difr, doublereal *z__, doublereal *poles, integer *givptr, integer *givcol, integer *ldgcol, integer *perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal *work, integer *iwork, integer *info); @@ -203,6 +205,10 @@ static /* Subroutine */ int zlaswp_ (Tcl_Interp *interp, integer *n, doublecompl static /* Subroutine */ int dlacn2_ (Tcl_Interp *interp, integer *n, doublereal *v, doublereal *x, integer *isgn, doublereal *est, integer *kase, integer *isave); static /* Subroutine */ int dlatrs_ (Tcl_Interp *interp, char *uplo, char *trans, char *diag, char * normin, integer *n, doublereal *a, integer *lda, doublereal *x, doublereal *scale, doublereal *cnorm, integer *info); static /* Subroutine */ int zlacn2_ (Tcl_Interp *interp, integer *n, doublecomplex *v, doublecomplex *x, doublereal *est, integer *kase, integer *isave); +static /* Subroutine */ int dtrexc_ (Tcl_Interp *interp, char *compq, integer *n, doublereal *t, integer * ldt, doublereal *q, integer *ldq, integer *ifst, integer *ilst, doublereal *work, integer *info); +static /* Subroutine */ int dtrsyl_ (Tcl_Interp *interp, char *trana, char *tranb, integer *isgn, integer *m, integer *n, doublereal *a, integer *lda, doublereal *b, integer * ldb, 
doublereal *c__, integer *ldc, doublereal *scale, integer *info); +static /* Subroutine */ int ztrexc_ (Tcl_Interp *interp, char *compq, integer *n, doublecomplex *t, integer *ldt, doublecomplex *q, integer *ldq, integer *ifst, integer * ilst, integer *info); +static /* Subroutine */ int ztrsyl_ (Tcl_Interp *interp, char *trana, char *tranb, integer *isgn, integer *m, integer *n, doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, doublecomplex *c__, integer *ldc, doublereal *scale, integer *info); static /* Subroutine */ int dlasd1_ (Tcl_Interp *interp, integer *nl, integer *nr, integer *sqre, doublereal *d__, doublereal *alpha, doublereal *beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, integer *idxq, integer * iwork, doublereal *work, integer *info); static /* Subroutine */ int dlasdt_ (Tcl_Interp *interp, integer *n, integer *lvl, integer *nd, integer * inode, integer *ndiml, integer *ndimr, integer *msub); static /* Subroutine */ int dlasd6_ (Tcl_Interp *interp, integer *icompq, integer *nl, integer *nr, integer *sqre, doublereal *d__, doublereal *vf, doublereal *vl, doublereal *alpha, doublereal *beta, integer *idxq, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *poles, doublereal *difl, doublereal * difr, doublereal *z__, integer *k, doublereal *c__, doublereal *s, doublereal *work, integer *iwork, integer *info); @@ -254,6 +260,10 @@ static /* Subroutine */ int zgeru_ (Tcl_Interp *interp, integer *m, integer *n, static /* Subroutine */ int dtrsv_ (Tcl_Interp *interp, char *uplo, char *trans, char *diag, integer *n, doublereal *a, integer *lda, doublereal *x, integer *incx); static integer izmax1_ (integer *n, doublecomplex *cx, integer *incx); static doublereal dzsum1_ (integer *n, doublecomplex *cx, integer *incx); +static /* Subroutine */ int dlaexc_ (Tcl_Interp *interp, logical *wantq, integer *n, doublereal *t, integer *ldt, doublereal *q, integer *ldq, 
integer *j1, integer *n1, integer *n2, doublereal *work, integer *info); +static /* Subroutine */ int dlasy2_ (Tcl_Interp *interp, logical *ltranl, logical *ltranr, integer *isgn, integer *n1, integer *n2, doublereal *tl, integer *ldtl, doublereal * tr, integer *ldtr, doublereal *b, integer *ldb, doublereal *scale, doublereal *x, integer *ldx, doublereal *xnorm, integer *info); +static /* Subroutine */ int zrot_ (Tcl_Interp *interp, integer *n, doublecomplex *cx, integer *incx, doublecomplex *cy, integer *incy, doublereal *c__, doublecomplex *s); +static /* Subroutine */ int zlartg_ (Tcl_Interp *interp, doublecomplex *f, doublecomplex *g, doublereal * cs, doublecomplex *sn, doublecomplex *r__); static /* Subroutine */ int dlasd2_ (Tcl_Interp *interp, integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *z__, doublereal *alpha, doublereal * beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *dsigma, doublereal *u2, integer *ldu2, doublereal *vt2, integer *ldvt2, integer *idxp, integer *idx, integer *idxc, integer * idxq, integer *coltyp, integer *info); static /* Subroutine */ int dlasd3_ (Tcl_Interp *interp, integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *q, integer *ldq, doublereal *dsigma, doublereal *u, integer *ldu, doublereal *u2, integer *ldu2, doublereal *vt, integer *ldvt, doublereal *vt2, integer *ldvt2, integer *idxc, integer *ctot, doublereal *z__, integer *info); static /* Subroutine */ int dlamrg_ (Tcl_Interp *interp, integer *n1, integer *n2, doublereal *a, integer *dtrd1, integer *dtrd2, integer *index); @@ -263,25 +273,19 @@ static /* Subroutine */ int zgerc_ (Tcl_Interp *interp, integer *m, integer *n, static doublereal dlapy3_ (doublereal *x, doublereal *y, doublereal *z__); static /* Subroutine */ int dlasq3_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *dmin__, doublereal *sigma, doublereal *desig, doublereal *qmax, 
integer *nfail, integer *iter, integer *ndiv, logical *ieee, integer *ttype, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *g, doublereal *tau); static integer dlaneg_ (integer *n, doublereal *d__, doublereal *lld, doublereal * sigma, doublereal *pivmin, integer *r__); -static /* Subroutine */ int dtrexc_ (Tcl_Interp *interp, char *compq, integer *n, doublereal *t, integer * ldt, doublereal *q, integer *ldq, integer *ifst, integer *ilst, doublereal *work, integer *info); static /* Subroutine */ int dormhr_ (Tcl_Interp *interp, char *side, char *trans, integer *m, integer *n, integer *ilo, integer *ihi, doublereal *a, integer *lda, doublereal * tau, doublereal *c__, integer *ldc, doublereal *work, integer *lwork, integer *info); static /* Subroutine */ int dlaqr2_ (Tcl_Interp *interp, logical *wantt, logical *wantz, integer *n, integer *ktop, integer *kbot, integer *nw, doublereal *h__, integer * ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, integer *ns, integer *nd, doublereal *sr, doublereal *si, doublereal * v, integer *ldv, integer *nh, doublereal *t, integer *ldt, integer * nv, doublereal *wv, integer *ldwv, doublereal *work, integer *lwork); static /* Subroutine */ int dlaqr1_ (Tcl_Interp *interp, integer *n, doublereal *h__, integer *ldh, doublereal *sr1, doublereal *si1, doublereal *sr2, doublereal *si2, doublereal *v); -static /* Subroutine */ int ztrexc_ (Tcl_Interp *interp, char *compq, integer *n, doublecomplex *t, integer *ldt, doublecomplex *q, integer *ldq, integer *ifst, integer * ilst, integer *info); static /* Subroutine */ int zunmhr_ (Tcl_Interp *interp, char *side, char *trans, integer *m, integer *n, integer *ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *c__, integer *ldc, doublecomplex * work, integer *lwork, integer *info); static /* Subroutine */ int zlaqr2_ (Tcl_Interp *interp, logical *wantt, logical *wantz, integer *n, integer 
*ktop, integer *kbot, integer *nw, doublecomplex *h__, integer *ldh, integer *iloz, integer *ihiz, doublecomplex *z__, integer *ldz, integer *ns, integer *nd, doublecomplex *sh, doublecomplex *v, integer *ldv, integer *nh, doublecomplex *t, integer *ldt, integer *nv, doublecomplex *wv, integer *ldwv, doublecomplex *work, integer *lwork); static /* Subroutine */ int zlaqr1_ (Tcl_Interp *interp, integer *n, doublecomplex *h__, integer *ldh, doublecomplex *s1, doublecomplex *s2, doublecomplex *v); +static /* Subroutine */ int dlarfx_ (Tcl_Interp *interp, char *side, integer *m, integer *n, doublereal * v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work); static /* Subroutine */ int dlasd4_ (Tcl_Interp *interp, integer *n, integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal * sigma, doublereal *work, integer *info); static /* Subroutine */ int dlasq4_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, integer *n0in, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *tau, integer *ttype, doublereal *g); static /* Subroutine */ int dlasq5_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *tau, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2, logical *ieee); static /* Subroutine */ int dlasq6_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2); -static /* Subroutine */ int dlaexc_ (Tcl_Interp *interp, logical *wantq, integer *n, doublereal *t, integer *ldt, doublereal *q, integer *ldq, integer *j1, integer *n1, integer *n2, doublereal *work, integer *info); -static /* Subroutine */ int zrot_ (Tcl_Interp *interp, integer *n, doublecomplex *cx, integer *incx, doublecomplex *cy, integer 
*incy, doublereal *c__, doublecomplex *s); -static /* Subroutine */ int zlartg_ (Tcl_Interp *interp, doublecomplex *f, doublecomplex *g, doublereal * cs, doublecomplex *sn, doublecomplex *r__); static /* Subroutine */ int dlaed6_ (Tcl_Interp *interp, integer *kniter, logical *orgati, doublereal * rho, doublereal *d__, doublereal *z__, doublereal *finit, doublereal * tau, integer *info); static /* Subroutine */ int dlasd5_ (Tcl_Interp *interp, integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dsigma, doublereal * work); -static /* Subroutine */ int dlasy2_ (Tcl_Interp *interp, logical *ltranl, logical *ltranr, integer *isgn, integer *n1, integer *n2, doublereal *tl, integer *ldtl, doublereal * tr, integer *ldtr, doublereal *b, integer *ldb, doublereal *scale, doublereal *x, integer *ldx, doublereal *xnorm, integer *info); -static /* Subroutine */ int dlarfx_ (Tcl_Interp *interp, char *side, integer *m, integer *n, doublereal * v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work); /* defined constants */ static integer dgesdd_c__1 = 1; @@ -337,6 +341,12 @@ static integer zgelsy_c__1 = 1; static integer zgelsy_c_n1 = -1; static integer zgelsy_c__0 = 0; static integer zgelsy_c__2 = 2; +static integer dgees_c__1 = 1; +static integer dgees_c__0 = 0; +static integer dgees_c_n1 = -1; +static integer zgees_c__1 = 1; +static integer zgees_c__0 = 0; +static integer zgees_c_n1 = -1; static integer dbdsdc_c__9 = 9; static integer dbdsdc_c__0 = 0; static doublereal dbdsdc_c_b15 = 1.; @@ -554,6 +564,8 @@ static integer zgecon_c__1 = 1; static doublecomplex zgerfs_c_b1 = {1.,0.}; static integer zgerfs_c__1 = 1; static integer zlantr_c__1 = 1; +static integer dtrsen_c_n1 = -1; +static integer ztrsen_c_n1 = -1; static integer dlasd0_c__0 = 0; static integer dlasd0_c__2 = 2; static integer dlasda_c__0 = 0; @@ -680,6 +692,16 @@ static doublereal dlacn2_c_b11 = 1.; static integer dlatrs_c__1 = 1; static doublereal 
dlatrs_c_b36 = .5; static integer zlacn2_c__1 = 1; +static integer dtrexc_c__1 = 1; +static integer dtrexc_c__2 = 2; +static integer dtrsyl_c__1 = 1; +static logical dtrsyl_c_false = FALSE_; +static integer dtrsyl_c__2 = 2; +static doublereal dtrsyl_c_b26 = 1.; +static doublereal dtrsyl_c_b30 = 0.; +static logical dtrsyl_c_true = TRUE_; +static integer ztrexc_c__1 = 1; +static integer ztrsyl_c__1 = 1; static integer dlasd1_c__0 = 0; static doublereal dlasd1_c_b7 = 1.; static integer dlasd1_c__1 = 1; @@ -755,6 +777,16 @@ static integer dlarz_c__1 = 1; static doublereal dlarz_c_b5 = 1.; static doublecomplex zlarz_c_b1 = {1.,0.}; static integer zlarz_c__1 = 1; +static integer dlaexc_c__1 = 1; +static integer dlaexc_c__4 = 4; +static logical dlaexc_c_false = FALSE_; +static integer dlaexc_c_n1 = -1; +static integer dlaexc_c__2 = 2; +static integer dlaexc_c__3 = 3; +static integer dlasy2_c__4 = 4; +static integer dlasy2_c__1 = 1; +static integer dlasy2_c__16 = 16; +static integer dlasy2_c__0 = 0; static integer dlasd2_c__1 = 1; static doublereal dlasd2_c_b30 = 0.; static integer dlasd3_c__1 = 1; @@ -765,8 +797,6 @@ static integer dlasd7_c__1 = 1; static integer dlasd8_c__1 = 1; static integer dlasd8_c__0 = 0; static doublereal dlasd8_c_b8 = 1.; -static integer dtrexc_c__1 = 1; -static integer dtrexc_c__2 = 2; static integer dormhr_c__1 = 1; static integer dormhr_c_n1 = -1; static integer dormhr_c__2 = 2; @@ -775,7 +805,6 @@ static integer dlaqr2_c_n1 = -1; static doublereal dlaqr2_c_b12 = 0.; static doublereal dlaqr2_c_b13 = 1.; static logical dlaqr2_c_true = TRUE_; -static integer ztrexc_c__1 = 1; static integer zunmhr_c__1 = 1; static integer zunmhr_c_n1 = -1; static integer zunmhr_c__2 = 2; @@ -784,16 +813,6 @@ static doublecomplex zlaqr2_c_b2 = {1.,0.}; static integer zlaqr2_c__1 = 1; static integer zlaqr2_c_n1 = -1; static logical zlaqr2_c_true = TRUE_; -static integer dlaexc_c__1 = 1; -static integer dlaexc_c__4 = 4; -static logical dlaexc_c_false = FALSE_; 
-static integer dlaexc_c_n1 = -1; -static integer dlaexc_c__2 = 2; -static integer dlaexc_c__3 = 3; -static integer dlasy2_c__4 = 4; -static integer dlasy2_c__1 = 1; -static integer dlasy2_c__16 = 16; -static integer dlasy2_c__0 = 0; static integer dlarfx_c__1 = 1; /* defined functions */ MODULE_SCOPE /* Subroutine */ int dgesdd_ (Tcl_Interp *interp, char *jobz, integer *m, integer *n, doublereal * a, integer *lda, doublereal *s, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *work, integer *lwork, integer *iwork, integer *info) @@ -8496,12 +8515,28 @@ return TCL_OK; } /* zgesvx_ */ -static logical lsame_ (char *ca, char *cb) +MODULE_SCOPE /* Subroutine */ int dgees_ (Tcl_Interp *interp, char *jobvs, char *sort, L_fp select, integer *n, doublereal *a, integer *lda, integer *sdim, doublereal *wr, doublereal *wi, doublereal *vs, integer *ldvs, doublereal *work, integer *lwork, logical *bwork, integer *info) { - logical ret_val; + integer a_dim1, a_offset, vs_dim1, vs_offset, i__1, i__2, i__3; - integer inta, intb, zcode; + double sqrt(doublereal); + integer i__; + doublereal s; + integer i1, i2, ip, ihi, ilo; + doublereal dum[1], eps, sep; + integer ibal; + doublereal anrm; + integer idum[1], ierr, itau, iwrk, inxt, icond, ieval; + logical cursl; + logical lst2sl, scalea; + doublereal cscale; + doublereal bignum; + logical lastsl; + integer minwrk, maxwrk; + doublereal smlnum; + integer hswork; + logical wantst, lquery, wantvs; @@ -8513,77 +8548,642 @@ static logical lsame_ (char *ca, char *cb) - ret_val = *(unsigned char *)ca == *(unsigned char *)cb; - if (ret_val) { - return ret_val; - } - zcode = 'Z'; - inta = *(unsigned char *)ca; - intb = *(unsigned char *)cb; - if (zcode == 90 || zcode == 122) { - if (inta >= 97 && inta <= 122) { - inta += -32; + + + + + + + + + + + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --wr; + --wi; + vs_dim1 = *ldvs; + vs_offset = 1 + vs_dim1; + vs -= vs_offset; + --work; + --bwork; + + *info = 0; 
+ lquery = *lwork == -1; + wantvs = lsame_(jobvs, "V"); + wantst = lsame_(sort, "S"); + if (! wantvs && ! lsame_(jobvs, "N")) { + *info = -1; + } else if (! wantst && ! lsame_(sort, "N")) { + *info = -2; + } else if (*n < 0) { + *info = -4; + } else if (*lda < max(1,*n)) { + *info = -6; + } else if (*ldvs < 1 || wantvs && *ldvs < *n) { + *info = -11; + } + + + if (*info == 0) { + if (*n == 0) { + minwrk = 1; + maxwrk = 1; + } else { + maxwrk = (*n << 1) + *n * ilaenv_(&dgees_c__1, "DGEHRD", " ", n, &dgees_c__1, + n, &dgees_c__0); + minwrk = *n * 3; + + if (dhseqr_(interp, "S", jobvs, n, &dgees_c__1, n, &a[a_offset], lda, &wr[1], &wi[1], &vs[vs_offset], ldvs, &work[1], &dgees_c_n1, &ieval)!=TCL_OK) { return TCL_ERROR; } + + + hswork = (integer) work[1]; + + if (! wantvs) { + i__1 = maxwrk, i__2 = *n + hswork; + maxwrk = max(i__1,i__2); + } else { + i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&dgees_c__1, + "DORGHR", " ", n, &dgees_c__1, n, &dgees_c_n1); + maxwrk = max(i__1,i__2); + i__1 = maxwrk, i__2 = *n + hswork; + maxwrk = max(i__1,i__2); + } } - if (intb >= 97 && intb <= 122) { - intb += -32; + work[1] = (doublereal) maxwrk; + + if (*lwork < minwrk && ! lquery) { + *info = -13; } + } - } else if (zcode == 233 || zcode == 169) { + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "DGEES ", &i__1); +return TCL_ERROR; + +return TCL_OK; + } else if (lquery) { +return TCL_OK; + } + + + if (*n == 0) { + *sdim = 0; +return TCL_OK; + } + + + eps = dlamch_("P"); + smlnum = dlamch_("S"); + bignum = 1. / smlnum; + if (dlabad_(interp, &smlnum, &bignum)!=TCL_OK) { return TCL_ERROR; } + + smlnum = sqrt(smlnum) / eps; + bignum = 1. / smlnum; + + + anrm = dlange_("M", n, n, &a[a_offset], lda, dum); + scalea = FALSE_; + if (anrm > 0. 
&& anrm < smlnum) { + scalea = TRUE_; + cscale = smlnum; + } else if (anrm > bignum) { + scalea = TRUE_; + cscale = bignum; + } + if (scalea) { + if (dlascl_(interp, "G", &dgees_c__0, &dgees_c__0, &anrm, &cscale, n, n, &a[a_offset], lda, & ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + + + ibal = 1; + if (dgebal_(interp, "P", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr)!=TCL_OK) { return TCL_ERROR; } + + + + itau = *n + ibal; + iwrk = *n + itau; + i__1 = *lwork - iwrk + 1; + if (dgehrd_(interp, n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + + if (wantvs) { + + + if (dlacpy_(interp, "L", n, n, &a[a_offset], lda, &vs[vs_offset], ldvs) !=TCL_OK) { return TCL_ERROR; } + + + + + i__1 = *lwork - iwrk + 1; + if (dorghr_(interp, n, &ilo, &ihi, &vs[vs_offset], ldvs, &work[itau], &work[iwrk], &i__1, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + + *sdim = 0; + + + iwrk = itau; + i__1 = *lwork - iwrk + 1; + if (dhseqr_(interp, "S", jobvs, n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &vs[ vs_offset], ldvs, &work[iwrk], &i__1, &ieval)!=TCL_OK) { return TCL_ERROR; } + + + if (ieval > 0) { + *info = ieval; + } + + + if (wantst && *info == 0) { + if (scalea) { + if (dlascl_(interp, "G", &dgees_c__0, &dgees_c__0, &cscale, &anrm, n, &dgees_c__1, &wr[1], n, & ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (dlascl_(interp, "G", &dgees_c__0, &dgees_c__0, &cscale, &anrm, n, &dgees_c__1, &wi[1], n, & ierr)!=TCL_OK) { return TCL_ERROR; } - if (inta >= 129 && inta <= 137 || inta >= 145 && inta <= 153 || inta - >= 162 && inta <= 169) { - inta += 64; } - if (intb >= 129 && intb <= 137 || intb >= 145 && intb <= 153 || intb - >= 162 && intb <= 169) { - intb += 64; + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + bwork[i__] = (*select)(&wr[i__], &wi[i__]); } - } else if (zcode == 218 || zcode == 250) { + i__1 = *lwork - iwrk + 1; + if (dtrsen_(interp, "N", jobvs, &bwork[1], n, &a[a_offset], lda, &vs[vs_offset], 
ldvs, &wr[1], &wi[1], sdim, &s, &sep, &work[iwrk], &i__1, + idum, &dgees_c__1, &icond)!=TCL_OK) { return TCL_ERROR; } - if (inta >= 225 && inta <= 250) { - inta += -32; + + if (icond > 0) { + *info = *n + icond; } - if (intb >= 225 && intb <= 250) { - intb += -32; + } + + if (wantvs) { + + + if (dgebak_(interp, "P", "R", n, &ilo, &ihi, &work[ibal], n, &vs[vs_offset], ldvs, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + + if (scalea) { + + + if (dlascl_(interp, "H", &dgees_c__0, &dgees_c__0, &cscale, &anrm, n, n, &a[a_offset], lda, & ierr)!=TCL_OK) { return TCL_ERROR; } + + + i__1 = *lda + 1; + if (dcopy_(interp, n, &a[a_offset], &i__1, &wr[1], &dgees_c__1)!=TCL_OK) { return TCL_ERROR; } + + if (cscale == smlnum) { + + + if (ieval > 0) { + i1 = ieval + 1; + i2 = ihi - 1; + i__1 = ilo - 1; + i__3 = ilo - 1; + i__2 = max(i__3,1); + if (dlascl_(interp, "G", &dgees_c__0, &dgees_c__0, &cscale, &anrm, &i__1, &dgees_c__1, &wi[ 1], &i__2, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + } else if (wantst) { + i1 = 1; + i2 = *n - 1; + } else { + i1 = ilo; + i2 = ihi - 1; + } + inxt = i1 - 1; + i__1 = i2; + for (i__ = i1; i__ <= i__1; ++i__) { + if (i__ < inxt) { + goto L20; + } + if (wi[i__] == 0.) { + inxt = i__ + 1; + } else { + if (a[i__ + 1 + i__ * a_dim1] == 0.) { + wi[i__] = 0.; + wi[i__ + 1] = 0.; + } else if (a[i__ + 1 + i__ * a_dim1] != 0. && a[i__ + ( + i__ + 1) * a_dim1] == 0.) 
{ + wi[i__] = 0.; + wi[i__ + 1] = 0.; + if (i__ > 1) { + i__2 = i__ - 1; + if (dswap_(interp, &i__2, &a[i__ * a_dim1 + 1], &dgees_c__1, &a[( i__ + 1) * a_dim1 + 1], &dgees_c__1)!=TCL_OK) { return TCL_ERROR; } + + + } + if (*n > i__ + 1) { + i__2 = *n - i__ - 1; + if (dswap_(interp, &i__2, &a[i__ + (i__ + 2) * a_dim1], lda, & a[i__ + 1 + (i__ + 2) * a_dim1], lda)!=TCL_OK) { return TCL_ERROR; } + + + } + if (wantvs) { + if (dswap_(interp, n, &vs[i__ * vs_dim1 + 1], &dgees_c__1, &vs[(i__ + 1) * vs_dim1 + 1], &dgees_c__1)!=TCL_OK) { return TCL_ERROR; } + + + } + a[i__ + (i__ + 1) * a_dim1] = a[i__ + 1 + i__ * + a_dim1]; + a[i__ + 1 + i__ * a_dim1] = 0.; + } + inxt = i__ + 2; + } +L20: + ; + } } + + + i__1 = *n - ieval; + i__3 = *n - ieval; + i__2 = max(i__3,1); + if (dlascl_(interp, "G", &dgees_c__0, &dgees_c__0, &cscale, &anrm, &i__1, &dgees_c__1, &wi[ieval + 1], &i__2, &ierr)!=TCL_OK) { return TCL_ERROR; } + + } - ret_val = inta == intb; + if (wantst && *info == 0) { - return ret_val; -} /* lsame_ */ -static /* Subroutine */ int dbdsdc_ (Tcl_Interp *interp, char *uplo, char *compq, integer *n, doublereal * d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *q, integer *iq, doublereal *work, integer * iwork, integer *info) -{ - integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; - doublereal d__1; + lastsl = TRUE_; + lst2sl = TRUE_; + *sdim = 0; + ip = 0; + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + cursl = (*select)(&wr[i__], &wi[i__]); + if (wi[i__] == 0.) { + if (cursl) { + ++(*sdim); + } + ip = 0; + if (cursl && ! 
lastsl) { + *info = *n + 2; + } + } else { + if (ip == 1) { - double d_sign(doublereal *, doublereal *), log(doublereal); - integer i__, j, k; - doublereal p, r__; - integer z__, ic, ii, kk; - doublereal cs; - integer is, iu; - doublereal sn; - integer nm1; - doublereal eps; - integer ivt, difl, difr, ierr, perm, mlvl, sqre; - integer poles, iuplo, nsize, start; - integer givcol; - integer icompq; - doublereal orgnrm; - integer givnum, givptr, qstart, smlsiz, wstart, smlszp; + cursl = cursl || lastsl; + lastsl = cursl; + if (cursl) { + *sdim += 2; + } + ip = -1; + if (cursl && ! lst2sl) { + *info = *n + 2; + } + } else { + + + ip = 1; + } + } + lst2sl = lastsl; + lastsl = cursl; + } + } + + work[1] = (doublereal) maxwrk; +return TCL_OK; + + +} /* dgees_ */ +MODULE_SCOPE /* Subroutine */ int zgees_ (Tcl_Interp *interp, char *jobvs, char *sort, L_fp select, integer *n, doublecomplex *a, integer *lda, integer *sdim, doublecomplex *w, doublecomplex *vs, integer *ldvs, doublecomplex *work, integer *lwork, doublereal *rwork, logical *bwork, integer *info) +{ + integer a_dim1, a_offset, vs_dim1, vs_offset, i__1, i__2; + + double sqrt(doublereal); + + integer i__; + doublereal s; + integer ihi, ilo; + doublereal dum[1], eps, sep; + integer ibal; + doublereal anrm; + integer ierr, itau, iwrk, icond, ieval; + logical scalea; + doublereal cscale; + doublereal bignum; + integer minwrk, maxwrk; + doublereal smlnum; + integer hswork; + logical wantst, lquery, wantvs; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --w; + vs_dim1 = *ldvs; + vs_offset = 1 + vs_dim1; + vs -= vs_offset; + --work; + --rwork; + --bwork; + + *info = 0; + lquery = *lwork == -1; + wantvs = lsame_(jobvs, "V"); + wantst = lsame_(sort, "S"); + if (! wantvs && ! lsame_(jobvs, "N")) { + *info = -1; + } else if (! wantst && ! 
lsame_(sort, "N")) { + *info = -2; + } else if (*n < 0) { + *info = -4; + } else if (*lda < max(1,*n)) { + *info = -6; + } else if (*ldvs < 1 || wantvs && *ldvs < *n) { + *info = -10; + } + + + if (*info == 0) { + if (*n == 0) { + minwrk = 1; + maxwrk = 1; + } else { + maxwrk = *n + *n * ilaenv_(&zgees_c__1, "ZGEHRD", " ", n, &zgees_c__1, n, & + zgees_c__0); + minwrk = *n << 1; + + if (zhseqr_(interp, "S", jobvs, n, &zgees_c__1, n, &a[a_offset], lda, &w[1], &vs[ vs_offset], ldvs, &work[1], &zgees_c_n1, &ieval)!=TCL_OK) { return TCL_ERROR; } + + + hswork = (integer) work[1].r; + + if (! wantvs) { + maxwrk = max(maxwrk,hswork); + } else { + i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&zgees_c__1, "ZUNGHR", + " ", n, &zgees_c__1, n, &zgees_c_n1); + maxwrk = max(i__1,i__2); + maxwrk = max(maxwrk,hswork); + } + } + work[1].r = (doublereal) maxwrk, work[1].i = 0.; + + if (*lwork < minwrk && ! lquery) { + *info = -12; + } + } + + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "ZGEES ", &i__1); +return TCL_ERROR; + +return TCL_OK; + } else if (lquery) { +return TCL_OK; + } + + + if (*n == 0) { + *sdim = 0; +return TCL_OK; + } + + + eps = dlamch_("P"); + smlnum = dlamch_("S"); + bignum = 1. / smlnum; + if (dlabad_(interp, &smlnum, &bignum)!=TCL_OK) { return TCL_ERROR; } + + smlnum = sqrt(smlnum) / eps; + bignum = 1. / smlnum; + + + anrm = zlange_("M", n, n, &a[a_offset], lda, dum); + scalea = FALSE_; + if (anrm > 0. 
&& anrm < smlnum) { + scalea = TRUE_; + cscale = smlnum; + } else if (anrm > bignum) { + scalea = TRUE_; + cscale = bignum; + } + if (scalea) { + if (zlascl_(interp, "G", &zgees_c__0, &zgees_c__0, &anrm, &cscale, n, n, &a[a_offset], lda, & ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + + + ibal = 1; + if (zgebal_(interp, "P", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr)!=TCL_OK) { return TCL_ERROR; } + + + + itau = 1; + iwrk = *n + itau; + i__1 = *lwork - iwrk + 1; + if (zgehrd_(interp, n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + + if (wantvs) { + + + if (zlacpy_(interp, "L", n, n, &a[a_offset], lda, &vs[vs_offset], ldvs) !=TCL_OK) { return TCL_ERROR; } + + + + + i__1 = *lwork - iwrk + 1; + if (zunghr_(interp, n, &ilo, &ihi, &vs[vs_offset], ldvs, &work[itau], &work[iwrk], &i__1, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + + *sdim = 0; + + + iwrk = itau; + i__1 = *lwork - iwrk + 1; + if (zhseqr_(interp, "S", jobvs, n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vs[ vs_offset], ldvs, &work[iwrk], &i__1, &ieval)!=TCL_OK) { return TCL_ERROR; } + + + if (ieval > 0) { + *info = ieval; + } + + + if (wantst && *info == 0) { + if (scalea) { + if (zlascl_(interp, "G", &zgees_c__0, &zgees_c__0, &cscale, &anrm, n, &zgees_c__1, &w[1], n, & ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + bwork[i__] = (*select)(&w[i__]); + } + + + i__1 = *lwork - iwrk + 1; + if (ztrsen_(interp, "N", jobvs, &bwork[1], n, &a[a_offset], lda, &vs[vs_offset], ldvs, &w[1], sdim, &s, &sep, &work[iwrk], &i__1, &icond)!=TCL_OK) { return TCL_ERROR; } + + + } + + if (wantvs) { + + + if (zgebak_(interp, "P", "R", n, &ilo, &ihi, &rwork[ibal], n, &vs[vs_offset], ldvs, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + + if (scalea) { + + + if (zlascl_(interp, "U", &zgees_c__0, &zgees_c__0, &cscale, &anrm, n, n, &a[a_offset], lda, & ierr)!=TCL_OK) { return TCL_ERROR; } + + + i__1 = *lda + 
1; + if (zcopy_(interp, n, &a[a_offset], &i__1, &w[1], &zgees_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + + work[1].r = (doublereal) maxwrk, work[1].i = 0.; +return TCL_OK; + + +} /* zgees_ */ +static logical lsame_ (char *ca, char *cb) +{ + logical ret_val; + + integer inta, intb, zcode; + + + + + + + + + + + + + ret_val = *(unsigned char *)ca == *(unsigned char *)cb; + if (ret_val) { + return ret_val; + } + + + zcode = 'Z'; + + + inta = *(unsigned char *)ca; + intb = *(unsigned char *)cb; + + if (zcode == 90 || zcode == 122) { + + + if (inta >= 97 && inta <= 122) { + inta += -32; + } + if (intb >= 97 && intb <= 122) { + intb += -32; + } + + } else if (zcode == 233 || zcode == 169) { + + + if (inta >= 129 && inta <= 137 || inta >= 145 && inta <= 153 || inta + >= 162 && inta <= 169) { + inta += 64; + } + if (intb >= 129 && intb <= 137 || intb >= 145 && intb <= 153 || intb + >= 162 && intb <= 169) { + intb += 64; + } + + } else if (zcode == 218 || zcode == 250) { + + + if (inta >= 225 && inta <= 250) { + inta += -32; + } + if (intb >= 225 && intb <= 250) { + intb += -32; + } + } + ret_val = inta == intb; + + + + return ret_val; +} /* lsame_ */ +static /* Subroutine */ int dbdsdc_ (Tcl_Interp *interp, char *uplo, char *compq, integer *n, doublereal * d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *q, integer *iq, doublereal *work, integer * iwork, integer *info) +{ + integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; + doublereal d__1; + + double d_sign(doublereal *, doublereal *), log(doublereal); + + integer i__, j, k; + doublereal p, r__; + integer z__, ic, ii, kk; + doublereal cs; + integer is, iu; + doublereal sn; + integer nm1; + doublereal eps; + integer ivt, difl, difr, ierr, perm, mlvl, sqre; + integer poles, iuplo, nsize, start; + integer givcol; + integer icompq; + doublereal orgnrm; + integer givnum, givptr, qstart, smlsiz, wstart, smlszp; @@ -28088,6 +28688,530 @@ static doublereal zlantr_ (char *norm, 
char *uplo, char *diag, integer *m, integ } /* zlantr_ */ +static /* Subroutine */ int dtrsen_ (Tcl_Interp *interp, char *job, char *compq, logical *select, integer *n, doublereal *t, integer *ldt, doublereal *q, integer *ldq, doublereal *wr, doublereal *wi, integer *m, doublereal *s, doublereal *sep, doublereal *work, integer *lwork, integer *iwork, integer * liwork, integer *info) +{ + integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2; + doublereal d__1, d__2; + + double sqrt(doublereal); + + integer k, n1, n2, kk, nn, ks; + doublereal est; + integer kase; + logical pair; + integer ierr; + logical swap; + doublereal scale; + integer isave[3], lwmin; + logical wantq, wants; + doublereal rnorm; + logical wantbh; + integer liwmin; + logical wantsp, lquery; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --select; + t_dim1 = *ldt; + t_offset = 1 + t_dim1; + t -= t_offset; + q_dim1 = *ldq; + q_offset = 1 + q_dim1; + q -= q_offset; + --wr; + --wi; + --work; + --iwork; + + wantbh = lsame_(job, "B"); + wants = lsame_(job, "E") || wantbh; + wantsp = lsame_(job, "V") || wantbh; + wantq = lsame_(compq, "V"); + + *info = 0; + lquery = *lwork == -1; + if (! lsame_(job, "N") && ! wants && ! wantsp) { + *info = -1; + } else if (! lsame_(compq, "N") && ! wantq) { + *info = -2; + } else if (*n < 0) { + *info = -4; + } else if (*ldt < max(1,*n)) { + *info = -6; + } else if (*ldq < 1 || wantq && *ldq < *n) { + *info = -8; + } else { + + + *m = 0; + pair = FALSE_; + i__1 = *n; + for (k = 1; k <= i__1; ++k) { + if (pair) { + pair = FALSE_; + } else { + if (k < *n) { + if (t[k + 1 + k * t_dim1] == 0.) 
{ + if (select[k]) { + ++(*m); + } + } else { + pair = TRUE_; + if (select[k] || select[k + 1]) { + *m += 2; + } + } + } else { + if (select[*n]) { + ++(*m); + } + } + } + } + + n1 = *m; + n2 = *n - *m; + nn = n1 * n2; + + if (wantsp) { + i__1 = 1, i__2 = nn << 1; + lwmin = max(i__1,i__2); + liwmin = max(1,nn); + } else if (lsame_(job, "N")) { + lwmin = max(1,*n); + liwmin = 1; + } else if (lsame_(job, "E")) { + lwmin = max(1,nn); + liwmin = 1; + } + + if (*lwork < lwmin && ! lquery) { + *info = -15; + } else if (*liwork < liwmin && ! lquery) { + *info = -17; + } + } + + if (*info == 0) { + work[1] = (doublereal) lwmin; + iwork[1] = liwmin; + } + + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "DTRSEN", &i__1); +return TCL_ERROR; + +return TCL_OK; + } else if (lquery) { +return TCL_OK; + } + + + if (*m == *n || *m == 0) { + if (wants) { + *s = 1.; + } + if (wantsp) { + *sep = dlange_("1", n, n, &t[t_offset], ldt, &work[1]); + } + goto L40; + } + + + ks = 0; + pair = FALSE_; + i__1 = *n; + for (k = 1; k <= i__1; ++k) { + if (pair) { + pair = FALSE_; + } else { + swap = select[k]; + if (k < *n) { + if (t[k + 1 + k * t_dim1] != 0.) { + pair = TRUE_; + swap = swap || select[k + 1]; + } + } + if (swap) { + ++ks; + + + ierr = 0; + kk = k; + if (k != ks) { + if (dtrexc_(interp, compq, n, &t[t_offset], ldt, &q[q_offset], ldq, & kk, &ks, &work[1], &ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + if (ierr == 1 || ierr == 2) { + + + *info = 1; + if (wants) { + *s = 0.; + } + if (wantsp) { + *sep = 0.; + } + goto L40; + } + if (pair) { + ++ks; + } + } + } + } + + if (wants) { + + + + if (dlacpy_(interp, "F", &n1, &n2, &t[(n1 + 1) * t_dim1 + 1], ldt, &work[1], &n1)!=TCL_OK) { return TCL_ERROR; } + + if (dtrsyl_(interp, "N", "N", &dtrsen_c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 + 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + + + rnorm = dlange_("F", &n1, &n2, &work[1], &n1, &work[1]); + if (rnorm == 0.) 
{ + *s = 1.; + } else { + *s = scale / (sqrt(scale * scale / rnorm + rnorm) * sqrt(rnorm)); + } + } + + if (wantsp) { + + + est = 0.; + kase = 0; +L30: + if (dlacn2_(interp, &nn, &work[nn + 1], &work[1], &iwork[1], &est, &kase, isave)!=TCL_OK) { return TCL_ERROR; } + + if (kase != 0) { + if (kase == 1) { + + + if (dtrsyl_(interp, "N", "N", &dtrsen_c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 + 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, & + ierr)!=TCL_OK) { return TCL_ERROR; } + + + } else { + + + if (dtrsyl_(interp, "T", "T", &dtrsen_c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 + 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, & + ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + goto L30; + } + + *sep = scale / est; + } + +L40: + + + i__1 = *n; + for (k = 1; k <= i__1; ++k) { + wr[k] = t[k + k * t_dim1]; + wi[k] = 0.; + } + i__1 = *n - 1; + for (k = 1; k <= i__1; ++k) { + if (t[k + 1 + k * t_dim1] != 0.) { + wi[k] = sqrt((d__1 = t[k + (k + 1) * t_dim1], abs(d__1))) * sqrt(( + d__2 = t[k + 1 + k * t_dim1], abs(d__2))); + wi[k + 1] = -wi[k]; + } + } + + work[1] = (doublereal) lwmin; + iwork[1] = liwmin; + +return TCL_OK; + + +} /* dtrsen_ */ +static /* Subroutine */ int ztrsen_ (Tcl_Interp *interp, char *job, char *compq, logical *select, integer *n, doublecomplex *t, integer *ldt, doublecomplex *q, integer *ldq, doublecomplex *w, integer *m, doublereal *s, doublereal *sep, doublecomplex *work, integer *lwork, integer *info) +{ + integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3; + + double sqrt(doublereal); + + integer k, n1, n2, nn, ks; + doublereal est; + integer kase, ierr; + doublereal scale; + integer isave[3], lwmin; + logical wantq, wants; + doublereal rnorm, rwork[1]; + logical wantbh; + logical wantsp; + logical lquery; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --select; + t_dim1 = *ldt; + t_offset = 1 + t_dim1; + t -= t_offset; + q_dim1 = *ldq; + q_offset = 1 + q_dim1; + q -= 
q_offset; + --w; + --work; + + wantbh = lsame_(job, "B"); + wants = lsame_(job, "E") || wantbh; + wantsp = lsame_(job, "V") || wantbh; + wantq = lsame_(compq, "V"); + + + *m = 0; + i__1 = *n; + for (k = 1; k <= i__1; ++k) { + if (select[k]) { + ++(*m); + } + } + + n1 = *m; + n2 = *n - *m; + nn = n1 * n2; + + *info = 0; + lquery = *lwork == -1; + + if (wantsp) { + i__1 = 1, i__2 = nn << 1; + lwmin = max(i__1,i__2); + } else if (lsame_(job, "N")) { + lwmin = 1; + } else if (lsame_(job, "E")) { + lwmin = max(1,nn); + } + + if (! lsame_(job, "N") && ! wants && ! wantsp) { + *info = -1; + } else if (! lsame_(compq, "N") && ! wantq) { + *info = -2; + } else if (*n < 0) { + *info = -4; + } else if (*ldt < max(1,*n)) { + *info = -6; + } else if (*ldq < 1 || wantq && *ldq < *n) { + *info = -8; + } else if (*lwork < lwmin && ! lquery) { + *info = -14; + } + + if (*info == 0) { + work[1].r = (doublereal) lwmin, work[1].i = 0.; + } + + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "ZTRSEN", &i__1); +return TCL_ERROR; + +return TCL_OK; + } else if (lquery) { +return TCL_OK; + } + + + if (*m == *n || *m == 0) { + if (wants) { + *s = 1.; + } + if (wantsp) { + *sep = zlange_("1", n, n, &t[t_offset], ldt, rwork); + } + goto L40; + } + + + ks = 0; + i__1 = *n; + for (k = 1; k <= i__1; ++k) { + if (select[k]) { + ++ks; + + + if (k != ks) { + if (ztrexc_(interp, compq, n, &t[t_offset], ldt, &q[q_offset], ldq, &k, & ks, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + } + } + + if (wants) { + + + + if (zlacpy_(interp, "F", &n1, &n2, &t[(n1 + 1) * t_dim1 + 1], ldt, &work[1], &n1)!=TCL_OK) { return TCL_ERROR; } + + if (ztrsyl_(interp, "N", "N", &ztrsen_c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 + 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + + + rnorm = zlange_("F", &n1, &n2, &work[1], &n1, rwork); + if (rnorm == 0.) 
{ + *s = 1.; + } else { + *s = scale / (sqrt(scale * scale / rnorm + rnorm) * sqrt(rnorm)); + } + } + + if (wantsp) { + + + est = 0.; + kase = 0; +L30: + if (zlacn2_(interp, &nn, &work[nn + 1], &work[1], &est, &kase, isave)!=TCL_OK) { return TCL_ERROR; } + + if (kase != 0) { + if (kase == 1) { + + + if (ztrsyl_(interp, "N", "N", &ztrsen_c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 + 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, & + ierr)!=TCL_OK) { return TCL_ERROR; } + + + } else { + + + if (ztrsyl_(interp, "C", "C", &ztrsen_c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 + 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, & + ierr)!=TCL_OK) { return TCL_ERROR; } + + + } + goto L30; + } + + *sep = scale / est; + } + +L40: + + + i__1 = *n; + for (k = 1; k <= i__1; ++k) { + i__2 = k; + i__3 = k + k * t_dim1; + w[i__2].r = t[i__3].r, w[i__2].i = t[i__3].i; + } + + work[1].r = (doublereal) lwmin, work[1].i = 0.; + +return TCL_OK; + + +} /* ztrsen_ */ static /* Subroutine */ int dlasr_ (Tcl_Interp *interp, char *side, char *pivot, char *direct, integer *m, integer *n, doublereal *c__, doublereal *s, doublereal *a, integer * lda) { integer a_dim1, a_offset, i__1, i__2; @@ -47597,32 +48721,13 @@ return TCL_OK; } /* zlacn2_ */ -static /* Subroutine */ int dlasd1_ (Tcl_Interp *interp, integer *nl, integer *nr, integer *sqre, doublereal *d__, doublereal *alpha, doublereal *beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, integer *idxq, integer * iwork, doublereal *work, integer *info) +static /* Subroutine */ int dtrexc_ (Tcl_Interp *interp, char *compq, integer *n, doublereal *t, integer * ldt, doublereal *q, integer *ldq, integer *ifst, integer *ilst, doublereal *work, integer *info) { - integer u_dim1, u_offset, vt_dim1, vt_offset, i__1; - doublereal d__1, d__2; - - integer i__, k, m, n, n1, n2, iq, iz, iu2, ldq, idx, ldu2, ivt2, idxc, - idxp, ldvt2; - integer isigma; - doublereal orgnrm; - integer coltyp; - - - - - - - - - - - - - - - + integer q_dim1, 
q_offset, t_dim1, t_offset, i__1; + integer nbf, nbl, here; + logical wantq; + integer nbnext; @@ -47644,198 +48749,287 @@ static /* Subroutine */ int dlasd1_ (Tcl_Interp *interp, integer *nl, integer *n - --d__; - u_dim1 = *ldu; - u_offset = 1 + u_dim1; - u -= u_offset; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1; - vt -= vt_offset; - --idxq; - --iwork; + t_dim1 = *ldt; + t_offset = 1 + t_dim1; + t -= t_offset; + q_dim1 = *ldq; + q_offset = 1 + q_dim1; + q -= q_offset; --work; *info = 0; - - if (*nl < 1) { + wantq = lsame_(compq, "V"); + if (! wantq && ! lsame_(compq, "N")) { *info = -1; - } else if (*nr < 1) { + } else if (*n < 0) { *info = -2; - } else if (*sqre < 0 || *sqre > 1) { - *info = -3; + } else if (*ldt < max(1,*n)) { + *info = -4; + } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) { + *info = -6; + } else if (*ifst < 1 || *ifst > *n) { + *info = -7; + } else if (*ilst < 1 || *ilst > *n) { + *info = -8; } if (*info != 0) { i__1 = -(*info); - vectcl_xerbla(interp, "DLASD1", &i__1); + vectcl_xerbla(interp, "DTREXC", &i__1); return TCL_ERROR; return TCL_OK; } - n = *nl + *nr + 1; - m = n + *sqre; - - ldu2 = n; - ldvt2 = m; + if (*n <= 1) { +return TCL_OK; + } - iz = 1; - isigma = iz + m; - iu2 = isigma + n; - ivt2 = iu2 + ldu2 * n; - iq = ivt2 + ldvt2 * m; - idx = 1; - idxc = idx + n; - coltyp = idxc + n; - idxp = coltyp + n; + if (*ifst > 1) { + if (t[*ifst + (*ifst - 1) * t_dim1] != 0.) { + --(*ifst); + } + } + nbf = 1; + if (*ifst < *n) { + if (t[*ifst + 1 + *ifst * t_dim1] != 0.) { + nbf = 2; + } + } - d__1 = abs(*alpha), d__2 = abs(*beta); - orgnrm = max(d__1,d__2); - d__[*nl + 1] = 0.; - i__1 = n; - for (i__ = 1; i__ <= i__1; ++i__) { - if ((d__1 = d__[i__], abs(d__1)) > orgnrm) { - orgnrm = (d__1 = d__[i__], abs(d__1)); + if (*ilst > 1) { + if (t[*ilst + (*ilst - 1) * t_dim1] != 0.) { + --(*ilst); + } + } + nbl = 1; + if (*ilst < *n) { + if (t[*ilst + 1 + *ilst * t_dim1] != 0.) 
{ + nbl = 2; } } - if (dlascl_(interp, "G", &dlasd1_c__0, &dlasd1_c__0, &orgnrm, &dlasd1_c_b7, &n, &dlasd1_c__1, &d__[1], &n, info)!=TCL_OK) { return TCL_ERROR; } - - *alpha /= orgnrm; - *beta /= orgnrm; + if (*ifst == *ilst) { +return TCL_OK; + } - if (dlasd2_(interp, nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset], ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, & - work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], & - idxq[1], &iwork[coltyp], info)!=TCL_OK) { return TCL_ERROR; } + if (*ifst < *ilst) { + if (nbf == 2 && nbl == 1) { + --(*ilst); + } + if (nbf == 1 && nbl == 2) { + ++(*ilst); + } + here = *ifst; - ldq = k; - if (dlasd3_(interp, nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[ u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[ - ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info)!=TCL_OK) { return TCL_ERROR; } +L10: - if (*info != 0) { -return TCL_OK; - } + if (nbf == 1 || nbf == 2) { - if (dlascl_(interp, "G", &dlasd1_c__0, &dlasd1_c__0, &dlasd1_c_b7, &orgnrm, &n, &dlasd1_c__1, &d__[1], &n, info)!=TCL_OK) { return TCL_ERROR; } + nbnext = 1; + if (here + nbf + 1 <= *n) { + if (t[here + nbf + 1 + (here + nbf) * t_dim1] != 0.) { + nbnext = 2; + } + } + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &here, & nbf, &nbnext, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + if (*info != 0) { + *ilst = here; +return TCL_OK; + } + here += nbnext; - n1 = k; - n2 = n - k; - if (dlamrg_(interp, &n1, &n2, &d__[1], &dlasd1_c__1, &dlasd1_c_n1, &idxq[1])!=TCL_OK) { return TCL_ERROR; } + if (nbf == 2) { + if (t[here + 1 + here * t_dim1] == 0.) 
{ + nbf = 3; + } + } -return TCL_OK; + } else { -} /* dlasd1_ */ -static /* Subroutine */ int dlasdt_ (Tcl_Interp *interp, integer *n, integer *lvl, integer *nd, integer * inode, integer *ndiml, integer *ndimr, integer *msub) -{ - integer i__1, i__2; + nbnext = 1; + if (here + 3 <= *n) { + if (t[here + 3 + (here + 2) * t_dim1] != 0.) { + nbnext = 2; + } + } + i__1 = here + 1; + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & dtrexc_c__1, &nbnext, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - double log(doublereal); - integer i__, il, ir, maxn; - doublereal temp; - integer nlvl, llst, ncrnt; + if (*info != 0) { + *ilst = here; +return TCL_OK; + } + if (nbnext == 1) { + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &dtrexc_c__1, &nbnext, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + ++here; + } else { + if (t[here + 2 + (here + 1) * t_dim1] == 0.) { + nbnext = 1; + } + if (nbnext == 2) { + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &dtrexc_c__1, &nbnext, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + if (*info != 0) { + *ilst = here; +return TCL_OK; + } + here += 2; + } else { + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &dtrexc_c__1, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + i__1 = here + 1; + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &dtrexc_c__1, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + here += 2; + } + } + } + if (here < *ilst) { + goto L10; + } + } else { + here = *ifst; +L20: - --ndimr; - --ndiml; - --inode; - maxn = max(1,*n); - temp = log((doublereal) maxn / (doublereal) (*msub + 1)) / log(2.); - *lvl = (integer) temp + 1; + if (nbf == 1 || nbf == 2) { - i__ = *n / 2; - inode[1] = i__ + 1; - ndiml[1] = i__; - ndimr[1] = *n - i__ - 1; - il = 0; - ir = 1; - llst = 1; - i__1 = *lvl - 1; - for (nlvl = 1; nlvl <= i__1; ++nlvl) { + nbnext = 1; + if (here 
>= 3) { + if (t[here - 1 + (here - 2) * t_dim1] != 0.) { + nbnext = 2; + } + } + i__1 = here - nbnext; + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & nbnext, &nbf, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - i__2 = llst - 1; - for (i__ = 0; i__ <= i__2; ++i__) { - il += 2; - ir += 2; - ncrnt = llst + i__; - ndiml[il] = ndiml[ncrnt] / 2; - ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1; - inode[il] = inode[ncrnt] - ndimr[il] - 1; - ndiml[ir] = ndimr[ncrnt] / 2; - ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1; - inode[ir] = inode[ncrnt] + ndiml[ir] + 1; - } - llst <<= 1; - } - *nd = (llst << 1) - 1; + if (*info != 0) { + *ilst = here; return TCL_OK; + } + here -= nbnext; -} /* dlasdt_ */ -static /* Subroutine */ int dlasd6_ (Tcl_Interp *interp, integer *icompq, integer *nl, integer *nr, integer *sqre, doublereal *d__, doublereal *vf, doublereal *vl, doublereal *alpha, doublereal *beta, integer *idxq, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *poles, doublereal *difl, doublereal * difr, doublereal *z__, integer *k, doublereal *c__, doublereal *s, doublereal *work, integer *iwork, integer *info) -{ - integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, - poles_dim1, poles_offset, i__1; - doublereal d__1, d__2; + if (nbf == 2) { + if (t[here + 1 + here * t_dim1] == 0.) { + nbf = 3; + } + } - integer i__, m, n, n1, n2, iw, idx, idxc, idxp, ivfw, ivlw; - integer isigma; - doublereal orgnrm; + } else { + nbnext = 1; + if (here >= 3) { + if (t[here - 1 + (here - 2) * t_dim1] != 0.) 
{ + nbnext = 2; + } + } + i__1 = here - nbnext; + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & nbnext, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + if (*info != 0) { + *ilst = here; +return TCL_OK; + } + if (nbnext == 1) { + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &nbnext, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + --here; + } else { + if (t[here + (here - 1) * t_dim1] == 0.) { + nbnext = 1; + } + if (nbnext == 2) { + i__1 = here - 1; + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &dtrexc_c__2, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + if (*info != 0) { + *ilst = here; +return TCL_OK; + } + here += -2; + } else { + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &dtrexc_c__1, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + i__1 = here - 1; + if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &dtrexc_c__1, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + here += -2; + } + } + } + if (here > *ilst) { + goto L20; + } + } + *ilst = here; +return TCL_OK; +} /* dtrexc_ */ +static /* Subroutine */ int dtrsyl_ (Tcl_Interp *interp, char *trana, char *tranb, integer *isgn, integer *m, integer *n, doublereal *a, integer *lda, doublereal *b, integer * ldb, doublereal *c__, integer *ldc, doublereal *scale, integer *info) +{ + integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, + i__3, i__4; + doublereal d__1, d__2; + integer j, k, l; + doublereal x[4] /* was [2][2] */; + integer k1, k2, l1, l2; + doublereal a11, db, da11, vec[4] /* was [2][2] */, dum[1], eps, sgn; + integer ierr; + doublereal smin, suml, sumr; + integer knext, lnext; + doublereal xnorm; + doublereal scaloc; + doublereal bignum; + logical notrna, notrnb; + doublereal smlnum; @@ -47862,218 +49056,1058 @@ static /* Subroutine */ int dlasd6_ (Tcl_Interp 
*interp, integer *icompq, intege - --d__; - --vf; - --vl; - --idxq; - --perm; - givcol_dim1 = *ldgcol; - givcol_offset = 1 + givcol_dim1; - givcol -= givcol_offset; - poles_dim1 = *ldgnum; - poles_offset = 1 + poles_dim1; - poles -= poles_offset; - givnum_dim1 = *ldgnum; - givnum_offset = 1 + givnum_dim1; - givnum -= givnum_offset; - --difl; - --difr; - --z__; - --work; - --iwork; + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + b_dim1 = *ldb; + b_offset = 1 + b_dim1; + b -= b_offset; + c_dim1 = *ldc; + c_offset = 1 + c_dim1; + c__ -= c_offset; - *info = 0; - n = *nl + *nr + 1; - m = n + *sqre; + notrna = lsame_(trana, "N"); + notrnb = lsame_(tranb, "N"); - if (*icompq < 0 || *icompq > 1) { + *info = 0; + if (! notrna && ! lsame_(trana, "T") && ! lsame_( + trana, "C")) { *info = -1; - } else if (*nl < 1) { + } else if (! notrnb && ! lsame_(tranb, "T") && ! + lsame_(tranb, "C")) { *info = -2; - } else if (*nr < 1) { + } else if (*isgn != 1 && *isgn != -1) { *info = -3; - } else if (*sqre < 0 || *sqre > 1) { + } else if (*m < 0) { *info = -4; - } else if (*ldgcol < n) { - *info = -14; - } else if (*ldgnum < n) { - *info = -16; + } else if (*n < 0) { + *info = -5; + } else if (*lda < max(1,*m)) { + *info = -7; + } else if (*ldb < max(1,*n)) { + *info = -9; + } else if (*ldc < max(1,*m)) { + *info = -11; } if (*info != 0) { i__1 = -(*info); - vectcl_xerbla(interp, "DLASD6", &i__1); + vectcl_xerbla(interp, "DTRSYL", &i__1); return TCL_ERROR; return TCL_OK; } - isigma = 1; - iw = isigma + n; - ivfw = iw + m; - ivlw = ivfw + m; + *scale = 1.; + if (*m == 0 || *n == 0) { +return TCL_OK; + } - idx = 1; - idxc = idx + n; - idxp = idxc + n; + eps = dlamch_("P"); + smlnum = dlamch_("S"); + bignum = 1. 
/ smlnum; + if (dlabad_(interp, &smlnum, &bignum)!=TCL_OK) { return TCL_ERROR; } - d__1 = abs(*alpha), d__2 = abs(*beta); - orgnrm = max(d__1,d__2); - d__[*nl + 1] = 0.; - i__1 = n; - for (i__ = 1; i__ <= i__1; ++i__) { - if ((d__1 = d__[i__], abs(d__1)) > orgnrm) { - orgnrm = (d__1 = d__[i__], abs(d__1)); - } - } - if (dlascl_(interp, "G", &dlasd6_c__0, &dlasd6_c__0, &orgnrm, &dlasd6_c_b7, &n, &dlasd6_c__1, &d__[1], &n, info)!=TCL_OK) { return TCL_ERROR; } + smlnum = smlnum * (doublereal) (*m * *n) / eps; + bignum = 1. / smlnum; - *alpha /= orgnrm; - *beta /= orgnrm; + d__1 = smlnum, d__2 = eps * dlange_("M", m, m, &a[a_offset], lda, dum), d__1 = max(d__1,d__2), d__2 = eps * dlange_("M", n, n, + &b[b_offset], ldb, dum); + smin = max(d__1,d__2); + sgn = (doublereal) (*isgn); - if (dlasd7_(interp, icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], & work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], & - iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[ - givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s, - info)!=TCL_OK) { return TCL_ERROR; } + if (notrna && notrnb) { - if (dlasd8_(interp, icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1], ldgnum, &work[isigma], &work[iw], info)!=TCL_OK) { return TCL_ERROR; } + lnext = 1; + i__1 = *n; + for (l = 1; l <= i__1; ++l) { + if (l < lnext) { + goto L60; + } + if (l == *n) { + l1 = l; + l2 = l; + } else { + if (b[l + 1 + l * b_dim1] != 0.) { + l1 = l; + l2 = l + 1; + lnext = l + 2; + } else { + l1 = l; + l2 = l; + lnext = l + 1; + } + } - if (*icompq == 1) { - if (dcopy_(interp, k, &d__[1], &dlasd6_c__1, &poles[poles_dim1 + 1], &dlasd6_c__1)!=TCL_OK) { return TCL_ERROR; } + knext = *m; + for (k = *m; k >= 1; --k) { + if (k > knext) { + goto L50; + } + if (k == 1) { + k1 = k; + k2 = k; + } else { + if (a[k + (k - 1) * a_dim1] != 0.) 
{ + k1 = k - 1; + k2 = k; + knext = k - 2; + } else { + k1 = k; + k2 = k; + knext = k - 1; + } + } - if (dcopy_(interp, k, &work[isigma], &dlasd6_c__1, &poles[(poles_dim1 << 1) + 1], &dlasd6_c__1)!=TCL_OK) { return TCL_ERROR; } + if (l1 == l2 && k1 == k2) { + i__2 = *m - k1; + i__3 = k1 + 1; + i__4 = k1 + 1; + suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, & + c__[min(i__4, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__2 = l1 - 1; + sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + scaloc = 1.; + + a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1]; + da11 = abs(a11); + if (da11 <= smin) { + a11 = smin; + da11 = smin; + *info = 1; + } + db = abs(vec[0]); + if (da11 < 1. && db > 1.) { + if (db > bignum * da11) { + scaloc = 1. / db; + } + } + x[0] = vec[0] * scaloc / a11; - } + if (scaloc != 1.) { + i__2 = *n; + for (j = 1; j <= i__2; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + + } else if (l1 == l2 && k1 != k2) { + + i__2 = *m - k2; + i__3 = k2 + 1; + i__4 = k2 + 1; + suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, & + c__[min(i__4, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__2 = l1 - 1; + sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + + i__2 = *m - k2; + i__3 = k2 + 1; + i__4 = k2 + 1; + suml = ddot_(&i__2, &a[k2 + min(i__3, *m)* a_dim1], lda, & + c__[min(i__4, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__2 = l1 - 1; + sumr = ddot_(&i__2, &c__[k2 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr); + + d__1 = -sgn * b[l1 + l1 * b_dim1]; + if (dlaln2_(interp, &dtrsyl_c_false, &dtrsyl_c__2, &dtrsyl_c__1, &smin, &dtrsyl_c_b26, &a[k1 + k1 * a_dim1], lda, &dtrsyl_c_b26, &dtrsyl_c_b26, vec, &dtrsyl_c__2, 
&d__1, + &dtrsyl_c_b30, x, &dtrsyl_c__2, &scaloc, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (ierr != 0) { + *info = 1; + } - if (dlascl_(interp, "G", &dlasd6_c__0, &dlasd6_c__0, &dlasd6_c_b7, &orgnrm, &n, &dlasd6_c__1, &d__[1], &n, info)!=TCL_OK) { return TCL_ERROR; } + if (scaloc != 1.) { + i__2 = *n; + for (j = 1; j <= i__2; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k2 + l1 * c_dim1] = x[1]; + + } else if (l1 != l2 && k1 == k2) { + + i__2 = *m - k1; + i__3 = k1 + 1; + i__4 = k1 + 1; + suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, & + c__[min(i__4, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__2 = l1 - 1; + sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn * + sumr)); + + i__2 = *m - k1; + i__3 = k1 + 1; + i__4 = k1 + 1; + suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, & + c__[min(i__4, *m)+ l2 * c_dim1], &dtrsyl_c__1); + i__2 = l1 - 1; + sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l2 * + b_dim1 + 1], &dtrsyl_c__1); + vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn * + sumr)); + + d__1 = -sgn * a[k1 + k1 * a_dim1]; + if (dlaln2_(interp, &dtrsyl_c_true, &dtrsyl_c__2, &dtrsyl_c__1, &smin, &dtrsyl_c_b26, &b[l1 + l1 * b_dim1], ldb, &dtrsyl_c_b26, &dtrsyl_c_b26, vec, &dtrsyl_c__2, &d__1, + &dtrsyl_c_b30, x, &dtrsyl_c__2, &scaloc, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (ierr != 0) { + *info = 1; + } + if (scaloc != 1.) 
{ + i__2 = *n; + for (j = 1; j <= i__2; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } - n1 = *k; - n2 = n - *k; - if (dlamrg_(interp, &n1, &n2, &d__[1], &dlasd6_c__1, &dlasd6_c_n1, &idxq[1])!=TCL_OK) { return TCL_ERROR; } + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k1 + l2 * c_dim1] = x[1]; + + } else if (l1 != l2 && k1 != k2) { + + i__2 = *m - k2; + i__3 = k2 + 1; + i__4 = k2 + 1; + suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, & + c__[min(i__4, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__2 = l1 - 1; + sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + + i__2 = *m - k2; + i__3 = k2 + 1; + i__4 = k2 + 1; + suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, & + c__[min(i__4, *m)+ l2 * c_dim1], &dtrsyl_c__1); + i__2 = l1 - 1; + sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l2 * + b_dim1 + 1], &dtrsyl_c__1); + vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr); + + i__2 = *m - k2; + i__3 = k2 + 1; + i__4 = k2 + 1; + suml = ddot_(&i__2, &a[k2 + min(i__3, *m)* a_dim1], lda, & + c__[min(i__4, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__2 = l1 - 1; + sumr = ddot_(&i__2, &c__[k2 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr); + + i__2 = *m - k2; + i__3 = k2 + 1; + i__4 = k2 + 1; + suml = ddot_(&i__2, &a[k2 + min(i__3, *m)* a_dim1], lda, & + c__[min(i__4, *m)+ l2 * c_dim1], &dtrsyl_c__1); + i__2 = l1 - 1; + sumr = ddot_(&i__2, &c__[k2 + c_dim1], ldc, &b[l2 * + b_dim1 + 1], &dtrsyl_c__1); + vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr); + + if (dlasy2_(interp, &dtrsyl_c_false, &dtrsyl_c_false, isgn, &dtrsyl_c__2, &dtrsyl_c__2, &a[k1 + k1 * a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec, + &dtrsyl_c__2, &scaloc, x, &dtrsyl_c__2, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (ierr != 0) { + *info = 1; + } + if (scaloc != 
1.) { + i__2 = *n; + for (j = 1; j <= i__2; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } -return TCL_OK; + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k1 + l2 * c_dim1] = x[2]; + c__[k2 + l1 * c_dim1] = x[1]; + c__[k2 + l2 * c_dim1] = x[3]; + } +L50: + ; + } -} /* dlasd6_ */ -static /* Subroutine */ int dlarf_ (Tcl_Interp *interp, char *side, integer *m, integer *n, doublereal *v, integer *incv, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work) -{ - integer c_dim1, c_offset; - doublereal d__1; +L60: + ; + } - integer i__; - logical applyleft; - integer lastc, lastv; + } else if (! notrna && notrnb) { + lnext = 1; + i__1 = *n; + for (l = 1; l <= i__1; ++l) { + if (l < lnext) { + goto L120; + } + if (l == *n) { + l1 = l; + l2 = l; + } else { + if (b[l + 1 + l * b_dim1] != 0.) { + l1 = l; + l2 = l + 1; + lnext = l + 2; + } else { + l1 = l; + l2 = l; + lnext = l + 1; + } + } + knext = 1; + i__2 = *m; + for (k = 1; k <= i__2; ++k) { + if (k < knext) { + goto L110; + } + if (k == *m) { + k1 = k; + k2 = k; + } else { + if (a[k + 1 + k * a_dim1] != 0.) { + k1 = k; + k2 = k + 1; + knext = k + 2; + } else { + k1 = k; + k2 = k; + knext = k + 1; + } + } + if (l1 == l2 && k1 == k2) { + i__3 = k1 - 1; + suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__3 = l1 - 1; + sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + scaloc = 1.; + + a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1]; + da11 = abs(a11); + if (da11 <= smin) { + a11 = smin; + da11 = smin; + *info = 1; + } + db = abs(vec[0]); + if (da11 < 1. && db > 1.) { + if (db > bignum * da11) { + scaloc = 1. / db; + } + } + x[0] = vec[0] * scaloc / a11; + if (scaloc != 1.) 
{ + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + } else if (l1 == l2 && k1 != k2) { + i__3 = k1 - 1; + suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__3 = l1 - 1; + sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + i__3 = k1 - 1; + suml = ddot_(&i__3, &a[k2 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__3 = l1 - 1; + sumr = ddot_(&i__3, &c__[k2 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr); + d__1 = -sgn * b[l1 + l1 * b_dim1]; + if (dlaln2_(interp, &dtrsyl_c_true, &dtrsyl_c__2, &dtrsyl_c__1, &smin, &dtrsyl_c_b26, &a[k1 + k1 * a_dim1], lda, &dtrsyl_c_b26, &dtrsyl_c_b26, vec, &dtrsyl_c__2, &d__1, + &dtrsyl_c_b30, x, &dtrsyl_c__2, &scaloc, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + if (ierr != 0) { + *info = 1; + } + if (scaloc != 1.) 
{ + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k2 + l1 * c_dim1] = x[1]; + + } else if (l1 != l2 && k1 == k2) { + + i__3 = k1 - 1; + suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__3 = l1 - 1; + sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn * + sumr)); + + i__3 = k1 - 1; + suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l2 * + c_dim1 + 1], &dtrsyl_c__1); + i__3 = l1 - 1; + sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l2 * + b_dim1 + 1], &dtrsyl_c__1); + vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn * + sumr)); + + d__1 = -sgn * a[k1 + k1 * a_dim1]; + if (dlaln2_(interp, &dtrsyl_c_true, &dtrsyl_c__2, &dtrsyl_c__1, &smin, &dtrsyl_c_b26, &b[l1 + l1 * b_dim1], ldb, &dtrsyl_c_b26, &dtrsyl_c_b26, vec, &dtrsyl_c__2, &d__1, + &dtrsyl_c_b30, x, &dtrsyl_c__2, &scaloc, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (ierr != 0) { + *info = 1; + } + if (scaloc != 1.) 
{ + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } - --v; - c_dim1 = *ldc; - c_offset = 1 + c_dim1; - c__ -= c_offset; - --work; + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k1 + l2 * c_dim1] = x[1]; + + } else if (l1 != l2 && k1 != k2) { + + i__3 = k1 - 1; + suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__3 = l1 - 1; + sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + + i__3 = k1 - 1; + suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l2 * + c_dim1 + 1], &dtrsyl_c__1); + i__3 = l1 - 1; + sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l2 * + b_dim1 + 1], &dtrsyl_c__1); + vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr); + + i__3 = k1 - 1; + suml = ddot_(&i__3, &a[k2 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__3 = l1 - 1; + sumr = ddot_(&i__3, &c__[k2 + c_dim1], ldc, &b[l1 * + b_dim1 + 1], &dtrsyl_c__1); + vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr); + + i__3 = k1 - 1; + suml = ddot_(&i__3, &a[k2 * a_dim1 + 1], &dtrsyl_c__1, &c__[l2 * + c_dim1 + 1], &dtrsyl_c__1); + i__3 = l1 - 1; + sumr = ddot_(&i__3, &c__[k2 + c_dim1], ldc, &b[l2 * + b_dim1 + 1], &dtrsyl_c__1); + vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr); + + if (dlasy2_(interp, &dtrsyl_c_true, &dtrsyl_c_false, isgn, &dtrsyl_c__2, &dtrsyl_c__2, &a[k1 + k1 * a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec, & + dtrsyl_c__2, &scaloc, x, &dtrsyl_c__2, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (ierr != 0) { + *info = 1; + } - applyleft = lsame_(side, "L"); - lastv = 0; - lastc = 0; - if (*tau != 0.) { - if (applyleft) { - lastv = *m; - } else { - lastv = *n; - } - if (*incv > 0) { - i__ = (lastv - 1) * *incv + 1; - } else { - i__ = 1; - } - while(lastv > 0 && v[i__] == 0.) 
{ - --lastv; - i__ -= *incv; - } - if (applyleft) { - lastc = iladlc_(&lastv, n, &c__[c_offset], ldc); - } else { - lastc = iladlr_(m, &lastv, &c__[c_offset], ldc); - } - } - if (applyleft) { + if (scaloc != 1.) { + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k1 + l2 * c_dim1] = x[2]; + c__[k2 + l1 * c_dim1] = x[1]; + c__[k2 + l2 * c_dim1] = x[3]; + } - if (lastv > 0) { +L110: + ; + } +L120: + ; + } + } else if (! notrna && ! notrnb) { - if (dgemv_(interp, "Transpose", &lastv, &lastc, &dlarf_c_b4, &c__[c_offset], ldc, & v[1], incv, &dlarf_c_b5, &work[1], &dlarf_c__1)!=TCL_OK) { return TCL_ERROR; } - d__1 = -(*tau); - if (dger_(interp, &lastv, &lastc, &d__1, &v[1], incv, &work[1], &dlarf_c__1, &c__[ c_offset], ldc)!=TCL_OK) { return TCL_ERROR; } + lnext = *n; + for (l = *n; l >= 1; --l) { + if (l > lnext) { + goto L180; + } + if (l == 1) { + l1 = l; + l2 = l; + } else { + if (b[l + (l - 1) * b_dim1] != 0.) { + l1 = l - 1; + l2 = l; + lnext = l - 2; + } else { + l1 = l; + l2 = l; + lnext = l - 1; + } + } + + + knext = 1; + i__1 = *m; + for (k = 1; k <= i__1; ++k) { + if (k < knext) { + goto L170; + } + if (k == *m) { + k1 = k; + k2 = k; + } else { + if (a[k + 1 + k * a_dim1] != 0.) 
{ + k1 = k; + k2 = k + 1; + knext = k + 2; + } else { + k1 = k; + k2 = k; + knext = k + 1; + } + } + + if (l1 == l2 && k1 == k2) { + i__2 = k1 - 1; + suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__2 = *n - l1; + i__3 = l1 + 1; + i__4 = l1 + 1; + sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc, + &b[l1 + min(i__4, *n)* b_dim1], ldb); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + scaloc = 1.; + + a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1]; + da11 = abs(a11); + if (da11 <= smin) { + a11 = smin; + da11 = smin; + *info = 1; + } + db = abs(vec[0]); + if (da11 < 1. && db > 1.) { + if (db > bignum * da11) { + scaloc = 1. / db; + } + } + x[0] = vec[0] * scaloc / a11; + + if (scaloc != 1.) { + i__2 = *n; + for (j = 1; j <= i__2; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + + } else if (l1 == l2 && k1 != k2) { + + i__2 = k1 - 1; + suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__2 = *n - l2; + i__3 = l2 + 1; + i__4 = l2 + 1; + sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc, + &b[l1 + min(i__4, *n)* b_dim1], ldb); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + + i__2 = k1 - 1; + suml = ddot_(&i__2, &a[k2 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__2 = *n - l2; + i__3 = l2 + 1; + i__4 = l2 + 1; + sumr = ddot_(&i__2, &c__[k2 + min(i__3, *n)* c_dim1], ldc, + &b[l1 + min(i__4, *n)* b_dim1], ldb); + vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr); + + d__1 = -sgn * b[l1 + l1 * b_dim1]; + if (dlaln2_(interp, &dtrsyl_c_true, &dtrsyl_c__2, &dtrsyl_c__1, &smin, &dtrsyl_c_b26, &a[k1 + k1 * a_dim1], lda, &dtrsyl_c_b26, &dtrsyl_c_b26, vec, &dtrsyl_c__2, &d__1, + &dtrsyl_c_b30, x, &dtrsyl_c__2, &scaloc, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (ierr != 0) { + 
*info = 1; + } + + if (scaloc != 1.) { + i__2 = *n; + for (j = 1; j <= i__2; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k2 + l1 * c_dim1] = x[1]; + + } else if (l1 != l2 && k1 == k2) { + + i__2 = k1 - 1; + suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__2 = *n - l2; + i__3 = l2 + 1; + i__4 = l2 + 1; + sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc, + &b[l1 + min(i__4, *n)* b_dim1], ldb); + vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn * + sumr)); + + i__2 = k1 - 1; + suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l2 * + c_dim1 + 1], &dtrsyl_c__1); + i__2 = *n - l2; + i__3 = l2 + 1; + i__4 = l2 + 1; + sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc, + &b[l2 + min(i__4, *n)* b_dim1], ldb); + vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn * + sumr)); + + d__1 = -sgn * a[k1 + k1 * a_dim1]; + if (dlaln2_(interp, &dtrsyl_c_false, &dtrsyl_c__2, &dtrsyl_c__1, &smin, &dtrsyl_c_b26, &b[l1 + l1 * b_dim1], ldb, &dtrsyl_c_b26, &dtrsyl_c_b26, vec, &dtrsyl_c__2, &d__1, + &dtrsyl_c_b30, x, &dtrsyl_c__2, &scaloc, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (ierr != 0) { + *info = 1; + } + + if (scaloc != 1.) 
{ + i__2 = *n; + for (j = 1; j <= i__2; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k1 + l2 * c_dim1] = x[1]; + + } else if (l1 != l2 && k1 != k2) { + + i__2 = k1 - 1; + suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__2 = *n - l2; + i__3 = l2 + 1; + i__4 = l2 + 1; + sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc, + &b[l1 + min(i__4, *n)* b_dim1], ldb); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + + i__2 = k1 - 1; + suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &dtrsyl_c__1, &c__[l2 * + c_dim1 + 1], &dtrsyl_c__1); + i__2 = *n - l2; + i__3 = l2 + 1; + i__4 = l2 + 1; + sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc, + &b[l2 + min(i__4, *n)* b_dim1], ldb); + vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr); + + i__2 = k1 - 1; + suml = ddot_(&i__2, &a[k2 * a_dim1 + 1], &dtrsyl_c__1, &c__[l1 * + c_dim1 + 1], &dtrsyl_c__1); + i__2 = *n - l2; + i__3 = l2 + 1; + i__4 = l2 + 1; + sumr = ddot_(&i__2, &c__[k2 + min(i__3, *n)* c_dim1], ldc, + &b[l1 + min(i__4, *n)* b_dim1], ldb); + vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr); + + i__2 = k1 - 1; + suml = ddot_(&i__2, &a[k2 * a_dim1 + 1], &dtrsyl_c__1, &c__[l2 * + c_dim1 + 1], &dtrsyl_c__1); + i__2 = *n - l2; + i__3 = l2 + 1; + i__4 = l2 + 1; + sumr = ddot_(&i__2, &c__[k2 + min(i__3, *n)* c_dim1], ldc, + &b[l2 + min(i__4, *n)* b_dim1], ldb); + vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr); + + if (dlasy2_(interp, &dtrsyl_c_true, &dtrsyl_c_true, isgn, &dtrsyl_c__2, &dtrsyl_c__2, &a[k1 + k1 * a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec, & + dtrsyl_c__2, &scaloc, x, &dtrsyl_c__2, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (ierr != 0) { + *info = 1; + } + + if (scaloc != 1.) 
{ + i__2 = *n; + for (j = 1; j <= i__2; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k1 + l2 * c_dim1] = x[2]; + c__[k2 + l1 * c_dim1] = x[1]; + c__[k2 + l2 * c_dim1] = x[3]; + } +L170: + ; + } +L180: + ; } - } else { + } else if (notrna && ! notrnb) { - if (lastv > 0) { - if (dgemv_(interp, "No transpose", &lastc, &lastv, &dlarf_c_b4, &c__[c_offset], ldc, &v[1], incv, &dlarf_c_b5, &work[1], &dlarf_c__1)!=TCL_OK) { return TCL_ERROR; } + lnext = *n; + for (l = *n; l >= 1; --l) { + if (l > lnext) { + goto L240; + } + if (l == 1) { + l1 = l; + l2 = l; + } else { + if (b[l + (l - 1) * b_dim1] != 0.) { + l1 = l - 1; + l2 = l; + lnext = l - 2; + } else { + l1 = l; + l2 = l; + lnext = l - 1; + } + } - d__1 = -(*tau); - if (dger_(interp, &lastc, &lastv, &d__1, &work[1], &dlarf_c__1, &v[1], incv, &c__[ c_offset], ldc)!=TCL_OK) { return TCL_ERROR; } + knext = *m; + for (k = *m; k >= 1; --k) { + if (k > knext) { + goto L230; + } + if (k == 1) { + k1 = k; + k2 = k; + } else { + if (a[k + (k - 1) * a_dim1] != 0.) { + k1 = k - 1; + k2 = k; + knext = k - 2; + } else { + k1 = k; + k2 = k; + knext = k - 1; + } + } + + if (l1 == l2 && k1 == k2) { + i__1 = *m - k1; + i__2 = k1 + 1; + i__3 = k1 + 1; + suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, & + c__[min(i__3, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__1 = *n - l1; + i__2 = l1 + 1; + i__3 = l1 + 1; + sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc, + &b[l1 + min(i__3, *n)* b_dim1], ldb); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + scaloc = 1.; + + a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1]; + da11 = abs(a11); + if (da11 <= smin) { + a11 = smin; + da11 = smin; + *info = 1; + } + db = abs(vec[0]); + if (da11 < 1. && db > 1.) { + if (db > bignum * da11) { + scaloc = 1. / db; + } + } + x[0] = vec[0] * scaloc / a11; + + if (scaloc != 1.) 
{ + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + + } else if (l1 == l2 && k1 != k2) { + + i__1 = *m - k2; + i__2 = k2 + 1; + i__3 = k2 + 1; + suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, & + c__[min(i__3, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__1 = *n - l2; + i__2 = l2 + 1; + i__3 = l2 + 1; + sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc, + &b[l1 + min(i__3, *n)* b_dim1], ldb); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + + i__1 = *m - k2; + i__2 = k2 + 1; + i__3 = k2 + 1; + suml = ddot_(&i__1, &a[k2 + min(i__2, *m)* a_dim1], lda, & + c__[min(i__3, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__1 = *n - l2; + i__2 = l2 + 1; + i__3 = l2 + 1; + sumr = ddot_(&i__1, &c__[k2 + min(i__2, *n)* c_dim1], ldc, + &b[l1 + min(i__3, *n)* b_dim1], ldb); + vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr); + + d__1 = -sgn * b[l1 + l1 * b_dim1]; + if (dlaln2_(interp, &dtrsyl_c_false, &dtrsyl_c__2, &dtrsyl_c__1, &smin, &dtrsyl_c_b26, &a[k1 + k1 * a_dim1], lda, &dtrsyl_c_b26, &dtrsyl_c_b26, vec, &dtrsyl_c__2, &d__1, + &dtrsyl_c_b30, x, &dtrsyl_c__2, &scaloc, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (ierr != 0) { + *info = 1; + } + + if (scaloc != 1.) 
{ + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k2 + l1 * c_dim1] = x[1]; + + } else if (l1 != l2 && k1 == k2) { + + i__1 = *m - k1; + i__2 = k1 + 1; + i__3 = k1 + 1; + suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, & + c__[min(i__3, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__1 = *n - l2; + i__2 = l2 + 1; + i__3 = l2 + 1; + sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc, + &b[l1 + min(i__3, *n)* b_dim1], ldb); + vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn * + sumr)); + + i__1 = *m - k1; + i__2 = k1 + 1; + i__3 = k1 + 1; + suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, & + c__[min(i__3, *m)+ l2 * c_dim1], &dtrsyl_c__1); + i__1 = *n - l2; + i__2 = l2 + 1; + i__3 = l2 + 1; + sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc, + &b[l2 + min(i__3, *n)* b_dim1], ldb); + vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn * + sumr)); + + d__1 = -sgn * a[k1 + k1 * a_dim1]; + if (dlaln2_(interp, &dtrsyl_c_false, &dtrsyl_c__2, &dtrsyl_c__1, &smin, &dtrsyl_c_b26, &b[l1 + l1 * b_dim1], ldb, &dtrsyl_c_b26, &dtrsyl_c_b26, vec, &dtrsyl_c__2, &d__1, + &dtrsyl_c_b30, x, &dtrsyl_c__2, &scaloc, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + + + if (ierr != 0) { + *info = 1; + } + + if (scaloc != 1.) 
{ + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k1 + l2 * c_dim1] = x[1]; + + } else if (l1 != l2 && k1 != k2) { + + i__1 = *m - k2; + i__2 = k2 + 1; + i__3 = k2 + 1; + suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, & + c__[min(i__3, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__1 = *n - l2; + i__2 = l2 + 1; + i__3 = l2 + 1; + sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc, + &b[l1 + min(i__3, *n)* b_dim1], ldb); + vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr); + + i__1 = *m - k2; + i__2 = k2 + 1; + i__3 = k2 + 1; + suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, & + c__[min(i__3, *m)+ l2 * c_dim1], &dtrsyl_c__1); + i__1 = *n - l2; + i__2 = l2 + 1; + i__3 = l2 + 1; + sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc, + &b[l2 + min(i__3, *n)* b_dim1], ldb); + vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr); + + i__1 = *m - k2; + i__2 = k2 + 1; + i__3 = k2 + 1; + suml = ddot_(&i__1, &a[k2 + min(i__2, *m)* a_dim1], lda, & + c__[min(i__3, *m)+ l1 * c_dim1], &dtrsyl_c__1); + i__1 = *n - l2; + i__2 = l2 + 1; + i__3 = l2 + 1; + sumr = ddot_(&i__1, &c__[k2 + min(i__2, *n)* c_dim1], ldc, + &b[l1 + min(i__3, *n)* b_dim1], ldb); + vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr); + + i__1 = *m - k2; + i__2 = k2 + 1; + i__3 = k2 + 1; + suml = ddot_(&i__1, &a[k2 + min(i__2, *m)* a_dim1], lda, & + c__[min(i__3, *m)+ l2 * c_dim1], &dtrsyl_c__1); + i__1 = *n - l2; + i__2 = l2 + 1; + i__3 = l2 + 1; + sumr = ddot_(&i__1, &c__[k2 + min(i__2, *n)* c_dim1], ldc, + &b[l2 + min(i__3, *n)* b_dim1], ldb); + vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr); + + if (dlasy2_(interp, &dtrsyl_c_false, &dtrsyl_c_true, isgn, &dtrsyl_c__2, &dtrsyl_c__2, &a[k1 + k1 * a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec, & + dtrsyl_c__2, &scaloc, x, &dtrsyl_c__2, &xnorm, &ierr)!=TCL_OK) { 
return TCL_ERROR; } + + + if (ierr != 0) { + *info = 1; + } + + if (scaloc != 1.) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (dscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &dtrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + *scale *= scaloc; + } + c__[k1 + l1 * c_dim1] = x[0]; + c__[k1 + l2 * c_dim1] = x[2]; + c__[k2 + l1 * c_dim1] = x[1]; + c__[k2 + l2 * c_dim1] = x[3]; + } +L230: + ; + } +L240: + ; } + } + return TCL_OK; -} /* dlarf_ */ -static /* Subroutine */ int dlarfg_ (Tcl_Interp *interp, integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *tau) +} /* dtrsyl_ */ +static /* Subroutine */ int ztrexc_ (Tcl_Interp *interp, char *compq, integer *n, doublecomplex *t, integer *ldt, doublecomplex *q, integer *ldq, integer *ifst, integer * ilst, integer *info) { - integer i__1; - doublereal d__1; + integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3; + doublecomplex z__1; - double d_sign(doublereal *, doublereal *); + void d_cnjg(doublecomplex *, doublecomplex *); - integer j, knt; - doublereal beta; - doublereal xnorm; - doublereal safmin, rsafmn; + integer k, m1, m2, m3; + doublereal cs; + doublecomplex t11, t22, sn, temp; + logical wantq; @@ -48093,87 +50127,125 @@ static /* Subroutine */ int dlarfg_ (Tcl_Interp *interp, integer *n, doublereal + t_dim1 = *ldt; + t_offset = 1 + t_dim1; + t -= t_offset; + q_dim1 = *ldq; + q_offset = 1 + q_dim1; + q -= q_offset; - --x; + *info = 0; + wantq = lsame_(compq, "V"); + if (! lsame_(compq, "N") && ! 
wantq) { + *info = -1; + } else if (*n < 0) { + *info = -2; + } else if (*ldt < max(1,*n)) { + *info = -4; + } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) { + *info = -6; + } else if (*ifst < 1 || *ifst > *n) { + *info = -7; + } else if (*ilst < 1 || *ilst > *n) { + *info = -8; + } + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "ZTREXC", &i__1); +return TCL_ERROR; - if (*n <= 1) { - *tau = 0.; return TCL_OK; } - i__1 = *n - 1; - xnorm = dnrm2_(&i__1, &x[1], incx); - if (xnorm == 0.) { + if (*n == 1 || *ifst == *ilst) { +return TCL_OK; + } + + if (*ifst < *ilst) { - *tau = 0.; + m1 = 0; + m2 = -1; + m3 = 1; } else { - d__1 = dlapy2_(alpha, &xnorm); - beta = -d_sign(&d__1, alpha); - safmin = dlamch_("S") / dlamch_("E"); - knt = 0; - if (abs(beta) < safmin) { + m1 = -1; + m2 = 0; + m3 = -1; + } + i__1 = *ilst + m2; + i__2 = m3; + for (k = *ifst + m1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) { - rsafmn = 1. / safmin; -L10: - ++knt; - i__1 = *n - 1; - if (dscal_(interp, &i__1, &rsafmn, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } - beta *= rsafmn; - *alpha *= rsafmn; - if (abs(beta) < safmin) { - goto L10; - } + i__3 = k + k * t_dim1; + t11.r = t[i__3].r, t11.i = t[i__3].i; + i__3 = k + 1 + (k + 1) * t_dim1; + t22.r = t[i__3].r, t22.i = t[i__3].i; - i__1 = *n - 1; - xnorm = dnrm2_(&i__1, &x[1], incx); - d__1 = dlapy2_(alpha, &xnorm); - beta = -d_sign(&d__1, alpha); - } - *tau = (beta - *alpha) / beta; - i__1 = *n - 1; - d__1 = 1. 
/ (*alpha - beta); - if (dscal_(interp, &i__1, &d__1, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } + z__1.r = t22.r - t11.r, z__1.i = t22.i - t11.i; + if (zlartg_(interp, &t[k + (k + 1) * t_dim1], &z__1, &cs, &sn, &temp)!=TCL_OK) { return TCL_ERROR; } - i__1 = knt; - for (j = 1; j <= i__1; ++j) { - beta *= safmin; + if (k + 2 <= *n) { + i__3 = *n - k - 1; + if (zrot_(interp, &i__3, &t[k + (k + 2) * t_dim1], ldt, &t[k + 1 + (k + 2) * t_dim1], ldt, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } + + } - *alpha = beta; - } + i__3 = k - 1; + d_cnjg(&z__1, &sn); + if (zrot_(interp, &i__3, &t[k * t_dim1 + 1], &ztrexc_c__1, &t[(k + 1) * t_dim1 + 1], & ztrexc_c__1, &cs, &z__1)!=TCL_OK) { return TCL_ERROR; } -return TCL_OK; -} /* dlarfg_ */ -static /* Subroutine */ int dlarfp_ (Tcl_Interp *interp, integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *tau) -{ - integer i__1; - doublereal d__1; + i__3 = k + k * t_dim1; + t[i__3].r = t22.r, t[i__3].i = t22.i; + i__3 = k + 1 + (k + 1) * t_dim1; + t[i__3].r = t11.r, t[i__3].i = t11.i; - double d_sign(doublereal *, doublereal *); + if (wantq) { - integer j, knt; - doublereal beta; - doublereal xnorm; - doublereal safmin, rsafmn; + d_cnjg(&z__1, &sn); + if (zrot_(interp, n, &q[k * q_dim1 + 1], &ztrexc_c__1, &q[(k + 1) * q_dim1 + 1], & ztrexc_c__1, &cs, &z__1)!=TCL_OK) { return TCL_ERROR; } + } + } +return TCL_OK; +} /* ztrexc_ */ +static /* Subroutine */ int ztrsyl_ (Tcl_Interp *interp, char *trana, char *tranb, integer *isgn, integer *m, integer *n, doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, doublecomplex *c__, integer *ldc, doublereal *scale, integer *info) +{ + integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, + i__3, i__4; + doublereal d__1, d__2; + doublecomplex z__1, z__2, z__3, z__4; + double d_imag(doublecomplex *); + void d_cnjg(doublecomplex *, doublecomplex *); + integer j, k, l; + doublecomplex a11; + doublereal db; + doublecomplex x11; + doublereal da11; + 
doublecomplex vec; + doublereal dum[1], eps, sgn, smin; + doublecomplex suml, sumr; + doublereal scaloc; + doublereal bignum; + logical notrna, notrnb; + doublereal smlnum; @@ -48185,175 +50257,364 @@ static /* Subroutine */ int dlarfp_ (Tcl_Interp *interp, integer *n, doublereal - --x; - if (*n <= 0) { - *tau = 0.; -return TCL_OK; - } - i__1 = *n - 1; - xnorm = dnrm2_(&i__1, &x[1], incx); - if (xnorm == 0.) { - if (*alpha >= 0.) { - *tau = 0.; - } else { - *tau = 2.; - i__1 = *n - 1; - for (j = 1; j <= i__1; ++j) { - x[(j - 1) * *incx + 1] = 0.; - } - *alpha = -(*alpha); - } - } else { - d__1 = dlapy2_(alpha, &xnorm); - beta = d_sign(&d__1, alpha); - safmin = dlamch_("S") / dlamch_("E"); - knt = 0; - if (abs(beta) < safmin) { - rsafmn = 1. / safmin; -L10: - ++knt; - i__1 = *n - 1; - if (dscal_(interp, &i__1, &rsafmn, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } - beta *= rsafmn; - *alpha *= rsafmn; - if (abs(beta) < safmin) { - goto L10; - } - i__1 = *n - 1; - xnorm = dnrm2_(&i__1, &x[1], incx); - d__1 = dlapy2_(alpha, &xnorm); - beta = d_sign(&d__1, alpha); - } - *alpha += beta; - if (beta < 0.) { - beta = -beta; - *tau = -(*alpha) / beta; - } else { - *alpha = xnorm * (xnorm / *alpha); - *tau = *alpha / beta; - *alpha = -(*alpha); - } - i__1 = *n - 1; - d__1 = 1. / *alpha; - if (dscal_(interp, &i__1, &d__1, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + b_dim1 = *ldb; + b_offset = 1 + b_dim1; + b -= b_offset; + c_dim1 = *ldc; + c_offset = 1 + c_dim1; + c__ -= c_offset; + + notrna = lsame_(trana, "N"); + notrnb = lsame_(tranb, "N"); - i__1 = knt; - for (j = 1; j <= i__1; ++j) { - beta *= safmin; - } - *alpha = beta; + *info = 0; + if (! notrna && ! lsame_(trana, "C")) { + *info = -1; + } else if (! notrnb && ! 
lsame_(tranb, "C")) { + *info = -2; + } else if (*isgn != 1 && *isgn != -1) { + *info = -3; + } else if (*m < 0) { + *info = -4; + } else if (*n < 0) { + *info = -5; + } else if (*lda < max(1,*m)) { + *info = -7; + } else if (*ldb < max(1,*n)) { + *info = -9; + } else if (*ldc < max(1,*m)) { + *info = -11; } + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "ZTRSYL", &i__1); +return TCL_ERROR; return TCL_OK; + } -} /* dlarfp_ */ -static integer iladlc_ (integer *m, integer *n, doublereal *a, integer *lda) -{ - integer a_dim1, a_offset, ret_val, i__1; + *scale = 1.; + if (*m == 0 || *n == 0) { +return TCL_OK; + } - integer i__; + eps = dlamch_("P"); + smlnum = dlamch_("S"); + bignum = 1. / smlnum; + if (dlabad_(interp, &smlnum, &bignum)!=TCL_OK) { return TCL_ERROR; } + + smlnum = smlnum * (doublereal) (*m * *n) / eps; + bignum = 1. / smlnum; + d__1 = smlnum, d__2 = eps * zlange_("M", m, m, &a[a_offset], lda, dum), d__1 = max(d__1,d__2), d__2 = eps * zlange_("M", n, n, + &b[b_offset], ldb, dum); + smin = max(d__1,d__2); + sgn = (doublereal) (*isgn); + + if (notrna && notrnb) { + i__1 = *n; + for (l = 1; l <= i__1; ++l) { + for (k = *m; k >= 1; --k) { + + i__2 = *m - k; + i__3 = k + 1; + i__4 = k + 1; + zdotu_(&z__1, &i__2, &a[k + min(i__3, *m)* a_dim1], lda, &c__[ + min(i__4, *m)+ l * c_dim1], &ztrsyl_c__1); + suml.r = z__1.r, suml.i = z__1.i; + i__2 = l - 1; + zdotu_(&z__1, &i__2, &c__[k + c_dim1], ldc, &b[l * b_dim1 + 1] +, &ztrsyl_c__1); + sumr.r = z__1.r, sumr.i = z__1.i; + i__2 = k + l * c_dim1; + z__3.r = sgn * sumr.r, z__3.i = sgn * sumr.i; + z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i; + z__1.r = c__[i__2].r - z__2.r, z__1.i = c__[i__2].i - z__2.i; + vec.r = z__1.r, vec.i = z__1.i; + + scaloc = 1.; + i__2 = k + k * a_dim1; + i__3 = l + l * b_dim1; + z__2.r = sgn * b[i__3].r, z__2.i = sgn * b[i__3].i; + z__1.r = a[i__2].r + z__2.r, z__1.i = a[i__2].i + z__2.i; + a11.r = z__1.r, a11.i = z__1.i; + da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = 
d_imag(&a11), abs( + d__2)); + if (da11 <= smin) { + a11.r = smin, a11.i = 0.; + da11 = smin; + *info = 1; + } + db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs( + d__2)); + if (da11 < 1. && db > 1.) { + if (db > bignum * da11) { + scaloc = 1. / db; + } + } + z__3.r = scaloc, z__3.i = 0.; + z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r * + z__3.i + vec.i * z__3.r; + zladiv_(&z__1, &z__2, &a11); + x11.r = z__1.r, x11.i = z__1.i; + if (scaloc != 1.) { + i__2 = *n; + for (j = 1; j <= i__2; ++j) { + if (zdscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &ztrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + } + *scale *= scaloc; + } + i__2 = k + l * c_dim1; + c__[i__2].r = x11.r, c__[i__2].i = x11.i; + } + } + } else if (! notrna && notrnb) { - a_dim1 = *lda; - a_offset = 1 + a_dim1; - a -= a_offset; + i__1 = *n; + for (l = 1; l <= i__1; ++l) { + i__2 = *m; + for (k = 1; k <= i__2; ++k) { - if (*n == 0) { - ret_val = *n; - } else if (a[*n * a_dim1 + 1] != 0. || a[*m + *n * a_dim1] != 0.) { - ret_val = *n; - } else { - for (ret_val = *n; ret_val >= 1; --ret_val) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - if (a[i__ + ret_val * a_dim1] != 0.) 
{ - return ret_val; + i__3 = k - 1; + zdotc_(&z__1, &i__3, &a[k * a_dim1 + 1], &ztrsyl_c__1, &c__[l * + c_dim1 + 1], &ztrsyl_c__1); + suml.r = z__1.r, suml.i = z__1.i; + i__3 = l - 1; + zdotu_(&z__1, &i__3, &c__[k + c_dim1], ldc, &b[l * b_dim1 + 1] +, &ztrsyl_c__1); + sumr.r = z__1.r, sumr.i = z__1.i; + i__3 = k + l * c_dim1; + z__3.r = sgn * sumr.r, z__3.i = sgn * sumr.i; + z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i; + z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i; + vec.r = z__1.r, vec.i = z__1.i; + + scaloc = 1.; + d_cnjg(&z__2, &a[k + k * a_dim1]); + i__3 = l + l * b_dim1; + z__3.r = sgn * b[i__3].r, z__3.i = sgn * b[i__3].i; + z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; + a11.r = z__1.r, a11.i = z__1.i; + da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs( + d__2)); + if (da11 <= smin) { + a11.r = smin, a11.i = 0.; + da11 = smin; + *info = 1; } + db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs( + d__2)); + if (da11 < 1. && db > 1.) { + if (db > bignum * da11) { + scaloc = 1. / db; + } + } + + z__3.r = scaloc, z__3.i = 0.; + z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r * + z__3.i + vec.i * z__3.r; + zladiv_(&z__1, &z__2, &a11); + x11.r = z__1.r, x11.i = z__1.i; + + if (scaloc != 1.) { + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + if (zdscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &ztrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + *scale *= scaloc; + } + i__3 = k + l * c_dim1; + c__[i__3].r = x11.r, c__[i__3].i = x11.i; + } } - } - return ret_val; -} /* iladlc_ */ -static integer iladlr_ (integer *m, integer *n, doublereal *a, integer *lda) -{ - integer a_dim1, a_offset, ret_val, i__1; - integer i__, j; + } else if (! notrna && ! 
notrnb) { + for (l = *n; l >= 1; --l) { + i__1 = *m; + for (k = 1; k <= i__1; ++k) { + i__2 = k - 1; + zdotc_(&z__1, &i__2, &a[k * a_dim1 + 1], &ztrsyl_c__1, &c__[l * + c_dim1 + 1], &ztrsyl_c__1); + suml.r = z__1.r, suml.i = z__1.i; + i__2 = *n - l; + i__3 = l + 1; + i__4 = l + 1; + zdotc_(&z__1, &i__2, &c__[k + min(i__3, *n)* c_dim1], ldc, &b[ + l + min(i__4, *n)* b_dim1], ldb); + sumr.r = z__1.r, sumr.i = z__1.i; + i__2 = k + l * c_dim1; + d_cnjg(&z__4, &sumr); + z__3.r = sgn * z__4.r, z__3.i = sgn * z__4.i; + z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i; + z__1.r = c__[i__2].r - z__2.r, z__1.i = c__[i__2].i - z__2.i; + vec.r = z__1.r, vec.i = z__1.i; + + scaloc = 1.; + i__2 = k + k * a_dim1; + i__3 = l + l * b_dim1; + z__3.r = sgn * b[i__3].r, z__3.i = sgn * b[i__3].i; + z__2.r = a[i__2].r + z__3.r, z__2.i = a[i__2].i + z__3.i; + d_cnjg(&z__1, &z__2); + a11.r = z__1.r, a11.i = z__1.i; + da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs( + d__2)); + if (da11 <= smin) { + a11.r = smin, a11.i = 0.; + da11 = smin; + *info = 1; + } + db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs( + d__2)); + if (da11 < 1. && db > 1.) { + if (db > bignum * da11) { + scaloc = 1. / db; + } + } + z__3.r = scaloc, z__3.i = 0.; + z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r * + z__3.i + vec.i * z__3.r; + zladiv_(&z__1, &z__2, &a11); + x11.r = z__1.r, x11.i = z__1.i; + if (scaloc != 1.) { + i__2 = *n; + for (j = 1; j <= i__2; ++j) { + if (zdscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &ztrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + } + *scale *= scaloc; + } + i__2 = k + l * c_dim1; + c__[i__2].r = x11.r, c__[i__2].i = x11.i; + } + } + } else if (notrna && ! notrnb) { - a_dim1 = *lda; - a_offset = 1 + a_dim1; - a -= a_offset; - if (*m == 0) { - ret_val = *m; - } else if (a[*m + a_dim1] != 0. || a[*m + *n * a_dim1] != 0.) 
{ - ret_val = *m; - } else { - ret_val = 0; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - for (i__ = *m; i__ >= 1; --i__) { - if (a[i__ + j * a_dim1] != 0.) { - break; + for (l = *n; l >= 1; --l) { + for (k = *m; k >= 1; --k) { + + i__1 = *m - k; + i__2 = k + 1; + i__3 = k + 1; + zdotu_(&z__1, &i__1, &a[k + min(i__2, *m)* a_dim1], lda, &c__[ + min(i__3, *m)+ l * c_dim1], &ztrsyl_c__1); + suml.r = z__1.r, suml.i = z__1.i; + i__1 = *n - l; + i__2 = l + 1; + i__3 = l + 1; + zdotc_(&z__1, &i__1, &c__[k + min(i__2, *n)* c_dim1], ldc, &b[ + l + min(i__3, *n)* b_dim1], ldb); + sumr.r = z__1.r, sumr.i = z__1.i; + i__1 = k + l * c_dim1; + d_cnjg(&z__4, &sumr); + z__3.r = sgn * z__4.r, z__3.i = sgn * z__4.i; + z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i; + z__1.r = c__[i__1].r - z__2.r, z__1.i = c__[i__1].i - z__2.i; + vec.r = z__1.r, vec.i = z__1.i; + + scaloc = 1.; + i__1 = k + k * a_dim1; + d_cnjg(&z__3, &b[l + l * b_dim1]); + z__2.r = sgn * z__3.r, z__2.i = sgn * z__3.i; + z__1.r = a[i__1].r + z__2.r, z__1.i = a[i__1].i + z__2.i; + a11.r = z__1.r, a11.i = z__1.i; + da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs( + d__2)); + if (da11 <= smin) { + a11.r = smin, a11.i = 0.; + da11 = smin; + *info = 1; + } + db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs( + d__2)); + if (da11 < 1. && db > 1.) { + if (db > bignum * da11) { + scaloc = 1. / db; + } + } + + z__3.r = scaloc, z__3.i = 0.; + z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r * + z__3.i + vec.i * z__3.r; + zladiv_(&z__1, &z__2, &a11); + x11.r = z__1.r, x11.i = z__1.i; + + if (scaloc != 1.) 
{ + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (zdscal_(interp, m, &scaloc, &c__[j * c_dim1 + 1], &ztrsyl_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + *scale *= scaloc; } + i__1 = k + l * c_dim1; + c__[i__1].r = x11.r, c__[i__1].i = x11.i; + } - ret_val = max(ret_val,i__); } + } - return ret_val; -} /* iladlr_ */ -static /* Subroutine */ int dtrmv_ (Tcl_Interp *interp, char *uplo, char *trans, char *diag, integer *n, doublereal *a, integer *lda, doublereal *x, integer *incx) -{ - integer a_dim1, a_offset, i__1, i__2; - integer i__, j, ix, jx, kx, info; - doublereal temp; - logical nounit; +return TCL_OK; +} /* ztrsyl_ */ +static /* Subroutine */ int dlasd1_ (Tcl_Interp *interp, integer *nl, integer *nr, integer *sqre, doublereal *d__, doublereal *alpha, doublereal *beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, integer *idxq, integer * iwork, doublereal *work, integer *info) +{ + integer u_dim1, u_offset, vt_dim1, vt_offset, i__1; + doublereal d__1, d__2; + integer i__, k, m, n, n1, n2, iq, iz, iu2, ldq, idx, ldu2, ivt2, idxc, + idxp, ldvt2; + integer isigma; + doublereal orgnrm; + integer coltyp; @@ -48382,194 +50643,183 @@ static /* Subroutine */ int dtrmv_ (Tcl_Interp *interp, char *uplo, char *trans, - a_dim1 = *lda; - a_offset = 1 + a_dim1; - a -= a_offset; - --x; - info = 0; - if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { - info = 1; - } else if (! lsame_(trans, "N") && ! lsame_(trans, - "T") && ! lsame_(trans, "C")) { - info = 2; - } else if (! lsame_(diag, "U") && ! 
lsame_(diag, - "N")) { - info = 3; - } else if (*n < 0) { - info = 4; - } else if (*lda < max(1,*n)) { - info = 6; - } else if (*incx == 0) { - info = 8; - } - if (info != 0) { - vectcl_xerbla(interp, "DTRMV ", &info); -return TCL_ERROR; -return TCL_OK; - } - if (*n == 0) { -return TCL_OK; - } - nounit = lsame_(diag, "N"); - if (*incx <= 0) { - kx = 1 - (*n - 1) * *incx; - } else if (*incx != 1) { - kx = 1; - } - if (lsame_(trans, "N")) { + --d__; + u_dim1 = *ldu; + u_offset = 1 + u_dim1; + u -= u_offset; + vt_dim1 = *ldvt; + vt_offset = 1 + vt_dim1; + vt -= vt_offset; + --idxq; + --iwork; + --work; + *info = 0; - if (lsame_(uplo, "U")) { - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[j] != 0.) { - temp = x[j]; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - x[i__] += temp * a[i__ + j * a_dim1]; - } - if (nounit) { - x[j] *= a[j + j * a_dim1]; - } - } - } - } else { - jx = kx; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[jx] != 0.) { - temp = x[jx]; - ix = kx; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - x[ix] += temp * a[i__ + j * a_dim1]; - ix += *incx; - } - if (nounit) { - x[jx] *= a[j + j * a_dim1]; - } - } - jx += *incx; - } - } - } else { - if (*incx == 1) { - for (j = *n; j >= 1; --j) { - if (x[j] != 0.) { - temp = x[j]; - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - x[i__] += temp * a[i__ + j * a_dim1]; - } - if (nounit) { - x[j] *= a[j + j * a_dim1]; - } - } - } - } else { - kx += (*n - 1) * *incx; - jx = kx; - for (j = *n; j >= 1; --j) { - if (x[jx] != 0.) 
{ - temp = x[jx]; - ix = kx; - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - x[ix] += temp * a[i__ + j * a_dim1]; - ix -= *incx; - } - if (nounit) { - x[jx] *= a[j + j * a_dim1]; - } - } - jx -= *incx; - } - } + if (*nl < 1) { + *info = -1; + } else if (*nr < 1) { + *info = -2; + } else if (*sqre < 0 || *sqre > 1) { + *info = -3; + } + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "DLASD1", &i__1); +return TCL_ERROR; + +return TCL_OK; + } + + n = *nl + *nr + 1; + m = n + *sqre; + + + ldu2 = n; + ldvt2 = m; + + iz = 1; + isigma = iz + m; + iu2 = isigma + n; + ivt2 = iu2 + ldu2 * n; + iq = ivt2 + ldvt2 * m; + + idx = 1; + idxc = idx + n; + coltyp = idxc + n; + idxp = coltyp + n; + + + d__1 = abs(*alpha), d__2 = abs(*beta); + orgnrm = max(d__1,d__2); + d__[*nl + 1] = 0.; + i__1 = n; + for (i__ = 1; i__ <= i__1; ++i__) { + if ((d__1 = d__[i__], abs(d__1)) > orgnrm) { + orgnrm = (d__1 = d__[i__], abs(d__1)); } - } else { + } + if (dlascl_(interp, "G", &dlasd1_c__0, &dlasd1_c__0, &orgnrm, &dlasd1_c_b7, &n, &dlasd1_c__1, &d__[1], &n, info)!=TCL_OK) { return TCL_ERROR; } + *alpha /= orgnrm; + *beta /= orgnrm; - if (lsame_(uplo, "U")) { - if (*incx == 1) { - for (j = *n; j >= 1; --j) { - temp = x[j]; - if (nounit) { - temp *= a[j + j * a_dim1]; - } - for (i__ = j - 1; i__ >= 1; --i__) { - temp += a[i__ + j * a_dim1] * x[i__]; - } - x[j] = temp; - } - } else { - jx = kx + (*n - 1) * *incx; - for (j = *n; j >= 1; --j) { - temp = x[jx]; - ix = jx; - if (nounit) { - temp *= a[j + j * a_dim1]; - } - for (i__ = j - 1; i__ >= 1; --i__) { - ix -= *incx; - temp += a[i__ + j * a_dim1] * x[ix]; - } - x[jx] = temp; - jx -= *incx; - } - } - } else { - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp = x[j]; - if (nounit) { - temp *= a[j + j * a_dim1]; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - temp += a[i__ + j * a_dim1] * x[i__]; - } - x[j] = temp; - } - } else { - jx = kx; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - 
temp = x[jx]; - ix = jx; - if (nounit) { - temp *= a[j + j * a_dim1]; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - temp += a[i__ + j * a_dim1] * x[ix]; - } - x[jx] = temp; - jx += *incx; - } - } + + if (dlasd2_(interp, nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset], ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, & + work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], & + idxq[1], &iwork[coltyp], info)!=TCL_OK) { return TCL_ERROR; } + + + + + ldq = k; + if (dlasd3_(interp, nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[ u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[ + ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info)!=TCL_OK) { return TCL_ERROR; } + + + if (*info != 0) { +return TCL_OK; + } + + + if (dlascl_(interp, "G", &dlasd1_c__0, &dlasd1_c__0, &dlasd1_c_b7, &orgnrm, &n, &dlasd1_c__1, &d__[1], &n, info)!=TCL_OK) { return TCL_ERROR; } + + + + n1 = k; + n2 = n - k; + if (dlamrg_(interp, &n1, &n2, &d__[1], &dlasd1_c__1, &dlasd1_c_n1, &idxq[1])!=TCL_OK) { return TCL_ERROR; } + + +return TCL_OK; + + +} /* dlasd1_ */ +static /* Subroutine */ int dlasdt_ (Tcl_Interp *interp, integer *n, integer *lvl, integer *nd, integer * inode, integer *ndiml, integer *ndimr, integer *msub) +{ + integer i__1, i__2; + + double log(doublereal); + + integer i__, il, ir, maxn; + doublereal temp; + integer nlvl, llst, ncrnt; + + + + + + + + + + + + + + + + + + + + --ndimr; + --ndiml; + --inode; + + maxn = max(1,*n); + temp = log((doublereal) maxn / (doublereal) (*msub + 1)) / log(2.); + *lvl = (integer) temp + 1; + + i__ = *n / 2; + inode[1] = i__ + 1; + ndiml[1] = i__; + ndimr[1] = *n - i__ - 1; + il = 0; + ir = 1; + llst = 1; + i__1 = *lvl - 1; + for (nlvl = 1; nlvl <= i__1; ++nlvl) { + + + i__2 = llst - 1; + for (i__ = 0; i__ <= i__2; ++i__) { + il += 2; + ir += 2; + ncrnt = llst + i__; + ndiml[il] = ndiml[ncrnt] / 2; + ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1; + inode[il] 
= inode[ncrnt] - ndimr[il] - 1; + ndiml[ir] = ndimr[ncrnt] / 2; + ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1; + inode[ir] = inode[ncrnt] + ndiml[ir] + 1; } + llst <<= 1; } + *nd = (llst << 1) - 1; return TCL_OK; -} /* dtrmv_ */ -static logical dlaisnan_ (doublereal *din1, doublereal *din2) +} /* dlasdt_ */ +static /* Subroutine */ int dlasd6_ (Tcl_Interp *interp, integer *icompq, integer *nl, integer *nr, integer *sqre, doublereal *d__, doublereal *vf, doublereal *vl, doublereal *alpha, doublereal *beta, integer *idxq, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *poles, doublereal *difl, doublereal * difr, doublereal *z__, integer *k, doublereal *c__, doublereal *s, doublereal *work, integer *iwork, integer *info) { - logical ret_val; + integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, + poles_dim1, poles_offset, i__1; + doublereal d__1, d__2; + integer i__, m, n, n1, n2, iw, idx, idxc, idxp, ivfw, ivlw; + integer isigma; + doublereal orgnrm; @@ -48580,19 +50830,159 @@ static logical dlaisnan_ (doublereal *din1, doublereal *din2) - ret_val = *din1 != *din2; - return ret_val; -} /* dlaisnan_ */ -static /* Subroutine */ int zlarf_ (Tcl_Interp *interp, char *side, integer *m, integer *n, doublecomplex *v, integer *incv, doublecomplex *tau, doublecomplex *c__, integer * ldc, doublecomplex *work) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --d__; + --vf; + --vl; + --idxq; + --perm; + givcol_dim1 = *ldgcol; + givcol_offset = 1 + givcol_dim1; + givcol -= givcol_offset; + poles_dim1 = *ldgnum; + poles_offset = 1 + poles_dim1; + poles -= poles_offset; + givnum_dim1 = *ldgnum; + givnum_offset = 1 + givnum_dim1; + givnum -= givnum_offset; + --difl; + --difr; + --z__; + --work; + --iwork; + + *info = 0; + n = *nl + *nr + 1; + m = n + *sqre; + + if (*icompq < 0 || *icompq > 1) { + *info = -1; + } else if (*nl < 1) { + *info = -2; + } else if (*nr < 1) { + 
*info = -3; + } else if (*sqre < 0 || *sqre > 1) { + *info = -4; + } else if (*ldgcol < n) { + *info = -14; + } else if (*ldgnum < n) { + *info = -16; + } + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "DLASD6", &i__1); +return TCL_ERROR; + +return TCL_OK; + } + + + isigma = 1; + iw = isigma + n; + ivfw = iw + m; + ivlw = ivfw + m; + + idx = 1; + idxc = idx + n; + idxp = idxc + n; + + + d__1 = abs(*alpha), d__2 = abs(*beta); + orgnrm = max(d__1,d__2); + d__[*nl + 1] = 0.; + i__1 = n; + for (i__ = 1; i__ <= i__1; ++i__) { + if ((d__1 = d__[i__], abs(d__1)) > orgnrm) { + orgnrm = (d__1 = d__[i__], abs(d__1)); + } + } + if (dlascl_(interp, "G", &dlasd6_c__0, &dlasd6_c__0, &orgnrm, &dlasd6_c_b7, &n, &dlasd6_c__1, &d__[1], &n, info)!=TCL_OK) { return TCL_ERROR; } + + *alpha /= orgnrm; + *beta /= orgnrm; + + + if (dlasd7_(interp, icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], & work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], & + iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[ + givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s, + info)!=TCL_OK) { return TCL_ERROR; } + + + + + if (dlasd8_(interp, icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1], ldgnum, &work[isigma], &work[iw], info)!=TCL_OK) { return TCL_ERROR; } + + + + + if (*icompq == 1) { + if (dcopy_(interp, k, &d__[1], &dlasd6_c__1, &poles[poles_dim1 + 1], &dlasd6_c__1)!=TCL_OK) { return TCL_ERROR; } + + if (dcopy_(interp, k, &work[isigma], &dlasd6_c__1, &poles[(poles_dim1 << 1) + 1], &dlasd6_c__1)!=TCL_OK) { return TCL_ERROR; } + + } + + + if (dlascl_(interp, "G", &dlasd6_c__0, &dlasd6_c__0, &dlasd6_c_b7, &orgnrm, &n, &dlasd6_c__1, &d__[1], &n, info)!=TCL_OK) { return TCL_ERROR; } + + + + n1 = *k; + n2 = n - *k; + if (dlamrg_(interp, &n1, &n2, &d__[1], &dlasd6_c__1, &dlasd6_c_n1, &idxq[1])!=TCL_OK) { return TCL_ERROR; } + + +return TCL_OK; + + +} /* dlasd6_ */ +static /* Subroutine */ int dlarf_ (Tcl_Interp *interp, char *side, 
integer *m, integer *n, doublereal *v, integer *incv, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work) { - integer c_dim1, c_offset, i__1; - doublecomplex z__1; + integer c_dim1, c_offset; + doublereal d__1; integer i__; logical applyleft; - integer lastc; - integer lastv; - + integer lastc, lastv; @@ -48623,7 +51013,7 @@ static /* Subroutine */ int zlarf_ (Tcl_Interp *interp, char *side, integer *m, applyleft = lsame_(side, "L"); lastv = 0; lastc = 0; - if (tau->r != 0. || tau->i != 0.) { + if (*tau != 0.) { if (applyleft) { lastv = *m; } else { @@ -48634,17 +51024,14 @@ static /* Subroutine */ int zlarf_ (Tcl_Interp *interp, char *side, integer *m, } else { i__ = 1; } - for(;;) { /* while(complicated condition) */ - i__1 = i__; - if (!(lastv > 0 && (v[i__1].r == 0. && v[i__1].i == 0.))) - break; + while(lastv > 0 && v[i__] == 0.) { --lastv; i__ -= *incv; } if (applyleft) { - lastc = ilazlc_(&lastv, n, &c__[c_offset], ldc); + lastc = iladlc_(&lastv, n, &c__[c_offset], ldc); } else { - lastc = ilazlr_(m, &lastv, &c__[c_offset], ldc); + lastc = iladlr_(m, &lastv, &c__[c_offset], ldc); } } if (applyleft) { @@ -48653,13 +51040,13 @@ static /* Subroutine */ int zlarf_ (Tcl_Interp *interp, char *side, integer *m, if (lastv > 0) { - if (zgemv_(interp, "Conjugate transpose", &lastv, &lastc, &zlarf_c_b1, &c__[ c_offset], ldc, &v[1], incv, &zlarf_c_b2, &work[1], &zlarf_c__1)!=TCL_OK) { return TCL_ERROR; } + if (dgemv_(interp, "Transpose", &lastv, &lastc, &dlarf_c_b4, &c__[c_offset], ldc, & v[1], incv, &dlarf_c_b5, &work[1], &dlarf_c__1)!=TCL_OK) { return TCL_ERROR; } - z__1.r = -tau->r, z__1.i = -tau->i; - if (zgerc_(interp, &lastv, &lastc, &z__1, &v[1], incv, &work[1], &zlarf_c__1, &c__[ c_offset], ldc)!=TCL_OK) { return TCL_ERROR; } + d__1 = -(*tau); + if (dger_(interp, &lastv, &lastc, &d__1, &v[1], incv, &work[1], &dlarf_c__1, &c__[ c_offset], ldc)!=TCL_OK) { return TCL_ERROR; } } @@ -48669,13 +51056,13 @@ static /* Subroutine */ int zlarf_ (Tcl_Interp 
*interp, char *side, integer *m, if (lastv > 0) { - if (zgemv_(interp, "No transpose", &lastc, &lastv, &zlarf_c_b1, &c__[c_offset], ldc, &v[1], incv, &zlarf_c_b2, &work[1], &zlarf_c__1)!=TCL_OK) { return TCL_ERROR; } + if (dgemv_(interp, "No transpose", &lastc, &lastv, &dlarf_c_b4, &c__[c_offset], ldc, &v[1], incv, &dlarf_c_b5, &work[1], &dlarf_c__1)!=TCL_OK) { return TCL_ERROR; } - z__1.r = -tau->r, z__1.i = -tau->i; - if (zgerc_(interp, &lastc, &lastv, &z__1, &work[1], &zlarf_c__1, &v[1], incv, &c__[ c_offset], ldc)!=TCL_OK) { return TCL_ERROR; } + d__1 = -(*tau); + if (dger_(interp, &lastc, &lastv, &d__1, &work[1], &dlarf_c__1, &v[1], incv, &c__[ c_offset], ldc)!=TCL_OK) { return TCL_ERROR; } } @@ -48683,20 +51070,18 @@ static /* Subroutine */ int zlarf_ (Tcl_Interp *interp, char *side, integer *m, return TCL_OK; -} /* zlarf_ */ -static /* Subroutine */ int zlarfg_ (Tcl_Interp *interp, integer *n, doublecomplex *alpha, doublecomplex * x, integer *incx, doublecomplex *tau) +} /* dlarf_ */ +static /* Subroutine */ int dlarfg_ (Tcl_Interp *interp, integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *tau) { integer i__1; - doublereal d__1, d__2; - doublecomplex z__1, z__2; + doublereal d__1; - double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *); + double d_sign(doublereal *, doublereal *); integer j, knt; - doublereal beta, alphi, alphr; + doublereal beta; doublereal xnorm; - doublereal safmin; - doublereal rsafmn; + doublereal safmin, rsafmn; @@ -48719,61 +51104,50 @@ static /* Subroutine */ int zlarfg_ (Tcl_Interp *interp, integer *n, doublecompl --x; - if (*n <= 0) { - tau->r = 0., tau->i = 0.; + if (*n <= 1) { + *tau = 0.; return TCL_OK; } i__1 = *n - 1; - xnorm = dznrm2_(&i__1, &x[1], incx); - alphr = alpha->r; - alphi = d_imag(alpha); + xnorm = dnrm2_(&i__1, &x[1], incx); - if (xnorm == 0. && alphi == 0.) { + if (xnorm == 0.) 
{ - tau->r = 0., tau->i = 0.; + *tau = 0.; } else { - d__1 = dlapy3_(&alphr, &alphi, &xnorm); - beta = -d_sign(&d__1, &alphr); + d__1 = dlapy2_(alpha, &xnorm); + beta = -d_sign(&d__1, alpha); safmin = dlamch_("S") / dlamch_("E"); - rsafmn = 1. / safmin; - knt = 0; if (abs(beta) < safmin) { + rsafmn = 1. / safmin; L10: ++knt; i__1 = *n - 1; - if (zdscal_(interp, &i__1, &rsafmn, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } + if (dscal_(interp, &i__1, &rsafmn, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } beta *= rsafmn; - alphi *= rsafmn; - alphr *= rsafmn; + *alpha *= rsafmn; if (abs(beta) < safmin) { goto L10; } i__1 = *n - 1; - xnorm = dznrm2_(&i__1, &x[1], incx); - z__1.r = alphr, z__1.i = alphi; - alpha->r = z__1.r, alpha->i = z__1.i; - d__1 = dlapy3_(&alphr, &alphi, &xnorm); - beta = -d_sign(&d__1, &alphr); + xnorm = dnrm2_(&i__1, &x[1], incx); + d__1 = dlapy2_(alpha, &xnorm); + beta = -d_sign(&d__1, alpha); } - d__1 = (beta - alphr) / beta; - d__2 = -alphi / beta; - z__1.r = d__1, z__1.i = d__2; - tau->r = z__1.r, tau->i = z__1.i; - z__2.r = alpha->r - beta, z__2.i = alpha->i; - zladiv_(&z__1, &zlarfg_c_b5, &z__2); - alpha->r = z__1.r, alpha->i = z__1.i; + *tau = (beta - *alpha) / beta; i__1 = *n - 1; - if (zscal_(interp, &i__1, alpha, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } + d__1 = 1. 
/ (*alpha - beta); + if (dscal_(interp, &i__1, &d__1, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } @@ -48781,72 +51155,24 @@ return TCL_OK; for (j = 1; j <= i__1; ++j) { beta *= safmin; } - alpha->r = beta, alpha->i = 0.; + *alpha = beta; } return TCL_OK; -} /* zlarfg_ */ -static /* Subroutine */ int zlacgv_ (Tcl_Interp *interp, integer *n, doublecomplex *x, integer *incx) -{ - integer i__1, i__2; - doublecomplex z__1; - - void d_cnjg(doublecomplex *, doublecomplex *); - - integer i__, ioff; - - - - - - - - - - - - - --x; - - if (*incx == 1) { - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - d_cnjg(&z__1, &x[i__]); - x[i__2].r = z__1.r, x[i__2].i = z__1.i; - } - } else { - ioff = 1; - if (*incx < 0) { - ioff = 1 - (*n - 1) * *incx; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = ioff; - d_cnjg(&z__1, &x[ioff]); - x[i__2].r = z__1.r, x[i__2].i = z__1.i; - ioff += *incx; - } - } -return TCL_OK; - - -} /* zlacgv_ */ -static /* Subroutine */ int zlarfp_ (Tcl_Interp *interp, integer *n, doublecomplex *alpha, doublecomplex * x, integer *incx, doublecomplex *tau) +} /* dlarfg_ */ +static /* Subroutine */ int dlarfp_ (Tcl_Interp *interp, integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *tau) { - integer i__1, i__2; - doublereal d__1, d__2; - doublecomplex z__1, z__2; + integer i__1; + doublereal d__1; - double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *); + double d_sign(doublereal *, doublereal *); integer j, knt; - doublereal beta, alphi, alphr; + doublereal beta; doublereal xnorm; - doublereal safmin; - doublereal rsafmn; + doublereal safmin, rsafmn; @@ -48870,98 +51196,66 @@ static /* Subroutine */ int zlarfp_ (Tcl_Interp *interp, integer *n, doublecompl --x; if (*n <= 0) { - tau->r = 0., tau->i = 0.; + *tau = 0.; return TCL_OK; } i__1 = *n - 1; - xnorm = dznrm2_(&i__1, &x[1], incx); - alphr = alpha->r; - alphi = d_imag(alpha); + xnorm = dnrm2_(&i__1, &x[1], incx); - if (xnorm == 0. && alphi == 0.) 
{ + if (xnorm == 0.) { - if (alphi == 0.) { - if (alphr >= 0.) { - tau->r = 0., tau->i = 0.; - } else { - tau->r = 2., tau->i = 0.; - i__1 = *n - 1; - for (j = 1; j <= i__1; ++j) { - i__2 = (j - 1) * *incx + 1; - x[i__2].r = 0., x[i__2].i = 0.; - } - z__1.r = -alpha->r, z__1.i = -alpha->i; - alpha->r = z__1.r, alpha->i = z__1.i; - } + if (*alpha >= 0.) { + *tau = 0.; } else { - xnorm = dlapy2_(&alphr, &alphi); - d__1 = 1. - alphr / xnorm; - d__2 = -alphi / xnorm; - z__1.r = d__1, z__1.i = d__2; - tau->r = z__1.r, tau->i = z__1.i; + *tau = 2.; i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { - i__2 = (j - 1) * *incx + 1; - x[i__2].r = 0., x[i__2].i = 0.; + x[(j - 1) * *incx + 1] = 0.; } - alpha->r = xnorm, alpha->i = 0.; + *alpha = -(*alpha); } } else { - d__1 = dlapy3_(&alphr, &alphi, &xnorm); - beta = d_sign(&d__1, &alphr); + d__1 = dlapy2_(alpha, &xnorm); + beta = d_sign(&d__1, alpha); safmin = dlamch_("S") / dlamch_("E"); - rsafmn = 1. / safmin; - knt = 0; if (abs(beta) < safmin) { + rsafmn = 1. / safmin; L10: ++knt; i__1 = *n - 1; - if (zdscal_(interp, &i__1, &rsafmn, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } + if (dscal_(interp, &i__1, &rsafmn, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } beta *= rsafmn; - alphi *= rsafmn; - alphr *= rsafmn; + *alpha *= rsafmn; if (abs(beta) < safmin) { goto L10; } i__1 = *n - 1; - xnorm = dznrm2_(&i__1, &x[1], incx); - z__1.r = alphr, z__1.i = alphi; - alpha->r = z__1.r, alpha->i = z__1.i; - d__1 = dlapy3_(&alphr, &alphi, &xnorm); - beta = d_sign(&d__1, &alphr); + xnorm = dnrm2_(&i__1, &x[1], incx); + d__1 = dlapy2_(alpha, &xnorm); + beta = d_sign(&d__1, alpha); } - z__1.r = alpha->r + beta, z__1.i = alpha->i; - alpha->r = z__1.r, alpha->i = z__1.i; + *alpha += beta; if (beta < 0.) 
{ beta = -beta; - z__2.r = -alpha->r, z__2.i = -alpha->i; - z__1.r = z__2.r / beta, z__1.i = z__2.i / beta; - tau->r = z__1.r, tau->i = z__1.i; + *tau = -(*alpha) / beta; } else { - alphr = alphi * (alphi / alpha->r); - alphr += xnorm * (xnorm / alpha->r); - d__1 = alphr / beta; - d__2 = -alphi / beta; - z__1.r = d__1, z__1.i = d__2; - tau->r = z__1.r, tau->i = z__1.i; - d__1 = -alphr; - z__1.r = d__1, z__1.i = alphi; - alpha->r = z__1.r, alpha->i = z__1.i; + *alpha = xnorm * (xnorm / *alpha); + *tau = *alpha / beta; + *alpha = -(*alpha); } - zladiv_(&z__1, &zlarfp_c_b5, alpha); - alpha->r = z__1.r, alpha->i = z__1.i; i__1 = *n - 1; - if (zscal_(interp, &i__1, alpha, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } + d__1 = 1. / *alpha; + if (dscal_(interp, &i__1, &d__1, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } @@ -48969,16 +51263,16 @@ return TCL_OK; for (j = 1; j <= i__1; ++j) { beta *= safmin; } - alpha->r = beta, alpha->i = 0.; + *alpha = beta; } return TCL_OK; -} /* zlarfp_ */ -static integer ilazlc_ (integer *m, integer *n, doublecomplex *a, integer *lda) +} /* dlarfp_ */ +static integer iladlc_ (integer *m, integer *n, doublereal *a, integer *lda) { - integer a_dim1, a_offset, ret_val, i__1, i__2; + integer a_dim1, a_offset, ret_val, i__1; integer i__; @@ -49002,29 +51296,23 @@ static integer ilazlc_ (integer *m, integer *n, doublecomplex *a, integer *lda) if (*n == 0) { ret_val = *n; - } else /* if(complicated condition) */ { - i__1 = *n * a_dim1 + 1; - i__2 = *m + *n * a_dim1; - if (a[i__1].r != 0. || a[i__1].i != 0. || (a[i__2].r != 0. || a[i__2] - .i != 0.)) { - ret_val = *n; - } else { - for (ret_val = *n; ret_val >= 1; --ret_val) { - i__1 = *m; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__ + ret_val * a_dim1; - if (a[i__2].r != 0. || a[i__2].i != 0.) { - return ret_val; - } + } else if (a[*n * a_dim1 + 1] != 0. || a[*m + *n * a_dim1] != 0.) 
{ + ret_val = *n; + } else { + for (ret_val = *n; ret_val >= 1; --ret_val) { + i__1 = *m; + for (i__ = 1; i__ <= i__1; ++i__) { + if (a[i__ + ret_val * a_dim1] != 0.) { + return ret_val; } } } } return ret_val; -} /* ilazlc_ */ -static integer ilazlr_ (integer *m, integer *n, doublecomplex *a, integer *lda) +} /* iladlc_ */ +static integer iladlr_ (integer *m, integer *n, doublereal *a, integer *lda) { - integer a_dim1, a_offset, ret_val, i__1, i__2; + integer a_dim1, a_offset, ret_val, i__1; integer i__, j; @@ -49048,38 +51336,29 @@ static integer ilazlr_ (integer *m, integer *n, doublecomplex *a, integer *lda) if (*m == 0) { ret_val = *m; - } else /* if(complicated condition) */ { - i__1 = *m + a_dim1; - i__2 = *m + *n * a_dim1; - if (a[i__1].r != 0. || a[i__1].i != 0. || (a[i__2].r != 0. || a[i__2] - .i != 0.)) { - ret_val = *m; - } else { - ret_val = 0; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - for (i__ = *m; i__ >= 1; --i__) { - i__2 = i__ + j * a_dim1; - if (a[i__2].r != 0. || a[i__2].i != 0.) { - break; - } + } else if (a[*m + a_dim1] != 0. || a[*m + *n * a_dim1] != 0.) { + ret_val = *m; + } else { + ret_val = 0; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + for (i__ = *m; i__ >= 1; --i__) { + if (a[i__ + j * a_dim1] != 0.) 
{ + break; } - ret_val = max(ret_val,i__); } + ret_val = max(ret_val,i__); } } return ret_val; -} /* ilazlr_ */ -static /* Subroutine */ int ztrmv_ (Tcl_Interp *interp, char *uplo, char *trans, char *diag, integer *n, doublecomplex *a, integer *lda, doublecomplex *x, integer *incx) +} /* iladlr_ */ +static /* Subroutine */ int dtrmv_ (Tcl_Interp *interp, char *uplo, char *trans, char *diag, integer *n, doublereal *a, integer *lda, doublereal *x, integer *incx) { - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1, z__2, z__3; - - void d_cnjg(doublecomplex *, doublecomplex *); + integer a_dim1, a_offset, i__1, i__2; integer i__, j, ix, jx, kx, info; - doublecomplex temp; - logical noconj, nounit; + doublereal temp; + logical nounit; @@ -49133,7 +51412,7 @@ static /* Subroutine */ int ztrmv_ (Tcl_Interp *interp, char *uplo, char *trans, info = 8; } if (info != 0) { - vectcl_xerbla(interp, "ZTRMV ", &info); + vectcl_xerbla(interp, "DTRMV ", &info); return TCL_ERROR; return TCL_OK; @@ -49144,7 +51423,6 @@ return TCL_OK; return TCL_OK; } - noconj = lsame_(trans, "T"); nounit = lsame_(diag, "N"); @@ -49162,30 +51440,14 @@ return TCL_OK; if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { - i__2 = j; - if (x[i__2].r != 0. || x[i__2].i != 0.) { - i__2 = j; - temp.r = x[i__2].r, temp.i = x[i__2].i; + if (x[j] != 0.) 
{ + temp = x[j]; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__; - i__4 = i__; - i__5 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, - z__2.i = temp.r * a[i__5].i + temp.i * a[ - i__5].r; - z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i + - z__2.i; - x[i__3].r = z__1.r, x[i__3].i = z__1.i; + x[i__] += temp * a[i__ + j * a_dim1]; } if (nounit) { - i__2 = j; - i__3 = j; - i__4 = j + j * a_dim1; - z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[ - i__4].i, z__1.i = x[i__3].r * a[i__4].i + - x[i__3].i * a[i__4].r; - x[i__2].r = z__1.r, x[i__2].i = z__1.i; + x[j] *= a[j + j * a_dim1]; } } } @@ -49193,32 +51455,16 @@ return TCL_OK; jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { - i__2 = jx; - if (x[i__2].r != 0. || x[i__2].i != 0.) { - i__2 = jx; - temp.r = x[i__2].r, temp.i = x[i__2].i; + if (x[jx] != 0.) { + temp = x[jx]; ix = kx; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = ix; - i__4 = ix; - i__5 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, - z__2.i = temp.r * a[i__5].i + temp.i * a[ - i__5].r; - z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i + - z__2.i; - x[i__3].r = z__1.r, x[i__3].i = z__1.i; + x[ix] += temp * a[i__ + j * a_dim1]; ix += *incx; } if (nounit) { - i__2 = jx; - i__3 = jx; - i__4 = j + j * a_dim1; - z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[ - i__4].i, z__1.i = x[i__3].r * a[i__4].i + - x[i__3].i * a[i__4].r; - x[i__2].r = z__1.r, x[i__2].i = z__1.i; + x[jx] *= a[j + j * a_dim1]; } } jx += *incx; @@ -49227,30 +51473,14 @@ return TCL_OK; } else { if (*incx == 1) { for (j = *n; j >= 1; --j) { - i__1 = j; - if (x[i__1].r != 0. || x[i__1].i != 0.) { - i__1 = j; - temp.r = x[i__1].r, temp.i = x[i__1].i; + if (x[j] != 0.) 
{ + temp = x[j]; i__1 = j + 1; for (i__ = *n; i__ >= i__1; --i__) { - i__2 = i__; - i__3 = i__; - i__4 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i, - z__2.i = temp.r * a[i__4].i + temp.i * a[ - i__4].r; - z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i + - z__2.i; - x[i__2].r = z__1.r, x[i__2].i = z__1.i; + x[i__] += temp * a[i__ + j * a_dim1]; } if (nounit) { - i__1 = j; - i__2 = j; - i__3 = j + j * a_dim1; - z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[ - i__3].i, z__1.i = x[i__2].r * a[i__3].i + - x[i__2].i * a[i__3].r; - x[i__1].r = z__1.r, x[i__1].i = z__1.i; + x[j] *= a[j + j * a_dim1]; } } } @@ -49258,32 +51488,16 @@ return TCL_OK; kx += (*n - 1) * *incx; jx = kx; for (j = *n; j >= 1; --j) { - i__1 = jx; - if (x[i__1].r != 0. || x[i__1].i != 0.) { - i__1 = jx; - temp.r = x[i__1].r, temp.i = x[i__1].i; + if (x[jx] != 0.) { + temp = x[jx]; ix = kx; i__1 = j + 1; for (i__ = *n; i__ >= i__1; --i__) { - i__2 = ix; - i__3 = ix; - i__4 = i__ + j * a_dim1; - z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i, - z__2.i = temp.r * a[i__4].i + temp.i * a[ - i__4].r; - z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i + - z__2.i; - x[i__2].r = z__1.r, x[i__2].i = z__1.i; + x[ix] += temp * a[i__ + j * a_dim1]; ix -= *incx; } if (nounit) { - i__1 = jx; - i__2 = jx; - i__3 = j + j * a_dim1; - z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[ - i__3].i, z__1.i = x[i__2].r * a[i__3].i + - x[i__2].i * a[i__3].r; - x[i__1].r = z__1.r, x[i__1].i = z__1.i; + x[jx] *= a[j + j * a_dim1]; } } jx -= *incx; @@ -49296,95 +51510,28 @@ return TCL_OK; if (lsame_(uplo, "U")) { if (*incx == 1) { for (j = *n; j >= 1; --j) { - i__1 = j; - temp.r = x[i__1].r, temp.i = x[i__1].i; - if (noconj) { - if (nounit) { - i__1 = j + j * a_dim1; - z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i, - z__1.i = temp.r * a[i__1].i + temp.i * a[ - i__1].r; - temp.r = z__1.r, temp.i = z__1.i; - } - for (i__ = j - 1; i__ >= 1; --i__) { - i__1 = i__ + j * a_dim1; - i__2 = i__; - z__2.r = 
a[i__1].r * x[i__2].r - a[i__1].i * x[ - i__2].i, z__2.i = a[i__1].r * x[i__2].i + - a[i__1].i * x[i__2].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - } - } else { - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - for (i__ = j - 1; i__ >= 1; --i__) { - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__1 = i__; - z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i, - z__2.i = z__3.r * x[i__1].i + z__3.i * x[ - i__1].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - } + temp = x[j]; + if (nounit) { + temp *= a[j + j * a_dim1]; } - i__1 = j; - x[i__1].r = temp.r, x[i__1].i = temp.i; + for (i__ = j - 1; i__ >= 1; --i__) { + temp += a[i__ + j * a_dim1] * x[i__]; + } + x[j] = temp; } } else { jx = kx + (*n - 1) * *incx; for (j = *n; j >= 1; --j) { - i__1 = jx; - temp.r = x[i__1].r, temp.i = x[i__1].i; + temp = x[jx]; ix = jx; - if (noconj) { - if (nounit) { - i__1 = j + j * a_dim1; - z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i, - z__1.i = temp.r * a[i__1].i + temp.i * a[ - i__1].r; - temp.r = z__1.r, temp.i = z__1.i; - } - for (i__ = j - 1; i__ >= 1; --i__) { - ix -= *incx; - i__1 = i__ + j * a_dim1; - i__2 = ix; - z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[ - i__2].i, z__2.i = a[i__1].r * x[i__2].i + - a[i__1].i * x[i__2].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - } - } else { - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - for (i__ = j - 1; i__ >= 1; --i__) { - ix -= *incx; - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__1 = ix; - z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i, - z__2.i = z__3.r * x[i__1].i + z__3.i * x[ - i__1].r; - z__1.r = temp.r 
+ z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - } + if (nounit) { + temp *= a[j + j * a_dim1]; } - i__1 = jx; - x[i__1].r = temp.r, x[i__1].i = temp.i; + for (i__ = j - 1; i__ >= 1; --i__) { + ix -= *incx; + temp += a[i__ + j * a_dim1] * x[ix]; + } + x[jx] = temp; jx -= *incx; } } @@ -49392,156 +51539,47 @@ return TCL_OK; if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { - i__2 = j; - temp.r = x[i__2].r, temp.i = x[i__2].i; - if (noconj) { - if (nounit) { - i__2 = j + j * a_dim1; - z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i, - z__1.i = temp.r * a[i__2].i + temp.i * a[ - i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__; - z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[ - i__4].i, z__2.i = a[i__3].r * x[i__4].i + - a[i__3].i * x[i__4].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - } - } else { - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = i__; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, - z__2.i = z__3.r * x[i__3].i + z__3.i * x[ - i__3].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - } + temp = x[j]; + if (nounit) { + temp *= a[j + j * a_dim1]; } - i__2 = j; - x[i__2].r = temp.r, x[i__2].i = temp.i; + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + temp += a[i__ + j * a_dim1] * x[i__]; + } + x[j] = temp; } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { - i__2 = jx; - temp.r = x[i__2].r, temp.i = x[i__2].i; + temp = x[jx]; ix = jx; - if (noconj) { - if (nounit) { - i__2 = j + j * a_dim1; - z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i, - z__1.i = temp.r * a[i__2].i 
+ temp.i * a[ - i__2].r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - i__3 = i__ + j * a_dim1; - i__4 = ix; - z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[ - i__4].i, z__2.i = a[i__3].r * x[i__4].i + - a[i__3].i * x[i__4].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - } - } else { - if (nounit) { - d_cnjg(&z__2, &a[j + j * a_dim1]); - z__1.r = temp.r * z__2.r - temp.i * z__2.i, - z__1.i = temp.r * z__2.i + temp.i * - z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - } - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - d_cnjg(&z__3, &a[i__ + j * a_dim1]); - i__3 = ix; - z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, - z__2.i = z__3.r * x[i__3].i + z__3.i * x[ - i__3].r; - z__1.r = temp.r + z__2.r, z__1.i = temp.i + - z__2.i; - temp.r = z__1.r, temp.i = z__1.i; - } + if (nounit) { + temp *= a[j + j * a_dim1]; } - i__2 = jx; - x[i__2].r = temp.r, x[i__2].i = temp.i; - jx += *incx; - } - } - } - } + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + ix += *incx; + temp += a[i__ + j * a_dim1] * x[ix]; + } + x[jx] = temp; + jx += *incx; + } + } + } + } return TCL_OK; -} /* ztrmv_ */ -static /* Subroutine */ int dlaruv_ (Tcl_Interp *interp, integer *iseed, integer *n, doublereal *x) +} /* dtrmv_ */ +static logical dlaisnan_ (doublereal *din1, doublereal *din2) { + logical ret_val; - static integer mm[512] /* was [128][4] */ = { 494,2637,255,2008,1253, - 3344,4084,1739,3143,3468,688,1657,1238,3166,1292,3422,1270,2016, - 154,2862,697,1706,491,931,1444,444,3577,3944,2184,1661,3482,657, - 3023,3618,1267,1828,164,3798,3087,2400,2870,3876,1905,1593,1797, - 1234,3460,328,2861,1950,617,2070,3331,769,1558,2412,2800,189,287, - 2045,1227,2838,209,2770,3654,3993,192,2253,3491,2889,2857,2094, - 1818,688,1407,634,3231,815,3524,1914,516,164,303,2144,3480,119, - 3357,837,2826,2332,2089,3780,1700,3712,150,2000,3375,1621,3090, - 
3765,1149,3146,33,3082,2741,359,3316,1749,185,2784,2202,2199,1364, - 1244,2020,3160,2785,2772,1217,1822,1245,2252,3904,2774,997,2573, - 1148,545,322,789,1440,752,2859,123,1848,643,2405,2638,2344,46, - 3814,913,3649,339,3808,822,2832,3078,3633,2970,637,2249,2081,4019, - 1478,242,481,2075,4058,622,3376,812,234,641,4005,1122,3135,2640, - 2302,40,1832,2247,2034,2637,1287,1691,496,1597,2394,2584,1843,336, - 1472,2407,433,2096,1761,2810,566,442,41,1238,1086,603,840,3168, - 1499,1084,3438,2408,1589,2391,288,26,512,1456,171,1677,2657,2270, - 2587,2961,1970,1817,676,1410,3723,2803,3185,184,663,499,3784,1631, - 1925,3912,1398,1349,1441,2224,2411,1907,3192,2786,382,37,759,2948, - 1862,3802,2423,2051,2295,1332,1832,2405,3638,3661,327,3660,716, - 1842,3987,1368,1848,2366,2508,3754,1766,3572,2893,307,1297,3966, - 758,2598,3406,2922,1038,2934,2091,2451,1580,1958,2055,1507,1078, - 3273,17,854,2916,3971,2889,3831,2621,1541,893,736,3992,787,2125, - 2364,2460,257,1574,3912,1216,3248,3401,2124,2762,149,2245,166,466, - 4018,1399,190,2879,153,2320,18,712,2159,2318,2091,3443,1510,449, - 1956,2201,3137,3399,1321,2271,3667,2703,629,2365,2431,1113,3922, - 2554,184,2099,3228,4012,1921,3452,3901,572,3309,3171,817,3039, - 1696,1256,3715,2077,3019,1497,1101,717,51,981,1978,1813,3881,76, - 3846,3694,1682,124,1660,3997,479,1141,886,3514,1301,3604,1888, - 1836,1990,2058,692,1194,20,3285,2046,2107,3508,3525,3801,2549, - 1145,2253,305,3301,1065,3133,2913,3285,1241,1197,3729,2501,1673, - 541,2753,949,2361,1165,4081,2725,3305,3069,3617,3733,409,2157, - 1361,3973,1865,2525,1409,3445,3577,77,3761,2149,1449,3005,225,85, - 3673,3117,3089,1349,2057,413,65,1845,697,3085,3441,1573,3689,2941, - 929,533,2841,4077,721,2821,2249,2397,2817,245,1913,1997,3121,997, - 1833,2877,1633,981,2009,941,2449,197,2441,285,1473,2741,3129,909, - 2801,421,4073,2813,2337,1429,1177,1901,81,1669,2633,2269,129,1141, - 249,3917,2481,3941,2217,2749,3041,1877,345,2861,1809,3141,2825, - 
157,2881,3637,1465,2829,2161,3365,361,2685,3745,2325,3609,3821, - 3537,517,3017,2141,1537 }; - integer i__1; - integer i__, i1, i2, i3, i4, it1, it2, it3, it4; @@ -49550,596 +51588,407 @@ static /* Subroutine */ int dlaruv_ (Tcl_Interp *interp, integer *iseed, integer + ret_val = *din1 != *din2; + return ret_val; +} /* dlaisnan_ */ +static /* Subroutine */ int zlarf_ (Tcl_Interp *interp, char *side, integer *m, integer *n, doublecomplex *v, integer *incv, doublecomplex *tau, doublecomplex *c__, integer * ldc, doublecomplex *work) +{ + integer c_dim1, c_offset, i__1; + doublecomplex z__1; + integer i__; + logical applyleft; + integer lastc; + integer lastv; - --iseed; - --x; - i1 = iseed[1]; - i2 = iseed[2]; - i3 = iseed[3]; - i4 = iseed[4]; - i__1 = min(*n,128); - for (i__ = 1; i__ <= i__1; ++i__) { -L20: - it4 = i4 * mm[i__ + 383]; - it3 = it4 / 4096; - it4 -= it3 << 12; - it3 = it3 + i3 * mm[i__ + 383] + i4 * mm[i__ + 255]; - it2 = it3 / 4096; - it3 -= it2 << 12; - it2 = it2 + i2 * mm[i__ + 383] + i3 * mm[i__ + 255] + i4 * mm[i__ + - 127]; - it1 = it2 / 4096; - it2 -= it1 << 12; - it1 = it1 + i1 * mm[i__ + 383] + i2 * mm[i__ + 255] + i3 * mm[i__ + - 127] + i4 * mm[i__ - 1]; - it1 %= 4096; - x[i__] = ((doublereal) it1 + ((doublereal) it2 + ((doublereal) it3 + ( - doublereal) it4 * 2.44140625e-4) * 2.44140625e-4) * - 2.44140625e-4) * 2.44140625e-4; - if (x[i__] == 1.) 
{ - i1 += 2; - i2 += 2; - i3 += 2; - i4 += 2; - goto L20; - } - } - iseed[1] = it1; - iseed[2] = it2; - iseed[3] = it3; - iseed[4] = it4; -return TCL_OK; -} /* dlaruv_ */ -static /* Subroutine */ int dlasq2_ (Tcl_Interp *interp, integer *n, doublereal *z__, integer *info) -{ - integer i__1, i__2, i__3; - doublereal d__1, d__2; - double sqrt(doublereal); - doublereal d__, e, g; - integer k; - doublereal s, t; - integer i0, i4, n0; - doublereal dn; - integer pp; - doublereal dn1, dn2, dee, eps, tau, tol; - integer ipn4; - doublereal tol2; - logical ieee; - integer nbig; - doublereal dmin__, emin, emax; - integer kmin, ndiv, iter; - doublereal qmin, temp, qmax, zmax; - integer splt; - doublereal dmin1, dmin2; - integer nfail; - doublereal desig, trace, sigma; - integer iinfo, ttype; - doublereal deemin; - integer iwhila, iwhilb; - doublereal oldemn, safmin; + --v; + c_dim1 = *ldc; + c_offset = 1 + c_dim1; + c__ -= c_offset; + --work; + applyleft = lsame_(side, "L"); + lastv = 0; + lastc = 0; + if (tau->r != 0. || tau->i != 0.) { + if (applyleft) { + lastv = *m; + } else { + lastv = *n; + } + if (*incv > 0) { + i__ = (lastv - 1) * *incv + 1; + } else { + i__ = 1; + } + for(;;) { /* while(complicated condition) */ + i__1 = i__; + if (!(lastv > 0 && (v[i__1].r == 0. 
&& v[i__1].i == 0.))) + break; + --lastv; + i__ -= *incv; + } + if (applyleft) { + lastc = ilazlc_(&lastv, n, &c__[c_offset], ldc); + } else { + lastc = ilazlr_(m, &lastv, &c__[c_offset], ldc); + } + } + if (applyleft) { + if (lastv > 0) { + if (zgemv_(interp, "Conjugate transpose", &lastv, &lastc, &zlarf_c_b1, &c__[ c_offset], ldc, &v[1], incv, &zlarf_c_b2, &work[1], &zlarf_c__1)!=TCL_OK) { return TCL_ERROR; } + z__1.r = -tau->r, z__1.i = -tau->i; + if (zgerc_(interp, &lastv, &lastc, &z__1, &v[1], incv, &work[1], &zlarf_c__1, &c__[ c_offset], ldc)!=TCL_OK) { return TCL_ERROR; } + } + } else { + if (lastv > 0) { + if (zgemv_(interp, "No transpose", &lastc, &lastv, &zlarf_c_b1, &c__[c_offset], ldc, &v[1], incv, &zlarf_c_b2, &work[1], &zlarf_c__1)!=TCL_OK) { return TCL_ERROR; } - --z__; - *info = 0; - eps = dlamch_("Precision"); - safmin = dlamch_("Safe minimum"); - tol = eps * 100.; - d__1 = tol; - tol2 = d__1 * d__1; + z__1.r = -tau->r, z__1.i = -tau->i; + if (zgerc_(interp, &lastc, &lastv, &z__1, &work[1], &zlarf_c__1, &v[1], incv, &c__[ c_offset], ldc)!=TCL_OK) { return TCL_ERROR; } - if (*n < 0) { - *info = -1; - vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__1); -return TCL_ERROR; + } + } return TCL_OK; - } else if (*n == 0) { -return TCL_OK; - } else if (*n == 1) { - if (z__[1] < 0.) { - *info = -201; - vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__2); -return TCL_ERROR; +} /* zlarf_ */ +static /* Subroutine */ int zlarfg_ (Tcl_Interp *interp, integer *n, doublecomplex *alpha, doublecomplex * x, integer *incx, doublecomplex *tau) +{ + integer i__1; + doublereal d__1, d__2; + doublecomplex z__1, z__2; - } -return TCL_OK; - } else if (*n == 2) { + double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *); + integer j, knt; + doublereal beta, alphi, alphr; + doublereal xnorm; + doublereal safmin; + doublereal rsafmn; - if (z__[2] < 0. || z__[3] < 0.) 
{ - *info = -2; - vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__2); -return TCL_ERROR; -return TCL_OK; - } else if (z__[3] > z__[1]) { - d__ = z__[3]; - z__[3] = z__[1]; - z__[1] = d__; - } - z__[5] = z__[1] + z__[2] + z__[3]; - if (z__[2] > z__[3] * tol2) { - t = (z__[1] - z__[3] + z__[2]) * .5; - s = z__[3] * (z__[2] / t); - if (s <= t) { - s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.) + 1.))); - } else { - s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s))); - } - t = z__[1] + (s + z__[2]); - z__[3] *= z__[1] / t; - z__[1] = t; - } - z__[2] = z__[3]; - z__[6] = z__[2] + z__[1]; -return TCL_OK; - } - z__[*n * 2] = 0.; - emin = z__[2]; - qmax = 0.; - zmax = 0.; - d__ = 0.; - e = 0.; - i__1 = *n - 1 << 1; - for (k = 1; k <= i__1; k += 2) { - if (z__[k] < 0.) { - *info = -(k + 200); - vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__2); -return TCL_ERROR; -return TCL_OK; - } else if (z__[k + 1] < 0.) { - *info = -(k + 201); - vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__2); -return TCL_ERROR; -return TCL_OK; - } - d__ += z__[k]; - e += z__[k + 1]; - d__1 = qmax, d__2 = z__[k]; - qmax = max(d__1,d__2); - d__1 = emin, d__2 = z__[k + 1]; - emin = min(d__1,d__2); - d__1 = max(qmax,zmax), d__2 = z__[k + 1]; - zmax = max(d__1,d__2); - } - if (z__[(*n << 1) - 1] < 0.) { - *info = -((*n << 1) + 199); - vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__2); -return TCL_ERROR; -return TCL_OK; - } - d__ += z__[(*n << 1) - 1]; - d__1 = qmax, d__2 = z__[(*n << 1) - 1]; - qmax = max(d__1,d__2); - zmax = max(qmax,zmax); - if (e == 0.) { - i__1 = *n; - for (k = 2; k <= i__1; ++k) { - z__[k] = z__[(k << 1) - 1]; - } - if (dlasrt_(interp, "D", n, &z__[1], &iinfo)!=TCL_OK) { return TCL_ERROR; } - z__[(*n << 1) - 1] = d__; -return TCL_OK; - } - trace = d__ + e; - if (trace == 0.) 
{ - z__[(*n << 1) - 1] = 0.; -return TCL_OK; - } - ieee = ilaenv_(&dlasq2_c__10, "DLASQ2", "N", &dlasq2_c__1, &dlasq2_c__2, &dlasq2_c__3, &dlasq2_c__4) == 1 && ilaenv_(&dlasq2_c__11, "DLASQ2", "N", &dlasq2_c__1, &dlasq2_c__2, - &dlasq2_c__3, &dlasq2_c__4) == 1; - for (k = *n << 1; k >= 2; k += -2) { - z__[k * 2] = 0.; - z__[(k << 1) - 1] = z__[k]; - z__[(k << 1) - 2] = 0.; - z__[(k << 1) - 3] = z__[k - 1]; - } - i0 = 1; - n0 = *n; + --x; - if (z__[(i0 << 2) - 3] * 1.5 < z__[(n0 << 2) - 3]) { - ipn4 = i0 + n0 << 2; - i__1 = i0 + n0 - 1 << 1; - for (i4 = i0 << 2; i4 <= i__1; i4 += 4) { - temp = z__[i4 - 3]; - z__[i4 - 3] = z__[ipn4 - i4 - 3]; - z__[ipn4 - i4 - 3] = temp; - temp = z__[i4 - 1]; - z__[i4 - 1] = z__[ipn4 - i4 - 5]; - z__[ipn4 - i4 - 5] = temp; - } + if (*n <= 0) { + tau->r = 0., tau->i = 0.; +return TCL_OK; } + i__1 = *n - 1; + xnorm = dznrm2_(&i__1, &x[1], incx); + alphr = alpha->r; + alphi = d_imag(alpha); - pp = 0; - - for (k = 1; k <= 2; ++k) { - - d__ = z__[(n0 << 2) + pp - 3]; - i__1 = (i0 << 2) + pp; - for (i4 = (n0 - 1 << 2) + pp; i4 >= i__1; i4 += -4) { - if (z__[i4 - 1] <= tol2 * d__) { - z__[i4 - 1] = -0.; - d__ = z__[i4 - 3]; - } else { - d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1])); - } - } + if (xnorm == 0. && alphi == 0.) 
{ - emin = z__[(i0 << 2) + pp + 1]; - d__ = z__[(i0 << 2) + pp - 3]; - i__1 = (n0 - 1 << 2) + pp; - for (i4 = (i0 << 2) + pp; i4 <= i__1; i4 += 4) { - z__[i4 - (pp << 1) - 2] = d__ + z__[i4 - 1]; - if (z__[i4 - 1] <= tol2 * d__) { - z__[i4 - 1] = -0.; - z__[i4 - (pp << 1) - 2] = d__; - z__[i4 - (pp << 1)] = 0.; - d__ = z__[i4 + 1]; - } else if (safmin * z__[i4 + 1] < z__[i4 - (pp << 1) - 2] && - safmin * z__[i4 - (pp << 1) - 2] < z__[i4 + 1]) { - temp = z__[i4 + 1] / z__[i4 - (pp << 1) - 2]; - z__[i4 - (pp << 1)] = z__[i4 - 1] * temp; - d__ *= temp; - } else { - z__[i4 - (pp << 1)] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4 - ( - pp << 1) - 2]); - d__ = z__[i4 + 1] * (d__ / z__[i4 - (pp << 1) - 2]); - } - d__1 = emin, d__2 = z__[i4 - (pp << 1)]; - emin = min(d__1,d__2); - } - z__[(n0 << 2) - pp - 2] = d__; + tau->r = 0., tau->i = 0.; + } else { - qmax = z__[(i0 << 2) - pp - 2]; - i__1 = (n0 << 2) - pp - 2; - for (i4 = (i0 << 2) - pp + 2; i4 <= i__1; i4 += 4) { - d__1 = qmax, d__2 = z__[i4]; - qmax = max(d__1,d__2); - } + d__1 = dlapy3_(&alphr, &alphi, &xnorm); + beta = -d_sign(&d__1, &alphr); + safmin = dlamch_("S") / dlamch_("E"); + rsafmn = 1. 
/ safmin; + knt = 0; + if (abs(beta) < safmin) { - pp = 1 - pp; - } +L10: + ++knt; + i__1 = *n - 1; + if (zdscal_(interp, &i__1, &rsafmn, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } - ttype = 0; - dmin1 = 0.; - dmin2 = 0.; - dn = 0.; - dn1 = 0.; - dn2 = 0.; - g = 0.; - tau = 0.; + beta *= rsafmn; + alphi *= rsafmn; + alphr *= rsafmn; + if (abs(beta) < safmin) { + goto L10; + } - iter = 2; - nfail = 0; - ndiv = n0 - i0 << 1; - i__1 = *n + 1; - for (iwhila = 1; iwhila <= i__1; ++iwhila) { - if (n0 < 1) { - goto L170; + i__1 = *n - 1; + xnorm = dznrm2_(&i__1, &x[1], incx); + z__1.r = alphr, z__1.i = alphi; + alpha->r = z__1.r, alpha->i = z__1.i; + d__1 = dlapy3_(&alphr, &alphi, &xnorm); + beta = -d_sign(&d__1, &alphr); } + d__1 = (beta - alphr) / beta; + d__2 = -alphi / beta; + z__1.r = d__1, z__1.i = d__2; + tau->r = z__1.r, tau->i = z__1.i; + z__2.r = alpha->r - beta, z__2.i = alpha->i; + zladiv_(&z__1, &zlarfg_c_b5, &z__2); + alpha->r = z__1.r, alpha->i = z__1.i; + i__1 = *n - 1; + if (zscal_(interp, &i__1, alpha, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } - desig = 0.; - if (n0 == *n) { - sigma = 0.; - } else { - sigma = -z__[(n0 << 2) - 1]; + i__1 = knt; + for (j = 1; j <= i__1; ++j) { + beta *= safmin; } - if (sigma < 0.) { - *info = 1; + alpha->r = beta, alpha->i = 0.; + } + return TCL_OK; - } - emax = 0.; - if (n0 > i0) { - emin = (d__1 = z__[(n0 << 2) - 5], abs(d__1)); - } else { - emin = 0.; - } - qmin = z__[(n0 << 2) - 3]; - qmax = qmin; - for (i4 = n0 << 2; i4 >= 8; i4 += -4) { - if (z__[i4 - 5] <= 0.) { - goto L100; - } - if (qmin >= emax * 4.) 
{ - d__1 = qmin, d__2 = z__[i4 - 3]; - qmin = min(d__1,d__2); - d__1 = emax, d__2 = z__[i4 - 5]; - emax = max(d__1,d__2); - } - d__1 = qmax, d__2 = z__[i4 - 7] + z__[i4 - 5]; - qmax = max(d__1,d__2); - d__1 = emin, d__2 = z__[i4 - 5]; - emin = min(d__1,d__2); - } - i4 = 4; +} /* zlarfg_ */ +static /* Subroutine */ int zlacgv_ (Tcl_Interp *interp, integer *n, doublecomplex *x, integer *incx) +{ + integer i__1, i__2; + doublecomplex z__1; -L100: - i0 = i4 / 4; - pp = 0; + void d_cnjg(doublecomplex *, doublecomplex *); - if (n0 - i0 > 1) { - dee = z__[(i0 << 2) - 3]; - deemin = dee; - kmin = i0; - i__2 = (n0 << 2) - 3; - for (i4 = (i0 << 2) + 1; i4 <= i__2; i4 += 4) { - dee = z__[i4] * (dee / (dee + z__[i4 - 2])); - if (dee <= deemin) { - deemin = dee; - kmin = (i4 + 3) / 4; - } - } - if (kmin - i0 << 1 < n0 - kmin && deemin <= z__[(n0 << 2) - 3] * - .5) { - ipn4 = i0 + n0 << 2; - pp = 2; - i__2 = i0 + n0 - 1 << 1; - for (i4 = i0 << 2; i4 <= i__2; i4 += 4) { - temp = z__[i4 - 3]; - z__[i4 - 3] = z__[ipn4 - i4 - 3]; - z__[ipn4 - i4 - 3] = temp; - temp = z__[i4 - 2]; - z__[i4 - 2] = z__[ipn4 - i4 - 2]; - z__[ipn4 - i4 - 2] = temp; - temp = z__[i4 - 1]; - z__[i4 - 1] = z__[ipn4 - i4 - 5]; - z__[ipn4 - i4 - 5] = temp; - temp = z__[i4]; - z__[i4] = z__[ipn4 - i4 - 4]; - z__[ipn4 - i4 - 4] = temp; - } - } - } + integer i__, ioff; - d__1 = 0., d__2 = qmin - sqrt(qmin) * 2. 
* sqrt(emax); - dmin__ = -max(d__1,d__2); - nbig = (n0 - i0 + 1) * 30; - i__2 = nbig; - for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) { - if (i0 > n0) { - goto L150; - } - if (dlasq3_(interp, &i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, & nfail, &iter, &ndiv, &ieee, &ttype, &dmin1, &dmin2, &dn, & - dn1, &dn2, &g, &tau)!=TCL_OK) { return TCL_ERROR; } - pp = 1 - pp; - if (pp == 0 && n0 - i0 >= 3) { - if (z__[n0 * 4] <= tol2 * qmax || z__[(n0 << 2) - 1] <= tol2 * - sigma) { - splt = i0 - 1; - qmax = z__[(i0 << 2) - 3]; - emin = z__[(i0 << 2) - 1]; - oldemn = z__[i0 * 4]; - i__3 = n0 - 3 << 2; - for (i4 = i0 << 2; i4 <= i__3; i4 += 4) { - if (z__[i4] <= tol2 * z__[i4 - 3] || z__[i4 - 1] <= - tol2 * sigma) { - z__[i4 - 1] = -sigma; - splt = i4 / 4; - qmax = 0.; - emin = z__[i4 + 3]; - oldemn = z__[i4 + 4]; - } else { - d__1 = qmax, d__2 = z__[i4 + 1]; - qmax = max(d__1,d__2); - d__1 = emin, d__2 = z__[i4 - 1]; - emin = min(d__1,d__2); - d__1 = oldemn, d__2 = z__[i4]; - oldemn = min(d__1,d__2); - } - } - z__[(n0 << 2) - 1] = emin; - z__[n0 * 4] = oldemn; - i0 = splt + 1; - } - } - } + --x; - *info = 2; + if (*incx == 1) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + d_cnjg(&z__1, &x[i__]); + x[i__2].r = z__1.r, x[i__2].i = z__1.i; + } + } else { + ioff = 1; + if (*incx < 0) { + ioff = 1 - (*n - 1) * *incx; + } + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = ioff; + d_cnjg(&z__1, &x[ioff]); + x[i__2].r = z__1.r, x[i__2].i = z__1.i; + ioff += *incx; + } + } return TCL_OK; -L150: +} /* zlacgv_ */ +static /* Subroutine */ int zlarfp_ (Tcl_Interp *interp, integer *n, doublecomplex *alpha, doublecomplex * x, integer *incx, doublecomplex *tau) +{ + integer i__1, i__2; + doublereal d__1, d__2; + doublecomplex z__1, z__2; - ; - } + double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *); - *info = 3; -return TCL_OK; + integer j, knt; + doublereal beta, alphi, alphr; + doublereal xnorm; + doublereal safmin; + doublereal rsafmn; 
-L170: - i__1 = *n; - for (k = 2; k <= i__1; ++k) { - z__[k] = z__[(k << 2) - 3]; - } - if (dlasrt_(interp, "D", n, &z__[1], &iinfo)!=TCL_OK) { return TCL_ERROR; } - e = 0.; - for (k = *n; k >= 1; --k) { - e += z__[k]; - } - z__[(*n << 1) + 1] = trace; - z__[(*n << 1) + 2] = e; - z__[(*n << 1) + 3] = (doublereal) iter; - i__1 = *n; - z__[(*n << 1) + 4] = (doublereal) ndiv / (doublereal) (i__1 * i__1); - z__[(*n << 1) + 5] = nfail * 100. / (doublereal) iter; -return TCL_OK; -} /* dlasq2_ */ -static /* Subroutine */ int dlarra_ (Tcl_Interp *interp, integer *n, doublereal *d__, doublereal *e, doublereal *e2, doublereal *spltol, doublereal *tnrm, integer *nsplit, integer *isplit, integer *info) -{ - integer i__1; - doublereal d__1, d__2; - double sqrt(doublereal); - integer i__; - doublereal tmp1, eabs; + --x; + if (*n <= 0) { + tau->r = 0., tau->i = 0.; +return TCL_OK; + } + i__1 = *n - 1; + xnorm = dznrm2_(&i__1, &x[1], incx); + alphr = alpha->r; + alphi = d_imag(alpha); + if (xnorm == 0. && alphi == 0.) { + if (alphi == 0.) { + if (alphr >= 0.) { + tau->r = 0., tau->i = 0.; + } else { + tau->r = 2., tau->i = 0.; + i__1 = *n - 1; + for (j = 1; j <= i__1; ++j) { + i__2 = (j - 1) * *incx + 1; + x[i__2].r = 0., x[i__2].i = 0.; + } + z__1.r = -alpha->r, z__1.i = -alpha->i; + alpha->r = z__1.r, alpha->i = z__1.i; + } + } else { + xnorm = dlapy2_(&alphr, &alphi); + d__1 = 1. - alphr / xnorm; + d__2 = -alphi / xnorm; + z__1.r = d__1, z__1.i = d__2; + tau->r = z__1.r, tau->i = z__1.i; + i__1 = *n - 1; + for (j = 1; j <= i__1; ++j) { + i__2 = (j - 1) * *incx + 1; + x[i__2].r = 0., x[i__2].i = 0.; + } + alpha->r = xnorm, alpha->i = 0.; + } + } else { + d__1 = dlapy3_(&alphr, &alphi, &xnorm); + beta = d_sign(&d__1, &alphr); + safmin = dlamch_("S") / dlamch_("E"); + rsafmn = 1. 
/ safmin; + knt = 0; + if (abs(beta) < safmin) { +L10: + ++knt; + i__1 = *n - 1; + if (zdscal_(interp, &i__1, &rsafmn, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } + beta *= rsafmn; + alphi *= rsafmn; + alphr *= rsafmn; + if (abs(beta) < safmin) { + goto L10; + } + i__1 = *n - 1; + xnorm = dznrm2_(&i__1, &x[1], incx); + z__1.r = alphr, z__1.i = alphi; + alpha->r = z__1.r, alpha->i = z__1.i; + d__1 = dlapy3_(&alphr, &alphi, &xnorm); + beta = d_sign(&d__1, &alphr); + } + z__1.r = alpha->r + beta, z__1.i = alpha->i; + alpha->r = z__1.r, alpha->i = z__1.i; + if (beta < 0.) { + beta = -beta; + z__2.r = -alpha->r, z__2.i = -alpha->i; + z__1.r = z__2.r / beta, z__1.i = z__2.i / beta; + tau->r = z__1.r, tau->i = z__1.i; + } else { + alphr = alphi * (alphi / alpha->r); + alphr += xnorm * (xnorm / alpha->r); + d__1 = alphr / beta; + d__2 = -alphi / beta; + z__1.r = d__1, z__1.i = d__2; + tau->r = z__1.r, tau->i = z__1.i; + d__1 = -alphr; + z__1.r = d__1, z__1.i = alphi; + alpha->r = z__1.r, alpha->i = z__1.i; + } + zladiv_(&z__1, &zlarfp_c_b5, alpha); + alpha->r = z__1.r, alpha->i = z__1.i; + i__1 = *n - 1; + if (zscal_(interp, &i__1, alpha, &x[1], incx)!=TCL_OK) { return TCL_ERROR; } - --isplit; - --e2; - --e; - --d__; - *info = 0; - *nsplit = 1; - if (*spltol < 0.) 
{ - tmp1 = abs(*spltol) * *tnrm; - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - eabs = (d__1 = e[i__], abs(d__1)); - if (eabs <= tmp1) { - e[i__] = 0.; - e2[i__] = 0.; - isplit[*nsplit] = i__; - ++(*nsplit); - } - } - } else { - i__1 = *n - 1; - for (i__ = 1; i__ <= i__1; ++i__) { - eabs = (d__1 = e[i__], abs(d__1)); - if (eabs <= *spltol * sqrt((d__1 = d__[i__], abs(d__1))) * sqrt(( - d__2 = d__[i__ + 1], abs(d__2)))) { - e[i__] = 0.; - e2[i__] = 0.; - isplit[*nsplit] = i__; - ++(*nsplit); - } + i__1 = knt; + for (j = 1; j <= i__1; ++j) { + beta *= safmin; } + alpha->r = beta, alpha->i = 0.; } - isplit[*nsplit] = *n; + return TCL_OK; -} /* dlarra_ */ -static /* Subroutine */ int dlarrb_ (Tcl_Interp *interp, integer *n, doublereal *d__, doublereal *lld, integer *ifirst, integer *ilast, doublereal *rtol1, doublereal *rtol2, integer *offset, doublereal *w, doublereal *wgap, doublereal *werr, doublereal *work, integer *iwork, doublereal *pivmin, doublereal * spdiam, integer *twist, integer *info) +} /* zlarfp_ */ +static integer ilazlc_ (integer *m, integer *n, doublecomplex *a, integer *lda) { - integer i__1; - doublereal d__1, d__2; - - double log(doublereal); - - integer i__, k, r__, i1, ii, ip; - doublereal gap, mid, tmp, back, lgap, rgap, left; - integer iter, nint, prev, next; - doublereal cvrgd, right, width; - integer negcnt; - doublereal mnwdth; - integer olnint, maxitr; - - - + integer a_dim1, a_offset, ret_val, i__1, i__2; + integer i__; @@ -50155,8 +52004,37 @@ static /* Subroutine */ int dlarrb_ (Tcl_Interp *interp, integer *n, doublereal + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + if (*n == 0) { + ret_val = *n; + } else /* if(complicated condition) */ { + i__1 = *n * a_dim1 + 1; + i__2 = *m + *n * a_dim1; + if (a[i__1].r != 0. || a[i__1].i != 0. || (a[i__2].r != 0. 
|| a[i__2] + .i != 0.)) { + ret_val = *n; + } else { + for (ret_val = *n; ret_val >= 1; --ret_val) { + i__1 = *m; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__ + ret_val * a_dim1; + if (a[i__2].r != 0. || a[i__2].i != 0.) { + return ret_val; + } + } + } + } + } + return ret_val; +} /* ilazlc_ */ +static integer ilazlr_ (integer *m, integer *n, doublecomplex *a, integer *lda) +{ + integer a_dim1, a_offset, ret_val, i__1, i__2; + integer i__, j; @@ -50164,187 +52042,52 @@ static /* Subroutine */ int dlarrb_ (Tcl_Interp *interp, integer *n, doublereal - --iwork; - --work; - --werr; - --wgap; - --w; - --lld; - --d__; - *info = 0; - maxitr = (integer) ((log(*spdiam + *pivmin) - log(*pivmin)) / log(2.)) + - 2; - mnwdth = *pivmin * 2.; - r__ = *twist; - if (r__ < 1 || r__ > *n) { - r__ = *n; - } - i1 = *ifirst; - nint = 0; - prev = 0; - rgap = wgap[i1 - *offset]; - i__1 = *ilast; - for (i__ = i1; i__ <= i__1; ++i__) { - k = i__ << 1; - ii = i__ - *offset; - left = w[ii] - werr[ii]; - right = w[ii] + werr[ii]; - lgap = rgap; - rgap = wgap[ii]; - gap = min(lgap,rgap); - back = werr[ii]; -L20: - negcnt = dlaneg_(n, &d__[1], &lld[1], &left, pivmin, &r__); - if (negcnt > i__ - 1) { - left -= back; - back *= 2.; - goto L20; - } + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; - back = werr[ii]; -L50: - negcnt = dlaneg_(n, &d__[1], &lld[1], &right, pivmin, &r__); - if (negcnt < i__) { - right += back; - back *= 2.; - goto L50; - } - width = (d__1 = left - right, abs(d__1)) * .5; - d__1 = abs(left), d__2 = abs(right); - tmp = max(d__1,d__2); - d__1 = *rtol1 * gap, d__2 = *rtol2 * tmp; - cvrgd = max(d__1,d__2); - if (width <= cvrgd || width <= mnwdth) { - iwork[k - 1] = -1; - if (i__ == i1 && i__ < *ilast) { - i1 = i__ + 1; - } - if (prev >= i1 && i__ <= *ilast) { - iwork[(prev << 1) - 1] = i__ + 1; - } + if (*m == 0) { + ret_val = *m; + } else /* if(complicated condition) */ { + i__1 = *m + a_dim1; + i__2 = *m + *n * a_dim1; + if (a[i__1].r != 0. || a[i__1].i != 0. 
|| (a[i__2].r != 0. || a[i__2] + .i != 0.)) { + ret_val = *m; } else { - prev = i__; - ++nint; - iwork[k - 1] = i__ + 1; - iwork[k] = negcnt; - } - work[k - 1] = left; - work[k] = right; - } - - - iter = 0; -L80: - prev = i1 - 1; - i__ = i1; - olnint = nint; - i__1 = olnint; - for (ip = 1; ip <= i__1; ++ip) { - k = i__ << 1; - ii = i__ - *offset; - rgap = wgap[ii]; - lgap = rgap; - if (ii > 1) { - lgap = wgap[ii - 1]; - } - gap = min(lgap,rgap); - next = iwork[k - 1]; - left = work[k - 1]; - right = work[k]; - mid = (left + right) * .5; - width = right - mid; - d__1 = abs(left), d__2 = abs(right); - tmp = max(d__1,d__2); - d__1 = *rtol1 * gap, d__2 = *rtol2 * tmp; - cvrgd = max(d__1,d__2); - if (width <= cvrgd || width <= mnwdth || iter == maxitr) { - --nint; - iwork[k - 1] = 0; - if (i1 == i__) { - i1 = next; - } else { - if (prev >= i1) { - iwork[(prev << 1) - 1] = next; + ret_val = 0; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + for (i__ = *m; i__ >= 1; --i__) { + i__2 = i__ + j * a_dim1; + if (a[i__2].r != 0. || a[i__2].i != 0.) 
{ + break; + } } + ret_val = max(ret_val,i__); } - i__ = next; - goto L100; - } - prev = i__; - - - negcnt = dlaneg_(n, &d__[1], &lld[1], &mid, pivmin, &r__); - if (negcnt <= i__ - 1) { - work[k - 1] = mid; - } else { - work[k] = mid; - } - i__ = next; -L100: - ; - } - ++iter; - if (nint > 0 && iter <= maxitr) { - goto L80; - } - - - i__1 = *ilast; - for (i__ = *ifirst; i__ <= i__1; ++i__) { - k = i__ << 1; - ii = i__ - *offset; - if (iwork[k - 1] == 0) { - w[ii] = (work[k - 1] + work[k]) * .5; - werr[ii] = work[k] - w[ii]; } } - - i__1 = *ilast; - for (i__ = *ifirst + 1; i__ <= i__1; ++i__) { - k = i__ << 1; - ii = i__ - *offset; - d__1 = 0., d__2 = w[ii] - werr[ii] - w[ii - 1] - werr[ii - 1]; - wgap[ii - 1] = max(d__1,d__2); - } -return TCL_OK; - - -} /* dlarrb_ */ -static /* Subroutine */ int dlarrd_ (Tcl_Interp *interp, char *range, char *order, integer *n, doublereal *vl, doublereal *vu, integer *il, integer *iu, doublereal *gers, doublereal *reltol, doublereal *d__, doublereal *e, doublereal *e2, doublereal *pivmin, integer *nsplit, integer *isplit, integer *m, doublereal *w, doublereal *werr, doublereal *wl, doublereal *wu, integer *iblock, integer *indexw, doublereal *work, integer *iwork, integer *info) + return ret_val; +} /* ilazlr_ */ +static /* Subroutine */ int ztrmv_ (Tcl_Interp *interp, char *uplo, char *trans, char *diag, integer *n, doublecomplex *a, integer *lda, doublecomplex *x, integer *incx) { - integer i__1, i__2, i__3; - doublereal d__1, d__2; - - double log(doublereal); + integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; + doublecomplex z__1, z__2, z__3; - integer i__, j, ib, ie, je, nb; - doublereal gl; - integer im, in; - doublereal gu; - integer iw, jee; - doublereal eps; - integer nwl; - doublereal wlu, wul; - integer nwu; - doublereal tmp1, tmp2; - integer iend, jblk, ioff, iout, itmp1, itmp2, jdisc; - integer iinfo; - doublereal atoli; - integer iwoff, itmax; - doublereal wkill, rtoli, uflow, tnorm; - integer ibegin; - integer 
irange, idiscl, idumma[1]; - integer idiscu; - logical ncnvrg, toofew; + void d_cnjg(doublecomplex *, doublecomplex *); + integer i__, j, ix, jx, kx, info; + doublecomplex temp; + logical noconj, nounit; @@ -50376,44 +52119,1309 @@ static /* Subroutine */ int dlarrd_ (Tcl_Interp *interp, char *range, char *orde + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --x; + info = 0; + if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { + info = 1; + } else if (! lsame_(trans, "N") && ! lsame_(trans, + "T") && ! lsame_(trans, "C")) { + info = 2; + } else if (! lsame_(diag, "U") && ! lsame_(diag, + "N")) { + info = 3; + } else if (*n < 0) { + info = 4; + } else if (*lda < max(1,*n)) { + info = 6; + } else if (*incx == 0) { + info = 8; + } + if (info != 0) { + vectcl_xerbla(interp, "ZTRMV ", &info); +return TCL_ERROR; +return TCL_OK; + } - --iwork; - --work; - --indexw; - --iblock; - --werr; - --w; - --isplit; - --e2; - --e; - --d__; - --gers; + if (*n == 0) { +return TCL_OK; + } - *info = 0; + noconj = lsame_(trans, "T"); + nounit = lsame_(diag, "N"); - if (lsame_(range, "A")) { - irange = 1; - } else if (lsame_(range, "V")) { - irange = 2; - } else if (lsame_(range, "I")) { - irange = 3; - } else { - irange = 0; + if (*incx <= 0) { + kx = 1 - (*n - 1) * *incx; + } else if (*incx != 1) { + kx = 1; } - if (irange <= 0) { - *info = -1; - } else if (! (lsame_(order, "B") || lsame_(order, - "E"))) { - *info = -2; - } else if (*n < 0) { - *info = -3; - } else if (irange == 2) { + if (lsame_(trans, "N")) { + + + if (lsame_(uplo, "U")) { + if (*incx == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = j; + if (x[i__2].r != 0. || x[i__2].i != 0.) 
{ + i__2 = j; + temp.r = x[i__2].r, temp.i = x[i__2].i; + i__2 = j - 1; + for (i__ = 1; i__ <= i__2; ++i__) { + i__3 = i__; + i__4 = i__; + i__5 = i__ + j * a_dim1; + z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, + z__2.i = temp.r * a[i__5].i + temp.i * a[ + i__5].r; + z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i + + z__2.i; + x[i__3].r = z__1.r, x[i__3].i = z__1.i; + } + if (nounit) { + i__2 = j; + i__3 = j; + i__4 = j + j * a_dim1; + z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[ + i__4].i, z__1.i = x[i__3].r * a[i__4].i + + x[i__3].i * a[i__4].r; + x[i__2].r = z__1.r, x[i__2].i = z__1.i; + } + } + } + } else { + jx = kx; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = jx; + if (x[i__2].r != 0. || x[i__2].i != 0.) { + i__2 = jx; + temp.r = x[i__2].r, temp.i = x[i__2].i; + ix = kx; + i__2 = j - 1; + for (i__ = 1; i__ <= i__2; ++i__) { + i__3 = ix; + i__4 = ix; + i__5 = i__ + j * a_dim1; + z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, + z__2.i = temp.r * a[i__5].i + temp.i * a[ + i__5].r; + z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i + + z__2.i; + x[i__3].r = z__1.r, x[i__3].i = z__1.i; + ix += *incx; + } + if (nounit) { + i__2 = jx; + i__3 = jx; + i__4 = j + j * a_dim1; + z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[ + i__4].i, z__1.i = x[i__3].r * a[i__4].i + + x[i__3].i * a[i__4].r; + x[i__2].r = z__1.r, x[i__2].i = z__1.i; + } + } + jx += *incx; + } + } + } else { + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + i__1 = j; + if (x[i__1].r != 0. || x[i__1].i != 0.) 
{ + i__1 = j; + temp.r = x[i__1].r, temp.i = x[i__1].i; + i__1 = j + 1; + for (i__ = *n; i__ >= i__1; --i__) { + i__2 = i__; + i__3 = i__; + i__4 = i__ + j * a_dim1; + z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i, + z__2.i = temp.r * a[i__4].i + temp.i * a[ + i__4].r; + z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i + + z__2.i; + x[i__2].r = z__1.r, x[i__2].i = z__1.i; + } + if (nounit) { + i__1 = j; + i__2 = j; + i__3 = j + j * a_dim1; + z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[ + i__3].i, z__1.i = x[i__2].r * a[i__3].i + + x[i__2].i * a[i__3].r; + x[i__1].r = z__1.r, x[i__1].i = z__1.i; + } + } + } + } else { + kx += (*n - 1) * *incx; + jx = kx; + for (j = *n; j >= 1; --j) { + i__1 = jx; + if (x[i__1].r != 0. || x[i__1].i != 0.) { + i__1 = jx; + temp.r = x[i__1].r, temp.i = x[i__1].i; + ix = kx; + i__1 = j + 1; + for (i__ = *n; i__ >= i__1; --i__) { + i__2 = ix; + i__3 = ix; + i__4 = i__ + j * a_dim1; + z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i, + z__2.i = temp.r * a[i__4].i + temp.i * a[ + i__4].r; + z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i + + z__2.i; + x[i__2].r = z__1.r, x[i__2].i = z__1.i; + ix -= *incx; + } + if (nounit) { + i__1 = jx; + i__2 = jx; + i__3 = j + j * a_dim1; + z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[ + i__3].i, z__1.i = x[i__2].r * a[i__3].i + + x[i__2].i * a[i__3].r; + x[i__1].r = z__1.r, x[i__1].i = z__1.i; + } + } + jx -= *incx; + } + } + } + } else { + + + if (lsame_(uplo, "U")) { + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + i__1 = j; + temp.r = x[i__1].r, temp.i = x[i__1].i; + if (noconj) { + if (nounit) { + i__1 = j + j * a_dim1; + z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i, + z__1.i = temp.r * a[i__1].i + temp.i * a[ + i__1].r; + temp.r = z__1.r, temp.i = z__1.i; + } + for (i__ = j - 1; i__ >= 1; --i__) { + i__1 = i__ + j * a_dim1; + i__2 = i__; + z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[ + i__2].i, z__2.i = a[i__1].r * x[i__2].i + + a[i__1].i * x[i__2].r; + z__1.r = temp.r + z__2.r, 
z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + } + } else { + if (nounit) { + d_cnjg(&z__2, &a[j + j * a_dim1]); + z__1.r = temp.r * z__2.r - temp.i * z__2.i, + z__1.i = temp.r * z__2.i + temp.i * + z__2.r; + temp.r = z__1.r, temp.i = z__1.i; + } + for (i__ = j - 1; i__ >= 1; --i__) { + d_cnjg(&z__3, &a[i__ + j * a_dim1]); + i__1 = i__; + z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i, + z__2.i = z__3.r * x[i__1].i + z__3.i * x[ + i__1].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + } + } + i__1 = j; + x[i__1].r = temp.r, x[i__1].i = temp.i; + } + } else { + jx = kx + (*n - 1) * *incx; + for (j = *n; j >= 1; --j) { + i__1 = jx; + temp.r = x[i__1].r, temp.i = x[i__1].i; + ix = jx; + if (noconj) { + if (nounit) { + i__1 = j + j * a_dim1; + z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i, + z__1.i = temp.r * a[i__1].i + temp.i * a[ + i__1].r; + temp.r = z__1.r, temp.i = z__1.i; + } + for (i__ = j - 1; i__ >= 1; --i__) { + ix -= *incx; + i__1 = i__ + j * a_dim1; + i__2 = ix; + z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[ + i__2].i, z__2.i = a[i__1].r * x[i__2].i + + a[i__1].i * x[i__2].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + } + } else { + if (nounit) { + d_cnjg(&z__2, &a[j + j * a_dim1]); + z__1.r = temp.r * z__2.r - temp.i * z__2.i, + z__1.i = temp.r * z__2.i + temp.i * + z__2.r; + temp.r = z__1.r, temp.i = z__1.i; + } + for (i__ = j - 1; i__ >= 1; --i__) { + ix -= *incx; + d_cnjg(&z__3, &a[i__ + j * a_dim1]); + i__1 = ix; + z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i, + z__2.i = z__3.r * x[i__1].i + z__3.i * x[ + i__1].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + } + } + i__1 = jx; + x[i__1].r = temp.r, x[i__1].i = temp.i; + jx -= *incx; + } + } + } else { + if (*incx == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = j; + temp.r = x[i__2].r, temp.i = x[i__2].i; + if 
(noconj) { + if (nounit) { + i__2 = j + j * a_dim1; + z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i, + z__1.i = temp.r * a[i__2].i + temp.i * a[ + i__2].r; + temp.r = z__1.r, temp.i = z__1.i; + } + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + i__3 = i__ + j * a_dim1; + i__4 = i__; + z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[ + i__4].i, z__2.i = a[i__3].r * x[i__4].i + + a[i__3].i * x[i__4].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + } + } else { + if (nounit) { + d_cnjg(&z__2, &a[j + j * a_dim1]); + z__1.r = temp.r * z__2.r - temp.i * z__2.i, + z__1.i = temp.r * z__2.i + temp.i * + z__2.r; + temp.r = z__1.r, temp.i = z__1.i; + } + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + d_cnjg(&z__3, &a[i__ + j * a_dim1]); + i__3 = i__; + z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, + z__2.i = z__3.r * x[i__3].i + z__3.i * x[ + i__3].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + } + } + i__2 = j; + x[i__2].r = temp.r, x[i__2].i = temp.i; + } + } else { + jx = kx; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = jx; + temp.r = x[i__2].r, temp.i = x[i__2].i; + ix = jx; + if (noconj) { + if (nounit) { + i__2 = j + j * a_dim1; + z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i, + z__1.i = temp.r * a[i__2].i + temp.i * a[ + i__2].r; + temp.r = z__1.r, temp.i = z__1.i; + } + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + ix += *incx; + i__3 = i__ + j * a_dim1; + i__4 = ix; + z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[ + i__4].i, z__2.i = a[i__3].r * x[i__4].i + + a[i__3].i * x[i__4].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + } + } else { + if (nounit) { + d_cnjg(&z__2, &a[j + j * a_dim1]); + z__1.r = temp.r * z__2.r - temp.i * z__2.i, + z__1.i = temp.r * z__2.i + temp.i * + z__2.r; + temp.r = z__1.r, temp.i = z__1.i; + } + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + 
ix += *incx; + d_cnjg(&z__3, &a[i__ + j * a_dim1]); + i__3 = ix; + z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, + z__2.i = z__3.r * x[i__3].i + z__3.i * x[ + i__3].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + } + } + i__2 = jx; + x[i__2].r = temp.r, x[i__2].i = temp.i; + jx += *incx; + } + } + } + } + +return TCL_OK; + + +} /* ztrmv_ */ +static /* Subroutine */ int dlaruv_ (Tcl_Interp *interp, integer *iseed, integer *n, doublereal *x) +{ + + static integer mm[512] /* was [128][4] */ = { 494,2637,255,2008,1253, + 3344,4084,1739,3143,3468,688,1657,1238,3166,1292,3422,1270,2016, + 154,2862,697,1706,491,931,1444,444,3577,3944,2184,1661,3482,657, + 3023,3618,1267,1828,164,3798,3087,2400,2870,3876,1905,1593,1797, + 1234,3460,328,2861,1950,617,2070,3331,769,1558,2412,2800,189,287, + 2045,1227,2838,209,2770,3654,3993,192,2253,3491,2889,2857,2094, + 1818,688,1407,634,3231,815,3524,1914,516,164,303,2144,3480,119, + 3357,837,2826,2332,2089,3780,1700,3712,150,2000,3375,1621,3090, + 3765,1149,3146,33,3082,2741,359,3316,1749,185,2784,2202,2199,1364, + 1244,2020,3160,2785,2772,1217,1822,1245,2252,3904,2774,997,2573, + 1148,545,322,789,1440,752,2859,123,1848,643,2405,2638,2344,46, + 3814,913,3649,339,3808,822,2832,3078,3633,2970,637,2249,2081,4019, + 1478,242,481,2075,4058,622,3376,812,234,641,4005,1122,3135,2640, + 2302,40,1832,2247,2034,2637,1287,1691,496,1597,2394,2584,1843,336, + 1472,2407,433,2096,1761,2810,566,442,41,1238,1086,603,840,3168, + 1499,1084,3438,2408,1589,2391,288,26,512,1456,171,1677,2657,2270, + 2587,2961,1970,1817,676,1410,3723,2803,3185,184,663,499,3784,1631, + 1925,3912,1398,1349,1441,2224,2411,1907,3192,2786,382,37,759,2948, + 1862,3802,2423,2051,2295,1332,1832,2405,3638,3661,327,3660,716, + 1842,3987,1368,1848,2366,2508,3754,1766,3572,2893,307,1297,3966, + 758,2598,3406,2922,1038,2934,2091,2451,1580,1958,2055,1507,1078, + 3273,17,854,2916,3971,2889,3831,2621,1541,893,736,3992,787,2125, + 
2364,2460,257,1574,3912,1216,3248,3401,2124,2762,149,2245,166,466, + 4018,1399,190,2879,153,2320,18,712,2159,2318,2091,3443,1510,449, + 1956,2201,3137,3399,1321,2271,3667,2703,629,2365,2431,1113,3922, + 2554,184,2099,3228,4012,1921,3452,3901,572,3309,3171,817,3039, + 1696,1256,3715,2077,3019,1497,1101,717,51,981,1978,1813,3881,76, + 3846,3694,1682,124,1660,3997,479,1141,886,3514,1301,3604,1888, + 1836,1990,2058,692,1194,20,3285,2046,2107,3508,3525,3801,2549, + 1145,2253,305,3301,1065,3133,2913,3285,1241,1197,3729,2501,1673, + 541,2753,949,2361,1165,4081,2725,3305,3069,3617,3733,409,2157, + 1361,3973,1865,2525,1409,3445,3577,77,3761,2149,1449,3005,225,85, + 3673,3117,3089,1349,2057,413,65,1845,697,3085,3441,1573,3689,2941, + 929,533,2841,4077,721,2821,2249,2397,2817,245,1913,1997,3121,997, + 1833,2877,1633,981,2009,941,2449,197,2441,285,1473,2741,3129,909, + 2801,421,4073,2813,2337,1429,1177,1901,81,1669,2633,2269,129,1141, + 249,3917,2481,3941,2217,2749,3041,1877,345,2861,1809,3141,2825, + 157,2881,3637,1465,2829,2161,3365,361,2685,3745,2325,3609,3821, + 3537,517,3017,2141,1537 }; + + integer i__1; + + integer i__, i1, i2, i3, i4, it1, it2, it3, it4; + + + + + + + + + + + + + + + + --iseed; + --x; + + + i1 = iseed[1]; + i2 = iseed[2]; + i3 = iseed[3]; + i4 = iseed[4]; + + i__1 = min(*n,128); + for (i__ = 1; i__ <= i__1; ++i__) { + +L20: + + + it4 = i4 * mm[i__ + 383]; + it3 = it4 / 4096; + it4 -= it3 << 12; + it3 = it3 + i3 * mm[i__ + 383] + i4 * mm[i__ + 255]; + it2 = it3 / 4096; + it3 -= it2 << 12; + it2 = it2 + i2 * mm[i__ + 383] + i3 * mm[i__ + 255] + i4 * mm[i__ + + 127]; + it1 = it2 / 4096; + it2 -= it1 << 12; + it1 = it1 + i1 * mm[i__ + 383] + i2 * mm[i__ + 255] + i3 * mm[i__ + + 127] + i4 * mm[i__ - 1]; + it1 %= 4096; + + + x[i__] = ((doublereal) it1 + ((doublereal) it2 + ((doublereal) it3 + ( + doublereal) it4 * 2.44140625e-4) * 2.44140625e-4) * + 2.44140625e-4) * 2.44140625e-4; + + if (x[i__] == 1.) 
{ + i1 += 2; + i2 += 2; + i3 += 2; + i4 += 2; + goto L20; + } + + } + + + iseed[1] = it1; + iseed[2] = it2; + iseed[3] = it3; + iseed[4] = it4; +return TCL_OK; + + +} /* dlaruv_ */ +static /* Subroutine */ int dlasq2_ (Tcl_Interp *interp, integer *n, doublereal *z__, integer *info) +{ + integer i__1, i__2, i__3; + doublereal d__1, d__2; + + double sqrt(doublereal); + + doublereal d__, e, g; + integer k; + doublereal s, t; + integer i0, i4, n0; + doublereal dn; + integer pp; + doublereal dn1, dn2, dee, eps, tau, tol; + integer ipn4; + doublereal tol2; + logical ieee; + integer nbig; + doublereal dmin__, emin, emax; + integer kmin, ndiv, iter; + doublereal qmin, temp, qmax, zmax; + integer splt; + doublereal dmin1, dmin2; + integer nfail; + doublereal desig, trace, sigma; + integer iinfo, ttype; + doublereal deemin; + integer iwhila, iwhilb; + doublereal oldemn, safmin; + + + + + + + + + + + + + + + + + + + --z__; + + *info = 0; + eps = dlamch_("Precision"); + safmin = dlamch_("Safe minimum"); + tol = eps * 100.; + d__1 = tol; + tol2 = d__1 * d__1; + + if (*n < 0) { + *info = -1; + vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__1); +return TCL_ERROR; + +return TCL_OK; + } else if (*n == 0) { +return TCL_OK; + } else if (*n == 1) { + + + if (z__[1] < 0.) { + *info = -201; + vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__2); +return TCL_ERROR; + + } +return TCL_OK; + } else if (*n == 2) { + + + if (z__[2] < 0. || z__[3] < 0.) { + *info = -2; + vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__2); +return TCL_ERROR; + +return TCL_OK; + } else if (z__[3] > z__[1]) { + d__ = z__[3]; + z__[3] = z__[1]; + z__[1] = d__; + } + z__[5] = z__[1] + z__[2] + z__[3]; + if (z__[2] > z__[3] * tol2) { + t = (z__[1] - z__[3] + z__[2]) * .5; + s = z__[3] * (z__[2] / t); + if (s <= t) { + s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.) 
+ 1.))); + } else { + s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s))); + } + t = z__[1] + (s + z__[2]); + z__[3] *= z__[1] / t; + z__[1] = t; + } + z__[2] = z__[3]; + z__[6] = z__[2] + z__[1]; +return TCL_OK; + } + + + z__[*n * 2] = 0.; + emin = z__[2]; + qmax = 0.; + zmax = 0.; + d__ = 0.; + e = 0.; + + i__1 = *n - 1 << 1; + for (k = 1; k <= i__1; k += 2) { + if (z__[k] < 0.) { + *info = -(k + 200); + vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__2); +return TCL_ERROR; + +return TCL_OK; + } else if (z__[k + 1] < 0.) { + *info = -(k + 201); + vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__2); +return TCL_ERROR; + +return TCL_OK; + } + d__ += z__[k]; + e += z__[k + 1]; + d__1 = qmax, d__2 = z__[k]; + qmax = max(d__1,d__2); + d__1 = emin, d__2 = z__[k + 1]; + emin = min(d__1,d__2); + d__1 = max(qmax,zmax), d__2 = z__[k + 1]; + zmax = max(d__1,d__2); + } + if (z__[(*n << 1) - 1] < 0.) { + *info = -((*n << 1) + 199); + vectcl_xerbla(interp, "DLASQ2", &dlasq2_c__2); +return TCL_ERROR; + +return TCL_OK; + } + d__ += z__[(*n << 1) - 1]; + d__1 = qmax, d__2 = z__[(*n << 1) - 1]; + qmax = max(d__1,d__2); + zmax = max(qmax,zmax); + + + if (e == 0.) { + i__1 = *n; + for (k = 2; k <= i__1; ++k) { + z__[k] = z__[(k << 1) - 1]; + } + if (dlasrt_(interp, "D", n, &z__[1], &iinfo)!=TCL_OK) { return TCL_ERROR; } + + z__[(*n << 1) - 1] = d__; +return TCL_OK; + } + + trace = d__ + e; + + + if (trace == 0.) 
{ + z__[(*n << 1) - 1] = 0.; +return TCL_OK; + } + + + ieee = ilaenv_(&dlasq2_c__10, "DLASQ2", "N", &dlasq2_c__1, &dlasq2_c__2, &dlasq2_c__3, &dlasq2_c__4) == 1 && ilaenv_(&dlasq2_c__11, "DLASQ2", "N", &dlasq2_c__1, &dlasq2_c__2, + &dlasq2_c__3, &dlasq2_c__4) == 1; + + + for (k = *n << 1; k >= 2; k += -2) { + z__[k * 2] = 0.; + z__[(k << 1) - 1] = z__[k]; + z__[(k << 1) - 2] = 0.; + z__[(k << 1) - 3] = z__[k - 1]; + } + + i0 = 1; + n0 = *n; + + + if (z__[(i0 << 2) - 3] * 1.5 < z__[(n0 << 2) - 3]) { + ipn4 = i0 + n0 << 2; + i__1 = i0 + n0 - 1 << 1; + for (i4 = i0 << 2; i4 <= i__1; i4 += 4) { + temp = z__[i4 - 3]; + z__[i4 - 3] = z__[ipn4 - i4 - 3]; + z__[ipn4 - i4 - 3] = temp; + temp = z__[i4 - 1]; + z__[i4 - 1] = z__[ipn4 - i4 - 5]; + z__[ipn4 - i4 - 5] = temp; + } + } + + + pp = 0; + + for (k = 1; k <= 2; ++k) { + + d__ = z__[(n0 << 2) + pp - 3]; + i__1 = (i0 << 2) + pp; + for (i4 = (n0 - 1 << 2) + pp; i4 >= i__1; i4 += -4) { + if (z__[i4 - 1] <= tol2 * d__) { + z__[i4 - 1] = -0.; + d__ = z__[i4 - 3]; + } else { + d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1])); + } + } + + + emin = z__[(i0 << 2) + pp + 1]; + d__ = z__[(i0 << 2) + pp - 3]; + i__1 = (n0 - 1 << 2) + pp; + for (i4 = (i0 << 2) + pp; i4 <= i__1; i4 += 4) { + z__[i4 - (pp << 1) - 2] = d__ + z__[i4 - 1]; + if (z__[i4 - 1] <= tol2 * d__) { + z__[i4 - 1] = -0.; + z__[i4 - (pp << 1) - 2] = d__; + z__[i4 - (pp << 1)] = 0.; + d__ = z__[i4 + 1]; + } else if (safmin * z__[i4 + 1] < z__[i4 - (pp << 1) - 2] && + safmin * z__[i4 - (pp << 1) - 2] < z__[i4 + 1]) { + temp = z__[i4 + 1] / z__[i4 - (pp << 1) - 2]; + z__[i4 - (pp << 1)] = z__[i4 - 1] * temp; + d__ *= temp; + } else { + z__[i4 - (pp << 1)] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4 - ( + pp << 1) - 2]); + d__ = z__[i4 + 1] * (d__ / z__[i4 - (pp << 1) - 2]); + } + d__1 = emin, d__2 = z__[i4 - (pp << 1)]; + emin = min(d__1,d__2); + } + z__[(n0 << 2) - pp - 2] = d__; + + + qmax = z__[(i0 << 2) - pp - 2]; + i__1 = (n0 << 2) - pp - 2; + for (i4 = (i0 << 2) - pp + 
2; i4 <= i__1; i4 += 4) { + d__1 = qmax, d__2 = z__[i4]; + qmax = max(d__1,d__2); + } + + + pp = 1 - pp; + } + + + ttype = 0; + dmin1 = 0.; + dmin2 = 0.; + dn = 0.; + dn1 = 0.; + dn2 = 0.; + g = 0.; + tau = 0.; + + iter = 2; + nfail = 0; + ndiv = n0 - i0 << 1; + + i__1 = *n + 1; + for (iwhila = 1; iwhila <= i__1; ++iwhila) { + if (n0 < 1) { + goto L170; + } + + + + desig = 0.; + if (n0 == *n) { + sigma = 0.; + } else { + sigma = -z__[(n0 << 2) - 1]; + } + if (sigma < 0.) { + *info = 1; +return TCL_OK; + } + + + emax = 0.; + if (n0 > i0) { + emin = (d__1 = z__[(n0 << 2) - 5], abs(d__1)); + } else { + emin = 0.; + } + qmin = z__[(n0 << 2) - 3]; + qmax = qmin; + for (i4 = n0 << 2; i4 >= 8; i4 += -4) { + if (z__[i4 - 5] <= 0.) { + goto L100; + } + if (qmin >= emax * 4.) { + d__1 = qmin, d__2 = z__[i4 - 3]; + qmin = min(d__1,d__2); + d__1 = emax, d__2 = z__[i4 - 5]; + emax = max(d__1,d__2); + } + d__1 = qmax, d__2 = z__[i4 - 7] + z__[i4 - 5]; + qmax = max(d__1,d__2); + d__1 = emin, d__2 = z__[i4 - 5]; + emin = min(d__1,d__2); + } + i4 = 4; + +L100: + i0 = i4 / 4; + pp = 0; + + if (n0 - i0 > 1) { + dee = z__[(i0 << 2) - 3]; + deemin = dee; + kmin = i0; + i__2 = (n0 << 2) - 3; + for (i4 = (i0 << 2) + 1; i4 <= i__2; i4 += 4) { + dee = z__[i4] * (dee / (dee + z__[i4 - 2])); + if (dee <= deemin) { + deemin = dee; + kmin = (i4 + 3) / 4; + } + } + if (kmin - i0 << 1 < n0 - kmin && deemin <= z__[(n0 << 2) - 3] * + .5) { + ipn4 = i0 + n0 << 2; + pp = 2; + i__2 = i0 + n0 - 1 << 1; + for (i4 = i0 << 2; i4 <= i__2; i4 += 4) { + temp = z__[i4 - 3]; + z__[i4 - 3] = z__[ipn4 - i4 - 3]; + z__[ipn4 - i4 - 3] = temp; + temp = z__[i4 - 2]; + z__[i4 - 2] = z__[ipn4 - i4 - 2]; + z__[ipn4 - i4 - 2] = temp; + temp = z__[i4 - 1]; + z__[i4 - 1] = z__[ipn4 - i4 - 5]; + z__[ipn4 - i4 - 5] = temp; + temp = z__[i4]; + z__[i4] = z__[ipn4 - i4 - 4]; + z__[ipn4 - i4 - 4] = temp; + } + } + } + + + d__1 = 0., d__2 = qmin - sqrt(qmin) * 2. 
* sqrt(emax); + dmin__ = -max(d__1,d__2); + + + nbig = (n0 - i0 + 1) * 30; + i__2 = nbig; + for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) { + if (i0 > n0) { + goto L150; + } + + + if (dlasq3_(interp, &i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, & nfail, &iter, &ndiv, &ieee, &ttype, &dmin1, &dmin2, &dn, & + dn1, &dn2, &g, &tau)!=TCL_OK) { return TCL_ERROR; } + + + + pp = 1 - pp; + + + if (pp == 0 && n0 - i0 >= 3) { + if (z__[n0 * 4] <= tol2 * qmax || z__[(n0 << 2) - 1] <= tol2 * + sigma) { + splt = i0 - 1; + qmax = z__[(i0 << 2) - 3]; + emin = z__[(i0 << 2) - 1]; + oldemn = z__[i0 * 4]; + i__3 = n0 - 3 << 2; + for (i4 = i0 << 2; i4 <= i__3; i4 += 4) { + if (z__[i4] <= tol2 * z__[i4 - 3] || z__[i4 - 1] <= + tol2 * sigma) { + z__[i4 - 1] = -sigma; + splt = i4 / 4; + qmax = 0.; + emin = z__[i4 + 3]; + oldemn = z__[i4 + 4]; + } else { + d__1 = qmax, d__2 = z__[i4 + 1]; + qmax = max(d__1,d__2); + d__1 = emin, d__2 = z__[i4 - 1]; + emin = min(d__1,d__2); + d__1 = oldemn, d__2 = z__[i4]; + oldemn = min(d__1,d__2); + } + } + z__[(n0 << 2) - 1] = emin; + z__[n0 * 4] = oldemn; + i0 = splt + 1; + } + } + + } + + *info = 2; +return TCL_OK; + + +L150: + + ; + } + + *info = 3; +return TCL_OK; + + +L170: + + + i__1 = *n; + for (k = 2; k <= i__1; ++k) { + z__[k] = z__[(k << 2) - 3]; + } + + + if (dlasrt_(interp, "D", n, &z__[1], &iinfo)!=TCL_OK) { return TCL_ERROR; } + + + e = 0.; + for (k = *n; k >= 1; --k) { + e += z__[k]; + } + + + z__[(*n << 1) + 1] = trace; + z__[(*n << 1) + 2] = e; + z__[(*n << 1) + 3] = (doublereal) iter; + i__1 = *n; + z__[(*n << 1) + 4] = (doublereal) ndiv / (doublereal) (i__1 * i__1); + z__[(*n << 1) + 5] = nfail * 100. 
/ (doublereal) iter; +return TCL_OK; + + +} /* dlasq2_ */ +static /* Subroutine */ int dlarra_ (Tcl_Interp *interp, integer *n, doublereal *d__, doublereal *e, doublereal *e2, doublereal *spltol, doublereal *tnrm, integer *nsplit, integer *isplit, integer *info) +{ + integer i__1; + doublereal d__1, d__2; + + double sqrt(doublereal); + + integer i__; + doublereal tmp1, eabs; + + + + + + + + + + + + + + + + + + + + + + --isplit; + --e2; + --e; + --d__; + + *info = 0; + *nsplit = 1; + if (*spltol < 0.) { + tmp1 = abs(*spltol) * *tnrm; + i__1 = *n - 1; + for (i__ = 1; i__ <= i__1; ++i__) { + eabs = (d__1 = e[i__], abs(d__1)); + if (eabs <= tmp1) { + e[i__] = 0.; + e2[i__] = 0.; + isplit[*nsplit] = i__; + ++(*nsplit); + } + } + } else { + i__1 = *n - 1; + for (i__ = 1; i__ <= i__1; ++i__) { + eabs = (d__1 = e[i__], abs(d__1)); + if (eabs <= *spltol * sqrt((d__1 = d__[i__], abs(d__1))) * sqrt(( + d__2 = d__[i__ + 1], abs(d__2)))) { + e[i__] = 0.; + e2[i__] = 0.; + isplit[*nsplit] = i__; + ++(*nsplit); + } + } + } + isplit[*nsplit] = *n; +return TCL_OK; + + +} /* dlarra_ */ +static /* Subroutine */ int dlarrb_ (Tcl_Interp *interp, integer *n, doublereal *d__, doublereal *lld, integer *ifirst, integer *ilast, doublereal *rtol1, doublereal *rtol2, integer *offset, doublereal *w, doublereal *wgap, doublereal *werr, doublereal *work, integer *iwork, doublereal *pivmin, doublereal * spdiam, integer *twist, integer *info) +{ + integer i__1; + doublereal d__1, d__2; + + double log(doublereal); + + integer i__, k, r__, i1, ii, ip; + doublereal gap, mid, tmp, back, lgap, rgap, left; + integer iter, nint, prev, next; + doublereal cvrgd, right, width; + integer negcnt; + doublereal mnwdth; + integer olnint, maxitr; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --iwork; + --work; + --werr; + --wgap; + --w; + --lld; + --d__; + + *info = 0; + + maxitr = (integer) ((log(*spdiam + *pivmin) - log(*pivmin)) / log(2.)) + + 2; + mnwdth = *pivmin * 2.; + + r__ = *twist; + if (r__ 
< 1 || r__ > *n) { + r__ = *n; + } + + + i1 = *ifirst; + nint = 0; + prev = 0; + rgap = wgap[i1 - *offset]; + i__1 = *ilast; + for (i__ = i1; i__ <= i__1; ++i__) { + k = i__ << 1; + ii = i__ - *offset; + left = w[ii] - werr[ii]; + right = w[ii] + werr[ii]; + lgap = rgap; + rgap = wgap[ii]; + gap = min(lgap,rgap); + + + back = werr[ii]; +L20: + negcnt = dlaneg_(n, &d__[1], &lld[1], &left, pivmin, &r__); + if (negcnt > i__ - 1) { + left -= back; + back *= 2.; + goto L20; + } + + + back = werr[ii]; +L50: + negcnt = dlaneg_(n, &d__[1], &lld[1], &right, pivmin, &r__); + if (negcnt < i__) { + right += back; + back *= 2.; + goto L50; + } + width = (d__1 = left - right, abs(d__1)) * .5; + d__1 = abs(left), d__2 = abs(right); + tmp = max(d__1,d__2); + d__1 = *rtol1 * gap, d__2 = *rtol2 * tmp; + cvrgd = max(d__1,d__2); + if (width <= cvrgd || width <= mnwdth) { + iwork[k - 1] = -1; + if (i__ == i1 && i__ < *ilast) { + i1 = i__ + 1; + } + if (prev >= i1 && i__ <= *ilast) { + iwork[(prev << 1) - 1] = i__ + 1; + } + } else { + prev = i__; + ++nint; + iwork[k - 1] = i__ + 1; + iwork[k] = negcnt; + } + work[k - 1] = left; + work[k] = right; + } + + + iter = 0; +L80: + prev = i1 - 1; + i__ = i1; + olnint = nint; + i__1 = olnint; + for (ip = 1; ip <= i__1; ++ip) { + k = i__ << 1; + ii = i__ - *offset; + rgap = wgap[ii]; + lgap = rgap; + if (ii > 1) { + lgap = wgap[ii - 1]; + } + gap = min(lgap,rgap); + next = iwork[k - 1]; + left = work[k - 1]; + right = work[k]; + mid = (left + right) * .5; + width = right - mid; + d__1 = abs(left), d__2 = abs(right); + tmp = max(d__1,d__2); + d__1 = *rtol1 * gap, d__2 = *rtol2 * tmp; + cvrgd = max(d__1,d__2); + if (width <= cvrgd || width <= mnwdth || iter == maxitr) { + --nint; + iwork[k - 1] = 0; + if (i1 == i__) { + i1 = next; + } else { + if (prev >= i1) { + iwork[(prev << 1) - 1] = next; + } + } + i__ = next; + goto L100; + } + prev = i__; + + + negcnt = dlaneg_(n, &d__[1], &lld[1], &mid, pivmin, &r__); + if (negcnt <= i__ - 1) { + work[k - 
1] = mid; + } else { + work[k] = mid; + } + i__ = next; +L100: + ; + } + ++iter; + if (nint > 0 && iter <= maxitr) { + goto L80; + } + + + i__1 = *ilast; + for (i__ = *ifirst; i__ <= i__1; ++i__) { + k = i__ << 1; + ii = i__ - *offset; + if (iwork[k - 1] == 0) { + w[ii] = (work[k - 1] + work[k]) * .5; + werr[ii] = work[k] - w[ii]; + } + } + + i__1 = *ilast; + for (i__ = *ifirst + 1; i__ <= i__1; ++i__) { + k = i__ << 1; + ii = i__ - *offset; + d__1 = 0., d__2 = w[ii] - werr[ii] - w[ii - 1] - werr[ii - 1]; + wgap[ii - 1] = max(d__1,d__2); + } +return TCL_OK; + + +} /* dlarrb_ */ +static /* Subroutine */ int dlarrd_ (Tcl_Interp *interp, char *range, char *order, integer *n, doublereal *vl, doublereal *vu, integer *il, integer *iu, doublereal *gers, doublereal *reltol, doublereal *d__, doublereal *e, doublereal *e2, doublereal *pivmin, integer *nsplit, integer *isplit, integer *m, doublereal *w, doublereal *werr, doublereal *wl, doublereal *wu, integer *iblock, integer *indexw, doublereal *work, integer *iwork, integer *info) +{ + integer i__1, i__2, i__3; + doublereal d__1, d__2; + + double log(doublereal); + + integer i__, j, ib, ie, je, nb; + doublereal gl; + integer im, in; + doublereal gu; + integer iw, jee; + doublereal eps; + integer nwl; + doublereal wlu, wul; + integer nwu; + doublereal tmp1, tmp2; + integer iend, jblk, ioff, iout, itmp1, itmp2, jdisc; + integer iinfo; + doublereal atoli; + integer iwoff, itmax; + doublereal wkill, rtoli, uflow, tnorm; + integer ibegin; + integer irange, idiscl, idumma[1]; + integer idiscu; + logical ncnvrg, toofew; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --iwork; + --work; + --indexw; + --iblock; + --werr; + --w; + --isplit; + --e2; + --e; + --d__; + --gers; + + *info = 0; + + + if (lsame_(range, "A")) { + irange = 1; + } else if (lsame_(range, "V")) { + irange = 2; + } else if (lsame_(range, "I")) { + irange = 3; + } else { + irange = 0; + } + + + if (irange <= 0) { + *info = -1; + } else 
if (! (lsame_(order, "B") || lsame_(order, + "E"))) { + *info = -2; + } else if (*n < 0) { + *info = -3; + } else if (irange == 2) { if (*vl >= *vu) { *info = -5; } @@ -57615,766 +60623,255 @@ return TCL_OK; ix = kx; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = ix; - z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i = - x[i__5].r * temp.i + x[i__5].i * temp.r; - z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - ix += *incx; - } - } - jy += *incy; - } - } - -return TCL_OK; - - -} /* zgeru_ */ -static /* Subroutine */ int dtrsv_ (Tcl_Interp *interp, char *uplo, char *trans, char *diag, integer *n, doublereal *a, integer *lda, doublereal *x, integer *incx) -{ - integer a_dim1, a_offset, i__1, i__2; - - integer i__, j, ix, jx, kx, info; - doublereal temp; - logical nounit; - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - a_dim1 = *lda; - a_offset = 1 + a_dim1; - a -= a_offset; - --x; - - info = 0; - if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { - info = 1; - } else if (! lsame_(trans, "N") && ! lsame_(trans, - "T") && ! lsame_(trans, "C")) { - info = 2; - } else if (! lsame_(diag, "U") && ! lsame_(diag, - "N")) { - info = 3; - } else if (*n < 0) { - info = 4; - } else if (*lda < max(1,*n)) { - info = 6; - } else if (*incx == 0) { - info = 8; - } - if (info != 0) { - vectcl_xerbla(interp, "DTRSV ", &info); -return TCL_ERROR; - -return TCL_OK; - } - - - if (*n == 0) { -return TCL_OK; - } - - nounit = lsame_(diag, "N"); - - - if (*incx <= 0) { - kx = 1 - (*n - 1) * *incx; - } else if (*incx != 1) { - kx = 1; - } - - - if (lsame_(trans, "N")) { - - - if (lsame_(uplo, "U")) { - if (*incx == 1) { - for (j = *n; j >= 1; --j) { - if (x[j] != 0.) 
{ - if (nounit) { - x[j] /= a[j + j * a_dim1]; - } - temp = x[j]; - for (i__ = j - 1; i__ >= 1; --i__) { - x[i__] -= temp * a[i__ + j * a_dim1]; - } - } - } - } else { - jx = kx + (*n - 1) * *incx; - for (j = *n; j >= 1; --j) { - if (x[jx] != 0.) { - if (nounit) { - x[jx] /= a[j + j * a_dim1]; - } - temp = x[jx]; - ix = jx; - for (i__ = j - 1; i__ >= 1; --i__) { - ix -= *incx; - x[ix] -= temp * a[i__ + j * a_dim1]; - } - } - jx -= *incx; - } - } - } else { - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[j] != 0.) { - if (nounit) { - x[j] /= a[j + j * a_dim1]; - } - temp = x[j]; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - x[i__] -= temp * a[i__ + j * a_dim1]; - } - } - } - } else { - jx = kx; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - if (x[jx] != 0.) { - if (nounit) { - x[jx] /= a[j + j * a_dim1]; - } - temp = x[jx]; - ix = jx; - i__2 = *n; - for (i__ = j + 1; i__ <= i__2; ++i__) { - ix += *incx; - x[ix] -= temp * a[i__ + j * a_dim1]; - } - } - jx += *incx; - } - } - } - } else { - - - if (lsame_(uplo, "U")) { - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp = x[j]; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - temp -= a[i__ + j * a_dim1] * x[i__]; - } - if (nounit) { - temp /= a[j + j * a_dim1]; - } - x[j] = temp; - } - } else { - jx = kx; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - temp = x[jx]; - ix = kx; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - temp -= a[i__ + j * a_dim1] * x[ix]; - ix += *incx; - } - if (nounit) { - temp /= a[j + j * a_dim1]; - } - x[jx] = temp; - jx += *incx; - } - } - } else { - if (*incx == 1) { - for (j = *n; j >= 1; --j) { - temp = x[j]; - i__1 = j + 1; - for (i__ = *n; i__ >= i__1; --i__) { - temp -= a[i__ + j * a_dim1] * x[i__]; - } - if (nounit) { - temp /= a[j + j * a_dim1]; - } - x[j] = temp; - } - } else { - kx += (*n - 1) * *incx; - jx = kx; - for (j = *n; j >= 1; --j) { - temp = x[jx]; - ix = kx; - i__1 = j + 1; - for (i__ = *n; i__ >= 
i__1; --i__) { - temp -= a[i__ + j * a_dim1] * x[ix]; - ix -= *incx; - } - if (nounit) { - temp /= a[j + j * a_dim1]; - } - x[jx] = temp; - jx -= *incx; - } - } - } - } - -return TCL_OK; - - -} /* dtrsv_ */ -static integer izmax1_ (integer *n, doublecomplex *cx, integer *incx) -{ - integer ret_val, i__1; - - double z_abs(doublecomplex *); - - integer i__, ix; - doublereal smax; - - - - - - - - - - - - - - - - --cx; - - ret_val = 0; - if (*n < 1) { - return ret_val; - } - ret_val = 1; - if (*n == 1) { - return ret_val; - } - if (*incx == 1) { - goto L30; - } - - - ix = 1; - smax = z_abs(&cx[1]); - ix += *incx; - i__1 = *n; - for (i__ = 2; i__ <= i__1; ++i__) { - if (z_abs(&cx[ix]) <= smax) { - goto L10; - } - ret_val = i__; - smax = z_abs(&cx[ix]); -L10: - ix += *incx; - } - return ret_val; - - -L30: - smax = z_abs(&cx[1]); - i__1 = *n; - for (i__ = 2; i__ <= i__1; ++i__) { - if (z_abs(&cx[i__]) <= smax) { - goto L40; - } - ret_val = i__; - smax = z_abs(&cx[i__]); -L40: - ; - } - return ret_val; - - -} /* izmax1_ */ -static doublereal dzsum1_ (integer *n, doublecomplex *cx, integer *incx) -{ - integer i__1, i__2; - doublereal ret_val; - - double z_abs(doublecomplex *); - - integer i__, nincx; - doublereal stemp; - - - - - - - - - - - - - - - --cx; - - ret_val = 0.; - stemp = 0.; - if (*n <= 0) { - return ret_val; - } - if (*incx == 1) { - goto L20; - } - - - nincx = *n * *incx; - i__1 = nincx; - i__2 = *incx; - for (i__ = 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { - - - stemp += z_abs(&cx[i__]); - } - ret_val = stemp; - return ret_val; - - -L20: - i__2 = *n; - for (i__ = 1; i__ <= i__2; ++i__) { - - - stemp += z_abs(&cx[i__]); - } - ret_val = stemp; - return ret_val; - - -} /* dzsum1_ */ -static /* Subroutine */ int dlasd2_ (Tcl_Interp *interp, integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *z__, doublereal *alpha, doublereal * beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *dsigma, doublereal *u2, integer *ldu2, doublereal *vt2, integer *ldvt2, integer *idxp, integer *idx, integer *idxc, integer * idxq, integer *coltyp, integer *info) -{ - integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset, - vt2_dim1, vt2_offset, i__1; - doublereal d__1, d__2; - - doublereal c__; - integer i__, j, m, n; - doublereal s; - integer k2; - doublereal z1; - integer ct, jp; - doublereal eps, tau, tol; - integer psm[4], nlp1, nlp2, idxi, idxj; - integer ctot[4], idxjp; - integer jprev; - doublereal hlftol; - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - --d__; - --z__; - u_dim1 = *ldu; - u_offset = 1 + u_dim1; - u -= u_offset; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1; - vt -= vt_offset; - --dsigma; - u2_dim1 = *ldu2; - u2_offset = 1 + u2_dim1; - u2 -= u2_offset; - vt2_dim1 = *ldvt2; - vt2_offset = 1 + vt2_dim1; - vt2 -= vt2_offset; - --idxp; - --idx; - --idxc; - --idxq; - --coltyp; - - *info = 0; - - if (*nl < 1) { - *info = -1; - } else if (*nr < 1) { - *info = -2; - } else if (*sqre != 1 && *sqre != 0) { - *info = -3; - } - - n = *nl + *nr + 1; - m = n + *sqre; - - if (*ldu < n) { - *info = -10; - } else if (*ldvt < m) { - *info = -12; - } else if (*ldu2 < n) { - *info = -15; - } else if (*ldvt2 < m) { - *info = -17; - } - if (*info != 0) { - i__1 = -(*info); - vectcl_xerbla(interp, "DLASD2", &i__1); -return TCL_ERROR; - -return TCL_OK; - } - - nlp1 = *nl + 1; - nlp2 = *nl + 2; - - - z1 = 
*alpha * vt[nlp1 + nlp1 * vt_dim1]; - z__[1] = z1; - for (i__ = *nl; i__ >= 1; --i__) { - z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1]; - d__[i__ + 1] = d__[i__]; - idxq[i__ + 1] = idxq[i__] + 1; - } - - - i__1 = m; - for (i__ = nlp2; i__ <= i__1; ++i__) { - z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1]; + i__3 = i__ + j * a_dim1; + i__4 = i__ + j * a_dim1; + i__5 = ix; + z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i = + x[i__5].r * temp.i + x[i__5].i * temp.r; + z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i; + a[i__3].r = z__1.r, a[i__3].i = z__1.i; + ix += *incx; + } + } + jy += *incy; + } } +return TCL_OK; - i__1 = nlp1; - for (i__ = 2; i__ <= i__1; ++i__) { - coltyp[i__] = 1; - } - i__1 = n; - for (i__ = nlp2; i__ <= i__1; ++i__) { - coltyp[i__] = 2; - } +} /* zgeru_ */ +static /* Subroutine */ int dtrsv_ (Tcl_Interp *interp, char *uplo, char *trans, char *diag, integer *n, doublereal *a, integer *lda, doublereal *x, integer *incx) +{ + integer a_dim1, a_offset, i__1, i__2; - i__1 = n; - for (i__ = nlp2; i__ <= i__1; ++i__) { - idxq[i__] += nlp1; - } + integer i__, j, ix, jx, kx, info; + doublereal temp; + logical nounit; - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - dsigma[i__] = d__[idxq[i__]]; - u2[i__ + u2_dim1] = z__[idxq[i__]]; - idxc[i__] = coltyp[idxq[i__]]; - } - if (dlamrg_(interp, nl, nr, &dsigma[2], &dlasd2_c__1, &dlasd2_c__1, &idx[2])!=TCL_OK) { return TCL_ERROR; } - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - idxi = idx[i__] + 1; - d__[i__] = dsigma[idxi]; - z__[i__] = u2[idxi + u2_dim1]; - coltyp[i__] = idxc[idxi]; - } - eps = dlamch_("Epsilon"); - d__1 = abs(*alpha), d__2 = abs(*beta); - tol = max(d__1,d__2); - d__2 = (d__1 = d__[n], abs(d__1)); - tol = eps * 8. 
* max(d__2,tol); - *k = 1; - k2 = n + 1; - i__1 = n; - for (j = 2; j <= i__1; ++j) { - if ((d__1 = z__[j], abs(d__1)) <= tol) { - --k2; - idxp[k2] = j; - coltyp[j] = 4; - if (j == n) { - goto L120; - } - } else { - jprev = j; - goto L90; - } - } -L90: - j = jprev; -L100: - ++j; - if (j > n) { - goto L110; - } - if ((d__1 = z__[j], abs(d__1)) <= tol) { - --k2; - idxp[k2] = j; - coltyp[j] = 4; - } else { - if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) { - s = z__[jprev]; - c__ = z__[j]; - tau = dlapy2_(&c__, &s); - c__ /= tau; - s = -s / tau; - z__[j] = tau; - z__[jprev] = 0.; - idxjp = idxq[idx[jprev] + 1]; - idxj = idxq[idx[j] + 1]; - if (idxjp <= nlp1) { - --idxjp; - } - if (idxj <= nlp1) { - --idxj; - } - if (drot_(interp, &n, &u[idxjp * u_dim1 + 1], &dlasd2_c__1, &u[idxj * u_dim1 + 1], & dlasd2_c__1, &c__, &s)!=TCL_OK) { return TCL_ERROR; } - if (drot_(interp, &m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, & c__, &s)!=TCL_OK) { return TCL_ERROR; } - if (coltyp[j] != coltyp[jprev]) { - coltyp[j] = 3; - } - coltyp[jprev] = 4; - --k2; - idxp[k2] = jprev; - jprev = j; - } else { - ++(*k); - u2[*k + u2_dim1] = z__[jprev]; - dsigma[*k] = d__[jprev]; - idxp[*k] = jprev; - jprev = j; - } - } - goto L100; -L110: - ++(*k); - u2[*k + u2_dim1] = z__[jprev]; - dsigma[*k] = d__[jprev]; - idxp[*k] = jprev; -L120: + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --x; - for (j = 1; j <= 4; ++j) { - ctot[j - 1] = 0; - } - i__1 = n; - for (j = 2; j <= i__1; ++j) { - ct = coltyp[j]; - ++ctot[ct - 1]; + info = 0; + if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { + info = 1; + } else if (! lsame_(trans, "N") && ! lsame_(trans, + "T") && ! lsame_(trans, "C")) { + info = 2; + } else if (! lsame_(diag, "U") && ! 
lsame_(diag, + "N")) { + info = 3; + } else if (*n < 0) { + info = 4; + } else if (*lda < max(1,*n)) { + info = 6; + } else if (*incx == 0) { + info = 8; } + if (info != 0) { + vectcl_xerbla(interp, "DTRSV ", &info); +return TCL_ERROR; - - psm[0] = 2; - psm[1] = ctot[0] + 2; - psm[2] = psm[1] + ctot[1]; - psm[3] = psm[2] + ctot[2]; - - - i__1 = n; - for (j = 2; j <= i__1; ++j) { - jp = idxp[j]; - ct = coltyp[jp]; - idxc[psm[ct - 1]] = j; - ++psm[ct - 1]; +return TCL_OK; } - i__1 = n; - for (j = 2; j <= i__1; ++j) { - jp = idxp[j]; - dsigma[j] = d__[jp]; - idxj = idxq[idx[idxp[idxc[j]]] + 1]; - if (idxj <= nlp1) { - --idxj; - } - if (dcopy_(interp, &n, &u[idxj * u_dim1 + 1], &dlasd2_c__1, &u2[j * u2_dim1 + 1], &dlasd2_c__1)!=TCL_OK) { return TCL_ERROR; } - - if (dcopy_(interp, &m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2)!=TCL_OK) { return TCL_ERROR; } - + if (*n == 0) { +return TCL_OK; } - - dsigma[1] = 0.; - hlftol = tol / 2.; - if (abs(dsigma[2]) <= hlftol) { - dsigma[2] = hlftol; - } - if (m > n) { - z__[1] = dlapy2_(&z1, &z__[m]); - if (z__[1] <= tol) { - c__ = 1.; - s = 0.; - z__[1] = tol; - } else { - c__ = z1 / z__[1]; - s = z__[m] / z__[1]; - } - } else { - if (abs(z1) <= tol) { - z__[1] = tol; - } else { - z__[1] = z1; - } - } + nounit = lsame_(diag, "N"); - i__1 = *k - 1; - if (dcopy_(interp, &i__1, &u2[u2_dim1 + 2], &dlasd2_c__1, &z__[2], &dlasd2_c__1)!=TCL_OK) { return TCL_ERROR; } + if (*incx <= 0) { + kx = 1 - (*n - 1) * *incx; + } else if (*incx != 1) { + kx = 1; + } + if (lsame_(trans, "N")) { - if (dlaset_(interp, "A", &n, &dlasd2_c__1, &dlasd2_c_b30, &dlasd2_c_b30, &u2[u2_offset], ldu2)!=TCL_OK) { return TCL_ERROR; } - u2[nlp1 + u2_dim1] = 1.; - if (m > n) { - i__1 = nlp1; - for (i__ = 1; i__ <= i__1; ++i__) { - vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1]; - vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1]; - } - i__1 = m; - for (i__ = nlp2; i__ <= i__1; ++i__) { - vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1]; 
- vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1]; + if (lsame_(uplo, "U")) { + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + if (x[j] != 0.) { + if (nounit) { + x[j] /= a[j + j * a_dim1]; + } + temp = x[j]; + for (i__ = j - 1; i__ >= 1; --i__) { + x[i__] -= temp * a[i__ + j * a_dim1]; + } + } + } + } else { + jx = kx + (*n - 1) * *incx; + for (j = *n; j >= 1; --j) { + if (x[jx] != 0.) { + if (nounit) { + x[jx] /= a[j + j * a_dim1]; + } + temp = x[jx]; + ix = jx; + for (i__ = j - 1; i__ >= 1; --i__) { + ix -= *incx; + x[ix] -= temp * a[i__ + j * a_dim1]; + } + } + jx -= *incx; + } + } + } else { + if (*incx == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (x[j] != 0.) { + if (nounit) { + x[j] /= a[j + j * a_dim1]; + } + temp = x[j]; + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + x[i__] -= temp * a[i__ + j * a_dim1]; + } + } + } + } else { + jx = kx; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (x[jx] != 0.) { + if (nounit) { + x[jx] /= a[j + j * a_dim1]; + } + temp = x[jx]; + ix = jx; + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + ix += *incx; + x[ix] -= temp * a[i__ + j * a_dim1]; + } + } + jx += *incx; + } + } } } else { - if (dcopy_(interp, &m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], ldvt2)!=TCL_OK) { return TCL_ERROR; } - - } - if (m > n) { - if (dcopy_(interp, &m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2)!=TCL_OK) { return TCL_ERROR; } - - } - - - if (n > *k) { - i__1 = n - *k; - if (dcopy_(interp, &i__1, &dsigma[*k + 1], &dlasd2_c__1, &d__[*k + 1], &dlasd2_c__1)!=TCL_OK) { return TCL_ERROR; } - - i__1 = n - *k; - if (dlacpy_(interp, "A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1) * u_dim1 + 1], ldu)!=TCL_OK) { return TCL_ERROR; } - - - i__1 = n - *k; - if (dlacpy_(interp, "A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 + vt_dim1], ldvt)!=TCL_OK) { return TCL_ERROR; } - - - } - for (j = 1; j <= 4; ++j) { - coltyp[j] = ctot[j - 1]; + if (lsame_(uplo, "U")) { + if (*incx == 1) { 
+ i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp = x[j]; + i__2 = j - 1; + for (i__ = 1; i__ <= i__2; ++i__) { + temp -= a[i__ + j * a_dim1] * x[i__]; + } + if (nounit) { + temp /= a[j + j * a_dim1]; + } + x[j] = temp; + } + } else { + jx = kx; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp = x[jx]; + ix = kx; + i__2 = j - 1; + for (i__ = 1; i__ <= i__2; ++i__) { + temp -= a[i__ + j * a_dim1] * x[ix]; + ix += *incx; + } + if (nounit) { + temp /= a[j + j * a_dim1]; + } + x[jx] = temp; + jx += *incx; + } + } + } else { + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + temp = x[j]; + i__1 = j + 1; + for (i__ = *n; i__ >= i__1; --i__) { + temp -= a[i__ + j * a_dim1] * x[i__]; + } + if (nounit) { + temp /= a[j + j * a_dim1]; + } + x[j] = temp; + } + } else { + kx += (*n - 1) * *incx; + jx = kx; + for (j = *n; j >= 1; --j) { + temp = x[jx]; + ix = kx; + i__1 = j + 1; + for (i__ = *n; i__ >= i__1; --i__) { + temp -= a[i__ + j * a_dim1] * x[ix]; + ix -= *incx; + } + if (nounit) { + temp /= a[j + j * a_dim1]; + } + x[jx] = temp; + jx -= *incx; + } + } + } } return TCL_OK; -} /* dlasd2_ */ -static /* Subroutine */ int dlasd3_ (Tcl_Interp *interp, integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *q, integer *ldq, doublereal *dsigma, doublereal *u, integer *ldu, doublereal *u2, integer *ldu2, doublereal *vt, integer *ldvt, doublereal *vt2, integer *ldvt2, integer *idxc, integer *ctot, doublereal *z__, integer *info) +} /* dtrsv_ */ +static integer izmax1_ (integer *n, doublecomplex *cx, integer *incx) { - integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, - vt_offset, vt2_dim1, vt2_offset, i__1, i__2; - doublereal d__1, d__2; - - double sqrt(doublereal), d_sign(doublereal *, doublereal *); + integer ret_val, i__1; - integer i__, j, m, n, jc; - doublereal rho; - integer nlp1, nlp2, nrp1; - doublereal temp; - integer ctemp; - integer ktemp; + double z_abs(doublecomplex *); + integer i__, ix; + doublereal smax; @@ 
-58390,15 +60887,62 @@ static /* Subroutine */ int dlasd3_ (Tcl_Interp *interp, integer *nl, integer *n + --cx; + ret_val = 0; + if (*n < 1) { + return ret_val; + } + ret_val = 1; + if (*n == 1) { + return ret_val; + } + if (*incx == 1) { + goto L30; + } + ix = 1; + smax = z_abs(&cx[1]); + ix += *incx; + i__1 = *n; + for (i__ = 2; i__ <= i__1; ++i__) { + if (z_abs(&cx[ix]) <= smax) { + goto L10; + } + ret_val = i__; + smax = z_abs(&cx[ix]); +L10: + ix += *incx; + } + return ret_val; +L30: + smax = z_abs(&cx[1]); + i__1 = *n; + for (i__ = 2; i__ <= i__1; ++i__) { + if (z_abs(&cx[i__]) <= smax) { + goto L40; + } + ret_val = i__; + smax = z_abs(&cx[i__]); +L40: + ; + } + return ret_val; +} /* izmax1_ */ +static doublereal dzsum1_ (integer *n, doublecomplex *cx, integer *incx) +{ + integer i__1, i__2; + doublereal ret_val; + double z_abs(doublecomplex *); + integer i__, nincx; + doublereal stemp; @@ -58410,637 +60954,416 @@ static /* Subroutine */ int dlasd3_ (Tcl_Interp *interp, integer *nl, integer *n - --d__; - q_dim1 = *ldq; - q_offset = 1 + q_dim1; - q -= q_offset; - --dsigma; - u_dim1 = *ldu; - u_offset = 1 + u_dim1; - u -= u_offset; - u2_dim1 = *ldu2; - u2_offset = 1 + u2_dim1; - u2 -= u2_offset; - vt_dim1 = *ldvt; - vt_offset = 1 + vt_dim1; - vt -= vt_offset; - vt2_dim1 = *ldvt2; - vt2_offset = 1 + vt2_dim1; - vt2 -= vt2_offset; - --idxc; - --ctot; - --z__; - *info = 0; - if (*nl < 1) { - *info = -1; - } else if (*nr < 1) { - *info = -2; - } else if (*sqre != 1 && *sqre != 0) { - *info = -3; - } - n = *nl + *nr + 1; - m = n + *sqre; - nlp1 = *nl + 1; - nlp2 = *nl + 2; + --cx; - if (*k < 1 || *k > n) { - *info = -4; - } else if (*ldq < *k) { - *info = -7; - } else if (*ldu < n) { - *info = -10; - } else if (*ldu2 < n) { - *info = -12; - } else if (*ldvt < m) { - *info = -14; - } else if (*ldvt2 < m) { - *info = -16; + ret_val = 0.; + stemp = 0.; + if (*n <= 0) { + return ret_val; } - if (*info != 0) { - i__1 = -(*info); - vectcl_xerbla(interp, "DLASD3", &i__1); 
-return TCL_ERROR; - -return TCL_OK; + if (*incx == 1) { + goto L20; } - if (*k == 1) { - d__[1] = abs(z__[1]); - if (dcopy_(interp, &m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt)!=TCL_OK) { return TCL_ERROR; } + nincx = *n * *incx; + i__1 = nincx; + i__2 = *incx; + for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { - if (z__[1] > 0.) { - if (dcopy_(interp, &n, &u2[u2_dim1 + 1], &dlasd3_c__1, &u[u_dim1 + 1], &dlasd3_c__1)!=TCL_OK) { return TCL_ERROR; } - } else { - i__1 = n; - for (i__ = 1; i__ <= i__1; ++i__) { - u[i__ + u_dim1] = -u2[i__ + u2_dim1]; - } - } -return TCL_OK; + stemp += z_abs(&cx[i__]); } + ret_val = stemp; + return ret_val; - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__]; +L20: + i__2 = *n; + for (i__ = 1; i__ <= i__2; ++i__) { + + + stemp += z_abs(&cx[i__]); } + ret_val = stemp; + return ret_val; - if (dcopy_(interp, k, &z__[1], &dlasd3_c__1, &q[q_offset], &dlasd3_c__1)!=TCL_OK) { return TCL_ERROR; } +} /* dzsum1_ */ +static /* Subroutine */ int dlaexc_ (Tcl_Interp *interp, logical *wantq, integer *n, doublereal *t, integer *ldt, doublereal *q, integer *ldq, integer *j1, integer *n1, integer *n2, doublereal *work, integer *info) +{ + integer q_dim1, q_offset, t_dim1, t_offset, i__1; + doublereal d__1, d__2, d__3; + doublereal d__[16] /* was [4][4] */; + integer k; + doublereal u[3], x[4] /* was [2][2] */; + integer j2, j3, j4; + doublereal u1[3], u2[3]; + integer nd; + doublereal cs, t11, t22, t33, sn, wi1, wi2, wr1, wr2, eps, tau, tau1, + tau2; + integer ierr; + doublereal temp; + doublereal scale, dnorm, xnorm; + doublereal thresh, smlnum; - rho = dnrm2_(k, &z__[1], &dlasd3_c__1); - if (dlascl_(interp, "G", &dlasd3_c__0, &dlasd3_c__0, &rho, &dlasd3_c_b13, k, &dlasd3_c__1, &z__[1], k, info)!=TCL_OK) { return TCL_ERROR; } - rho *= rho; - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - if (dlasd4_(interp, k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, 
&d__[j], &vt[j * vt_dim1 + 1], info)!=TCL_OK) { return TCL_ERROR; } - if (*info != 0) { -return TCL_OK; - } - } - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1]; - i__2 = i__ - 1; - for (j = 1; j <= i__2; ++j) { - z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[ - i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]); - } - i__2 = *k - 1; - for (j = i__; j <= i__2; ++j) { - z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[ - i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]); - } - d__2 = sqrt((d__1 = z__[i__], abs(d__1))); - z__[i__] = d_sign(&d__2, &q[i__ + q_dim1]); - } - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ * - vt_dim1 + 1]; - u[i__ * u_dim1 + 1] = -1.; - i__2 = *k; - for (j = 2; j <= i__2; ++j) { - vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__ - * vt_dim1]; - u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1]; - } - temp = dnrm2_(k, &u[i__ * u_dim1 + 1], &dlasd3_c__1); - q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp; - i__2 = *k; - for (j = 2; j <= i__2; ++j) { - jc = idxc[j]; - q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp; - } - } - if (*k == 2) { - if (dgemm_(interp, "N", "N", &n, k, k, &dlasd3_c_b13, &u2[u2_offset], ldu2, &q[q_offset], ldq, &dlasd3_c_b26, &u[u_offset], ldu)!=TCL_OK) { return TCL_ERROR; } - goto L100; - } - if (ctot[1] > 0) { - if (dgemm_(interp, "N", "N", nl, k, &ctot[1], &dlasd3_c_b13, &u2[(u2_dim1 << 1) + 1], ldu2, &q[q_dim1 + 2], ldq, &dlasd3_c_b26, &u[u_dim1 + 1], ldu)!=TCL_OK) { return TCL_ERROR; } - if (ctot[3] > 0) { - ktemp = ctot[1] + 2 + ctot[2]; - if (dgemm_(interp, "N", "N", nl, k, &ctot[3], &dlasd3_c_b13, &u2[ktemp * u2_dim1 + 1], ldu2, &q[ktemp + q_dim1], ldq, &dlasd3_c_b13, &u[u_dim1 + 1], - ldu)!=TCL_OK) { return TCL_ERROR; } - } - } else if (ctot[3] > 0) { - ktemp = ctot[1] + 2 + ctot[2]; - if (dgemm_(interp, "N", "N", nl, k, 
&ctot[3], &dlasd3_c_b13, &u2[ktemp * u2_dim1 + 1], ldu2, &q[ktemp + q_dim1], ldq, &dlasd3_c_b26, &u[u_dim1 + 1], ldu)!=TCL_OK) { return TCL_ERROR; } + t_dim1 = *ldt; + t_offset = 1 + t_dim1; + t -= t_offset; + q_dim1 = *ldq; + q_offset = 1 + q_dim1; + q -= q_offset; + --work; + *info = 0; - } else { - if (dlacpy_(interp, "F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu)!=TCL_OK) { return TCL_ERROR; } + if (*n == 0 || *n1 == 0 || *n2 == 0) { +return TCL_OK; + } + if (*j1 + *n1 > *n) { +return TCL_OK; } - if (dcopy_(interp, k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu)!=TCL_OK) { return TCL_ERROR; } - - ktemp = ctot[1] + 2; - ctemp = ctot[2] + ctot[3]; - if (dgemm_(interp, "N", "N", nr, k, &ctemp, &dlasd3_c_b13, &u2[nlp2 + ktemp * u2_dim1], ldu2, &q[ktemp + q_dim1], ldq, &dlasd3_c_b26, &u[nlp2 + u_dim1], ldu)!=TCL_OK) { return TCL_ERROR; } + j2 = *j1 + 1; + j3 = *j1 + 2; + j4 = *j1 + 3; + if (*n1 == 1 && *n2 == 1) { -L100: - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - temp = dnrm2_(k, &vt[i__ * vt_dim1 + 1], &dlasd3_c__1); - q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp; - i__2 = *k; - for (j = 2; j <= i__2; ++j) { - jc = idxc[j]; - q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp; - } - } + t11 = t[*j1 + *j1 * t_dim1]; + t22 = t[j2 + j2 * t_dim1]; - if (*k == 2) { - if (dgemm_(interp, "N", "N", k, &m, k, &dlasd3_c_b13, &q[q_offset], ldq, &vt2[vt2_offset], ldvt2, &dlasd3_c_b26, &vt[vt_offset], ldvt)!=TCL_OK) { return TCL_ERROR; } + d__1 = t22 - t11; + if (dlartg_(interp, &t[*j1 + j2 * t_dim1], &d__1, &cs, &sn, &temp)!=TCL_OK) { return TCL_ERROR; } -return TCL_OK; - } - ktemp = ctot[1] + 1; - if (dgemm_(interp, "N", "N", k, &nlp1, &ktemp, &dlasd3_c_b13, &q[q_dim1 + 1], ldq, &vt2[ vt2_dim1 + 1], ldvt2, &dlasd3_c_b26, &vt[vt_dim1 + 1], ldvt)!=TCL_OK) { return TCL_ERROR; } + if (j3 <= *n) { + i__1 = *n - *j1 - 1; + if (drot_(interp, &i__1, &t[*j1 + j3 * t_dim1], ldt, &t[j2 + j3 * t_dim1], ldt, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - ktemp = ctot[1] + 2 
+ ctot[2]; - if (ktemp <= *ldvt2) { - if (dgemm_(interp, "N", "N", k, &nlp1, &ctot[3], &dlasd3_c_b13, &q[ktemp * q_dim1 + 1], ldq, &vt2[ktemp + vt2_dim1], ldvt2, &dlasd3_c_b13, &vt[vt_dim1 + 1], - ldvt)!=TCL_OK) { return TCL_ERROR; } + } + i__1 = *j1 - 1; + if (drot_(interp, &i__1, &t[*j1 * t_dim1 + 1], &dlaexc_c__1, &t[j2 * t_dim1 + 1], &dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - } - ktemp = ctot[1] + 1; - nrp1 = *nr + *sqre; - if (ktemp > 1) { - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - q[i__ + ktemp * q_dim1] = q[i__ + q_dim1]; - } - i__1 = m; - for (i__ = nlp2; i__ <= i__1; ++i__) { - vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1]; - } - } - ctemp = ctot[2] + 1 + ctot[3]; - if (dgemm_(interp, "N", "N", k, &nrp1, &ctemp, &dlasd3_c_b13, &q[ktemp * q_dim1 + 1], ldq, & vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &dlasd3_c_b26, &vt[nlp2 * vt_dim1 + - 1], ldvt)!=TCL_OK) { return TCL_ERROR; } + t[*j1 + *j1 * t_dim1] = t22; + t[j2 + j2 * t_dim1] = t11; + if (*wantq) { -return TCL_OK; + if (drot_(interp, n, &q[*j1 * q_dim1 + 1], &dlaexc_c__1, &q[j2 * q_dim1 + 1], &dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } -} /* dlasd3_ */ -static /* Subroutine */ int dlamrg_ (Tcl_Interp *interp, integer *n1, integer *n2, doublereal *a, integer *dtrd1, integer *dtrd2, integer *index) -{ - integer i__1; - integer i__, ind1, ind2, n1sv, n2sv; + } + } else { + nd = *n1 + *n2; + if (dlacpy_(interp, "Full", &nd, &nd, &t[*j1 + *j1 * t_dim1], ldt, d__, &dlaexc_c__4)!=TCL_OK) { return TCL_ERROR; } + dnorm = dlange_("Max", &nd, &nd, d__, &dlaexc_c__4, &work[1]); + eps = dlamch_("P"); + smlnum = dlamch_("S") / eps; + d__1 = eps * 10. 
* dnorm; + thresh = max(d__1,smlnum); + if (dlasy2_(interp, &dlaexc_c_false, &dlaexc_c_false, &dlaexc_c_n1, n1, n2, d__, &dlaexc_c__4, &d__[*n1 + 1 + (*n1 + 1 << 2) - 5], &dlaexc_c__4, &d__[(*n1 + 1 << 2) - 4], &dlaexc_c__4, & + scale, x, &dlaexc_c__2, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } - --index; - --a; + k = *n1 + *n1 + *n2 - 3; + switch (k) { + case 1: goto L10; + case 2: goto L20; + case 3: goto L30; + } - n1sv = *n1; - n2sv = *n2; - if (*dtrd1 > 0) { - ind1 = 1; - } else { - ind1 = *n1; - } - if (*dtrd2 > 0) { - ind2 = *n1 + 1; - } else { - ind2 = *n1 + *n2; - } - i__ = 1; L10: - if (n1sv > 0 && n2sv > 0) { - if (a[ind1] <= a[ind2]) { - index[i__] = ind1; - ++i__; - ind1 += *dtrd1; - --n1sv; - } else { - index[i__] = ind2; - ++i__; - ind2 += *dtrd2; - --n2sv; - } - goto L10; - } - if (n1sv == 0) { - i__1 = n2sv; - for (n1sv = 1; n1sv <= i__1; ++n1sv) { - index[i__] = ind2; - ++i__; - ind2 += *dtrd2; - } - } else { - i__1 = n1sv; - for (n2sv = 1; n2sv <= i__1; ++n2sv) { - index[i__] = ind1; - ++i__; - ind1 += *dtrd1; - } - } -return TCL_OK; -} /* dlamrg_ */ -static /* Subroutine */ int dlasd7_ (Tcl_Interp *interp, integer *icompq, integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *z__, doublereal *zw, doublereal *vf, doublereal *vfw, doublereal *vl, doublereal *vlw, doublereal *alpha, doublereal *beta, doublereal * dsigma, integer *idx, integer *idxp, integer *idxq, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *c__, doublereal *s, integer *info) -{ - integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1; - doublereal d__1, d__2; + u[0] = scale; + u[1] = x[0]; + u[2] = x[2]; + if (dlarfg_(interp, &dlaexc_c__3, &u[2], u, &dlaexc_c__1, &tau)!=TCL_OK) { return TCL_ERROR; } - integer i__, j, m, n, k2; - doublereal z1; - integer jp; - doublereal eps, tau, tol; - integer nlp1, nlp2, idxi, idxj; - integer idxjp; - integer jprev; - doublereal 
hlftol; + u[2] = 1.; + t11 = t[*j1 + *j1 * t_dim1]; + + + if (dlarfx_(interp, "L", &dlaexc_c__3, &dlaexc_c__3, u, &tau, d__, &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } + + if (dlarfx_(interp, "R", &dlaexc_c__3, &dlaexc_c__3, u, &tau, d__, &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } + d__2 = abs(d__[2]), d__3 = abs(d__[6]), d__2 = max(d__2,d__3), d__3 = + (d__1 = d__[10] - t11, abs(d__1)); + if (max(d__2,d__3) > thresh) { + goto L50; + } + i__1 = *n - *j1 + 1; + if (dlarfx_(interp, "L", &dlaexc_c__3, &i__1, u, &tau, &t[*j1 + *j1 * t_dim1], ldt, & work[1])!=TCL_OK) { return TCL_ERROR; } + if (dlarfx_(interp, "R", &j2, &dlaexc_c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1])!=TCL_OK) { return TCL_ERROR; } + t[j3 + *j1 * t_dim1] = 0.; + t[j3 + j2 * t_dim1] = 0.; + t[j3 + j3 * t_dim1] = t11; + if (*wantq) { + if (dlarfx_(interp, "R", n, &dlaexc_c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[ 1])!=TCL_OK) { return TCL_ERROR; } + } + goto L40; +L20: + u[0] = -x[0]; + u[1] = -x[1]; + u[2] = scale; + if (dlarfg_(interp, &dlaexc_c__3, u, &u[1], &dlaexc_c__1, &tau)!=TCL_OK) { return TCL_ERROR; } + u[0] = 1.; + t33 = t[j3 + j3 * t_dim1]; + if (dlarfx_(interp, "L", &dlaexc_c__3, &dlaexc_c__3, u, &tau, d__, &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } + if (dlarfx_(interp, "R", &dlaexc_c__3, &dlaexc_c__3, u, &tau, d__, &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } + d__2 = abs(d__[1]), d__3 = abs(d__[2]), d__2 = max(d__2,d__3), d__3 = + (d__1 = d__[0] - t33, abs(d__1)); + if (max(d__2,d__3) > thresh) { + goto L50; + } + if (dlarfx_(interp, "R", &j3, &dlaexc_c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1])!=TCL_OK) { return TCL_ERROR; } + i__1 = *n - *j1; + if (dlarfx_(interp, "L", &dlaexc_c__3, &i__1, u, &tau, &t[*j1 + j2 * t_dim1], ldt, &work[ 1])!=TCL_OK) { return TCL_ERROR; } + t[*j1 + *j1 * t_dim1] = t33; + t[j2 + *j1 * t_dim1] = 0.; + t[j3 + *j1 * t_dim1] = 0.; + if (*wantq) { + if (dlarfx_(interp, "R", n, &dlaexc_c__3, u, 
&tau, &q[*j1 * q_dim1 + 1], ldq, &work[ 1])!=TCL_OK) { return TCL_ERROR; } + } + goto L40; +L30: + u1[0] = -x[0]; + u1[1] = -x[1]; + u1[2] = scale; + if (dlarfg_(interp, &dlaexc_c__3, u1, &u1[1], &dlaexc_c__1, &tau1)!=TCL_OK) { return TCL_ERROR; } + u1[0] = 1.; - --d__; - --z__; - --zw; - --vf; - --vfw; - --vl; - --vlw; - --dsigma; - --idx; - --idxp; - --idxq; - --perm; - givcol_dim1 = *ldgcol; - givcol_offset = 1 + givcol_dim1; - givcol -= givcol_offset; - givnum_dim1 = *ldgnum; - givnum_offset = 1 + givnum_dim1; - givnum -= givnum_offset; + temp = -tau1 * (x[2] + u1[1] * x[3]); + u2[0] = -temp * u1[1] - x[3]; + u2[1] = -temp * u1[2]; + u2[2] = scale; + if (dlarfg_(interp, &dlaexc_c__3, u2, &u2[1], &dlaexc_c__1, &tau2)!=TCL_OK) { return TCL_ERROR; } - *info = 0; - n = *nl + *nr + 1; - m = n + *sqre; + u2[0] = 1.; - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*nl < 1) { - *info = -2; - } else if (*nr < 1) { - *info = -3; - } else if (*sqre < 0 || *sqre > 1) { - *info = -4; - } else if (*ldgcol < n) { - *info = -22; - } else if (*ldgnum < n) { - *info = -24; - } - if (*info != 0) { - i__1 = -(*info); - vectcl_xerbla(interp, "DLASD7", &i__1); -return TCL_ERROR; -return TCL_OK; - } + if (dlarfx_(interp, "L", &dlaexc_c__3, &dlaexc_c__4, u1, &tau1, d__, &dlaexc_c__4, &work[1]) !=TCL_OK) { return TCL_ERROR; } - nlp1 = *nl + 1; - nlp2 = *nl + 2; - if (*icompq == 1) { - *givptr = 0; - } + if (dlarfx_(interp, "R", &dlaexc_c__4, &dlaexc_c__3, u1, &tau1, d__, &dlaexc_c__4, &work[1]) !=TCL_OK) { return TCL_ERROR; } - z1 = *alpha * vl[nlp1]; - vl[nlp1] = 0.; - tau = vf[nlp1]; - for (i__ = *nl; i__ >= 1; --i__) { - z__[i__ + 1] = *alpha * vl[i__]; - vl[i__] = 0.; - vf[i__ + 1] = vf[i__]; - d__[i__ + 1] = d__[i__]; - idxq[i__ + 1] = idxq[i__] + 1; - } - vf[1] = tau; + if (dlarfx_(interp, "L", &dlaexc_c__3, &dlaexc_c__4, u2, &tau2, &d__[1], &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } - i__1 = m; - for (i__ = nlp2; i__ <= i__1; ++i__) { - z__[i__] = 
*beta * vf[i__]; - vf[i__] = 0.; - } + if (dlarfx_(interp, "R", &dlaexc_c__4, &dlaexc_c__3, u2, &tau2, &d__[4], &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } - i__1 = n; - for (i__ = nlp2; i__ <= i__1; ++i__) { - idxq[i__] += nlp1; - } + d__1 = abs(d__[2]), d__2 = abs(d__[6]), d__1 = max(d__1,d__2), d__2 = + abs(d__[3]), d__1 = max(d__1,d__2), d__2 = abs(d__[7]); + if (max(d__1,d__2) > thresh) { + goto L50; + } - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - dsigma[i__] = d__[idxq[i__]]; - zw[i__] = z__[idxq[i__]]; - vfw[i__] = vf[idxq[i__]]; - vlw[i__] = vl[idxq[i__]]; - } - if (dlamrg_(interp, nl, nr, &dsigma[2], &dlasd7_c__1, &dlasd7_c__1, &idx[2])!=TCL_OK) { return TCL_ERROR; } + i__1 = *n - *j1 + 1; + if (dlarfx_(interp, "L", &dlaexc_c__3, &i__1, u1, &tau1, &t[*j1 + *j1 * t_dim1], ldt, & work[1])!=TCL_OK) { return TCL_ERROR; } - i__1 = n; - for (i__ = 2; i__ <= i__1; ++i__) { - idxi = idx[i__] + 1; - d__[i__] = dsigma[idxi]; - z__[i__] = zw[idxi]; - vf[i__] = vfw[idxi]; - vl[i__] = vlw[idxi]; - } + if (dlarfx_(interp, "R", &j4, &dlaexc_c__3, u1, &tau1, &t[*j1 * t_dim1 + 1], ldt, &work[ 1])!=TCL_OK) { return TCL_ERROR; } - eps = dlamch_("Epsilon"); - d__1 = abs(*alpha), d__2 = abs(*beta); - tol = max(d__1,d__2); - d__2 = (d__1 = d__[n], abs(d__1)); - tol = eps * 64. 
* max(d__2,tol); + i__1 = *n - *j1 + 1; + if (dlarfx_(interp, "L", &dlaexc_c__3, &i__1, u2, &tau2, &t[j2 + *j1 * t_dim1], ldt, & work[1])!=TCL_OK) { return TCL_ERROR; } + if (dlarfx_(interp, "R", &j4, &dlaexc_c__3, u2, &tau2, &t[j2 * t_dim1 + 1], ldt, &work[1])!=TCL_OK) { return TCL_ERROR; } - *k = 1; - k2 = n + 1; - i__1 = n; - for (j = 2; j <= i__1; ++j) { - if ((d__1 = z__[j], abs(d__1)) <= tol) { + t[j3 + *j1 * t_dim1] = 0.; + t[j3 + j2 * t_dim1] = 0.; + t[j4 + *j1 * t_dim1] = 0.; + t[j4 + j2 * t_dim1] = 0.; + if (*wantq) { - --k2; - idxp[k2] = j; - if (j == n) { - goto L100; - } - } else { - jprev = j; - goto L70; - } - } -L70: - j = jprev; -L80: - ++j; - if (j > n) { - goto L90; - } - if ((d__1 = z__[j], abs(d__1)) <= tol) { + if (dlarfx_(interp, "R", n, &dlaexc_c__3, u1, &tau1, &q[*j1 * q_dim1 + 1], ldq, & work[1])!=TCL_OK) { return TCL_ERROR; } - --k2; - idxp[k2] = j; - } else { + if (dlarfx_(interp, "R", n, &dlaexc_c__3, u2, &tau2, &q[j2 * q_dim1 + 1], ldq, &work[ 1])!=TCL_OK) { return TCL_ERROR; } - if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) { + } - *s = z__[jprev]; - *c__ = z__[j]; +L40: + if (*n2 == 2) { - tau = dlapy2_(c__, s); - z__[j] = tau; - z__[jprev] = 0.; - *c__ /= tau; - *s = -(*s) / tau; + if (dlanv2_(interp, &t[*j1 + *j1 * t_dim1], &t[*j1 + j2 * t_dim1], &t[j2 + * j1 * t_dim1], &t[j2 + j2 * t_dim1], &wr1, &wi1, &wr2, & + wi2, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - if (*icompq == 1) { - ++(*givptr); - idxjp = idxq[idx[jprev] + 1]; - idxj = idxq[idx[j] + 1]; - if (idxjp <= nlp1) { - --idxjp; - } - if (idxj <= nlp1) { - --idxj; - } - givcol[*givptr + (givcol_dim1 << 1)] = idxjp; - givcol[*givptr + givcol_dim1] = idxj; - givnum[*givptr + (givnum_dim1 << 1)] = *c__; - givnum[*givptr + givnum_dim1] = *s; - } - if (drot_(interp, &dlasd7_c__1, &vf[jprev], &dlasd7_c__1, &vf[j], &dlasd7_c__1, c__, s)!=TCL_OK) { return TCL_ERROR; } - if (drot_(interp, &dlasd7_c__1, &vl[jprev], &dlasd7_c__1, &vl[j], &dlasd7_c__1, c__, s)!=TCL_OK) { return 
TCL_ERROR; } + i__1 = *n - *j1 - 1; + if (drot_(interp, &i__1, &t[*j1 + (*j1 + 2) * t_dim1], ldt, &t[j2 + (*j1 + 2) * t_dim1], ldt, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - --k2; - idxp[k2] = jprev; - jprev = j; - } else { - ++(*k); - zw[*k] = z__[jprev]; - dsigma[*k] = d__[jprev]; - idxp[*k] = jprev; - jprev = j; - } - } - goto L80; -L90: + i__1 = *j1 - 1; + if (drot_(interp, &i__1, &t[*j1 * t_dim1 + 1], &dlaexc_c__1, &t[j2 * t_dim1 + 1], & dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - ++(*k); - zw[*k] = z__[jprev]; - dsigma[*k] = d__[jprev]; - idxp[*k] = jprev; -L100: + if (*wantq) { + if (drot_(interp, n, &q[*j1 * q_dim1 + 1], &dlaexc_c__1, &q[j2 * q_dim1 + 1], & dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - i__1 = n; - for (j = 2; j <= i__1; ++j) { - jp = idxp[j]; - dsigma[j] = d__[jp]; - vfw[j] = vf[jp]; - vlw[j] = vl[jp]; - } - if (*icompq == 1) { - i__1 = n; - for (j = 2; j <= i__1; ++j) { - jp = idxp[j]; - perm[j] = idxq[idx[jp] + 1]; - if (perm[j] <= nlp1) { - --perm[j]; } } - } + + if (*n1 == 2) { - i__1 = n - *k; - if (dcopy_(interp, &i__1, &dsigma[*k + 1], &dlasd7_c__1, &d__[*k + 1], &dlasd7_c__1)!=TCL_OK) { return TCL_ERROR; } + j3 = *j1 + *n2; + j4 = j3 + 1; + if (dlanv2_(interp, &t[j3 + j3 * t_dim1], &t[j3 + j4 * t_dim1], &t[j4 + j3 * t_dim1], &t[j4 + j4 * t_dim1], &wr1, &wi1, &wr2, &wi2, & + cs, &sn)!=TCL_OK) { return TCL_ERROR; } + if (j3 + 2 <= *n) { + i__1 = *n - j3 - 1; + if (drot_(interp, &i__1, &t[j3 + (j3 + 2) * t_dim1], ldt, &t[j4 + (j3 + 2) * t_dim1], ldt, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - dsigma[1] = 0.; - hlftol = tol / 2.; - if (abs(dsigma[2]) <= hlftol) { - dsigma[2] = hlftol; - } - if (m > n) { - z__[1] = dlapy2_(&z1, &z__[m]); - if (z__[1] <= tol) { - *c__ = 1.; - *s = 0.; - z__[1] = tol; - } else { - *c__ = z1 / z__[1]; - *s = -z__[m] / z__[1]; - } - if (drot_(interp, &dlasd7_c__1, &vf[m], &dlasd7_c__1, &vf[1], &dlasd7_c__1, c__, s)!=TCL_OK) { return TCL_ERROR; } - if (drot_(interp, &dlasd7_c__1, 
&vl[m], &dlasd7_c__1, &vl[1], &dlasd7_c__1, c__, s)!=TCL_OK) { return TCL_ERROR; } + } + i__1 = j3 - 1; + if (drot_(interp, &i__1, &t[j3 * t_dim1 + 1], &dlaexc_c__1, &t[j4 * t_dim1 + 1], & dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - } else { - if (abs(z1) <= tol) { - z__[1] = tol; - } else { - z__[1] = z1; - } - } + if (*wantq) { + if (drot_(interp, n, &q[j3 * q_dim1 + 1], &dlaexc_c__1, &q[j4 * q_dim1 + 1], & dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - i__1 = *k - 1; - if (dcopy_(interp, &i__1, &zw[2], &dlasd7_c__1, &z__[2], &dlasd7_c__1)!=TCL_OK) { return TCL_ERROR; } - i__1 = n - 1; - if (dcopy_(interp, &i__1, &vfw[2], &dlasd7_c__1, &vf[2], &dlasd7_c__1)!=TCL_OK) { return TCL_ERROR; } + } + } - i__1 = n - 1; - if (dcopy_(interp, &i__1, &vlw[2], &dlasd7_c__1, &vl[2], &dlasd7_c__1)!=TCL_OK) { return TCL_ERROR; } + } +return TCL_OK; +L50: + *info = 1; return TCL_OK; -} /* dlasd7_ */ -static /* Subroutine */ int dlasd8_ (Tcl_Interp *interp, integer *icompq, integer *k, doublereal *d__, doublereal *z__, doublereal *vf, doublereal *vl, doublereal *difl, doublereal *difr, integer *lddifr, doublereal *dsigma, doublereal * work, integer *info) +} /* dlaexc_ */ +static /* Subroutine */ int dlasy2_ (Tcl_Interp *interp, logical *ltranl, logical *ltranr, integer *isgn, integer *n1, integer *n2, doublereal *tl, integer *ldtl, doublereal * tr, integer *ldtr, doublereal *b, integer *ldb, doublereal *scale, doublereal *x, integer *ldx, doublereal *xnorm, integer *info) { - integer difr_dim1, difr_offset, i__1, i__2; - doublereal d__1, d__2; - double sqrt(doublereal), d_sign(doublereal *, doublereal *); + static integer locu12[4] = { 3,4,1,2 }; + static integer locl21[4] = { 2,1,4,3 }; + static integer locu22[4] = { 4,3,2,1 }; + static logical xswpiv[4] = { FALSE_,FALSE_,TRUE_,TRUE_ }; + static logical bswpiv[4] = { FALSE_,TRUE_,FALSE_,TRUE_ }; - integer i__, j; - doublereal dj, rho; - integer iwk1, iwk2, iwk3; + integer b_dim1, b_offset, tl_dim1, 
tl_offset, tr_dim1, tr_offset, x_dim1, + x_offset; + doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8; + + integer i__, j, k; + doublereal x2[2], l21, u11, u12; + integer ip, jp; + doublereal u22, t16[16] /* was [4][4] */, gam, bet, eps, sgn, tmp[4], + tau1, btmp[4], smin; + integer ipiv; doublereal temp; - integer iwk2i, iwk3i; - doublereal diflj, difrj, dsigj; - doublereal dsigjp; + integer jpiv[4]; + doublereal xmax; + integer ipsv, jpsv; + logical bswap; + logical xswap; + doublereal smlnum; @@ -59067,155 +61390,399 @@ static /* Subroutine */ int dlasd8_ (Tcl_Interp *interp, integer *icompq, intege - --d__; - --z__; - --vf; - --vl; - --difl; - difr_dim1 = *lddifr; - difr_offset = 1 + difr_dim1; - difr -= difr_offset; - --dsigma; - --work; + tl_dim1 = *ldtl; + tl_offset = 1 + tl_dim1; + tl -= tl_offset; + tr_dim1 = *ldtr; + tr_offset = 1 + tr_dim1; + tr -= tr_offset; + b_dim1 = *ldb; + b_offset = 1 + b_dim1; + b -= b_offset; + x_dim1 = *ldx; + x_offset = 1 + x_dim1; + x -= x_offset; + + *info = 0; - if (*icompq < 0 || *icompq > 1) { - *info = -1; - } else if (*k < 1) { - *info = -2; - } else if (*lddifr < *k) { - *info = -9; - } - if (*info != 0) { - i__1 = -(*info); - vectcl_xerbla(interp, "DLASD8", &i__1); -return TCL_ERROR; + if (*n1 == 0 || *n2 == 0) { return TCL_OK; } - if (*k == 1) { - d__[1] = abs(z__[1]); - difl[1] = d__[1]; - if (*icompq == 1) { - difl[2] = 1.; - difr[(difr_dim1 << 1) + 1] = 1.; - } -return TCL_OK; + eps = dlamch_("P"); + smlnum = dlamch_("S") / eps; + sgn = (doublereal) (*isgn); + + k = *n1 + *n1 + *n2 - 2; + switch (k) { + case 1: goto L10; + case 2: goto L20; + case 3: goto L30; + case 4: goto L50; } - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__]; +L10: + tau1 = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; + bet = abs(tau1); + if (bet <= smlnum) { + tau1 = smlnum; + bet = smlnum; + *info = 1; } + *scale = 1.; + gam = (d__1 = b[b_dim1 + 1], abs(d__1)); + if 
(smlnum * gam > bet) { + *scale = 1. / gam; + } - iwk1 = 1; - iwk2 = iwk1 + *k; - iwk3 = iwk2 + *k; - iwk2i = iwk2 - 1; - iwk3i = iwk3 - 1; - + x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / tau1; + *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)); +return TCL_OK; - rho = dnrm2_(k, &z__[1], &dlasd8_c__1); - if (dlascl_(interp, "G", &dlasd8_c__0, &dlasd8_c__0, &rho, &dlasd8_c_b8, k, &dlasd8_c__1, &z__[1], k, info)!=TCL_OK) { return TCL_ERROR; } - rho *= rho; +L20: + d__7 = (d__1 = tl[tl_dim1 + 1], abs(d__1)), d__8 = (d__2 = tr[tr_dim1 + 1] + , abs(d__2)), d__7 = max(d__7,d__8), d__8 = (d__3 = tr[(tr_dim1 << + 1) + 1], abs(d__3)), d__7 = max(d__7,d__8), d__8 = (d__4 = tr[ + tr_dim1 + 2], abs(d__4)), d__7 = max(d__7,d__8), d__8 = (d__5 = + tr[(tr_dim1 << 1) + 2], abs(d__5)); + d__6 = eps * max(d__7,d__8); + smin = max(d__6,smlnum); + tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; + tmp[3] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2]; + if (*ltranr) { + tmp[1] = sgn * tr[tr_dim1 + 2]; + tmp[2] = sgn * tr[(tr_dim1 << 1) + 1]; + } else { + tmp[1] = sgn * tr[(tr_dim1 << 1) + 1]; + tmp[2] = sgn * tr[tr_dim1 + 2]; + } + btmp[0] = b[b_dim1 + 1]; + btmp[1] = b[(b_dim1 << 1) + 1]; + goto L40; - if (dlaset_(interp, "A", k, &dlasd8_c__1, &dlasd8_c_b8, &dlasd8_c_b8, &work[iwk3], k)!=TCL_OK) { return TCL_ERROR; } +L30: + d__7 = (d__1 = tr[tr_dim1 + 1], abs(d__1)), d__8 = (d__2 = tl[tl_dim1 + 1] + , abs(d__2)), d__7 = max(d__7,d__8), d__8 = (d__3 = tl[(tl_dim1 << + 1) + 1], abs(d__3)), d__7 = max(d__7,d__8), d__8 = (d__4 = tl[ + tl_dim1 + 2], abs(d__4)), d__7 = max(d__7,d__8), d__8 = (d__5 = + tl[(tl_dim1 << 1) + 2], abs(d__5)); + d__6 = eps * max(d__7,d__8); + smin = max(d__6,smlnum); + tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; + tmp[3] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1]; + if (*ltranl) { + tmp[1] = tl[(tl_dim1 << 1) + 1]; + tmp[2] = tl[tl_dim1 + 2]; + } else { + tmp[1] = tl[tl_dim1 + 2]; + tmp[2] = tl[(tl_dim1 << 1) + 1]; + } + btmp[0] = b[b_dim1 + 1]; + btmp[1] 
= b[b_dim1 + 2]; +L40: - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - if (dlasd4_(interp, k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[ iwk2], info)!=TCL_OK) { return TCL_ERROR; } + ipiv = idamax_(&dlasy2_c__4, tmp, &dlasy2_c__1); + u11 = tmp[ipiv - 1]; + if (abs(u11) <= smin) { + *info = 1; + u11 = smin; + } + u12 = tmp[locu12[ipiv - 1] - 1]; + l21 = tmp[locl21[ipiv - 1] - 1] / u11; + u22 = tmp[locu22[ipiv - 1] - 1] - u12 * l21; + xswap = xswpiv[ipiv - 1]; + bswap = bswpiv[ipiv - 1]; + if (abs(u22) <= smin) { + *info = 1; + u22 = smin; + } + if (bswap) { + temp = btmp[1]; + btmp[1] = btmp[0] - l21 * temp; + btmp[0] = temp; + } else { + btmp[1] -= l21 * btmp[0]; + } + *scale = 1.; + if (smlnum * 2. * abs(btmp[1]) > abs(u22) || smlnum * 2. * abs(btmp[0]) > + abs(u11)) { + d__1 = abs(btmp[0]), d__2 = abs(btmp[1]); + *scale = .5 / max(d__1,d__2); + btmp[0] *= *scale; + btmp[1] *= *scale; + } + x2[1] = btmp[1] / u22; + x2[0] = btmp[0] / u11 - u12 / u11 * x2[1]; + if (xswap) { + temp = x2[1]; + x2[1] = x2[0]; + x2[0] = temp; + } + x[x_dim1 + 1] = x2[0]; + if (*n1 == 1) { + x[(x_dim1 << 1) + 1] = x2[1]; + *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[(x_dim1 << 1) + + 1], abs(d__2)); + } else { + x[x_dim1 + 2] = x2[1]; + d__3 = (d__1 = x[x_dim1 + 1], abs(d__1)), d__4 = (d__2 = x[x_dim1 + 2] + , abs(d__2)); + *xnorm = max(d__3,d__4); + } +return TCL_OK; +L50: + d__5 = (d__1 = tr[tr_dim1 + 1], abs(d__1)), d__6 = (d__2 = tr[(tr_dim1 << + 1) + 1], abs(d__2)), d__5 = max(d__5,d__6), d__6 = (d__3 = tr[ + tr_dim1 + 2], abs(d__3)), d__5 = max(d__5,d__6), d__6 = (d__4 = + tr[(tr_dim1 << 1) + 2], abs(d__4)); + smin = max(d__5,d__6); + d__5 = smin, d__6 = (d__1 = tl[tl_dim1 + 1], abs(d__1)), d__5 = max(d__5, + d__6), d__6 = (d__2 = tl[(tl_dim1 << 1) + 1], abs(d__2)), d__5 = + max(d__5,d__6), d__6 = (d__3 = tl[tl_dim1 + 2], abs(d__3)), d__5 = + max(d__5,d__6), d__6 = (d__4 = tl[(tl_dim1 << 1) + 2], abs(d__4)) + ; + smin = max(d__5,d__6); + d__1 = eps * 
smin; + smin = max(d__1,smlnum); + btmp[0] = 0.; + if (dcopy_(interp, &dlasy2_c__16, btmp, &dlasy2_c__0, t16, &dlasy2_c__1)!=TCL_OK) { return TCL_ERROR; } - if (*info != 0) { -return TCL_OK; - } - work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j]; - difl[j] = -work[j]; - difr[j + difr_dim1] = -work[j + 1]; - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i + - i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[ - j]); - } - i__2 = *k; - for (i__ = j + 1; i__ <= i__2; ++i__) { - work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i + - i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[ - j]); - } + t16[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; + t16[5] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1]; + t16[10] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2]; + t16[15] = tl[(tl_dim1 << 1) + 2] + sgn * tr[(tr_dim1 << 1) + 2]; + if (*ltranl) { + t16[4] = tl[tl_dim1 + 2]; + t16[1] = tl[(tl_dim1 << 1) + 1]; + t16[14] = tl[tl_dim1 + 2]; + t16[11] = tl[(tl_dim1 << 1) + 1]; + } else { + t16[4] = tl[(tl_dim1 << 1) + 1]; + t16[1] = tl[tl_dim1 + 2]; + t16[14] = tl[(tl_dim1 << 1) + 1]; + t16[11] = tl[tl_dim1 + 2]; + } + if (*ltranr) { + t16[8] = sgn * tr[(tr_dim1 << 1) + 1]; + t16[13] = sgn * tr[(tr_dim1 << 1) + 1]; + t16[2] = sgn * tr[tr_dim1 + 2]; + t16[7] = sgn * tr[tr_dim1 + 2]; + } else { + t16[8] = sgn * tr[tr_dim1 + 2]; + t16[13] = sgn * tr[tr_dim1 + 2]; + t16[2] = sgn * tr[(tr_dim1 << 1) + 1]; + t16[7] = sgn * tr[(tr_dim1 << 1) + 1]; } + btmp[0] = b[b_dim1 + 1]; + btmp[1] = b[b_dim1 + 2]; + btmp[2] = b[(b_dim1 << 1) + 1]; + btmp[3] = b[(b_dim1 << 1) + 2]; - i__1 = *k; - for (i__ = 1; i__ <= i__1; ++i__) { - d__2 = sqrt((d__1 = work[iwk3i + i__], abs(d__1))); - z__[i__] = d_sign(&d__2, &z__[i__]); - } + for (i__ = 1; i__ <= 3; ++i__) { + xmax = 0.; + for (ip = i__; ip <= 4; ++ip) { + for (jp = i__; jp <= 4; ++jp) { + if ((d__1 = t16[ip + (jp << 2) - 5], abs(d__1)) 
>= xmax) { + xmax = (d__1 = t16[ip + (jp << 2) - 5], abs(d__1)); + ipsv = ip; + jpsv = jp; + } + } + } + if (ipsv != i__) { + if (dswap_(interp, &dlasy2_c__4, &t16[ipsv - 1], &dlasy2_c__4, &t16[i__ - 1], &dlasy2_c__4)!=TCL_OK) { return TCL_ERROR; } + + temp = btmp[i__ - 1]; + btmp[i__ - 1] = btmp[ipsv - 1]; + btmp[ipsv - 1] = temp; + } + if (jpsv != i__) { + if (dswap_(interp, &dlasy2_c__4, &t16[(jpsv << 2) - 4], &dlasy2_c__1, &t16[(i__ << 2) - 4], &dlasy2_c__1)!=TCL_OK) { return TCL_ERROR; } - i__1 = *k; - for (j = 1; j <= i__1; ++j) { - diflj = difl[j]; - dj = d__[j]; - dsigj = -dsigma[j]; - if (j < *k) { - difrj = -difr[j + difr_dim1]; - dsigjp = -dsigma[j + 1]; } - work[j] = -z__[j] / diflj / (dsigma[j] + dj); - i__2 = j - 1; - for (i__ = 1; i__ <= i__2; ++i__) { - work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigj) - diflj) / ( - dsigma[i__] + dj); + jpiv[i__ - 1] = jpsv; + if ((d__1 = t16[i__ + (i__ << 2) - 5], abs(d__1)) < smin) { + *info = 1; + t16[i__ + (i__ << 2) - 5] = smin; } - i__2 = *k; - for (i__ = j + 1; i__ <= i__2; ++i__) { - work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigjp) + difrj) / - (dsigma[i__] + dj); + for (j = i__ + 1; j <= 4; ++j) { + t16[j + (i__ << 2) - 5] /= t16[i__ + (i__ << 2) - 5]; + btmp[j - 1] -= t16[j + (i__ << 2) - 5] * btmp[i__ - 1]; + for (k = i__ + 1; k <= 4; ++k) { + t16[j + (k << 2) - 5] -= t16[j + (i__ << 2) - 5] * t16[i__ + ( + k << 2) - 5]; + } } - temp = dnrm2_(k, &work[1], &dlasd8_c__1); - work[iwk2i + j] = ddot_(k, &work[1], &dlasd8_c__1, &vf[1], &dlasd8_c__1) / temp; - work[iwk3i + j] = ddot_(k, &work[1], &dlasd8_c__1, &vl[1], &dlasd8_c__1) / temp; - if (*icompq == 1) { - difr[j + (difr_dim1 << 1)] = temp; + } + if (abs(t16[15]) < smin) { + t16[15] = smin; + } + *scale = 1.; + if (smlnum * 8. * abs(btmp[0]) > abs(t16[0]) || smlnum * 8. * abs(btmp[1]) + > abs(t16[5]) || smlnum * 8. * abs(btmp[2]) > abs(t16[10]) || + smlnum * 8. 
* abs(btmp[3]) > abs(t16[15])) { + d__1 = abs(btmp[0]), d__2 = abs(btmp[1]), d__1 = max(d__1,d__2), d__2 + = abs(btmp[2]), d__1 = max(d__1,d__2), d__2 = abs(btmp[3]); + *scale = .125 / max(d__1,d__2); + btmp[0] *= *scale; + btmp[1] *= *scale; + btmp[2] *= *scale; + btmp[3] *= *scale; + } + for (i__ = 1; i__ <= 4; ++i__) { + k = 5 - i__; + temp = 1. / t16[k + (k << 2) - 5]; + tmp[k - 1] = btmp[k - 1] * temp; + for (j = k + 1; j <= 4; ++j) { + tmp[k - 1] -= temp * t16[k + (j << 2) - 5] * tmp[j - 1]; + } + } + for (i__ = 1; i__ <= 3; ++i__) { + if (jpiv[4 - i__ - 1] != 4 - i__) { + temp = tmp[4 - i__ - 1]; + tmp[4 - i__ - 1] = tmp[jpiv[4 - i__ - 1] - 1]; + tmp[jpiv[4 - i__ - 1] - 1] = temp; } } + x[x_dim1 + 1] = tmp[0]; + x[x_dim1 + 2] = tmp[1]; + x[(x_dim1 << 1) + 1] = tmp[2]; + x[(x_dim1 << 1) + 2] = tmp[3]; + d__1 = abs(tmp[0]) + abs(tmp[2]), d__2 = abs(tmp[1]) + abs(tmp[3]); + *xnorm = max(d__1,d__2); +return TCL_OK; - if (dcopy_(interp, k, &work[iwk2], &dlasd8_c__1, &vf[1], &dlasd8_c__1)!=TCL_OK) { return TCL_ERROR; } - if (dcopy_(interp, k, &work[iwk3], &dlasd8_c__1, &vl[1], &dlasd8_c__1)!=TCL_OK) { return TCL_ERROR; } +} /* dlasy2_ */ +static /* Subroutine */ int zrot_ (Tcl_Interp *interp, integer *n, doublecomplex *cx, integer *incx, doublecomplex *cy, integer *incy, doublereal *c__, doublecomplex *s) +{ + integer i__1, i__2, i__3, i__4; + doublecomplex z__1, z__2, z__3, z__4; + + void d_cnjg(doublecomplex *, doublecomplex *); + + integer i__, ix, iy; + doublecomplex stemp; + + + + + + + + + + + + + + + + --cy; + --cx; + + if (*n <= 0) { +return TCL_OK; + } + if (*incx == 1 && *incy == 1) { + goto L20; + } + ix = 1; + iy = 1; + if (*incx < 0) { + ix = (-(*n) + 1) * *incx + 1; + } + if (*incy < 0) { + iy = (-(*n) + 1) * *incy + 1; + } + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = ix; + z__2.r = *c__ * cx[i__2].r, z__2.i = *c__ * cx[i__2].i; + i__3 = iy; + z__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, z__3.i = s->r * cy[ + i__3].i + s->i * 
cy[i__3].r; + z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; + stemp.r = z__1.r, stemp.i = z__1.i; + i__2 = iy; + i__3 = iy; + z__2.r = *c__ * cy[i__3].r, z__2.i = *c__ * cy[i__3].i; + d_cnjg(&z__4, s); + i__4 = ix; + z__3.r = z__4.r * cx[i__4].r - z__4.i * cx[i__4].i, z__3.i = z__4.r * + cx[i__4].i + z__4.i * cx[i__4].r; + z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i; + cy[i__2].r = z__1.r, cy[i__2].i = z__1.i; + i__2 = ix; + cx[i__2].r = stemp.r, cx[i__2].i = stemp.i; + ix += *incx; + iy += *incy; + } return TCL_OK; -} /* dlasd8_ */ -static /* Subroutine */ int zgerc_ (Tcl_Interp *interp, integer *m, integer *n, doublecomplex *alpha, doublecomplex *x, integer *incx, doublecomplex *y, integer *incy, doublecomplex *a, integer *lda) +L20: + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + z__2.r = *c__ * cx[i__2].r, z__2.i = *c__ * cx[i__2].i; + i__3 = i__; + z__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, z__3.i = s->r * cy[ + i__3].i + s->i * cy[i__3].r; + z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; + stemp.r = z__1.r, stemp.i = z__1.i; + i__2 = i__; + i__3 = i__; + z__2.r = *c__ * cy[i__3].r, z__2.i = *c__ * cy[i__3].i; + d_cnjg(&z__4, s); + i__4 = i__; + z__3.r = z__4.r * cx[i__4].r - z__4.i * cx[i__4].i, z__3.i = z__4.r * + cx[i__4].i + z__4.i * cx[i__4].r; + z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i; + cy[i__2].r = z__1.r, cy[i__2].i = z__1.i; + i__2 = i__; + cx[i__2].r = stemp.r, cx[i__2].i = stemp.i; + } +return TCL_OK; +} /* zrot_ */ +static /* Subroutine */ int zlartg_ (Tcl_Interp *interp, doublecomplex *f, doublecomplex *g, doublereal * cs, doublecomplex *sn, doublecomplex *r__) { - integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; - doublecomplex z__1, z__2; + integer i__1; + doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10; + doublecomplex z__1, z__2, z__3; + double log(doublereal), pow_di(doublereal *, integer *), d_imag( + doublecomplex *), sqrt(doublereal); void 
d_cnjg(doublecomplex *, doublecomplex *); - integer i__, j, ix, jy, kx, info; - doublecomplex temp; - - - - - + doublereal d__; + integer i__; + doublereal f2, g2; + doublecomplex ff; + doublereal di, dr; + doublecomplex fs, gs; + doublereal f2s, g2s, eps, scale; + integer count; + doublereal safmn2; + doublereal safmx2; + doublereal safmin; @@ -59233,107 +61800,163 @@ static /* Subroutine */ int zgerc_ (Tcl_Interp *interp, integer *m, integer *n, - --x; - --y; - a_dim1 = *lda; - a_offset = 1 + a_dim1; - a -= a_offset; - info = 0; - if (*m < 0) { - info = 1; - } else if (*n < 0) { - info = 2; - } else if (*incx == 0) { - info = 5; - } else if (*incy == 0) { - info = 7; - } else if (*lda < max(1,*m)) { - info = 9; - } - if (info != 0) { - vectcl_xerbla(interp, "ZGERC ", &info); -return TCL_ERROR; + safmin = dlamch_("S"); + eps = dlamch_("E"); + d__1 = dlamch_("B"); + i__1 = (integer) (log(safmin / eps) / log(dlamch_("B")) / 2.); + safmn2 = pow_di(&d__1, &i__1); + safmx2 = 1. / safmn2; + d__7 = (d__1 = f->r, abs(d__1)), d__8 = (d__2 = d_imag(f), abs(d__2)); + d__9 = (d__3 = g->r, abs(d__3)), d__10 = (d__4 = d_imag(g), abs(d__4)); + d__5 = max(d__7,d__8), d__6 = max(d__9,d__10); + scale = max(d__5,d__6); + fs.r = f->r, fs.i = f->i; + gs.r = g->r, gs.i = g->i; + count = 0; + if (scale >= safmx2) { +L10: + ++count; + z__1.r = safmn2 * fs.r, z__1.i = safmn2 * fs.i; + fs.r = z__1.r, fs.i = z__1.i; + z__1.r = safmn2 * gs.r, z__1.i = safmn2 * gs.i; + gs.r = z__1.r, gs.i = z__1.i; + scale *= safmn2; + if (scale >= safmx2) { + goto L10; + } + } else if (scale <= safmn2) { + if (g->r == 0. && g->i == 0.) 
{ + *cs = 1.; + sn->r = 0., sn->i = 0.; + r__->r = f->r, r__->i = f->i; return TCL_OK; + } +L20: + --count; + z__1.r = safmx2 * fs.r, z__1.i = safmx2 * fs.i; + fs.r = z__1.r, fs.i = z__1.i; + z__1.r = safmx2 * gs.r, z__1.i = safmx2 * gs.i; + gs.r = z__1.r, gs.i = z__1.i; + scale *= safmx2; + if (scale <= safmn2) { + goto L20; + } } + d__1 = fs.r; + d__2 = d_imag(&fs); + f2 = d__1 * d__1 + d__2 * d__2; + d__1 = gs.r; + d__2 = d_imag(&gs); + g2 = d__1 * d__1 + d__2 * d__2; + if (f2 <= max(g2,1.) * safmin) { - if (*m == 0 || *n == 0 || alpha->r == 0. && alpha->i == 0.) { + if (f->r == 0. && f->i == 0.) { + *cs = 0.; + d__2 = g->r; + d__3 = d_imag(g); + d__1 = dlapy2_(&d__2, &d__3); + r__->r = d__1, r__->i = 0.; + d__1 = gs.r; + d__2 = d_imag(&gs); + d__ = dlapy2_(&d__1, &d__2); + d__1 = gs.r / d__; + d__2 = -d_imag(&gs) / d__; + z__1.r = d__1, z__1.i = d__2; + sn->r = z__1.r, sn->i = z__1.i; return TCL_OK; - } - - - if (*incy > 0) { - jy = 1; - } else { - jy = 1 - (*n - 1) * *incy; - } - if (*incx == 1) { - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jy; - if (y[i__2].r != 0. || y[i__2].i != 0.) { - d_cnjg(&z__2, &y[jy]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i = - alpha->r * z__2.i + alpha->i * z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = i__; - z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i = - x[i__5].r * temp.i + x[i__5].i * temp.r; - z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - } - } - jy += *incy; } - } else { - if (*incx > 0) { - kx = 1; + d__1 = fs.r; + d__2 = d_imag(&fs); + f2s = dlapy2_(&d__1, &d__2); + g2s = sqrt(g2); + *cs = f2s / g2s; + d__3 = (d__1 = f->r, abs(d__1)), d__4 = (d__2 = d_imag(f), abs(d__2)); + if (max(d__3,d__4) > 1.) 
{ + d__1 = f->r; + d__2 = d_imag(f); + d__ = dlapy2_(&d__1, &d__2); + d__1 = f->r / d__; + d__2 = d_imag(f) / d__; + z__1.r = d__1, z__1.i = d__2; + ff.r = z__1.r, ff.i = z__1.i; } else { - kx = 1 - (*m - 1) * *incx; + dr = safmx2 * f->r; + di = safmx2 * d_imag(f); + d__ = dlapy2_(&dr, &di); + d__1 = dr / d__; + d__2 = di / d__; + z__1.r = d__1, z__1.i = d__2; + ff.r = z__1.r, ff.i = z__1.i; } - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - i__2 = jy; - if (y[i__2].r != 0. || y[i__2].i != 0.) { - d_cnjg(&z__2, &y[jy]); - z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i = - alpha->r * z__2.i + alpha->i * z__2.r; - temp.r = z__1.r, temp.i = z__1.i; - ix = kx; - i__2 = *m; - for (i__ = 1; i__ <= i__2; ++i__) { - i__3 = i__ + j * a_dim1; - i__4 = i__ + j * a_dim1; - i__5 = ix; - z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i = - x[i__5].r * temp.i + x[i__5].i * temp.r; - z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i; - a[i__3].r = z__1.r, a[i__3].i = z__1.i; - ix += *incx; + d__1 = gs.r / g2s; + d__2 = -d_imag(&gs) / g2s; + z__2.r = d__1, z__2.i = d__2; + z__1.r = ff.r * z__2.r - ff.i * z__2.i, z__1.i = ff.r * z__2.i + ff.i + * z__2.r; + sn->r = z__1.r, sn->i = z__1.i; + z__2.r = *cs * f->r, z__2.i = *cs * f->i; + z__3.r = sn->r * g->r - sn->i * g->i, z__3.i = sn->r * g->i + sn->i * + g->r; + z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; + r__->r = z__1.r, r__->i = z__1.i; + } else { + + + f2s = sqrt(g2 / f2 + 1.); + d__1 = f2s * fs.r; + d__2 = f2s * d_imag(&fs); + z__1.r = d__1, z__1.i = d__2; + r__->r = z__1.r, r__->i = z__1.i; + *cs = 1. 
/ f2s; + d__ = f2 + g2; + d__1 = r__->r / d__; + d__2 = d_imag(r__) / d__; + z__1.r = d__1, z__1.i = d__2; + sn->r = z__1.r, sn->i = z__1.i; + d_cnjg(&z__2, &gs); + z__1.r = sn->r * z__2.r - sn->i * z__2.i, z__1.i = sn->r * z__2.i + + sn->i * z__2.r; + sn->r = z__1.r, sn->i = z__1.i; + if (count != 0) { + if (count > 0) { + i__1 = count; + for (i__ = 1; i__ <= i__1; ++i__) { + z__1.r = safmx2 * r__->r, z__1.i = safmx2 * r__->i; + r__->r = z__1.r, r__->i = z__1.i; + } + } else { + i__1 = -count; + for (i__ = 1; i__ <= i__1; ++i__) { + z__1.r = safmn2 * r__->r, z__1.i = safmn2 * r__->i; + r__->r = z__1.r, r__->i = z__1.i; } } - jy += *incy; } } - return TCL_OK; -} /* zgerc_ */ -static doublereal dlapy3_ (doublereal *x, doublereal *y, doublereal *z__) +} /* zlartg_ */ +static /* Subroutine */ int dlasd2_ (Tcl_Interp *interp, integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *z__, doublereal *alpha, doublereal * beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *dsigma, doublereal *u2, integer *ldu2, doublereal *vt2, integer *ldvt2, integer *idxp, integer *idx, integer *idxc, integer * idxq, integer *coltyp, integer *info) { - doublereal ret_val, d__1, d__2, d__3; - - double sqrt(doublereal); - - doublereal w, xabs, yabs, zabs; + integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset, + vt2_dim1, vt2_offset, i__1; + doublereal d__1, d__2; + doublereal c__; + integer i__, j, m, n; + doublereal s; + integer k2; + doublereal z1; + integer ct, jp; + doublereal eps, tau, tol; + integer psm[4], nlp1, nlp2, idxi, idxj; + integer ctot[4], idxjp; + integer jprev; + doublereal hlftol; @@ -59343,35 +61966,10 @@ static doublereal dlapy3_ (doublereal *x, doublereal *y, doublereal *z__) - xabs = abs(*x); - yabs = abs(*y); - zabs = abs(*z__); - d__1 = max(xabs,yabs); - w = max(d__1,zabs); - if (w == 0.) 
{ - ret_val = xabs + yabs + zabs; - } else { - d__1 = xabs / w; - d__2 = yabs / w; - d__3 = zabs / w; - ret_val = w * sqrt(d__1 * d__1 + d__2 * d__2 + d__3 * d__3); - } - return ret_val; -} /* dlapy3_ */ -static /* Subroutine */ int dlasq3_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *dmin__, doublereal *sigma, doublereal *desig, doublereal *qmax, integer *nfail, integer *iter, integer *ndiv, logical *ieee, integer *ttype, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *g, doublereal *tau) -{ - integer i__1; - doublereal d__1, d__2; - double sqrt(doublereal); - doublereal s, t; - integer j4, nn; - doublereal eps, tol; - integer n0in, ipn4; - doublereal tol2, temp; @@ -59397,225 +61995,340 @@ static /* Subroutine */ int dlasq3_ (Tcl_Interp *interp, integer *i0, integer *n + --d__; --z__; + u_dim1 = *ldu; + u_offset = 1 + u_dim1; + u -= u_offset; + vt_dim1 = *ldvt; + vt_offset = 1 + vt_dim1; + vt -= vt_offset; + --dsigma; + u2_dim1 = *ldu2; + u2_offset = 1 + u2_dim1; + u2 -= u2_offset; + vt2_dim1 = *ldvt2; + vt2_offset = 1 + vt2_dim1; + vt2 -= vt2_offset; + --idxp; + --idx; + --idxc; + --idxq; + --coltyp; - n0in = *n0; - eps = dlamch_("Precision"); - tol = eps * 100.; - d__1 = tol; - tol2 = d__1 * d__1; + *info = 0; + if (*nl < 1) { + *info = -1; + } else if (*nr < 1) { + *info = -2; + } else if (*sqre != 1 && *sqre != 0) { + *info = -3; + } -L10: + n = *nl + *nr + 1; + m = n + *sqre; - if (*n0 < *i0) { -return TCL_OK; - } - if (*n0 == *i0) { - goto L20; + if (*ldu < n) { + *info = -10; + } else if (*ldvt < m) { + *info = -12; + } else if (*ldu2 < n) { + *info = -15; + } else if (*ldvt2 < m) { + *info = -17; } - nn = (*n0 << 2) + *pp; - if (*n0 == *i0 + 1) { - goto L40; + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "DLASD2", &i__1); +return TCL_ERROR; + +return TCL_OK; } + nlp1 = *nl + 1; + nlp2 = *nl + 2; - if (z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) 
&& z__[nn - (*pp << 1) - - 4] > tol2 * z__[nn - 7]) { - goto L30; - } -L20: + z1 = *alpha * vt[nlp1 + nlp1 * vt_dim1]; + z__[1] = z1; + for (i__ = *nl; i__ >= 1; --i__) { + z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1]; + d__[i__ + 1] = d__[i__]; + idxq[i__ + 1] = idxq[i__] + 1; + } - z__[(*n0 << 2) - 3] = z__[(*n0 << 2) + *pp - 3] + *sigma; - --(*n0); - goto L10; + i__1 = m; + for (i__ = nlp2; i__ <= i__1; ++i__) { + z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1]; + } -L30: - if (z__[nn - 9] > tol2 * *sigma && z__[nn - (*pp << 1) - 8] > tol2 * z__[ - nn - 11]) { - goto L50; + i__1 = nlp1; + for (i__ = 2; i__ <= i__1; ++i__) { + coltyp[i__] = 1; + } + i__1 = n; + for (i__ = nlp2; i__ <= i__1; ++i__) { + coltyp[i__] = 2; } -L40: - if (z__[nn - 3] > z__[nn - 7]) { - s = z__[nn - 3]; - z__[nn - 3] = z__[nn - 7]; - z__[nn - 7] = s; + i__1 = n; + for (i__ = nlp2; i__ <= i__1; ++i__) { + idxq[i__] += nlp1; } - if (z__[nn - 5] > z__[nn - 3] * tol2) { - t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5; - s = z__[nn - 3] * (z__[nn - 5] / t); - if (s <= t) { - s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.) + 1.))); - } else { - s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s))); - } - t = z__[nn - 7] + (s + z__[nn - 5]); - z__[nn - 3] *= z__[nn - 7] / t; - z__[nn - 7] = t; + + + i__1 = n; + for (i__ = 2; i__ <= i__1; ++i__) { + dsigma[i__] = d__[idxq[i__]]; + u2[i__ + u2_dim1] = z__[idxq[i__]]; + idxc[i__] = coltyp[idxq[i__]]; } - z__[(*n0 << 2) - 7] = z__[nn - 7] + *sigma; - z__[(*n0 << 2) - 3] = z__[nn - 3] + *sigma; - *n0 += -2; - goto L10; -L50: - if (*pp == 2) { - *pp = 0; + if (dlamrg_(interp, nl, nr, &dsigma[2], &dlasd2_c__1, &dlasd2_c__1, &idx[2])!=TCL_OK) { return TCL_ERROR; } + + + i__1 = n; + for (i__ = 2; i__ <= i__1; ++i__) { + idxi = idx[i__] + 1; + d__[i__] = dsigma[idxi]; + z__[i__] = u2[idxi + u2_dim1]; + coltyp[i__] = idxc[idxi]; } - if (*dmin__ <= 0. 
|| *n0 < n0in) { - if (z__[(*i0 << 2) + *pp - 3] * 1.5 < z__[(*n0 << 2) + *pp - 3]) { - ipn4 = *i0 + *n0 << 2; - i__1 = *i0 + *n0 - 1 << 1; - for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { - temp = z__[j4 - 3]; - z__[j4 - 3] = z__[ipn4 - j4 - 3]; - z__[ipn4 - j4 - 3] = temp; - temp = z__[j4 - 2]; - z__[j4 - 2] = z__[ipn4 - j4 - 2]; - z__[ipn4 - j4 - 2] = temp; - temp = z__[j4 - 1]; - z__[j4 - 1] = z__[ipn4 - j4 - 5]; - z__[ipn4 - j4 - 5] = temp; - temp = z__[j4]; - z__[j4] = z__[ipn4 - j4 - 4]; - z__[ipn4 - j4 - 4] = temp; - } - if (*n0 - *i0 <= 4) { - z__[(*n0 << 2) + *pp - 1] = z__[(*i0 << 2) + *pp - 1]; - z__[(*n0 << 2) - *pp] = z__[(*i0 << 2) - *pp]; + eps = dlamch_("Epsilon"); + d__1 = abs(*alpha), d__2 = abs(*beta); + tol = max(d__1,d__2); + d__2 = (d__1 = d__[n], abs(d__1)); + tol = eps * 8. * max(d__2,tol); + + + + + + *k = 1; + k2 = n + 1; + i__1 = n; + for (j = 2; j <= i__1; ++j) { + if ((d__1 = z__[j], abs(d__1)) <= tol) { + + + --k2; + idxp[k2] = j; + coltyp[j] = 4; + if (j == n) { + goto L120; } - d__1 = *dmin2, d__2 = z__[(*n0 << 2) + *pp - 1]; - *dmin2 = min(d__1,d__2); - d__1 = z__[(*n0 << 2) + *pp - 1], d__2 = z__[(*i0 << 2) + *pp - 1] - , d__1 = min(d__1,d__2), d__2 = z__[(*i0 << 2) + *pp + 3]; - z__[(*n0 << 2) + *pp - 1] = min(d__1,d__2); - d__1 = z__[(*n0 << 2) - *pp], d__2 = z__[(*i0 << 2) - *pp], d__1 = - min(d__1,d__2), d__2 = z__[(*i0 << 2) - *pp + 4]; - z__[(*n0 << 2) - *pp] = min(d__1,d__2); - d__1 = *qmax, d__2 = z__[(*i0 << 2) + *pp - 3], d__1 = max(d__1, - d__2), d__2 = z__[(*i0 << 2) + *pp + 1]; - *qmax = max(d__1,d__2); - *dmin__ = -0.; + } else { + jprev = j; + goto L90; } } +L90: + j = jprev; +L100: + ++j; + if (j > n) { + goto L110; + } + if ((d__1 = z__[j], abs(d__1)) <= tol) { - if (dlasq4_(interp, i0, n0, &z__[1], pp, &n0in, dmin__, dmin1, dmin2, dn, dn1, dn2, tau, ttype, g)!=TCL_OK) { return TCL_ERROR; } + --k2; + idxp[k2] = j; + coltyp[j] = 4; + } else { + if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) { -L70: + s = 
z__[jprev]; + c__ = z__[j]; + + + tau = dlapy2_(&c__, &s); + c__ /= tau; + s = -s / tau; + z__[j] = tau; + z__[jprev] = 0.; - if (dlasq5_(interp, i0, n0, &z__[1], pp, tau, dmin__, dmin1, dmin2, dn, dn1, dn2, ieee)!=TCL_OK) { return TCL_ERROR; } + idxjp = idxq[idx[jprev] + 1]; + idxj = idxq[idx[j] + 1]; + if (idxjp <= nlp1) { + --idxjp; + } + if (idxj <= nlp1) { + --idxj; + } + if (drot_(interp, &n, &u[idxjp * u_dim1 + 1], &dlasd2_c__1, &u[idxj * u_dim1 + 1], & dlasd2_c__1, &c__, &s)!=TCL_OK) { return TCL_ERROR; } - *ndiv += *n0 - *i0 + 2; - ++(*iter); + if (drot_(interp, &m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, & c__, &s)!=TCL_OK) { return TCL_ERROR; } - if (*dmin__ >= 0. && *dmin1 > 0.) { + if (coltyp[j] != coltyp[jprev]) { + coltyp[j] = 3; + } + coltyp[jprev] = 4; + --k2; + idxp[k2] = jprev; + jprev = j; + } else { + ++(*k); + u2[*k + u2_dim1] = z__[jprev]; + dsigma[*k] = d__[jprev]; + idxp[*k] = jprev; + jprev = j; + } + } + goto L100; +L110: - goto L90; + ++(*k); + u2[*k + u2_dim1] = z__[jprev]; + dsigma[*k] = d__[jprev]; + idxp[*k] = jprev; - } else if (*dmin__ < 0. && *dmin1 > 0. && z__[(*n0 - 1 << 2) - *pp] < tol - * (*sigma + *dn1) && abs(*dn) < tol * *sigma) { +L120: - z__[(*n0 - 1 << 2) - *pp + 2] = 0.; - *dmin__ = 0.; - goto L90; - } else if (*dmin__ < 0.) { + for (j = 1; j <= 4; ++j) { + ctot[j - 1] = 0; + } + i__1 = n; + for (j = 2; j <= i__1; ++j) { + ct = coltyp[j]; + ++ctot[ct - 1]; + } - ++(*nfail); - if (*ttype < -22) { + psm[0] = 2; + psm[1] = ctot[0] + 2; + psm[2] = psm[1] + ctot[1]; + psm[3] = psm[2] + ctot[2]; - *tau = 0.; - } else if (*dmin1 > 0.) { + i__1 = n; + for (j = 2; j <= i__1; ++j) { + jp = idxp[j]; + ct = coltyp[jp]; + idxc[psm[ct - 1]] = j; + ++psm[ct - 1]; + } - *tau = (*tau + *dmin__) * (1. 
- eps * 2.); - *ttype += -11; - } else { + i__1 = n; + for (j = 2; j <= i__1; ++j) { + jp = idxp[j]; + dsigma[j] = d__[jp]; + idxj = idxq[idx[idxp[idxc[j]]] + 1]; + if (idxj <= nlp1) { + --idxj; + } + if (dcopy_(interp, &n, &u[idxj * u_dim1 + 1], &dlasd2_c__1, &u2[j * u2_dim1 + 1], &dlasd2_c__1)!=TCL_OK) { return TCL_ERROR; } + if (dcopy_(interp, &m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2)!=TCL_OK) { return TCL_ERROR; } - *tau *= .25; - *ttype += -12; - } - goto L70; - } else if (disnan_(dmin__)) { + } - if (*tau == 0.) { - goto L80; + dsigma[1] = 0.; + hlftol = tol / 2.; + if (abs(dsigma[2]) <= hlftol) { + dsigma[2] = hlftol; + } + if (m > n) { + z__[1] = dlapy2_(&z1, &z__[m]); + if (z__[1] <= tol) { + c__ = 1.; + s = 0.; + z__[1] = tol; } else { - *tau = 0.; - goto L70; + c__ = z1 / z__[1]; + s = z__[m] / z__[1]; } } else { + if (abs(z1) <= tol) { + z__[1] = tol; + } else { + z__[1] = z1; + } + } - goto L80; - } + i__1 = *k - 1; + if (dcopy_(interp, &i__1, &u2[u2_dim1 + 2], &dlasd2_c__1, &z__[2], &dlasd2_c__1)!=TCL_OK) { return TCL_ERROR; } -L80: - if (dlasq6_(interp, i0, n0, &z__[1], pp, dmin__, dmin1, dmin2, dn, dn1, dn2)!=TCL_OK) { return TCL_ERROR; } - *ndiv += *n0 - *i0 + 2; - ++(*iter); - *tau = 0.; + if (dlaset_(interp, "A", &n, &dlasd2_c__1, &dlasd2_c_b30, &dlasd2_c_b30, &u2[u2_offset], ldu2)!=TCL_OK) { return TCL_ERROR; } -L90: - if (*tau < *sigma) { - *desig += *tau; - t = *sigma + *desig; - *desig -= t - *sigma; + u2[nlp1 + u2_dim1] = 1.; + if (m > n) { + i__1 = nlp1; + for (i__ = 1; i__ <= i__1; ++i__) { + vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1]; + vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1]; + } + i__1 = m; + for (i__ = nlp2; i__ <= i__1; ++i__) { + vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1]; + vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1]; + } } else { - t = *sigma + *tau; - *desig = *sigma - (t - *tau) + *desig; + if (dcopy_(interp, &m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], 
ldvt2)!=TCL_OK) { return TCL_ERROR; } + } - *sigma = t; + if (m > n) { + if (dcopy_(interp, &m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2)!=TCL_OK) { return TCL_ERROR; } -return TCL_OK; + } -} /* dlasq3_ */ -static integer dlaneg_ (integer *n, doublereal *d__, doublereal *lld, doublereal * sigma, doublereal *pivmin, integer *r__) -{ - integer ret_val, i__1, i__2, i__3, i__4; + if (n > *k) { + i__1 = n - *k; + if (dcopy_(interp, &i__1, &dsigma[*k + 1], &dlasd2_c__1, &d__[*k + 1], &dlasd2_c__1)!=TCL_OK) { return TCL_ERROR; } - integer j; - doublereal p, t; - integer bj; - doublereal tmp; - integer neg1, neg2; - doublereal bsav, gamma, dplus; - integer negcnt; - logical sawnan; - doublereal dminus; + i__1 = n - *k; + if (dlacpy_(interp, "A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1) * u_dim1 + 1], ldu)!=TCL_OK) { return TCL_ERROR; } + i__1 = n - *k; + if (dlacpy_(interp, "A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 + vt_dim1], ldvt)!=TCL_OK) { return TCL_ERROR; } + } + for (j = 1; j <= 4; ++j) { + coltyp[j] = ctot[j - 1]; + } +return TCL_OK; +} /* dlasd2_ */ +static /* Subroutine */ int dlasd3_ (Tcl_Interp *interp, integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *q, integer *ldq, doublereal *dsigma, doublereal *u, integer *ldu, doublereal *u2, integer *ldu2, doublereal *vt, integer *ldvt, doublereal *vt2, integer *ldvt2, integer *idxc, integer *ctot, doublereal *z__, integer *info) +{ + integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, + vt_offset, vt2_dim1, vt2_offset, i__1, i__2; + doublereal d__1, d__2; + double sqrt(doublereal), d_sign(doublereal *, doublereal *); + integer i__, j, m, n, jc; + doublereal rho; + integer nlp1, nlp2, nrp1; + doublereal temp; + integer ctemp; + integer ktemp; @@ -59624,97 +62337,11 @@ static integer dlaneg_ (integer *n, doublereal *d__, doublereal *lld, doublereal - --lld; - --d__; - negcnt = 0; - t = -(*sigma); - i__1 = *r__ - 1; - for (bj = 
1; bj <= i__1; bj += 128) { - neg1 = 0; - bsav = t; - i__3 = bj + 127, i__4 = *r__ - 1; - i__2 = min(i__3,i__4); - for (j = bj; j <= i__2; ++j) { - dplus = d__[j] + t; - if (dplus < 0.) { - ++neg1; - } - tmp = t / dplus; - t = tmp * lld[j] - *sigma; - } - sawnan = disnan_(&t); - if (sawnan) { - neg1 = 0; - t = bsav; - i__3 = bj + 127, i__4 = *r__ - 1; - i__2 = min(i__3,i__4); - for (j = bj; j <= i__2; ++j) { - dplus = d__[j] + t; - if (dplus < 0.) { - ++neg1; - } - tmp = t / dplus; - if (disnan_(&tmp)) { - tmp = 1.; - } - t = tmp * lld[j] - *sigma; - } - } - negcnt += neg1; - } - p = d__[*n] - *sigma; - i__1 = *r__; - for (bj = *n - 1; bj >= i__1; bj += -128) { - neg2 = 0; - bsav = p; - i__3 = bj - 127; - i__2 = max(i__3,*r__); - for (j = bj; j >= i__2; --j) { - dminus = lld[j] + p; - if (dminus < 0.) { - ++neg2; - } - tmp = p / dminus; - p = tmp * d__[j] - *sigma; - } - sawnan = disnan_(&p); - if (sawnan) { - neg2 = 0; - p = bsav; - i__3 = bj - 127; - i__2 = max(i__3,*r__); - for (j = bj; j >= i__2; --j) { - dminus = lld[j] + p; - if (dminus < 0.) { - ++neg2; - } - tmp = p / dminus; - if (disnan_(&tmp)) { - tmp = 1.; - } - p = tmp * d__[j] - *sigma; - } - } - negcnt += neg2; - } - gamma = t + *sigma + p; - if (gamma < 0.) 
{ - ++negcnt; - } - ret_val = negcnt; - return ret_val; -} /* dlaneg_ */ -static /* Subroutine */ int dtrexc_ (Tcl_Interp *interp, char *compq, integer *n, doublereal *t, integer * ldt, doublereal *q, integer *ldq, integer *ifst, integer *ilst, doublereal *work, integer *info) -{ - integer q_dim1, q_offset, t_dim1, t_offset, i__1; - integer nbf, nbl, here; - logical wantq; - integer nbnext; @@ -59736,439 +62363,639 @@ static /* Subroutine */ int dtrexc_ (Tcl_Interp *interp, char *compq, integer *n - t_dim1 = *ldt; - t_offset = 1 + t_dim1; - t -= t_offset; + + + --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1; q -= q_offset; - --work; + --dsigma; + u_dim1 = *ldu; + u_offset = 1 + u_dim1; + u -= u_offset; + u2_dim1 = *ldu2; + u2_offset = 1 + u2_dim1; + u2 -= u2_offset; + vt_dim1 = *ldvt; + vt_offset = 1 + vt_dim1; + vt -= vt_offset; + vt2_dim1 = *ldvt2; + vt2_offset = 1 + vt2_dim1; + vt2 -= vt2_offset; + --idxc; + --ctot; + --z__; *info = 0; - wantq = lsame_(compq, "V"); - if (! wantq && ! lsame_(compq, "N")) { + + if (*nl < 1) { *info = -1; - } else if (*n < 0) { + } else if (*nr < 1) { *info = -2; - } else if (*ldt < max(1,*n)) { + } else if (*sqre != 1 && *sqre != 0) { + *info = -3; + } + + n = *nl + *nr + 1; + m = n + *sqre; + nlp1 = *nl + 1; + nlp2 = *nl + 2; + + if (*k < 1 || *k > n) { *info = -4; - } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) { - *info = -6; - } else if (*ifst < 1 || *ifst > *n) { + } else if (*ldq < *k) { *info = -7; - } else if (*ilst < 1 || *ilst > *n) { - *info = -8; + } else if (*ldu < n) { + *info = -10; + } else if (*ldu2 < n) { + *info = -12; + } else if (*ldvt < m) { + *info = -14; + } else if (*ldvt2 < m) { + *info = -16; } if (*info != 0) { i__1 = -(*info); - vectcl_xerbla(interp, "DTREXC", &i__1); + vectcl_xerbla(interp, "DLASD3", &i__1); return TCL_ERROR; return TCL_OK; } - if (*n <= 1) { + if (*k == 1) { + d__[1] = abs(z__[1]); + if (dcopy_(interp, &m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt)!=TCL_OK) { return 
TCL_ERROR; } + + if (z__[1] > 0.) { + if (dcopy_(interp, &n, &u2[u2_dim1 + 1], &dlasd3_c__1, &u[u_dim1 + 1], &dlasd3_c__1)!=TCL_OK) { return TCL_ERROR; } + + } else { + i__1 = n; + for (i__ = 1; i__ <= i__1; ++i__) { + u[i__ + u_dim1] = -u2[i__ + u2_dim1]; + } + } return TCL_OK; } - if (*ifst > 1) { - if (t[*ifst + (*ifst - 1) * t_dim1] != 0.) { - --(*ifst); + i__1 = *k; + for (i__ = 1; i__ <= i__1; ++i__) { + dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__]; + } + + + if (dcopy_(interp, k, &z__[1], &dlasd3_c__1, &q[q_offset], &dlasd3_c__1)!=TCL_OK) { return TCL_ERROR; } + + + + rho = dnrm2_(k, &z__[1], &dlasd3_c__1); + if (dlascl_(interp, "G", &dlasd3_c__0, &dlasd3_c__0, &rho, &dlasd3_c_b13, k, &dlasd3_c__1, &z__[1], k, info)!=TCL_OK) { return TCL_ERROR; } + + rho *= rho; + + + i__1 = *k; + for (j = 1; j <= i__1; ++j) { + if (dlasd4_(interp, k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, &d__[j], &vt[j * vt_dim1 + 1], info)!=TCL_OK) { return TCL_ERROR; } + + + + + if (*info != 0) { +return TCL_OK; } } - nbf = 1; - if (*ifst < *n) { - if (t[*ifst + 1 + *ifst * t_dim1] != 0.) { - nbf = 2; + + + i__1 = *k; + for (i__ = 1; i__ <= i__1; ++i__) { + z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1]; + i__2 = i__ - 1; + for (j = 1; j <= i__2; ++j) { + z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[ + i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]); + } + i__2 = *k - 1; + for (j = i__; j <= i__2; ++j) { + z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[ + i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]); } + d__2 = sqrt((d__1 = z__[i__], abs(d__1))); + z__[i__] = d_sign(&d__2, &q[i__ + q_dim1]); } - if (*ilst > 1) { - if (t[*ilst + (*ilst - 1) * t_dim1] != 0.) 
{ - --(*ilst); + i__1 = *k; + for (i__ = 1; i__ <= i__1; ++i__) { + vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ * + vt_dim1 + 1]; + u[i__ * u_dim1 + 1] = -1.; + i__2 = *k; + for (j = 2; j <= i__2; ++j) { + vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__ + * vt_dim1]; + u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1]; + } + temp = dnrm2_(k, &u[i__ * u_dim1 + 1], &dlasd3_c__1); + q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp; + i__2 = *k; + for (j = 2; j <= i__2; ++j) { + jc = idxc[j]; + q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp; } } - nbl = 1; - if (*ilst < *n) { - if (t[*ilst + 1 + *ilst * t_dim1] != 0.) { - nbl = 2; + + + if (*k == 2) { + if (dgemm_(interp, "N", "N", &n, k, k, &dlasd3_c_b13, &u2[u2_offset], ldu2, &q[q_offset], ldq, &dlasd3_c_b26, &u[u_offset], ldu)!=TCL_OK) { return TCL_ERROR; } + + + goto L100; + } + if (ctot[1] > 0) { + if (dgemm_(interp, "N", "N", nl, k, &ctot[1], &dlasd3_c_b13, &u2[(u2_dim1 << 1) + 1], ldu2, &q[q_dim1 + 2], ldq, &dlasd3_c_b26, &u[u_dim1 + 1], ldu)!=TCL_OK) { return TCL_ERROR; } + + + if (ctot[3] > 0) { + ktemp = ctot[1] + 2 + ctot[2]; + if (dgemm_(interp, "N", "N", nl, k, &ctot[3], &dlasd3_c_b13, &u2[ktemp * u2_dim1 + 1], ldu2, &q[ktemp + q_dim1], ldq, &dlasd3_c_b13, &u[u_dim1 + 1], + ldu)!=TCL_OK) { return TCL_ERROR; } + + } + } else if (ctot[3] > 0) { + ktemp = ctot[1] + 2 + ctot[2]; + if (dgemm_(interp, "N", "N", nl, k, &ctot[3], &dlasd3_c_b13, &u2[ktemp * u2_dim1 + 1], ldu2, &q[ktemp + q_dim1], ldq, &dlasd3_c_b26, &u[u_dim1 + 1], ldu)!=TCL_OK) { return TCL_ERROR; } + + + } else { + if (dlacpy_(interp, "F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu)!=TCL_OK) { return TCL_ERROR; } + } + if (dcopy_(interp, k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu)!=TCL_OK) { return TCL_ERROR; } + + ktemp = ctot[1] + 2; + ctemp = ctot[2] + ctot[3]; + if (dgemm_(interp, "N", "N", nr, k, &ctemp, &dlasd3_c_b13, &u2[nlp2 + ktemp * u2_dim1], ldu2, &q[ktemp + q_dim1], ldq, 
&dlasd3_c_b26, &u[nlp2 + u_dim1], ldu)!=TCL_OK) { return TCL_ERROR; } + + + + +L100: + i__1 = *k; + for (i__ = 1; i__ <= i__1; ++i__) { + temp = dnrm2_(k, &vt[i__ * vt_dim1 + 1], &dlasd3_c__1); + q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp; + i__2 = *k; + for (j = 2; j <= i__2; ++j) { + jc = idxc[j]; + q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp; + } + } + + + if (*k == 2) { + if (dgemm_(interp, "N", "N", k, &m, k, &dlasd3_c_b13, &q[q_offset], ldq, &vt2[vt2_offset], ldvt2, &dlasd3_c_b26, &vt[vt_offset], ldvt)!=TCL_OK) { return TCL_ERROR; } + - if (*ifst == *ilst) { return TCL_OK; } + ktemp = ctot[1] + 1; + if (dgemm_(interp, "N", "N", k, &nlp1, &ktemp, &dlasd3_c_b13, &q[q_dim1 + 1], ldq, &vt2[ vt2_dim1 + 1], ldvt2, &dlasd3_c_b26, &vt[vt_dim1 + 1], ldvt)!=TCL_OK) { return TCL_ERROR; } - if (*ifst < *ilst) { + ktemp = ctot[1] + 2 + ctot[2]; + if (ktemp <= *ldvt2) { + if (dgemm_(interp, "N", "N", k, &nlp1, &ctot[3], &dlasd3_c_b13, &q[ktemp * q_dim1 + 1], ldq, &vt2[ktemp + vt2_dim1], ldvt2, &dlasd3_c_b13, &vt[vt_dim1 + 1], + ldvt)!=TCL_OK) { return TCL_ERROR; } - if (nbf == 2 && nbl == 1) { - --(*ilst); + + } + + ktemp = ctot[1] + 1; + nrp1 = *nr + *sqre; + if (ktemp > 1) { + i__1 = *k; + for (i__ = 1; i__ <= i__1; ++i__) { + q[i__ + ktemp * q_dim1] = q[i__ + q_dim1]; } - if (nbf == 1 && nbl == 2) { - ++(*ilst); + i__1 = m; + for (i__ = nlp2; i__ <= i__1; ++i__) { + vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1]; } + } + ctemp = ctot[2] + 1 + ctot[3]; + if (dgemm_(interp, "N", "N", k, &nrp1, &ctemp, &dlasd3_c_b13, &q[ktemp * q_dim1 + 1], ldq, & vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &dlasd3_c_b26, &vt[nlp2 * vt_dim1 + + 1], ldvt)!=TCL_OK) { return TCL_ERROR; } - here = *ifst; -L10: + +return TCL_OK; + + +} /* dlasd3_ */ +static /* Subroutine */ int dlamrg_ (Tcl_Interp *interp, integer *n1, integer *n2, doublereal *a, integer *dtrd1, integer *dtrd2, integer *index) +{ + integer i__1; + + integer i__, ind1, ind2, n1sv, n2sv; - if (nbf == 1 || nbf == 2) { 
- nbnext = 1; - if (here + nbf + 1 <= *n) { - if (t[here + nbf + 1 + (here + nbf) * t_dim1] != 0.) { - nbnext = 2; - } - } - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &here, & nbf, &nbnext, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - if (*info != 0) { - *ilst = here; -return TCL_OK; - } - here += nbnext; - if (nbf == 2) { - if (t[here + 1 + here * t_dim1] == 0.) { - nbf = 3; - } - } + + + + + --index; + --a; + + n1sv = *n1; + n2sv = *n2; + if (*dtrd1 > 0) { + ind1 = 1; + } else { + ind1 = *n1; + } + if (*dtrd2 > 0) { + ind2 = *n1 + 1; + } else { + ind2 = *n1 + *n2; + } + i__ = 1; +L10: + if (n1sv > 0 && n2sv > 0) { + if (a[ind1] <= a[ind2]) { + index[i__] = ind1; + ++i__; + ind1 += *dtrd1; + --n1sv; } else { + index[i__] = ind2; + ++i__; + ind2 += *dtrd2; + --n2sv; + } + goto L10; + } + if (n1sv == 0) { + i__1 = n2sv; + for (n1sv = 1; n1sv <= i__1; ++n1sv) { + index[i__] = ind2; + ++i__; + ind2 += *dtrd2; + } + } else { + i__1 = n1sv; + for (n2sv = 1; n2sv <= i__1; ++n2sv) { + index[i__] = ind1; + ++i__; + ind1 += *dtrd1; + } + } - - nbnext = 1; - if (here + 3 <= *n) { - if (t[here + 3 + (here + 2) * t_dim1] != 0.) 
{ - nbnext = 2; - } - } - i__1 = here + 1; - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & dtrexc_c__1, &nbnext, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - - - if (*info != 0) { - *ilst = here; return TCL_OK; - } - if (nbnext == 1) { - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &dtrexc_c__1, &nbnext, &work[1], info)!=TCL_OK) { return TCL_ERROR; } +} /* dlamrg_ */ +static /* Subroutine */ int dlasd7_ (Tcl_Interp *interp, integer *icompq, integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *z__, doublereal *zw, doublereal *vf, doublereal *vfw, doublereal *vl, doublereal *vlw, doublereal *alpha, doublereal *beta, doublereal * dsigma, integer *idx, integer *idxp, integer *idxq, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *c__, doublereal *s, integer *info) +{ + integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1; + doublereal d__1, d__2; + integer i__, j, m, n, k2; + doublereal z1; + integer jp; + doublereal eps, tau, tol; + integer nlp1, nlp2, idxi, idxj; + integer idxjp; + integer jprev; + doublereal hlftol; - ++here; - } else { - if (t[here + 2 + (here + 1) * t_dim1] == 0.) 
{ - nbnext = 1; - } - if (nbnext == 2) { - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &dtrexc_c__1, &nbnext, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - if (*info != 0) { - *ilst = here; -return TCL_OK; - } - here += 2; - } else { - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &dtrexc_c__1, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - i__1 = here + 1; - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &dtrexc_c__1, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - here += 2; - } - } - } - if (here < *ilst) { - goto L10; - } - } else { - here = *ifst; -L20: - if (nbf == 1 || nbf == 2) { - nbnext = 1; - if (here >= 3) { - if (t[here - 1 + (here - 2) * t_dim1] != 0.) { - nbnext = 2; - } - } - i__1 = here - nbnext; - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & nbnext, &nbf, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - if (*info != 0) { - *ilst = here; -return TCL_OK; - } - here -= nbnext; - if (nbf == 2) { - if (t[here + 1 + here * t_dim1] == 0.) { - nbf = 3; - } - } - } else { - nbnext = 1; - if (here >= 3) { - if (t[here - 1 + (here - 2) * t_dim1] != 0.) { - nbnext = 2; - } - } - i__1 = here - nbnext; - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & nbnext, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - if (*info != 0) { - *ilst = here; -return TCL_OK; - } - if (nbnext == 1) { - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &nbnext, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - --here; - } else { - if (t[here + (here - 1) * t_dim1] == 0.) 
{ - nbnext = 1; - } - if (nbnext == 2) { - i__1 = here - 1; - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &dtrexc_c__2, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - if (*info != 0) { - *ilst = here; -return TCL_OK; - } - here += -2; - } else { - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &dtrexc_c__1, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } - i__1 = here - 1; - if (dlaexc_(interp, &wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &dtrexc_c__1, &dtrexc_c__1, &work[1], info)!=TCL_OK) { return TCL_ERROR; } + --d__; + --z__; + --zw; + --vf; + --vfw; + --vl; + --vlw; + --dsigma; + --idx; + --idxp; + --idxq; + --perm; + givcol_dim1 = *ldgcol; + givcol_offset = 1 + givcol_dim1; + givcol -= givcol_offset; + givnum_dim1 = *ldgnum; + givnum_offset = 1 + givnum_dim1; + givnum -= givnum_offset; + *info = 0; + n = *nl + *nr + 1; + m = n + *sqre; - here += -2; - } - } - } - if (here > *ilst) { - goto L20; - } + if (*icompq < 0 || *icompq > 1) { + *info = -1; + } else if (*nl < 1) { + *info = -2; + } else if (*nr < 1) { + *info = -3; + } else if (*sqre < 0 || *sqre > 1) { + *info = -4; + } else if (*ldgcol < n) { + *info = -22; + } else if (*ldgnum < n) { + *info = -24; } - *ilst = here; + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "DLASD7", &i__1); +return TCL_ERROR; return TCL_OK; + } + nlp1 = *nl + 1; + nlp2 = *nl + 2; + if (*icompq == 1) { + *givptr = 0; + } -} /* dtrexc_ */ -static /* Subroutine */ int dormhr_ (Tcl_Interp *interp, char *side, char *trans, integer *m, integer *n, integer *ilo, integer *ihi, doublereal *a, integer *lda, doublereal * tau, doublereal *c__, integer *ldc, doublereal *work, integer *lwork, integer *info) -{ - address a__1[2]; - integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2; - char ch__1[2]; - - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - - integer i1, i2, nb, mi, nh, ni, nq, 
nw; - logical left; - integer iinfo; - integer lwkopt; - logical lquery; + z1 = *alpha * vl[nlp1]; + vl[nlp1] = 0.; + tau = vf[nlp1]; + for (i__ = *nl; i__ >= 1; --i__) { + z__[i__ + 1] = *alpha * vl[i__]; + vl[i__] = 0.; + vf[i__ + 1] = vf[i__]; + d__[i__ + 1] = d__[i__]; + idxq[i__ + 1] = idxq[i__] + 1; + } + vf[1] = tau; + i__1 = m; + for (i__ = nlp2; i__ <= i__1; ++i__) { + z__[i__] = *beta * vf[i__]; + vf[i__] = 0.; + } + i__1 = n; + for (i__ = nlp2; i__ <= i__1; ++i__) { + idxq[i__] += nlp1; + } + i__1 = n; + for (i__ = 2; i__ <= i__1; ++i__) { + dsigma[i__] = d__[idxq[i__]]; + zw[i__] = z__[idxq[i__]]; + vfw[i__] = vf[idxq[i__]]; + vlw[i__] = vl[idxq[i__]]; + } + if (dlamrg_(interp, nl, nr, &dsigma[2], &dlasd7_c__1, &dlasd7_c__1, &idx[2])!=TCL_OK) { return TCL_ERROR; } + i__1 = n; + for (i__ = 2; i__ <= i__1; ++i__) { + idxi = idx[i__] + 1; + d__[i__] = dsigma[idxi]; + z__[i__] = zw[idxi]; + vf[i__] = vfw[idxi]; + vl[i__] = vlw[idxi]; + } + eps = dlamch_("Epsilon"); + d__1 = abs(*alpha), d__2 = abs(*beta); + tol = max(d__1,d__2); + d__2 = (d__1 = d__[n], abs(d__1)); + tol = eps * 64. 
* max(d__2,tol); + *k = 1; + k2 = n + 1; + i__1 = n; + for (j = 2; j <= i__1; ++j) { + if ((d__1 = z__[j], abs(d__1)) <= tol) { + --k2; + idxp[k2] = j; + if (j == n) { + goto L100; + } + } else { + jprev = j; + goto L70; + } + } +L70: + j = jprev; +L80: + ++j; + if (j > n) { + goto L90; + } + if ((d__1 = z__[j], abs(d__1)) <= tol) { + --k2; + idxp[k2] = j; + } else { + if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) { + *s = z__[jprev]; + *c__ = z__[j]; - a_dim1 = *lda; - a_offset = 1 + a_dim1; - a -= a_offset; - --tau; - c_dim1 = *ldc; - c_offset = 1 + c_dim1; - c__ -= c_offset; - --work; + tau = dlapy2_(c__, s); + z__[j] = tau; + z__[jprev] = 0.; + *c__ /= tau; + *s = -(*s) / tau; - *info = 0; - nh = *ihi - *ilo; - left = lsame_(side, "L"); - lquery = *lwork == -1; + if (*icompq == 1) { + ++(*givptr); + idxjp = idxq[idx[jprev] + 1]; + idxj = idxq[idx[j] + 1]; + if (idxjp <= nlp1) { + --idxjp; + } + if (idxj <= nlp1) { + --idxj; + } + givcol[*givptr + (givcol_dim1 << 1)] = idxjp; + givcol[*givptr + givcol_dim1] = idxj; + givnum[*givptr + (givnum_dim1 << 1)] = *c__; + givnum[*givptr + givnum_dim1] = *s; + } + if (drot_(interp, &dlasd7_c__1, &vf[jprev], &dlasd7_c__1, &vf[j], &dlasd7_c__1, c__, s)!=TCL_OK) { return TCL_ERROR; } - if (left) { - nq = *m; - nw = *n; - } else { - nq = *n; - nw = *m; - } - if (! left && ! lsame_(side, "R")) { - *info = -1; - } else if (! lsame_(trans, "N") && ! lsame_(trans, - "T")) { - *info = -2; - } else if (*m < 0) { - *info = -3; - } else if (*n < 0) { - *info = -4; - } else if (*ilo < 1 || *ilo > max(1,nq)) { - *info = -5; - } else if (*ihi < min(*ilo,nq) || *ihi > nq) { - *info = -6; - } else if (*lda < max(1,nq)) { - *info = -8; - } else if (*ldc < max(1,*m)) { - *info = -11; - } else if (*lwork < max(1,nw) && ! 
lquery) { - *info = -13; - } + if (drot_(interp, &dlasd7_c__1, &vl[jprev], &dlasd7_c__1, &vl[j], &dlasd7_c__1, c__, s)!=TCL_OK) { return TCL_ERROR; } - if (*info == 0) { - if (left) { - i__1[0] = 1, a__1[0] = side; - i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &dormhr_c__2, (ftnlen)2); - nb = ilaenv_(&dormhr_c__1, "DORMQR", ch__1, &nh, n, &nh, &dormhr_c_n1); + --k2; + idxp[k2] = jprev; + jprev = j; } else { - i__1[0] = 1, a__1[0] = side; - i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &dormhr_c__2, (ftnlen)2); - nb = ilaenv_(&dormhr_c__1, "DORMQR", ch__1, m, &nh, &nh, &dormhr_c_n1); + ++(*k); + zw[*k] = z__[jprev]; + dsigma[*k] = d__[jprev]; + idxp[*k] = jprev; + jprev = j; } - lwkopt = max(1,nw) * nb; - work[1] = (doublereal) lwkopt; } + goto L80; +L90: - if (*info != 0) { - i__2 = -(*info); - vectcl_xerbla(interp, "DORMHR", &i__2); -return TCL_ERROR; -return TCL_OK; - } else if (lquery) { -return TCL_OK; - } + ++(*k); + zw[*k] = z__[jprev]; + dsigma[*k] = d__[jprev]; + idxp[*k] = jprev; +L100: - if (*m == 0 || *n == 0 || nh == 0) { - work[1] = 1.; -return TCL_OK; - } - if (left) { - mi = nh; - ni = *n; - i1 = *ilo + 1; - i2 = 1; - } else { - mi = *m; - ni = nh; - i1 = 1; - i2 = *ilo + 1; + i__1 = n; + for (j = 2; j <= i__1; ++j) { + jp = idxp[j]; + dsigma[j] = d__[jp]; + vfw[j] = vf[jp]; + vlw[j] = vl[jp]; + } + if (*icompq == 1) { + i__1 = n; + for (j = 2; j <= i__1; ++j) { + jp = idxp[j]; + perm[j] = idxq[idx[jp] + 1]; + if (perm[j] <= nlp1) { + --perm[j]; + } + } } - - if (dormqr_(interp, side, trans, &mi, &ni, &nh, &a[*ilo + 1 + *ilo * a_dim1], lda, & tau[*ilo], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo)!=TCL_OK) { return TCL_ERROR; } + i__1 = n - *k; + if (dcopy_(interp, &i__1, &dsigma[*k + 1], &dlasd7_c__1, &d__[*k + 1], &dlasd7_c__1)!=TCL_OK) { return TCL_ERROR; } - work[1] = (doublereal) lwkopt; -return TCL_OK; -} /* dormhr_ */ -static /* Subroutine */ int dlaqr2_ (Tcl_Interp *interp, logical *wantt, logical *wantz, 
integer *n, integer *ktop, integer *kbot, integer *nw, doublereal *h__, integer * ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, integer *ns, integer *nd, doublereal *sr, doublereal *si, doublereal * v, integer *ldv, integer *nh, doublereal *t, integer *ldt, integer * nv, doublereal *wv, integer *ldwv, doublereal *work, integer *lwork) -{ - integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1, - wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; - doublereal d__1, d__2, d__3, d__4, d__5, d__6; + dsigma[1] = 0.; + hlftol = tol / 2.; + if (abs(dsigma[2]) <= hlftol) { + dsigma[2] = hlftol; + } + if (m > n) { + z__[1] = dlapy2_(&z1, &z__[m]); + if (z__[1] <= tol) { + *c__ = 1.; + *s = 0.; + z__[1] = tol; + } else { + *c__ = z1 / z__[1]; + *s = -z__[m] / z__[1]; + } + if (drot_(interp, &dlasd7_c__1, &vf[m], &dlasd7_c__1, &vf[1], &dlasd7_c__1, c__, s)!=TCL_OK) { return TCL_ERROR; } - double sqrt(doublereal); + if (drot_(interp, &dlasd7_c__1, &vl[m], &dlasd7_c__1, &vl[1], &dlasd7_c__1, c__, s)!=TCL_OK) { return TCL_ERROR; } - integer i__, j, k; - doublereal s, aa, bb, cc, dd, cs, sn; - integer jw; - doublereal evi, evk, foo; - integer kln; - doublereal tau, ulp; - integer lwk1, lwk2; - doublereal beta; - integer kend, kcol, info, ifst, ilst, ltop, krow; - logical bulge; - integer infqr, kwtop; - doublereal safmin; - doublereal safmax; - logical sorted; - doublereal smlnum; - integer lwkopt; + } else { + if (abs(z1) <= tol) { + z__[1] = tol; + } else { + z__[1] = z1; + } + } + i__1 = *k - 1; + if (dcopy_(interp, &i__1, &zw[2], &dlasd7_c__1, &z__[2], &dlasd7_c__1)!=TCL_OK) { return TCL_ERROR; } + i__1 = n - 1; + if (dcopy_(interp, &i__1, &vfw[2], &dlasd7_c__1, &vf[2], &dlasd7_c__1)!=TCL_OK) { return TCL_ERROR; } + i__1 = n - 1; + if (dcopy_(interp, &i__1, &vlw[2], &dlasd7_c__1, &vl[2], &dlasd7_c__1)!=TCL_OK) { return TCL_ERROR; } +return TCL_OK; +} /* dlasd7_ */ +static /* Subroutine */ int dlasd8_ (Tcl_Interp *interp, integer *icompq, 
integer *k, doublereal *d__, doublereal *z__, doublereal *vf, doublereal *vl, doublereal *difl, doublereal *difr, integer *lddifr, doublereal *dsigma, doublereal * work, integer *info) +{ + integer difr_dim1, difr_offset, i__1, i__2; + doublereal d__1, d__2; + double sqrt(doublereal), d_sign(doublereal *, doublereal *); + integer i__, j; + doublereal dj, rho; + integer iwk1, iwk2, iwk3; + doublereal temp; + integer iwk2i, iwk3i; + doublereal diflj, difrj, dsigj; + doublereal dsigjp; @@ -60195,610 +63022,648 @@ static /* Subroutine */ int dlaqr2_ (Tcl_Interp *interp, logical *wantt, logical - h_dim1 = *ldh; - h_offset = 1 + h_dim1; - h__ -= h_offset; - z_dim1 = *ldz; - z_offset = 1 + z_dim1; - z__ -= z_offset; - --sr; - --si; - v_dim1 = *ldv; - v_offset = 1 + v_dim1; - v -= v_offset; - t_dim1 = *ldt; - t_offset = 1 + t_dim1; - t -= t_offset; - wv_dim1 = *ldwv; - wv_offset = 1 + wv_dim1; - wv -= wv_offset; + --d__; + --z__; + --vf; + --vl; + --difl; + difr_dim1 = *lddifr; + difr_offset = 1 + difr_dim1; + difr -= difr_offset; + --dsigma; --work; - i__1 = *nw, i__2 = *kbot - *ktop + 1; - jw = min(i__1,i__2); - if (jw <= 2) { - lwkopt = 1; - } else { + *info = 0; + + if (*icompq < 0 || *icompq > 1) { + *info = -1; + } else if (*k < 1) { + *info = -2; + } else if (*lddifr < *k) { + *info = -9; + } + if (*info != 0) { + i__1 = -(*info); + vectcl_xerbla(interp, "DLASD8", &i__1); +return TCL_ERROR; + +return TCL_OK; + } - i__1 = jw - 1; - if (dgehrd_(interp, &jw, &dlaqr2_c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & dlaqr2_c_n1, &info)!=TCL_OK) { return TCL_ERROR; } + if (*k == 1) { + d__[1] = abs(z__[1]); + difl[1] = d__[1]; + if (*icompq == 1) { + difl[2] = 1.; + difr[(difr_dim1 << 1) + 1] = 1.; + } +return TCL_OK; + } - lwk1 = (integer) work[1]; + i__1 = *k; + for (i__ = 1; i__ <= i__1; ++i__) { + dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__]; + } - i__1 = jw - 1; - if (dormhr_(interp, "R", "N", &jw, &jw, &dlaqr2_c__1, &i__1, &t[t_offset], ldt, 
&work[1], &v[v_offset], ldv, &work[1], &dlaqr2_c_n1, &info)!=TCL_OK) { return TCL_ERROR; } + iwk1 = 1; + iwk2 = iwk1 + *k; + iwk3 = iwk2 + *k; + iwk2i = iwk2 - 1; + iwk3i = iwk3 - 1; - lwk2 = (integer) work[1]; + rho = dnrm2_(k, &z__[1], &dlasd8_c__1); + if (dlascl_(interp, "G", &dlasd8_c__0, &dlasd8_c__0, &rho, &dlasd8_c_b8, k, &dlasd8_c__1, &z__[1], k, info)!=TCL_OK) { return TCL_ERROR; } + rho *= rho; - lwkopt = jw + max(lwk1,lwk2); - } + if (dlaset_(interp, "A", k, &dlasd8_c__1, &dlasd8_c_b8, &dlasd8_c_b8, &work[iwk3], k)!=TCL_OK) { return TCL_ERROR; } - if (*lwork == -1) { - work[1] = (doublereal) lwkopt; -return TCL_OK; - } - *ns = 0; - *nd = 0; - work[1] = 1.; - if (*ktop > *kbot) { -return TCL_OK; - } - if (*nw < 1) { -return TCL_OK; - } + i__1 = *k; + for (j = 1; j <= i__1; ++j) { + if (dlasd4_(interp, k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[ iwk2], info)!=TCL_OK) { return TCL_ERROR; } - safmin = dlamch_("SAFE MINIMUM"); - safmax = 1. / safmin; - if (dlabad_(interp, &safmin, &safmax)!=TCL_OK) { return TCL_ERROR; } - ulp = dlamch_("PRECISION"); - smlnum = safmin * ((doublereal) (*n) / ulp); - i__1 = *nw, i__2 = *kbot - *ktop + 1; - jw = min(i__1,i__2); - kwtop = *kbot - jw + 1; - if (kwtop == *ktop) { - s = 0.; - } else { - s = h__[kwtop + (kwtop - 1) * h_dim1]; + if (*info != 0) { +return TCL_OK; + } + work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j]; + difl[j] = -work[j]; + difr[j + difr_dim1] = -work[j + 1]; + i__2 = j - 1; + for (i__ = 1; i__ <= i__2; ++i__) { + work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i + + i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[ + j]); + } + i__2 = *k; + for (i__ = j + 1; i__ <= i__2; ++i__) { + work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i + + i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[ + j]); + } } - if (*kbot == kwtop) { - - sr[kwtop] = h__[kwtop + kwtop * h_dim1]; - si[kwtop] = 0.; - *ns = 1; - *nd = 0; - d__2 = smlnum, d__3 = 
ulp * (d__1 = h__[kwtop + kwtop * h_dim1], abs( - d__1)); - if (abs(s) <= max(d__2,d__3)) { - *ns = 0; - *nd = 1; - if (kwtop > *ktop) { - h__[kwtop + (kwtop - 1) * h_dim1] = 0.; - } - } - work[1] = 1.; -return TCL_OK; + i__1 = *k; + for (i__ = 1; i__ <= i__1; ++i__) { + d__2 = sqrt((d__1 = work[iwk3i + i__], abs(d__1))); + z__[i__] = d_sign(&d__2, &z__[i__]); } - if (dlacpy_(interp, "U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset], ldt)!=TCL_OK) { return TCL_ERROR; } + i__1 = *k; + for (j = 1; j <= i__1; ++j) { + diflj = difl[j]; + dj = d__[j]; + dsigj = -dsigma[j]; + if (j < *k) { + difrj = -difr[j + difr_dim1]; + dsigjp = -dsigma[j + 1]; + } + work[j] = -z__[j] / diflj / (dsigma[j] + dj); + i__2 = j - 1; + for (i__ = 1; i__ <= i__2; ++i__) { + work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigj) - diflj) / ( + dsigma[i__] + dj); + } + i__2 = *k; + for (i__ = j + 1; i__ <= i__2; ++i__) { + work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigjp) + difrj) / + (dsigma[i__] + dj); + } + temp = dnrm2_(k, &work[1], &dlasd8_c__1); + work[iwk2i + j] = ddot_(k, &work[1], &dlasd8_c__1, &vf[1], &dlasd8_c__1) / temp; + work[iwk3i + j] = ddot_(k, &work[1], &dlasd8_c__1, &vl[1], &dlasd8_c__1) / temp; + if (*icompq == 1) { + difr[j + (difr_dim1 << 1)] = temp; + } + } + if (dcopy_(interp, k, &work[iwk2], &dlasd8_c__1, &vf[1], &dlasd8_c__1)!=TCL_OK) { return TCL_ERROR; } - i__1 = jw - 1; - i__2 = *ldh + 1; - i__3 = *ldt + 1; - if (dcopy_(interp, &i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], & i__3)!=TCL_OK) { return TCL_ERROR; } + if (dcopy_(interp, k, &work[iwk3], &dlasd8_c__1, &vl[1], &dlasd8_c__1)!=TCL_OK) { return TCL_ERROR; } +return TCL_OK; - if (dlaset_(interp, "A", &jw, &jw, &dlaqr2_c_b12, &dlaqr2_c_b13, &v[v_offset], ldv)!=TCL_OK) { return TCL_ERROR; } - if (dlahqr_(interp, &dlaqr2_c_true, &dlaqr2_c_true, &jw, &dlaqr2_c__1, &jw, &t[t_offset], ldt, &sr[kwtop], &si[kwtop], &dlaqr2_c__1, &jw, &v[v_offset], ldv, &infqr)!=TCL_OK) { return 
TCL_ERROR; } +} /* dlasd8_ */ +static /* Subroutine */ int zgerc_ (Tcl_Interp *interp, integer *m, integer *n, doublecomplex *alpha, doublecomplex *x, integer *incx, doublecomplex *y, integer *incy, doublecomplex *a, integer *lda) +{ + integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; + doublecomplex z__1, z__2; + void d_cnjg(doublecomplex *, doublecomplex *); + integer i__, j, ix, jy, kx, info; + doublecomplex temp; - i__1 = jw - 3; - for (j = 1; j <= i__1; ++j) { - t[j + 2 + j * t_dim1] = 0.; - t[j + 3 + j * t_dim1] = 0.; - } - if (jw > 2) { - t[jw + (jw - 2) * t_dim1] = 0.; - } - *ns = jw; - ilst = infqr + 1; -L20: - if (ilst <= *ns) { - if (*ns == 1) { - bulge = FALSE_; - } else { - bulge = t[*ns + (*ns - 1) * t_dim1] != 0.; - } - if (! bulge) { - foo = (d__1 = t[*ns + *ns * t_dim1], abs(d__1)); - if (foo == 0.) { - foo = abs(s); - } - d__2 = smlnum, d__3 = ulp * foo; - if ((d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)) <= max(d__2,d__3)) - { - --(*ns); - } else { - ifst = *ns; - if (dtrexc_(interp, "V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info)!=TCL_OK) { return TCL_ERROR; } - ++ilst; - } - } else { - foo = (d__3 = t[*ns + *ns * t_dim1], abs(d__3)) + sqrt((d__1 = t[* - ns + (*ns - 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[* - ns - 1 + *ns * t_dim1], abs(d__2))); - if (foo == 0.) 
{ - foo = abs(s); - } - d__3 = (d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)), d__4 = (d__2 = - s * v[(*ns - 1) * v_dim1 + 1], abs(d__2)); - d__5 = smlnum, d__6 = ulp * foo; - if (max(d__3,d__4) <= max(d__5,d__6)) { - *ns += -2; - } else { - ifst = *ns; - if (dtrexc_(interp, "V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info)!=TCL_OK) { return TCL_ERROR; } - ilst += 2; - } - } + --x; + --y; + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + info = 0; + if (*m < 0) { + info = 1; + } else if (*n < 0) { + info = 2; + } else if (*incx == 0) { + info = 5; + } else if (*incy == 0) { + info = 7; + } else if (*lda < max(1,*m)) { + info = 9; + } + if (info != 0) { + vectcl_xerbla(interp, "ZGERC ", &info); +return TCL_ERROR; - goto L20; +return TCL_OK; } - if (*ns == 0) { - s = 0.; + if (*m == 0 || *n == 0 || alpha->r == 0. && alpha->i == 0.) { +return TCL_OK; } - if (*ns < jw) { - - sorted = FALSE_; - i__ = *ns + 1; -L30: - if (sorted) { - goto L50; + if (*incy > 0) { + jy = 1; + } else { + jy = 1 - (*n - 1) * *incy; + } + if (*incx == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = jy; + if (y[i__2].r != 0. || y[i__2].i != 0.) { + d_cnjg(&z__2, &y[jy]); + z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i = + alpha->r * z__2.i + alpha->i * z__2.r; + temp.r = z__1.r, temp.i = z__1.i; + i__2 = *m; + for (i__ = 1; i__ <= i__2; ++i__) { + i__3 = i__ + j * a_dim1; + i__4 = i__ + j * a_dim1; + i__5 = i__; + z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i = + x[i__5].r * temp.i + x[i__5].i * temp.r; + z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i; + a[i__3].r = z__1.r, a[i__3].i = z__1.i; + } + } + jy += *incy; } - sorted = TRUE_; - - kend = i__ - 1; - i__ = infqr + 1; - if (i__ == *ns) { - k = i__ + 1; - } else if (t[i__ + 1 + i__ * t_dim1] == 0.) 
{ - k = i__ + 1; + } else { + if (*incx > 0) { + kx = 1; } else { - k = i__ + 2; + kx = 1 - (*m - 1) * *incx; } -L40: - if (k <= kend) { - if (k == i__ + 1) { - evi = (d__1 = t[i__ + i__ * t_dim1], abs(d__1)); - } else { - evi = (d__3 = t[i__ + i__ * t_dim1], abs(d__3)) + sqrt((d__1 = - t[i__ + 1 + i__ * t_dim1], abs(d__1))) * sqrt((d__2 = - t[i__ + (i__ + 1) * t_dim1], abs(d__2))); + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = jy; + if (y[i__2].r != 0. || y[i__2].i != 0.) { + d_cnjg(&z__2, &y[jy]); + z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i = + alpha->r * z__2.i + alpha->i * z__2.r; + temp.r = z__1.r, temp.i = z__1.i; + ix = kx; + i__2 = *m; + for (i__ = 1; i__ <= i__2; ++i__) { + i__3 = i__ + j * a_dim1; + i__4 = i__ + j * a_dim1; + i__5 = ix; + z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i = + x[i__5].r * temp.i + x[i__5].i * temp.r; + z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i; + a[i__3].r = z__1.r, a[i__3].i = z__1.i; + ix += *incx; + } } + jy += *incy; + } + } - if (k == kend) { - evk = (d__1 = t[k + k * t_dim1], abs(d__1)); - } else if (t[k + 1 + k * t_dim1] == 0.) { - evk = (d__1 = t[k + k * t_dim1], abs(d__1)); - } else { - evk = (d__3 = t[k + k * t_dim1], abs(d__3)) + sqrt((d__1 = t[ - k + 1 + k * t_dim1], abs(d__1))) * sqrt((d__2 = t[k + - (k + 1) * t_dim1], abs(d__2))); - } +return TCL_OK; - if (evi >= evk) { - i__ = k; - } else { - sorted = FALSE_; - ifst = i__; - ilst = k; - if (dtrexc_(interp, "V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info)!=TCL_OK) { return TCL_ERROR; } +} /* zgerc_ */ +static doublereal dlapy3_ (doublereal *x, doublereal *y, doublereal *z__) +{ + doublereal ret_val, d__1, d__2, d__3; - if (info == 0) { - i__ = ilst; - } else { - i__ = k; - } - } - if (i__ == kend) { - k = i__ + 1; - } else if (t[i__ + 1 + i__ * t_dim1] == 0.) 
{ - k = i__ + 1; - } else { - k = i__ + 2; - } - goto L40; - } - goto L30; -L50: - ; - } + double sqrt(doublereal); + doublereal w, xabs, yabs, zabs; - i__ = jw; -L60: - if (i__ >= infqr + 1) { - if (i__ == infqr + 1) { - sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; - si[kwtop + i__ - 1] = 0.; - --i__; - } else if (t[i__ + (i__ - 1) * t_dim1] == 0.) { - sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; - si[kwtop + i__ - 1] = 0.; - --i__; - } else { - aa = t[i__ - 1 + (i__ - 1) * t_dim1]; - cc = t[i__ + (i__ - 1) * t_dim1]; - bb = t[i__ - 1 + i__ * t_dim1]; - dd = t[i__ + i__ * t_dim1]; - if (dlanv2_(interp, &aa, &bb, &cc, &dd, &sr[kwtop + i__ - 2], &si[kwtop + i__ - 2], &sr[kwtop + i__ - 1], &si[kwtop + i__ - 1], &cs, & - sn)!=TCL_OK) { return TCL_ERROR; } - i__ += -2; - } - goto L60; + + + + + + + + xabs = abs(*x); + yabs = abs(*y); + zabs = abs(*z__); + d__1 = max(xabs,yabs); + w = max(d__1,zabs); + if (w == 0.) { + ret_val = xabs + yabs + zabs; + } else { + d__1 = xabs / w; + d__2 = yabs / w; + d__3 = zabs / w; + ret_val = w * sqrt(d__1 * d__1 + d__2 * d__2 + d__3 * d__3); } + return ret_val; - if (*ns < jw || s == 0.) { - if (*ns > 1 && s != 0.) 
{ +} /* dlapy3_ */ +static /* Subroutine */ int dlasq3_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *dmin__, doublereal *sigma, doublereal *desig, doublereal *qmax, integer *nfail, integer *iter, integer *ndiv, logical *ieee, integer *ttype, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *g, doublereal *tau) +{ + integer i__1; + doublereal d__1, d__2; - if (dcopy_(interp, ns, &v[v_offset], ldv, &work[1], &dlaqr2_c__1)!=TCL_OK) { return TCL_ERROR; } + double sqrt(doublereal); - beta = work[1]; - if (dlarfg_(interp, ns, &beta, &work[2], &dlaqr2_c__1, &tau)!=TCL_OK) { return TCL_ERROR; } + doublereal s, t; + integer j4, nn; + doublereal eps, tol; + integer n0in, ipn4; + doublereal tol2, temp; - work[1] = 1.; - i__1 = jw - 2; - i__2 = jw - 2; - if (dlaset_(interp, "L", &i__1, &i__2, &dlaqr2_c_b12, &dlaqr2_c_b12, &t[t_dim1 + 3], ldt)!=TCL_OK) { return TCL_ERROR; } - if (dlarf_(interp, "L", ns, &jw, &work[1], &dlaqr2_c__1, &tau, &t[t_offset], ldt, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } - if (dlarf_(interp, "R", ns, ns, &work[1], &dlaqr2_c__1, &tau, &t[t_offset], ldt, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } - if (dlarf_(interp, "R", &jw, ns, &work[1], &dlaqr2_c__1, &tau, &v[v_offset], ldv, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } - i__1 = *lwork - jw; - if (dgehrd_(interp, &jw, &dlaqr2_c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1], &i__1, &info)!=TCL_OK) { return TCL_ERROR; } - } - if (kwtop > 1) { - h__[kwtop + (kwtop - 1) * h_dim1] = s * v[v_dim1 + 1]; - } - if (dlacpy_(interp, "U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } - i__1 = jw - 1; - i__2 = *ldt + 1; - i__3 = *ldh + 1; - if (dcopy_(interp, &i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1], &i__3)!=TCL_OK) { return TCL_ERROR; } - if (*ns > 1 && s != 0.) 
{ - i__1 = *lwork - jw; - if (dormhr_(interp, "R", "N", &jw, ns, &dlaqr2_c__1, ns, &t[t_offset], ldt, &work[1], &v[v_offset], ldv, &work[jw + 1], &i__1, &info)!=TCL_OK) { return TCL_ERROR; } - } - if (*wantt) { - ltop = 1; - } else { - ltop = *ktop; - } - i__1 = kwtop - 1; - i__2 = *nv; - for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += - i__2) { - i__3 = *nv, i__4 = kwtop - krow; - kln = min(i__3,i__4); - if (dgemm_(interp, "N", "N", &kln, &jw, &jw, &dlaqr2_c_b13, &h__[krow + kwtop * h_dim1], ldh, &v[v_offset], ldv, &dlaqr2_c_b12, &wv[wv_offset], - ldwv)!=TCL_OK) { return TCL_ERROR; } + --z__; + n0in = *n0; + eps = dlamch_("Precision"); + tol = eps * 100.; + d__1 = tol; + tol2 = d__1 * d__1; - if (dlacpy_(interp, "A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } +L10: - } + if (*n0 < *i0) { +return TCL_OK; + } + if (*n0 == *i0) { + goto L20; + } + nn = (*n0 << 2) + *pp; + if (*n0 == *i0 + 1) { + goto L40; + } - if (*wantt) { - i__2 = *n; - i__1 = *nh; - for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2; - kcol += i__1) { - i__3 = *nh, i__4 = *n - kcol + 1; - kln = min(i__3,i__4); - if (dgemm_(interp, "C", "N", &jw, &kln, &jw, &dlaqr2_c_b13, &v[v_offset], ldv, & h__[kwtop + kcol * h_dim1], ldh, &dlaqr2_c_b12, &t[t_offset], - ldt)!=TCL_OK) { return TCL_ERROR; } + if (z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - (*pp << 1) - + 4] > tol2 * z__[nn - 7]) { + goto L30; + } +L20: - if (dlacpy_(interp, "A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } + z__[(*n0 << 2) - 3] = z__[(*n0 << 2) + *pp - 3] + *sigma; + --(*n0); + goto L10; - } - } +L30: + if (z__[nn - 9] > tol2 * *sigma && z__[nn - (*pp << 1) - 8] > tol2 * z__[ + nn - 11]) { + goto L50; + } - if (*wantz) { - i__1 = *ihiz; - i__2 = *nv; - for (krow = *iloz; i__2 < 0 ? 
krow >= i__1 : krow <= i__1; krow += - i__2) { - i__3 = *nv, i__4 = *ihiz - krow + 1; - kln = min(i__3,i__4); - if (dgemm_(interp, "N", "N", &kln, &jw, &jw, &dlaqr2_c_b13, &z__[krow + kwtop * z_dim1], ldz, &v[v_offset], ldv, &dlaqr2_c_b12, &wv[ - wv_offset], ldwv)!=TCL_OK) { return TCL_ERROR; } +L40: + if (z__[nn - 3] > z__[nn - 7]) { + s = z__[nn - 3]; + z__[nn - 3] = z__[nn - 7]; + z__[nn - 7] = s; + } + if (z__[nn - 5] > z__[nn - 3] * tol2) { + t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5; + s = z__[nn - 3] * (z__[nn - 5] / t); + if (s <= t) { + s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.) + 1.))); + } else { + s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s))); + } + t = z__[nn - 7] + (s + z__[nn - 5]); + z__[nn - 3] *= z__[nn - 7] / t; + z__[nn - 7] = t; + } + z__[(*n0 << 2) - 7] = z__[nn - 7] + *sigma; + z__[(*n0 << 2) - 3] = z__[nn - 3] + *sigma; + *n0 += -2; + goto L10; - if (dlacpy_(interp, "A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow + kwtop * z_dim1], ldz)!=TCL_OK) { return TCL_ERROR; } +L50: + if (*pp == 2) { + *pp = 0; + } + if (*dmin__ <= 0. 
|| *n0 < n0in) { + if (z__[(*i0 << 2) + *pp - 3] * 1.5 < z__[(*n0 << 2) + *pp - 3]) { + ipn4 = *i0 + *n0 << 2; + i__1 = *i0 + *n0 - 1 << 1; + for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { + temp = z__[j4 - 3]; + z__[j4 - 3] = z__[ipn4 - j4 - 3]; + z__[ipn4 - j4 - 3] = temp; + temp = z__[j4 - 2]; + z__[j4 - 2] = z__[ipn4 - j4 - 2]; + z__[ipn4 - j4 - 2] = temp; + temp = z__[j4 - 1]; + z__[j4 - 1] = z__[ipn4 - j4 - 5]; + z__[ipn4 - j4 - 5] = temp; + temp = z__[j4]; + z__[j4] = z__[ipn4 - j4 - 4]; + z__[ipn4 - j4 - 4] = temp; + } + if (*n0 - *i0 <= 4) { + z__[(*n0 << 2) + *pp - 1] = z__[(*i0 << 2) + *pp - 1]; + z__[(*n0 << 2) - *pp] = z__[(*i0 << 2) - *pp]; } + d__1 = *dmin2, d__2 = z__[(*n0 << 2) + *pp - 1]; + *dmin2 = min(d__1,d__2); + d__1 = z__[(*n0 << 2) + *pp - 1], d__2 = z__[(*i0 << 2) + *pp - 1] + , d__1 = min(d__1,d__2), d__2 = z__[(*i0 << 2) + *pp + 3]; + z__[(*n0 << 2) + *pp - 1] = min(d__1,d__2); + d__1 = z__[(*n0 << 2) - *pp], d__2 = z__[(*i0 << 2) - *pp], d__1 = + min(d__1,d__2), d__2 = z__[(*i0 << 2) - *pp + 4]; + z__[(*n0 << 2) - *pp] = min(d__1,d__2); + d__1 = *qmax, d__2 = z__[(*i0 << 2) + *pp - 3], d__1 = max(d__1, + d__2), d__2 = z__[(*i0 << 2) + *pp + 1]; + *qmax = max(d__1,d__2); + *dmin__ = -0.; } } - *nd = jw - *ns; + if (dlasq4_(interp, i0, n0, &z__[1], pp, &n0in, dmin__, dmin1, dmin2, dn, dn1, dn2, tau, ttype, g)!=TCL_OK) { return TCL_ERROR; } - *ns -= infqr; - work[1] = (doublereal) lwkopt; +L70: + if (dlasq5_(interp, i0, n0, &z__[1], pp, tau, dmin__, dmin1, dmin2, dn, dn1, dn2, ieee)!=TCL_OK) { return TCL_ERROR; } -return TCL_OK; -} /* dlaqr2_ */ -static /* Subroutine */ int dlaqr1_ (Tcl_Interp *interp, integer *n, doublereal *h__, integer *ldh, doublereal *sr1, doublereal *si1, doublereal *sr2, doublereal *si2, doublereal *v) -{ - integer h_dim1, h_offset; - doublereal d__1, d__2, d__3; - doublereal s, h21s, h31s; + *ndiv += *n0 - *i0 + 2; + ++(*iter); + if (*dmin__ >= 0. && *dmin1 > 0.) { + goto L90; + } else if (*dmin__ < 0. && *dmin1 > 0. 
&& z__[(*n0 - 1 << 2) - *pp] < tol + * (*sigma + *dn1) && abs(*dn) < tol * *sigma) { + z__[(*n0 - 1 << 2) - *pp + 2] = 0.; + *dmin__ = 0.; + goto L90; + } else if (*dmin__ < 0.) { + ++(*nfail); + if (*ttype < -22) { + *tau = 0.; + } else if (*dmin1 > 0.) { + *tau = (*tau + *dmin__) * (1. - eps * 2.); + *ttype += -11; + } else { + *tau *= .25; + *ttype += -12; + } + goto L70; + } else if (disnan_(dmin__)) { - h_dim1 = *ldh; - h_offset = 1 + h_dim1; - h__ -= h_offset; - --v; - if (*n == 2) { - s = (d__1 = h__[h_dim1 + 1] - *sr2, abs(d__1)) + abs(*si2) + (d__2 = - h__[h_dim1 + 2], abs(d__2)); - if (s == 0.) { - v[1] = 0.; - v[2] = 0.; + if (*tau == 0.) { + goto L80; } else { - h21s = h__[h_dim1 + 2] / s; - v[1] = h21s * h__[(h_dim1 << 1) + 1] + (h__[h_dim1 + 1] - *sr1) * - ((h__[h_dim1 + 1] - *sr2) / s) - *si1 * (*si2 / s); - v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - * - sr2); + *tau = 0.; + goto L70; } } else { - s = (d__1 = h__[h_dim1 + 1] - *sr2, abs(d__1)) + abs(*si2) + (d__2 = - h__[h_dim1 + 2], abs(d__2)) + (d__3 = h__[h_dim1 + 3], abs( - d__3)); - if (s == 0.) 
{ - v[1] = 0.; - v[2] = 0.; - v[3] = 0.; - } else { - h21s = h__[h_dim1 + 2] / s; - h31s = h__[h_dim1 + 3] / s; - v[1] = (h__[h_dim1 + 1] - *sr1) * ((h__[h_dim1 + 1] - *sr2) / s) - - *si1 * (*si2 / s) + h__[(h_dim1 << 1) + 1] * h21s + h__[ - h_dim1 * 3 + 1] * h31s; - v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - * - sr2) + h__[h_dim1 * 3 + 2] * h31s; - v[3] = h31s * (h__[h_dim1 + 1] + h__[h_dim1 * 3 + 3] - *sr1 - * - sr2) + h21s * h__[(h_dim1 << 1) + 3]; - } - } -return TCL_OK; -} /* dlaqr1_ */ -static /* Subroutine */ int ztrexc_ (Tcl_Interp *interp, char *compq, integer *n, doublecomplex *t, integer *ldt, doublecomplex *q, integer *ldq, integer *ifst, integer * ilst, integer *info) -{ - integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3; - doublecomplex z__1; - - void d_cnjg(doublecomplex *, doublecomplex *); - - integer k, m1, m2, m3; - doublereal cs; - doublecomplex t11, t22, sn, temp; - logical wantq; - - - - - - - - - - - - - - - + goto L80; + } - t_dim1 = *ldt; - t_offset = 1 + t_dim1; - t -= t_offset; - q_dim1 = *ldq; - q_offset = 1 + q_dim1; - q -= q_offset; +L80: + if (dlasq6_(interp, i0, n0, &z__[1], pp, dmin__, dmin1, dmin2, dn, dn1, dn2)!=TCL_OK) { return TCL_ERROR; } - *info = 0; - wantq = lsame_(compq, "V"); - if (! lsame_(compq, "N") && ! 
wantq) { - *info = -1; - } else if (*n < 0) { - *info = -2; - } else if (*ldt < max(1,*n)) { - *info = -4; - } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) { - *info = -6; - } else if (*ifst < 1 || *ifst > *n) { - *info = -7; - } else if (*ilst < 1 || *ilst > *n) { - *info = -8; - } - if (*info != 0) { - i__1 = -(*info); - vectcl_xerbla(interp, "ZTREXC", &i__1); -return TCL_ERROR; + *ndiv += *n0 - *i0 + 2; + ++(*iter); + *tau = 0.; -return TCL_OK; +L90: + if (*tau < *sigma) { + *desig += *tau; + t = *sigma + *desig; + *desig -= t - *sigma; + } else { + t = *sigma + *tau; + *desig = *sigma - (t - *tau) + *desig; } + *sigma = t; - - if (*n == 1 || *ifst == *ilst) { return TCL_OK; - } - if (*ifst < *ilst) { +} /* dlasq3_ */ +static integer dlaneg_ (integer *n, doublereal *d__, doublereal *lld, doublereal * sigma, doublereal *pivmin, integer *r__) +{ + integer ret_val, i__1, i__2, i__3, i__4; - m1 = 0; - m2 = -1; - m3 = 1; - } else { + integer j; + doublereal p, t; + integer bj; + doublereal tmp; + integer neg1, neg2; + doublereal bsav, gamma, dplus; + integer negcnt; + logical sawnan; + doublereal dminus; - m1 = -1; - m2 = 0; - m3 = -1; - } - i__1 = *ilst + m2; - i__2 = m3; - for (k = *ifst + m1; i__2 < 0 ? 
k >= i__1 : k <= i__1; k += i__2) { - i__3 = k + k * t_dim1; - t11.r = t[i__3].r, t11.i = t[i__3].i; - i__3 = k + 1 + (k + 1) * t_dim1; - t22.r = t[i__3].r, t22.i = t[i__3].i; - z__1.r = t22.r - t11.r, z__1.i = t22.i - t11.i; - if (zlartg_(interp, &t[k + (k + 1) * t_dim1], &z__1, &cs, &sn, &temp)!=TCL_OK) { return TCL_ERROR; } - if (k + 2 <= *n) { - i__3 = *n - k - 1; - if (zrot_(interp, &i__3, &t[k + (k + 2) * t_dim1], ldt, &t[k + 1 + (k + 2) * t_dim1], ldt, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - } - i__3 = k - 1; - d_cnjg(&z__1, &sn); - if (zrot_(interp, &i__3, &t[k * t_dim1 + 1], &ztrexc_c__1, &t[(k + 1) * t_dim1 + 1], & ztrexc_c__1, &cs, &z__1)!=TCL_OK) { return TCL_ERROR; } - i__3 = k + k * t_dim1; - t[i__3].r = t22.r, t[i__3].i = t22.i; - i__3 = k + 1 + (k + 1) * t_dim1; - t[i__3].r = t11.r, t[i__3].i = t11.i; - if (wantq) { - d_cnjg(&z__1, &sn); - if (zrot_(interp, n, &q[k * q_dim1 + 1], &ztrexc_c__1, &q[(k + 1) * q_dim1 + 1], & ztrexc_c__1, &cs, &z__1)!=TCL_OK) { return TCL_ERROR; } + --lld; + --d__; + negcnt = 0; + t = -(*sigma); + i__1 = *r__ - 1; + for (bj = 1; bj <= i__1; bj += 128) { + neg1 = 0; + bsav = t; + i__3 = bj + 127, i__4 = *r__ - 1; + i__2 = min(i__3,i__4); + for (j = bj; j <= i__2; ++j) { + dplus = d__[j] + t; + if (dplus < 0.) { + ++neg1; + } + tmp = t / dplus; + t = tmp * lld[j] - *sigma; } - + sawnan = disnan_(&t); + if (sawnan) { + neg1 = 0; + t = bsav; + i__3 = bj + 127, i__4 = *r__ - 1; + i__2 = min(i__3,i__4); + for (j = bj; j <= i__2; ++j) { + dplus = d__[j] + t; + if (dplus < 0.) { + ++neg1; + } + tmp = t / dplus; + if (disnan_(&tmp)) { + tmp = 1.; + } + t = tmp * lld[j] - *sigma; + } + } + negcnt += neg1; } -return TCL_OK; + p = d__[*n] - *sigma; + i__1 = *r__; + for (bj = *n - 1; bj >= i__1; bj += -128) { + neg2 = 0; + bsav = p; + i__3 = bj - 127; + i__2 = max(i__3,*r__); + for (j = bj; j >= i__2; --j) { + dminus = lld[j] + p; + if (dminus < 0.) 
{ + ++neg2; + } + tmp = p / dminus; + p = tmp * d__[j] - *sigma; + } + sawnan = disnan_(&p); + if (sawnan) { + neg2 = 0; + p = bsav; + i__3 = bj - 127; + i__2 = max(i__3,*r__); + for (j = bj; j >= i__2; --j) { + dminus = lld[j] + p; + if (dminus < 0.) { + ++neg2; + } + tmp = p / dminus; + if (disnan_(&tmp)) { + tmp = 1.; + } + p = tmp * d__[j] - *sigma; + } + } + negcnt += neg2; + } -} /* ztrexc_ */ -static /* Subroutine */ int zunmhr_ (Tcl_Interp *interp, char *side, char *trans, integer *m, integer *n, integer *ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *c__, integer *ldc, doublecomplex * work, integer *lwork, integer *info) + gamma = t + *sigma + p; + if (gamma < 0.) { + ++negcnt; + } + ret_val = negcnt; + return ret_val; +} /* dlaneg_ */ +static /* Subroutine */ int dormhr_ (Tcl_Interp *interp, char *side, char *trans, integer *m, integer *n, integer *ilo, integer *ihi, doublereal *a, integer *lda, doublereal * tau, doublereal *c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2; @@ -60863,7 +63728,7 @@ static /* Subroutine */ int zunmhr_ (Tcl_Interp *interp, char *side, char *trans if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! lsame_(trans, "N") && ! 
lsame_(trans, - "C")) { + "T")) { *info = -2; } else if (*m < 0) { *info = -3; @@ -60885,21 +63750,21 @@ static /* Subroutine */ int zunmhr_ (Tcl_Interp *interp, char *side, char *trans if (left) { i__1[0] = 1, a__1[0] = side; i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &zunmhr_c__2, (ftnlen)2); - nb = ilaenv_(&zunmhr_c__1, "ZUNMQR", ch__1, &nh, n, &nh, &zunmhr_c_n1); + s_cat(ch__1, a__1, i__1, &dormhr_c__2, (ftnlen)2); + nb = ilaenv_(&dormhr_c__1, "DORMQR", ch__1, &nh, n, &nh, &dormhr_c_n1); } else { i__1[0] = 1, a__1[0] = side; i__1[1] = 1, a__1[1] = trans; - s_cat(ch__1, a__1, i__1, &zunmhr_c__2, (ftnlen)2); - nb = ilaenv_(&zunmhr_c__1, "ZUNMQR", ch__1, m, &nh, &nh, &zunmhr_c_n1); + s_cat(ch__1, a__1, i__1, &dormhr_c__2, (ftnlen)2); + nb = ilaenv_(&dormhr_c__1, "DORMQR", ch__1, m, &nh, &nh, &dormhr_c_n1); } lwkopt = max(1,nw) * nb; - work[1].r = (doublereal) lwkopt, work[1].i = 0.; + work[1] = (doublereal) lwkopt; } if (*info != 0) { i__2 = -(*info); - vectcl_xerbla(interp, "ZUNMHR", &i__2); + vectcl_xerbla(interp, "DORMHR", &i__2); return TCL_ERROR; return TCL_OK; @@ -60909,7 +63774,7 @@ return TCL_OK; if (*m == 0 || *n == 0 || nh == 0) { - work[1].r = 1., work[1].i = 0.; + work[1] = 1.; return TCL_OK; } @@ -60925,39 +63790,37 @@ return TCL_OK; i2 = *ilo + 1; } - if (zunmqr_(interp, side, trans, &mi, &ni, &nh, &a[*ilo + 1 + *ilo * a_dim1], lda, & tau[*ilo], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo)!=TCL_OK) { return TCL_ERROR; } + if (dormqr_(interp, side, trans, &mi, &ni, &nh, &a[*ilo + 1 + *ilo * a_dim1], lda, & tau[*ilo], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo)!=TCL_OK) { return TCL_ERROR; } - work[1].r = (doublereal) lwkopt, work[1].i = 0.; + work[1] = (doublereal) lwkopt; return TCL_OK; -} /* zunmhr_ */ -static /* Subroutine */ int zlaqr2_ (Tcl_Interp *interp, logical *wantt, logical *wantz, integer *n, integer *ktop, integer *kbot, integer *nw, doublecomplex *h__, integer *ldh, integer *iloz, integer *ihiz, 
doublecomplex *z__, integer *ldz, integer *ns, integer *nd, doublecomplex *sh, doublecomplex *v, integer *ldv, integer *nh, doublecomplex *t, integer *ldt, integer *nv, doublecomplex *wv, integer *ldwv, doublecomplex *work, integer *lwork) +} /* dormhr_ */ +static /* Subroutine */ int dlaqr2_ (Tcl_Interp *interp, logical *wantt, logical *wantz, integer *n, integer *ktop, integer *kbot, integer *nw, doublereal *h__, integer * ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, integer *ns, integer *nd, doublereal *sr, doublereal *si, doublereal * v, integer *ldv, integer *nh, doublereal *t, integer *ldt, integer * nv, doublereal *wv, integer *ldwv, doublereal *work, integer *lwork) { integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; doublereal d__1, d__2, d__3, d__4, d__5, d__6; - doublecomplex z__1, z__2; - double d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex *); + double sqrt(doublereal); - integer i__, j; - doublecomplex s; + integer i__, j, k; + doublereal s, aa, bb, cc, dd, cs, sn; integer jw; - doublereal foo; + doublereal evi, evk, foo; integer kln; - doublecomplex tau; - integer knt; - doublereal ulp; + doublereal tau, ulp; integer lwk1, lwk2; - doublecomplex beta; - integer kcol, info, ifst, ilst, ltop, krow; - integer infqr; - integer kwtop; - doublereal safmin, safmax; + doublereal beta; + integer kend, kcol, info, ifst, ilst, ltop, krow; + logical bulge; + integer infqr, kwtop; + doublereal safmin; + doublereal safmax; + logical sorted; doublereal smlnum; integer lwkopt; @@ -61002,7 +63865,8 @@ static /* Subroutine */ int zlaqr2_ (Tcl_Interp *interp, logical *wantt, logical z_dim1 = *ldz; z_offset = 1 + z_dim1; z__ -= z_offset; - --sh; + --sr; + --si; v_dim1 = *ldv; v_offset = 1 + v_dim1; v -= v_offset; @@ -61022,17 +63886,17 @@ static /* Subroutine */ int zlaqr2_ (Tcl_Interp *interp, logical *wantt, logical i__1 = jw - 1; - if (zgehrd_(interp, 
&jw, &zlaqr2_c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & zlaqr2_c_n1, &info)!=TCL_OK) { return TCL_ERROR; } + if (dgehrd_(interp, &jw, &dlaqr2_c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & dlaqr2_c_n1, &info)!=TCL_OK) { return TCL_ERROR; } - lwk1 = (integer) work[1].r; + lwk1 = (integer) work[1]; i__1 = jw - 1; - if (zunmhr_(interp, "R", "N", &jw, &jw, &zlaqr2_c__1, &i__1, &t[t_offset], ldt, &work[1], &v[v_offset], ldv, &work[1], &zlaqr2_c_n1, &info)!=TCL_OK) { return TCL_ERROR; } + if (dormhr_(interp, "R", "N", &jw, &jw, &dlaqr2_c__1, &i__1, &t[t_offset], ldt, &work[1], &v[v_offset], ldv, &work[1], &dlaqr2_c_n1, &info)!=TCL_OK) { return TCL_ERROR; } - lwk2 = (integer) work[1].r; + lwk2 = (integer) work[1]; lwkopt = jw + max(lwk1,lwk2); @@ -61040,15 +63904,13 @@ static /* Subroutine */ int zlaqr2_ (Tcl_Interp *interp, logical *wantt, logical if (*lwork == -1) { - d__1 = (doublereal) lwkopt; - z__1.r = d__1, z__1.i = 0.; - work[1].r = z__1.r, work[1].i = z__1.i; + work[1] = (doublereal) lwkopt; return TCL_OK; } *ns = 0; *nd = 0; - work[1].r = 1., work[1].i = 0.; + work[1] = 1.; if (*ktop > *kbot) { return TCL_OK; } @@ -61069,186 +63931,273 @@ return TCL_OK; jw = min(i__1,i__2); kwtop = *kbot - jw + 1; if (kwtop == *ktop) { - s.r = 0., s.i = 0.; + s = 0.; } else { - i__1 = kwtop + (kwtop - 1) * h_dim1; - s.r = h__[i__1].r, s.i = h__[i__1].i; + s = h__[kwtop + (kwtop - 1) * h_dim1]; } if (*kbot == kwtop) { - i__1 = kwtop; - i__2 = kwtop + kwtop * h_dim1; - sh[i__1].r = h__[i__2].r, sh[i__1].i = h__[i__2].i; + sr[kwtop] = h__[kwtop + kwtop * h_dim1]; + si[kwtop] = 0.; *ns = 1; *nd = 0; - i__1 = kwtop + kwtop * h_dim1; - d__5 = smlnum, d__6 = ulp * ((d__1 = h__[i__1].r, abs(d__1)) + (d__2 = - d_imag(&h__[kwtop + kwtop * h_dim1]), abs(d__2))); - if ((d__3 = s.r, abs(d__3)) + (d__4 = d_imag(&s), abs(d__4)) <= max( - d__5,d__6)) { + d__2 = smlnum, d__3 = ulp * (d__1 = h__[kwtop + kwtop * h_dim1], abs( + d__1)); + if (abs(s) <= max(d__2,d__3)) { *ns = 0; 
*nd = 1; if (kwtop > *ktop) { - i__1 = kwtop + (kwtop - 1) * h_dim1; - h__[i__1].r = 0., h__[i__1].i = 0.; + h__[kwtop + (kwtop - 1) * h_dim1] = 0.; } } - work[1].r = 1., work[1].i = 0.; + work[1] = 1.; return TCL_OK; } - if (zlacpy_(interp, "U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset], ldt)!=TCL_OK) { return TCL_ERROR; } + if (dlacpy_(interp, "U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset], ldt)!=TCL_OK) { return TCL_ERROR; } i__1 = jw - 1; i__2 = *ldh + 1; i__3 = *ldt + 1; - if (zcopy_(interp, &i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], & i__3)!=TCL_OK) { return TCL_ERROR; } + if (dcopy_(interp, &i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], & i__3)!=TCL_OK) { return TCL_ERROR; } - if (zlaset_(interp, "A", &jw, &jw, &zlaqr2_c_b1, &zlaqr2_c_b2, &v[v_offset], ldv)!=TCL_OK) { return TCL_ERROR; } + if (dlaset_(interp, "A", &jw, &jw, &dlaqr2_c_b12, &dlaqr2_c_b13, &v[v_offset], ldv)!=TCL_OK) { return TCL_ERROR; } + + if (dlahqr_(interp, &dlaqr2_c_true, &dlaqr2_c_true, &jw, &dlaqr2_c__1, &jw, &t[t_offset], ldt, &sr[kwtop], &si[kwtop], &dlaqr2_c__1, &jw, &v[v_offset], ldv, &infqr)!=TCL_OK) { return TCL_ERROR; } - if (zlahqr_(interp, &zlaqr2_c_true, &zlaqr2_c_true, &jw, &zlaqr2_c__1, &jw, &t[t_offset], ldt, &sh[kwtop], &zlaqr2_c__1, &jw, &v[v_offset], ldv, &infqr)!=TCL_OK) { return TCL_ERROR; } + i__1 = jw - 3; + for (j = 1; j <= i__1; ++j) { + t[j + 2 + j * t_dim1] = 0.; + t[j + 3 + j * t_dim1] = 0.; + } + if (jw > 2) { + t[jw + (jw - 2) * t_dim1] = 0.; + } + *ns = jw; ilst = infqr + 1; - i__1 = jw; - for (knt = infqr + 1; knt <= i__1; ++knt) { +L20: + if (ilst <= *ns) { + if (*ns == 1) { + bulge = FALSE_; + } else { + bulge = t[*ns + (*ns - 1) * t_dim1] != 0.; + } - i__2 = *ns + *ns * t_dim1; - foo = (d__1 = t[i__2].r, abs(d__1)) + (d__2 = d_imag(&t[*ns + *ns * - t_dim1]), abs(d__2)); - if (foo == 0.) 
{ - foo = (d__1 = s.r, abs(d__1)) + (d__2 = d_imag(&s), abs(d__2)); - } - i__2 = *ns * v_dim1 + 1; - d__5 = smlnum, d__6 = ulp * foo; - if (((d__1 = s.r, abs(d__1)) + (d__2 = d_imag(&s), abs(d__2))) * (( - d__3 = v[i__2].r, abs(d__3)) + (d__4 = d_imag(&v[*ns * v_dim1 - + 1]), abs(d__4))) <= max(d__5,d__6)) { + if (! bulge) { - --(*ns); + foo = (d__1 = t[*ns + *ns * t_dim1], abs(d__1)); + if (foo == 0.) { + foo = abs(s); + } + d__2 = smlnum, d__3 = ulp * foo; + if ((d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)) <= max(d__2,d__3)) + { + + + --(*ns); + } else { + + + ifst = *ns; + if (dtrexc_(interp, "V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info)!=TCL_OK) { return TCL_ERROR; } + + + ++ilst; + } } else { - ifst = *ns; - if (ztrexc_(interp, "V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, & ilst, &info)!=TCL_OK) { return TCL_ERROR; } + foo = (d__3 = t[*ns + *ns * t_dim1], abs(d__3)) + sqrt((d__1 = t[* + ns + (*ns - 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[* + ns - 1 + *ns * t_dim1], abs(d__2))); + if (foo == 0.) { + foo = abs(s); + } + d__3 = (d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)), d__4 = (d__2 = + s * v[(*ns - 1) * v_dim1 + 1], abs(d__2)); + d__5 = smlnum, d__6 = ulp * foo; + if (max(d__3,d__4) <= max(d__5,d__6)) { - ++ilst; + *ns += -2; + } else { + + + ifst = *ns; + if (dtrexc_(interp, "V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info)!=TCL_OK) { return TCL_ERROR; } + + + ilst += 2; + } } + + + goto L20; } if (*ns == 0) { - s.r = 0., s.i = 0.; + s = 0.; } - if (*ns < jw) { + if (*ns < jw) { + + + sorted = FALSE_; + i__ = *ns + 1; +L30: + if (sorted) { + goto L50; + } + sorted = TRUE_; + + kend = i__ - 1; + i__ = infqr + 1; + if (i__ == *ns) { + k = i__ + 1; + } else if (t[i__ + 1 + i__ * t_dim1] == 0.) 
{ + k = i__ + 1; + } else { + k = i__ + 2; + } +L40: + if (k <= kend) { + if (k == i__ + 1) { + evi = (d__1 = t[i__ + i__ * t_dim1], abs(d__1)); + } else { + evi = (d__3 = t[i__ + i__ * t_dim1], abs(d__3)) + sqrt((d__1 = + t[i__ + 1 + i__ * t_dim1], abs(d__1))) * sqrt((d__2 = + t[i__ + (i__ + 1) * t_dim1], abs(d__2))); + } + + if (k == kend) { + evk = (d__1 = t[k + k * t_dim1], abs(d__1)); + } else if (t[k + 1 + k * t_dim1] == 0.) { + evk = (d__1 = t[k + k * t_dim1], abs(d__1)); + } else { + evk = (d__3 = t[k + k * t_dim1], abs(d__3)) + sqrt((d__1 = t[ + k + 1 + k * t_dim1], abs(d__1))) * sqrt((d__2 = t[k + + (k + 1) * t_dim1], abs(d__2))); + } + + if (evi >= evk) { + i__ = k; + } else { + sorted = FALSE_; + ifst = i__; + ilst = k; + if (dtrexc_(interp, "V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info)!=TCL_OK) { return TCL_ERROR; } - i__1 = *ns; - for (i__ = infqr + 1; i__ <= i__1; ++i__) { - ifst = i__; - i__2 = *ns; - for (j = i__ + 1; j <= i__2; ++j) { - i__3 = j + j * t_dim1; - i__4 = ifst + ifst * t_dim1; - if ((d__1 = t[i__3].r, abs(d__1)) + (d__2 = d_imag(&t[j + j * - t_dim1]), abs(d__2)) > (d__3 = t[i__4].r, abs(d__3)) - + (d__4 = d_imag(&t[ifst + ifst * t_dim1]), abs(d__4)) - ) { - ifst = j; + if (info == 0) { + i__ = ilst; + } else { + i__ = k; } } - ilst = i__; - if (ifst != ilst) { - if (ztrexc_(interp, "V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &info)!=TCL_OK) { return TCL_ERROR; } - - + if (i__ == kend) { + k = i__ + 1; + } else if (t[i__ + 1 + i__ * t_dim1] == 0.) 
{ + k = i__ + 1; + } else { + k = i__ + 2; } + goto L40; } + goto L30; +L50: + ; } - i__1 = jw; - for (i__ = infqr + 1; i__ <= i__1; ++i__) { - i__2 = kwtop + i__ - 1; - i__3 = i__ + i__ * t_dim1; - sh[i__2].r = t[i__3].r, sh[i__2].i = t[i__3].i; - } + i__ = jw; +L60: + if (i__ >= infqr + 1) { + if (i__ == infqr + 1) { + sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; + si[kwtop + i__ - 1] = 0.; + --i__; + } else if (t[i__ + (i__ - 1) * t_dim1] == 0.) { + sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; + si[kwtop + i__ - 1] = 0.; + --i__; + } else { + aa = t[i__ - 1 + (i__ - 1) * t_dim1]; + cc = t[i__ + (i__ - 1) * t_dim1]; + bb = t[i__ - 1 + i__ * t_dim1]; + dd = t[i__ + i__ * t_dim1]; + if (dlanv2_(interp, &aa, &bb, &cc, &dd, &sr[kwtop + i__ - 2], &si[kwtop + i__ - 2], &sr[kwtop + i__ - 1], &si[kwtop + i__ - 1], &cs, & + sn)!=TCL_OK) { return TCL_ERROR; } - if (*ns < jw || s.r == 0. && s.i == 0.) { - if (*ns > 1 && (s.r != 0. || s.i != 0.)) { + i__ += -2; + } + goto L60; + } + if (*ns < jw || s == 0.) { + if (*ns > 1 && s != 0.) 
{ - if (zcopy_(interp, ns, &v[v_offset], ldv, &work[1], &zlaqr2_c__1)!=TCL_OK) { return TCL_ERROR; } - i__1 = *ns; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - d_cnjg(&z__1, &work[i__]); - work[i__2].r = z__1.r, work[i__2].i = z__1.i; - } - beta.r = work[1].r, beta.i = work[1].i; - if (zlarfg_(interp, ns, &beta, &work[2], &zlaqr2_c__1, &tau)!=TCL_OK) { return TCL_ERROR; } + if (dcopy_(interp, ns, &v[v_offset], ldv, &work[1], &dlaqr2_c__1)!=TCL_OK) { return TCL_ERROR; } - work[1].r = 1., work[1].i = 0.; + beta = work[1]; + if (dlarfg_(interp, ns, &beta, &work[2], &dlaqr2_c__1, &tau)!=TCL_OK) { return TCL_ERROR; } + + work[1] = 1.; i__1 = jw - 2; i__2 = jw - 2; - if (zlaset_(interp, "L", &i__1, &i__2, &zlaqr2_c_b1, &zlaqr2_c_b1, &t[t_dim1 + 3], ldt)!=TCL_OK) { return TCL_ERROR; } + if (dlaset_(interp, "L", &i__1, &i__2, &dlaqr2_c_b12, &dlaqr2_c_b12, &t[t_dim1 + 3], ldt)!=TCL_OK) { return TCL_ERROR; } - d_cnjg(&z__1, &tau); - if (zlarf_(interp, "L", ns, &jw, &work[1], &zlaqr2_c__1, &z__1, &t[t_offset], ldt, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } + if (dlarf_(interp, "L", ns, &jw, &work[1], &dlaqr2_c__1, &tau, &t[t_offset], ldt, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } - if (zlarf_(interp, "R", ns, ns, &work[1], &zlaqr2_c__1, &tau, &t[t_offset], ldt, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } + if (dlarf_(interp, "R", ns, ns, &work[1], &dlaqr2_c__1, &tau, &t[t_offset], ldt, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } - if (zlarf_(interp, "R", &jw, ns, &work[1], &zlaqr2_c__1, &tau, &v[v_offset], ldv, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } + if (dlarf_(interp, "R", &jw, ns, &work[1], &dlaqr2_c__1, &tau, &v[v_offset], ldv, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } i__1 = *lwork - jw; - if (zgehrd_(interp, &jw, &zlaqr2_c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1], &i__1, &info)!=TCL_OK) { return TCL_ERROR; } + if (dgehrd_(interp, &jw, &dlaqr2_c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1], &i__1, &info)!=TCL_OK) { 
return TCL_ERROR; } } if (kwtop > 1) { - i__1 = kwtop + (kwtop - 1) * h_dim1; - d_cnjg(&z__2, &v[v_dim1 + 1]); - z__1.r = s.r * z__2.r - s.i * z__2.i, z__1.i = s.r * z__2.i + s.i - * z__2.r; - h__[i__1].r = z__1.r, h__[i__1].i = z__1.i; + h__[kwtop + (kwtop - 1) * h_dim1] = s * v[v_dim1 + 1]; } - if (zlacpy_(interp, "U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } + if (dlacpy_(interp, "U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } i__1 = jw - 1; i__2 = *ldt + 1; i__3 = *ldh + 1; - if (zcopy_(interp, &i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1], &i__3)!=TCL_OK) { return TCL_ERROR; } + if (dcopy_(interp, &i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1], &i__3)!=TCL_OK) { return TCL_ERROR; } - if (*ns > 1 && (s.r != 0. || s.i != 0.)) { + if (*ns > 1 && s != 0.) { i__1 = *lwork - jw; - if (zunmhr_(interp, "R", "N", &jw, ns, &zlaqr2_c__1, ns, &t[t_offset], ldt, &work[1], &v[v_offset], ldv, &work[jw + 1], &i__1, &info)!=TCL_OK) { return TCL_ERROR; } + if (dormhr_(interp, "R", "N", &jw, ns, &dlaqr2_c__1, ns, &t[t_offset], ldt, &work[1], &v[v_offset], ldv, &work[jw + 1], &i__1, &info)!=TCL_OK) { return TCL_ERROR; } } @@ -61265,11 +64214,11 @@ return TCL_OK; i__2) { i__3 = *nv, i__4 = kwtop - krow; kln = min(i__3,i__4); - if (zgemm_(interp, "N", "N", &kln, &jw, &jw, &zlaqr2_c_b2, &h__[krow + kwtop * h_dim1], ldh, &v[v_offset], ldv, &zlaqr2_c_b1, &wv[wv_offset], + if (dgemm_(interp, "N", "N", &kln, &jw, &jw, &dlaqr2_c_b13, &h__[krow + kwtop * h_dim1], ldh, &v[v_offset], ldv, &dlaqr2_c_b12, &wv[wv_offset], ldwv)!=TCL_OK) { return TCL_ERROR; } - if (zlacpy_(interp, "A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } + if (dlacpy_(interp, "A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } } @@ -61282,11 +64231,11 @@ return TCL_OK; kcol += 
i__1) { i__3 = *nh, i__4 = *n - kcol + 1; kln = min(i__3,i__4); - if (zgemm_(interp, "C", "N", &jw, &kln, &jw, &zlaqr2_c_b2, &v[v_offset], ldv, & h__[kwtop + kcol * h_dim1], ldh, &zlaqr2_c_b1, &t[t_offset], - ldt)!=TCL_OK) { return TCL_ERROR; } + if (dgemm_(interp, "C", "N", &jw, &kln, &jw, &dlaqr2_c_b13, &v[v_offset], ldv, & h__[kwtop + kcol * h_dim1], ldh, &dlaqr2_c_b12, &t[t_offset], + ldt)!=TCL_OK) { return TCL_ERROR; } - if (zlacpy_(interp, "A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } + if (dlacpy_(interp, "A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } } @@ -61300,11 +64249,11 @@ return TCL_OK; i__2) { i__3 = *nv, i__4 = *ihiz - krow + 1; kln = min(i__3,i__4); - if (zgemm_(interp, "N", "N", &kln, &jw, &jw, &zlaqr2_c_b2, &z__[krow + kwtop * z_dim1], ldz, &v[v_offset], ldv, &zlaqr2_c_b1, &wv[wv_offset] -, ldwv)!=TCL_OK) { return TCL_ERROR; } + if (dgemm_(interp, "N", "N", &kln, &jw, &jw, &dlaqr2_c_b13, &z__[krow + kwtop * z_dim1], ldz, &v[v_offset], ldv, &dlaqr2_c_b12, &wv[ + wv_offset], ldwv)!=TCL_OK) { return TCL_ERROR; } - if (zlacpy_(interp, "A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow + kwtop * z_dim1], ldz)!=TCL_OK) { return TCL_ERROR; } + if (dlacpy_(interp, "A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow + kwtop * z_dim1], ldz)!=TCL_OK) { return TCL_ERROR; } } @@ -61318,23 +64267,18 @@ return TCL_OK; *ns -= infqr; - d__1 = (doublereal) lwkopt; - z__1.r = d__1, z__1.i = 0.; - work[1].r = z__1.r, work[1].i = z__1.i; + work[1] = (doublereal) lwkopt; return TCL_OK; -} /* zlaqr2_ */ -static /* Subroutine */ int zlaqr1_ (Tcl_Interp *interp, integer *n, doublecomplex *h__, integer *ldh, doublecomplex *s1, doublecomplex *s2, doublecomplex *v) +} /* dlaqr2_ */ +static /* Subroutine */ int dlaqr1_ (Tcl_Interp *interp, integer *n, doublereal *h__, integer *ldh, doublereal *sr1, doublereal *si1, doublereal *sr2, doublereal *si2, doublereal *v) { - integer 
h_dim1, h_offset, i__1, i__2, i__3, i__4; - doublereal d__1, d__2, d__3, d__4, d__5, d__6; - doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8; + integer h_dim1, h_offset; + doublereal d__1, d__2, d__3; - double d_imag(doublecomplex *); + doublereal s, h21s, h31s; - doublereal s; - doublecomplex h21s, h31s; @@ -61357,140 +64301,53 @@ static /* Subroutine */ int zlaqr1_ (Tcl_Interp *interp, integer *n, doublecompl --v; if (*n == 2) { - i__1 = h_dim1 + 1; - z__2.r = h__[i__1].r - s2->r, z__2.i = h__[i__1].i - s2->i; - z__1.r = z__2.r, z__1.i = z__2.i; - i__2 = h_dim1 + 2; - s = (d__1 = z__1.r, abs(d__1)) + (d__2 = d_imag(&z__1), abs(d__2)) + ( - (d__3 = h__[i__2].r, abs(d__3)) + (d__4 = d_imag(&h__[h_dim1 - + 2]), abs(d__4))); + s = (d__1 = h__[h_dim1 + 1] - *sr2, abs(d__1)) + abs(*si2) + (d__2 = + h__[h_dim1 + 2], abs(d__2)); if (s == 0.) { - v[1].r = 0., v[1].i = 0.; - v[2].r = 0., v[2].i = 0.; + v[1] = 0.; + v[2] = 0.; } else { - i__1 = h_dim1 + 2; - z__1.r = h__[i__1].r / s, z__1.i = h__[i__1].i / s; - h21s.r = z__1.r, h21s.i = z__1.i; - i__1 = (h_dim1 << 1) + 1; - z__2.r = h21s.r * h__[i__1].r - h21s.i * h__[i__1].i, z__2.i = - h21s.r * h__[i__1].i + h21s.i * h__[i__1].r; - i__2 = h_dim1 + 1; - z__4.r = h__[i__2].r - s1->r, z__4.i = h__[i__2].i - s1->i; - i__3 = h_dim1 + 1; - z__6.r = h__[i__3].r - s2->r, z__6.i = h__[i__3].i - s2->i; - z__5.r = z__6.r / s, z__5.i = z__6.i / s; - z__3.r = z__4.r * z__5.r - z__4.i * z__5.i, z__3.i = z__4.r * - z__5.i + z__4.i * z__5.r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - v[1].r = z__1.r, v[1].i = z__1.i; - i__1 = h_dim1 + 1; - i__2 = (h_dim1 << 1) + 2; - z__4.r = h__[i__1].r + h__[i__2].r, z__4.i = h__[i__1].i + h__[ - i__2].i; - z__3.r = z__4.r - s1->r, z__3.i = z__4.i - s1->i; - z__2.r = z__3.r - s2->r, z__2.i = z__3.i - s2->i; - z__1.r = h21s.r * z__2.r - h21s.i * z__2.i, z__1.i = h21s.r * - z__2.i + h21s.i * z__2.r; - v[2].r = z__1.r, v[2].i = z__1.i; + h21s = h__[h_dim1 + 2] / s; + v[1] = h21s 
* h__[(h_dim1 << 1) + 1] + (h__[h_dim1 + 1] - *sr1) * + ((h__[h_dim1 + 1] - *sr2) / s) - *si1 * (*si2 / s); + v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - * + sr2); } } else { - i__1 = h_dim1 + 1; - z__2.r = h__[i__1].r - s2->r, z__2.i = h__[i__1].i - s2->i; - z__1.r = z__2.r, z__1.i = z__2.i; - i__2 = h_dim1 + 2; - i__3 = h_dim1 + 3; - s = (d__1 = z__1.r, abs(d__1)) + (d__2 = d_imag(&z__1), abs(d__2)) + ( - (d__3 = h__[i__2].r, abs(d__3)) + (d__4 = d_imag(&h__[h_dim1 - + 2]), abs(d__4))) + ((d__5 = h__[i__3].r, abs(d__5)) + (d__6 - = d_imag(&h__[h_dim1 + 3]), abs(d__6))); + s = (d__1 = h__[h_dim1 + 1] - *sr2, abs(d__1)) + abs(*si2) + (d__2 = + h__[h_dim1 + 2], abs(d__2)) + (d__3 = h__[h_dim1 + 3], abs( + d__3)); if (s == 0.) { - v[1].r = 0., v[1].i = 0.; - v[2].r = 0., v[2].i = 0.; - v[3].r = 0., v[3].i = 0.; + v[1] = 0.; + v[2] = 0.; + v[3] = 0.; } else { - i__1 = h_dim1 + 2; - z__1.r = h__[i__1].r / s, z__1.i = h__[i__1].i / s; - h21s.r = z__1.r, h21s.i = z__1.i; - i__1 = h_dim1 + 3; - z__1.r = h__[i__1].r / s, z__1.i = h__[i__1].i / s; - h31s.r = z__1.r, h31s.i = z__1.i; - i__1 = h_dim1 + 1; - z__4.r = h__[i__1].r - s1->r, z__4.i = h__[i__1].i - s1->i; - i__2 = h_dim1 + 1; - z__6.r = h__[i__2].r - s2->r, z__6.i = h__[i__2].i - s2->i; - z__5.r = z__6.r / s, z__5.i = z__6.i / s; - z__3.r = z__4.r * z__5.r - z__4.i * z__5.i, z__3.i = z__4.r * - z__5.i + z__4.i * z__5.r; - i__3 = (h_dim1 << 1) + 1; - z__7.r = h__[i__3].r * h21s.r - h__[i__3].i * h21s.i, z__7.i = - h__[i__3].r * h21s.i + h__[i__3].i * h21s.r; - z__2.r = z__3.r + z__7.r, z__2.i = z__3.i + z__7.i; - i__4 = h_dim1 * 3 + 1; - z__8.r = h__[i__4].r * h31s.r - h__[i__4].i * h31s.i, z__8.i = - h__[i__4].r * h31s.i + h__[i__4].i * h31s.r; - z__1.r = z__2.r + z__8.r, z__1.i = z__2.i + z__8.i; - v[1].r = z__1.r, v[1].i = z__1.i; - i__1 = h_dim1 + 1; - i__2 = (h_dim1 << 1) + 2; - z__5.r = h__[i__1].r + h__[i__2].r, z__5.i = h__[i__1].i + h__[ - i__2].i; - z__4.r = z__5.r - s1->r, z__4.i = 
z__5.i - s1->i; - z__3.r = z__4.r - s2->r, z__3.i = z__4.i - s2->i; - z__2.r = h21s.r * z__3.r - h21s.i * z__3.i, z__2.i = h21s.r * - z__3.i + h21s.i * z__3.r; - i__3 = h_dim1 * 3 + 2; - z__6.r = h__[i__3].r * h31s.r - h__[i__3].i * h31s.i, z__6.i = - h__[i__3].r * h31s.i + h__[i__3].i * h31s.r; - z__1.r = z__2.r + z__6.r, z__1.i = z__2.i + z__6.i; - v[2].r = z__1.r, v[2].i = z__1.i; - i__1 = h_dim1 + 1; - i__2 = h_dim1 * 3 + 3; - z__5.r = h__[i__1].r + h__[i__2].r, z__5.i = h__[i__1].i + h__[ - i__2].i; - z__4.r = z__5.r - s1->r, z__4.i = z__5.i - s1->i; - z__3.r = z__4.r - s2->r, z__3.i = z__4.i - s2->i; - z__2.r = h31s.r * z__3.r - h31s.i * z__3.i, z__2.i = h31s.r * - z__3.i + h31s.i * z__3.r; - i__3 = (h_dim1 << 1) + 3; - z__6.r = h21s.r * h__[i__3].r - h21s.i * h__[i__3].i, z__6.i = - h21s.r * h__[i__3].i + h21s.i * h__[i__3].r; - z__1.r = z__2.r + z__6.r, z__1.i = z__2.i + z__6.i; - v[3].r = z__1.r, v[3].i = z__1.i; + h21s = h__[h_dim1 + 2] / s; + h31s = h__[h_dim1 + 3] / s; + v[1] = (h__[h_dim1 + 1] - *sr1) * ((h__[h_dim1 + 1] - *sr2) / s) + - *si1 * (*si2 / s) + h__[(h_dim1 << 1) + 1] * h21s + h__[ + h_dim1 * 3 + 1] * h31s; + v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - * + sr2) + h__[h_dim1 * 3 + 2] * h31s; + v[3] = h31s * (h__[h_dim1 + 1] + h__[h_dim1 * 3 + 3] - *sr1 - * + sr2) + h21s * h__[(h_dim1 << 1) + 3]; } } return TCL_OK; -} /* zlaqr1_ */ -static /* Subroutine */ int dlasd4_ (Tcl_Interp *interp, integer *n, integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal * sigma, doublereal *work, integer *info) +} /* dlaqr1_ */ +static /* Subroutine */ int zunmhr_ (Tcl_Interp *interp, char *side, char *trans, integer *m, integer *n, integer *ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *c__, integer *ldc, doublecomplex * work, integer *lwork, integer *info) { - integer i__1; - doublereal d__1; - - double sqrt(doublereal); - - doublereal a, b, c__; - integer 
j; - doublereal w, dd[3]; - integer ii; - doublereal dw, zz[3]; - integer ip1; - doublereal eta, phi, eps, tau, psi; - integer iim1, iip1; - doublereal dphi, dpsi; - integer iter; - doublereal temp, prew, sg2lb, sg2ub, temp1, temp2, dtiim, delsq, dtiip; - integer niter; - doublereal dtisq; - logical swtch; - doublereal dtnsq; - doublereal delsq2, dtnsq1; - logical swtch3; - logical orgati; - doublereal erretm, dtipsq, rhoinv; - - - + address a__1[2]; + integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2; + char ch__1[2]; + /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); + integer i1, i2, nb, mi, nh, ni, nq, nw; + logical left; + integer iinfo; + integer lwkopt; + logical lquery; @@ -61518,1053 +64375,646 @@ static /* Subroutine */ int dlasd4_ (Tcl_Interp *interp, integer *n, integer *i_ + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --tau; + c_dim1 = *ldc; + c_offset = 1 + c_dim1; + c__ -= c_offset; --work; - --delta; - --z__; - --d__; *info = 0; - if (*n == 1) { + nh = *ihi - *ilo; + left = lsame_(side, "L"); + lquery = *lwork == -1; - *sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]); - delta[1] = 1.; - work[1] = 1.; -return TCL_OK; + if (left) { + nq = *m; + nw = *n; + } else { + nq = *n; + nw = *m; } - if (*n == 2) { - if (dlasd5_(interp, i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1])!=TCL_OK) { return TCL_ERROR; } - -return TCL_OK; + if (! left && ! lsame_(side, "R")) { + *info = -1; + } else if (! lsame_(trans, "N") && ! lsame_(trans, + "C")) { + *info = -2; + } else if (*m < 0) { + *info = -3; + } else if (*n < 0) { + *info = -4; + } else if (*ilo < 1 || *ilo > max(1,nq)) { + *info = -5; + } else if (*ihi < min(*ilo,nq) || *ihi > nq) { + *info = -6; + } else if (*lda < max(1,nq)) { + *info = -8; + } else if (*ldc < max(1,*m)) { + *info = -11; + } else if (*lwork < max(1,nw) && ! lquery) { + *info = -13; } - - eps = dlamch_("Epsilon"); - rhoinv = 1. 
/ *rho; - - - if (*i__ == *n) { - - - ii = *n - 1; - niter = 1; - - - temp = *rho / 2.; - - - temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp)); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] = d__[j] + d__[*n] + temp1; - delta[j] = d__[j] - d__[*n] - temp1; - } - - psi = 0.; - i__1 = *n - 2; - for (j = 1; j <= i__1; ++j) { - psi += z__[j] * z__[j] / (delta[j] * work[j]); - } - - c__ = rhoinv + psi; - w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[* - n] / (delta[*n] * work[*n]); - - if (w <= 0.) { - temp1 = sqrt(d__[*n] * d__[*n] + *rho); - temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[* - n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] * - z__[*n] / *rho; - - - if (c__ <= temp) { - tau = *rho; - } else { - delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]); - a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[* - n]; - b = z__[*n] * z__[*n] * delsq; - if (a < 0.) { - tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); - } else { - tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); - } - } - - - } else { - delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]); - a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]; - b = z__[*n] * z__[*n] * delsq; - - - if (a < 0.) { - tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); - } else { - tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); - } - - - } - - - eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau)); - - *sigma = d__[*n] + eta; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] = d__[j] - d__[*i__] - eta; - work[j] = d__[j] + d__[*i__] + eta; - } - - - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = ii; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (delta[j] * work[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; - } - erretm = abs(erretm); - - - temp = z__[*n] / (delta[*n] * work[*n]); - phi = z__[*n] * temp; - dphi = temp * temp; - erretm = (-phi - psi) * 8. 
+ erretm - phi + rhoinv + abs(tau) * (dpsi - + dphi); - - w = rhoinv + phi + psi; - - - if (abs(w) <= eps * erretm) { - goto L240; - } - - - ++niter; - dtnsq1 = work[*n - 1] * delta[*n - 1]; - dtnsq = work[*n] * delta[*n]; - c__ = w - dtnsq1 * dpsi - dtnsq * dphi; - a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi); - b = dtnsq * dtnsq1 * w; - if (c__ < 0.) { - c__ = abs(c__); - } - if (c__ == 0.) { - eta = *rho - *sigma * *sigma; - } else if (a >= 0.) { - eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ - * 2.); + if (*info == 0) { + if (left) { + i__1[0] = 1, a__1[0] = side; + i__1[1] = 1, a__1[1] = trans; + s_cat(ch__1, a__1, i__1, &zunmhr_c__2, (ftnlen)2); + nb = ilaenv_(&zunmhr_c__1, "ZUNMQR", ch__1, &nh, n, &nh, &zunmhr_c_n1); } else { - eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) - ); + i__1[0] = 1, a__1[0] = side; + i__1[1] = 1, a__1[1] = trans; + s_cat(ch__1, a__1, i__1, &zunmhr_c__2, (ftnlen)2); + nb = ilaenv_(&zunmhr_c__1, "ZUNMQR", ch__1, m, &nh, &nh, &zunmhr_c_n1); } + lwkopt = max(1,nw) * nb; + work[1].r = (doublereal) lwkopt, work[1].i = 0.; + } + if (*info != 0) { + i__2 = -(*info); + vectcl_xerbla(interp, "ZUNMHR", &i__2); +return TCL_ERROR; - if (w * eta > 0.) 
{ - eta = -w / (dpsi + dphi); - } - temp = eta - dtnsq; - if (temp > *rho) { - eta = *rho + dtnsq; - } - - tau += eta; - eta /= *sigma + sqrt(eta + *sigma * *sigma); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] -= eta; - work[j] += eta; - } +return TCL_OK; + } else if (lquery) { +return TCL_OK; + } - *sigma += eta; + if (*m == 0 || *n == 0 || nh == 0) { + work[1].r = 1., work[1].i = 0.; +return TCL_OK; + } - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = ii; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (work[j] * delta[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; - } - erretm = abs(erretm); + if (left) { + mi = nh; + ni = *n; + i1 = *ilo + 1; + i2 = 1; + } else { + mi = *m; + ni = nh; + i1 = 1; + i2 = *ilo + 1; + } + if (zunmqr_(interp, side, trans, &mi, &ni, &nh, &a[*ilo + 1 + *ilo * a_dim1], lda, & tau[*ilo], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo)!=TCL_OK) { return TCL_ERROR; } - temp = z__[*n] / (work[*n] * delta[*n]); - phi = z__[*n] * temp; - dphi = temp * temp; - erretm = (-phi - psi) * 8. 
+ erretm - phi + rhoinv + abs(tau) * (dpsi - + dphi); - w = rhoinv + phi + psi; + work[1].r = (doublereal) lwkopt, work[1].i = 0.; +return TCL_OK; - iter = niter + 1; - for (niter = iter; niter <= 20; ++niter) { +} /* zunmhr_ */ +static /* Subroutine */ int zlaqr2_ (Tcl_Interp *interp, logical *wantt, logical *wantz, integer *n, integer *ktop, integer *kbot, integer *nw, doublecomplex *h__, integer *ldh, integer *iloz, integer *ihiz, doublecomplex *z__, integer *ldz, integer *ns, integer *nd, doublecomplex *sh, doublecomplex *v, integer *ldv, integer *nh, doublecomplex *t, integer *ldt, integer *nv, doublecomplex *wv, integer *ldwv, doublecomplex *work, integer *lwork) +{ + integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1, + wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; + doublereal d__1, d__2, d__3, d__4, d__5, d__6; + doublecomplex z__1, z__2; + double d_imag(doublecomplex *); + void d_cnjg(doublecomplex *, doublecomplex *); - if (abs(w) <= eps * erretm) { - goto L240; - } + integer i__, j; + doublecomplex s; + integer jw; + doublereal foo; + integer kln; + doublecomplex tau; + integer knt; + doublereal ulp; + integer lwk1, lwk2; + doublecomplex beta; + integer kcol, info, ifst, ilst, ltop, krow; + integer infqr; + integer kwtop; + doublereal safmin, safmax; + doublereal smlnum; + integer lwkopt; - dtnsq1 = work[*n - 1] * delta[*n - 1]; - dtnsq = work[*n] * delta[*n]; - c__ = w - dtnsq1 * dpsi - dtnsq * dphi; - a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi); - b = dtnsq1 * dtnsq * w; - if (a >= 0.) { - eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( - c__ * 2.); - } else { - eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs( - d__1)))); - } - if (w * eta > 0.) { - eta = -w / (dpsi + dphi); - } - temp = eta - dtnsq; - if (temp <= 0.) 
{ - eta /= 2.; - } - tau += eta; - eta /= *sigma + sqrt(eta + *sigma * *sigma); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - delta[j] -= eta; - work[j] += eta; - } - *sigma += eta; - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = ii; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (work[j] * delta[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; - } - erretm = abs(erretm); - temp = z__[*n] / (work[*n] * delta[*n]); - phi = z__[*n] * temp; - dphi = temp * temp; - erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * ( - dpsi + dphi); - w = rhoinv + phi + psi; - } - *info = 1; - goto L240; - } else { - niter = 1; - ip1 = *i__ + 1; - delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]); - delsq2 = delsq / 2.; - temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2)); - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] = d__[j] + d__[*i__] + temp; - delta[j] = d__[j] - d__[*i__] - temp; - } - psi = 0.; - i__1 = *i__ - 1; - for (j = 1; j <= i__1; ++j) { - psi += z__[j] * z__[j] / (work[j] * delta[j]); - } - phi = 0.; - i__1 = *i__ + 2; - for (j = *n; j >= i__1; --j) { - phi += z__[j] * z__[j] / (work[j] * delta[j]); - } - c__ = rhoinv + psi + phi; - w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[ - ip1] * z__[ip1] / (work[ip1] * delta[ip1]); - if (w > 0.) { - orgati = TRUE_; - sg2lb = 0.; - sg2ub = delsq2; - a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1]; - b = z__[*i__] * z__[*i__] * delsq; - if (a > 0.) { - tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( - d__1)))); - } else { - tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( - c__ * 2.); - } - eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau)); - } else { - orgati = FALSE_; - sg2lb = -delsq2; - sg2ub = 0.; - a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1]; - b = z__[ip1] * z__[ip1] * delsq; - if (a < 0.) { - tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. 
* c__, abs( - d__1)))); - } else { - tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) / - (c__ * 2.); - } - eta = tau / (d__[ip1] + sqrt((d__1 = d__[ip1] * d__[ip1] + tau, - abs(d__1)))); - } - if (orgati) { - ii = *i__; - *sigma = d__[*i__] + eta; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] = d__[j] + d__[*i__] + eta; - delta[j] = d__[j] - d__[*i__] - eta; - } - } else { - ii = *i__ + 1; - *sigma = d__[ip1] + eta; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] = d__[j] + d__[ip1] + eta; - delta[j] = d__[j] - d__[ip1] - eta; - } - } - iim1 = ii - 1; - iip1 = ii + 1; - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = iim1; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (work[j] * delta[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; - } - erretm = abs(erretm); + h_dim1 = *ldh; + h_offset = 1 + h_dim1; + h__ -= h_offset; + z_dim1 = *ldz; + z_offset = 1 + z_dim1; + z__ -= z_offset; + --sh; + v_dim1 = *ldv; + v_offset = 1 + v_dim1; + v -= v_offset; + t_dim1 = *ldt; + t_offset = 1 + t_dim1; + t -= t_offset; + wv_dim1 = *ldwv; + wv_offset = 1 + wv_dim1; + wv -= wv_offset; + --work; - dphi = 0.; - phi = 0.; - i__1 = iip1; - for (j = *n; j >= i__1; --j) { - temp = z__[j] / (work[j] * delta[j]); - phi += z__[j] * temp; - dphi += temp * temp; - erretm += phi; - } + i__1 = *nw, i__2 = *kbot - *ktop + 1; + jw = min(i__1,i__2); + if (jw <= 2) { + lwkopt = 1; + } else { - w = rhoinv + phi + psi; + i__1 = jw - 1; + if (zgehrd_(interp, &jw, &zlaqr2_c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & zlaqr2_c_n1, &info)!=TCL_OK) { return TCL_ERROR; } - swtch3 = FALSE_; - if (orgati) { - if (w < 0.) { - swtch3 = TRUE_; - } - } else { - if (w > 0.) { - swtch3 = TRUE_; - } - } - if (ii == 1 || ii == *n) { - swtch3 = FALSE_; - } - temp = z__[ii] / (work[ii] * delta[ii]); - dw = dpsi + dphi + temp * temp; - temp = z__[ii] * temp; - w += temp; - erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. 
+ - abs(tau) * dw; + lwk1 = (integer) work[1].r; - if (abs(w) <= eps * erretm) { - goto L240; - } + i__1 = jw - 1; + if (zunmhr_(interp, "R", "N", &jw, &jw, &zlaqr2_c__1, &i__1, &t[t_offset], ldt, &work[1], &v[v_offset], ldv, &work[1], &zlaqr2_c_n1, &info)!=TCL_OK) { return TCL_ERROR; } - if (w <= 0.) { - sg2lb = max(sg2lb,tau); - } else { - sg2ub = min(sg2ub,tau); - } + lwk2 = (integer) work[1].r; - ++niter; - if (! swtch3) { - dtipsq = work[ip1] * delta[ip1]; - dtisq = work[*i__] * delta[*i__]; - if (orgati) { - d__1 = z__[*i__] / dtisq; - c__ = w - dtipsq * dw + delsq * (d__1 * d__1); - } else { - d__1 = z__[ip1] / dtipsq; - c__ = w - dtisq * dw - delsq * (d__1 * d__1); - } - a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw; - b = dtipsq * dtisq * w; - if (c__ == 0.) { - if (a == 0.) { - if (orgati) { - a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi + - dphi); - } else { - a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi + - dphi); - } - } - eta = b / a; - } else if (a <= 0.) { - eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( - c__ * 2.); - } else { - eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. 
* c__, abs( - d__1)))); - } - } else { + lwkopt = jw + max(lwk1,lwk2); + } - dtiim = work[iim1] * delta[iim1]; - dtiip = work[iip1] * delta[iip1]; - temp = rhoinv + psi + phi; - if (orgati) { - temp1 = z__[iim1] / dtiim; - temp1 *= temp1; - c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) * - (d__[iim1] + d__[iip1]) * temp1; - zz[0] = z__[iim1] * z__[iim1]; - if (dpsi < temp1) { - zz[2] = dtiip * dtiip * dphi; - } else { - zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi); - } - } else { - temp1 = z__[iip1] / dtiip; - temp1 *= temp1; - c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) * - (d__[iim1] + d__[iip1]) * temp1; - if (dphi < temp1) { - zz[0] = dtiim * dtiim * dpsi; - } else { - zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1)); - } - zz[2] = z__[iip1] * z__[iip1]; - } - zz[1] = z__[ii] * z__[ii]; - dd[0] = dtiim; - dd[1] = delta[ii] * work[ii]; - dd[2] = dtiip; - if (dlaed6_(interp, &niter, &orgati, &c__, dd, zz, &w, &eta, info)!=TCL_OK) { return TCL_ERROR; } - if (*info != 0) { - goto L240; - } - } + if (*lwork == -1) { + d__1 = (doublereal) lwkopt; + z__1.r = d__1, z__1.i = 0.; + work[1].r = z__1.r, work[1].i = z__1.i; +return TCL_OK; + } + *ns = 0; + *nd = 0; + work[1].r = 1., work[1].i = 0.; + if (*ktop > *kbot) { +return TCL_OK; + } + if (*nw < 1) { +return TCL_OK; + } - if (w * eta >= 0.) { - eta = -w / dw; - } - if (orgati) { - temp1 = work[*i__] * delta[*i__]; - temp = eta - temp1; - } else { - temp1 = work[ip1] * delta[ip1]; - temp = eta - temp1; - } - if (temp > sg2ub || temp < sg2lb) { - if (w < 0.) { - eta = (sg2ub - tau) / 2.; - } else { - eta = (sg2lb - tau) / 2.; - } - } - tau += eta; - eta /= *sigma + sqrt(*sigma * *sigma + eta); + safmin = dlamch_("SAFE MINIMUM"); + safmax = 1. 
/ safmin; + if (dlabad_(interp, &safmin, &safmax)!=TCL_OK) { return TCL_ERROR; } - prew = w; + ulp = dlamch_("PRECISION"); + smlnum = safmin * ((doublereal) (*n) / ulp); - *sigma += eta; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] += eta; - delta[j] -= eta; - } + i__1 = *nw, i__2 = *kbot - *ktop + 1; + jw = min(i__1,i__2); + kwtop = *kbot - jw + 1; + if (kwtop == *ktop) { + s.r = 0., s.i = 0.; + } else { + i__1 = kwtop + (kwtop - 1) * h_dim1; + s.r = h__[i__1].r, s.i = h__[i__1].i; + } + + if (*kbot == kwtop) { - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = iim1; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (work[j] * delta[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; + + i__1 = kwtop; + i__2 = kwtop + kwtop * h_dim1; + sh[i__1].r = h__[i__2].r, sh[i__1].i = h__[i__2].i; + *ns = 1; + *nd = 0; + i__1 = kwtop + kwtop * h_dim1; + d__5 = smlnum, d__6 = ulp * ((d__1 = h__[i__1].r, abs(d__1)) + (d__2 = + d_imag(&h__[kwtop + kwtop * h_dim1]), abs(d__2))); + if ((d__3 = s.r, abs(d__3)) + (d__4 = d_imag(&s), abs(d__4)) <= max( + d__5,d__6)) { + *ns = 0; + *nd = 1; + if (kwtop > *ktop) { + i__1 = kwtop + (kwtop - 1) * h_dim1; + h__[i__1].r = 0., h__[i__1].i = 0.; + } } - erretm = abs(erretm); + work[1].r = 1., work[1].i = 0.; +return TCL_OK; + } - dphi = 0.; - phi = 0.; - i__1 = iip1; - for (j = *n; j >= i__1; --j) { - temp = z__[j] / (work[j] * delta[j]); - phi += z__[j] * temp; - dphi += temp * temp; - erretm += phi; - } + if (zlacpy_(interp, "U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset], ldt)!=TCL_OK) { return TCL_ERROR; } - temp = z__[ii] / (work[ii] * delta[ii]); - dw = dpsi + dphi + temp * temp; - temp = z__[ii] * temp; - w = rhoinv + phi + psi + temp; - erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + - abs(tau) * dw; - if (w <= 0.) 
{ - sg2lb = max(sg2lb,tau); - } else { - sg2ub = min(sg2ub,tau); - } + i__1 = jw - 1; + i__2 = *ldh + 1; + i__3 = *ldt + 1; + if (zcopy_(interp, &i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], & i__3)!=TCL_OK) { return TCL_ERROR; } - swtch = FALSE_; - if (orgati) { - if (-w > abs(prew) / 10.) { - swtch = TRUE_; - } - } else { - if (w > abs(prew) / 10.) { - swtch = TRUE_; - } - } - iter = niter + 1; + if (zlaset_(interp, "A", &jw, &jw, &zlaqr2_c_b1, &zlaqr2_c_b2, &v[v_offset], ldv)!=TCL_OK) { return TCL_ERROR; } - for (niter = iter; niter <= 20; ++niter) { + if (zlahqr_(interp, &zlaqr2_c_true, &zlaqr2_c_true, &jw, &zlaqr2_c__1, &jw, &t[t_offset], ldt, &sh[kwtop], &zlaqr2_c__1, &jw, &v[v_offset], ldv, &infqr)!=TCL_OK) { return TCL_ERROR; } - if (abs(w) <= eps * erretm) { - goto L240; - } - if (! swtch3) { - dtipsq = work[ip1] * delta[ip1]; - dtisq = work[*i__] * delta[*i__]; - if (! swtch) { - if (orgati) { - d__1 = z__[*i__] / dtisq; - c__ = w - dtipsq * dw + delsq * (d__1 * d__1); - } else { - d__1 = z__[ip1] / dtipsq; - c__ = w - dtisq * dw - delsq * (d__1 * d__1); - } - } else { - temp = z__[ii] / (work[ii] * delta[ii]); - if (orgati) { - dpsi += temp * temp; - } else { - dphi += temp * temp; - } - c__ = w - dtisq * dpsi - dtipsq * dphi; - } - a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw; - b = dtipsq * dtisq * w; - if (c__ == 0.) { - if (a == 0.) { - if (! swtch) { - if (orgati) { - a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * - (dpsi + dphi); - } else { - a = z__[ip1] * z__[ip1] + dtisq * dtisq * ( - dpsi + dphi); - } - } else { - a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi; - } - } - eta = b / a; - } else if (a <= 0.) { - eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) - / (c__ * 2.); - } else { - eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. 
* c__, - abs(d__1)))); - } - } else { + *ns = jw; + ilst = infqr + 1; + i__1 = jw; + for (knt = infqr + 1; knt <= i__1; ++knt) { - dtiim = work[iim1] * delta[iim1]; - dtiip = work[iip1] * delta[iip1]; - temp = rhoinv + psi + phi; - if (swtch) { - c__ = temp - dtiim * dpsi - dtiip * dphi; - zz[0] = dtiim * dtiim * dpsi; - zz[2] = dtiip * dtiip * dphi; - } else { - if (orgati) { - temp1 = z__[iim1] / dtiim; - temp1 *= temp1; - temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[ - iip1]) * temp1; - c__ = temp - dtiip * (dpsi + dphi) - temp2; - zz[0] = z__[iim1] * z__[iim1]; - if (dpsi < temp1) { - zz[2] = dtiip * dtiip * dphi; - } else { - zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi); - } - } else { - temp1 = z__[iip1] / dtiip; - temp1 *= temp1; - temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[ - iip1]) * temp1; - c__ = temp - dtiim * (dpsi + dphi) - temp2; - if (dphi < temp1) { - zz[0] = dtiim * dtiim * dpsi; - } else { - zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1)); - } - zz[2] = z__[iip1] * z__[iip1]; - } - } - dd[0] = dtiim; - dd[1] = delta[ii] * work[ii]; - dd[2] = dtiip; - if (dlaed6_(interp, &niter, &orgati, &c__, dd, zz, &w, &eta, info)!=TCL_OK) { return TCL_ERROR; } + i__2 = *ns + *ns * t_dim1; + foo = (d__1 = t[i__2].r, abs(d__1)) + (d__2 = d_imag(&t[*ns + *ns * + t_dim1]), abs(d__2)); + if (foo == 0.) { + foo = (d__1 = s.r, abs(d__1)) + (d__2 = d_imag(&s), abs(d__2)); + } + i__2 = *ns * v_dim1 + 1; + d__5 = smlnum, d__6 = ulp * foo; + if (((d__1 = s.r, abs(d__1)) + (d__2 = d_imag(&s), abs(d__2))) * (( + d__3 = v[i__2].r, abs(d__3)) + (d__4 = d_imag(&v[*ns * v_dim1 + + 1]), abs(d__4))) <= max(d__5,d__6)) { - if (*info != 0) { - goto L240; - } - } + --(*ns); + } else { - if (w * eta >= 0.) { - eta = -w / dw; - } - if (orgati) { - temp1 = work[*i__] * delta[*i__]; - temp = eta - temp1; - } else { - temp1 = work[ip1] * delta[ip1]; - temp = eta - temp1; - } - if (temp > sg2ub || temp < sg2lb) { - if (w < 0.) 
{ - eta = (sg2ub - tau) / 2.; - } else { - eta = (sg2lb - tau) / 2.; + + ifst = *ns; + if (ztrexc_(interp, "V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, & ilst, &info)!=TCL_OK) { return TCL_ERROR; } + + + ++ilst; + } + } + + + if (*ns == 0) { + s.r = 0., s.i = 0.; + } + + if (*ns < jw) { + + + i__1 = *ns; + for (i__ = infqr + 1; i__ <= i__1; ++i__) { + ifst = i__; + i__2 = *ns; + for (j = i__ + 1; j <= i__2; ++j) { + i__3 = j + j * t_dim1; + i__4 = ifst + ifst * t_dim1; + if ((d__1 = t[i__3].r, abs(d__1)) + (d__2 = d_imag(&t[j + j * + t_dim1]), abs(d__2)) > (d__3 = t[i__4].r, abs(d__3)) + + (d__4 = d_imag(&t[ifst + ifst * t_dim1]), abs(d__4)) + ) { + ifst = j; } } + ilst = i__; + if (ifst != ilst) { + if (ztrexc_(interp, "V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &info)!=TCL_OK) { return TCL_ERROR; } - tau += eta; - eta /= *sigma + sqrt(*sigma * *sigma + eta); - *sigma += eta; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - work[j] += eta; - delta[j] -= eta; } + } + } - prew = w; + i__1 = jw; + for (i__ = infqr + 1; i__ <= i__1; ++i__) { + i__2 = kwtop + i__ - 1; + i__3 = i__ + i__ * t_dim1; + sh[i__2].r = t[i__3].r, sh[i__2].i = t[i__3].i; + } - dpsi = 0.; - psi = 0.; - erretm = 0.; - i__1 = iim1; - for (j = 1; j <= i__1; ++j) { - temp = z__[j] / (work[j] * delta[j]); - psi += z__[j] * temp; - dpsi += temp * temp; - erretm += psi; - } - erretm = abs(erretm); + if (*ns < jw || s.r == 0. && s.i == 0.) { + if (*ns > 1 && (s.r != 0. || s.i != 0.)) { - dphi = 0.; - phi = 0.; - i__1 = iip1; - for (j = *n; j >= i__1; --j) { - temp = z__[j] / (work[j] * delta[j]); - phi += z__[j] * temp; - dphi += temp * temp; - erretm += phi; - } - temp = z__[ii] / (work[ii] * delta[ii]); - dw = dpsi + dphi + temp * temp; - temp = z__[ii] * temp; - w = rhoinv + phi + psi + temp; - erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. - + abs(tau) * dw; - if (w * prew > 0. && abs(w) > abs(prew) / 10.) { - swtch = ! 
swtch; - } + if (zcopy_(interp, ns, &v[v_offset], ldv, &work[1], &zlaqr2_c__1)!=TCL_OK) { return TCL_ERROR; } - if (w <= 0.) { - sg2lb = max(sg2lb,tau); - } else { - sg2ub = min(sg2ub,tau); + i__1 = *ns; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + d_cnjg(&z__1, &work[i__]); + work[i__2].r = z__1.r, work[i__2].i = z__1.i; } + beta.r = work[1].r, beta.i = work[1].i; + if (zlarfg_(interp, ns, &beta, &work[2], &zlaqr2_c__1, &tau)!=TCL_OK) { return TCL_ERROR; } - } + work[1].r = 1., work[1].i = 0.; + i__1 = jw - 2; + i__2 = jw - 2; + if (zlaset_(interp, "L", &i__1, &i__2, &zlaqr2_c_b1, &zlaqr2_c_b1, &t[t_dim1 + 3], ldt)!=TCL_OK) { return TCL_ERROR; } - *info = 1; - } + d_cnjg(&z__1, &tau); + if (zlarf_(interp, "L", ns, &jw, &work[1], &zlaqr2_c__1, &z__1, &t[t_offset], ldt, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } -L240: -return TCL_OK; + if (zlarf_(interp, "R", ns, ns, &work[1], &zlaqr2_c__1, &tau, &t[t_offset], ldt, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } -} /* dlasd4_ */ -static /* Subroutine */ int dlasq4_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, integer *n0in, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *tau, integer *ttype, doublereal *g) -{ - integer i__1; - doublereal d__1, d__2; - double sqrt(doublereal); + if (zlarf_(interp, "R", &jw, ns, &work[1], &zlaqr2_c__1, &tau, &v[v_offset], ldv, & work[jw + 1])!=TCL_OK) { return TCL_ERROR; } + - doublereal s, a2, b1, b2; - integer i4, nn, np; - doublereal gam, gap1, gap2; + i__1 = *lwork - jw; + if (zgehrd_(interp, &jw, &zlaqr2_c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1], &i__1, &info)!=TCL_OK) { return TCL_ERROR; } + + + } + + + if (kwtop > 1) { + i__1 = kwtop + (kwtop - 1) * h_dim1; + d_cnjg(&z__2, &v[v_dim1 + 1]); + z__1.r = s.r * z__2.r - s.i * z__2.i, z__1.i = s.r * z__2.i + s.i + * z__2.r; + h__[i__1].r = z__1.r, h__[i__1].i = z__1.i; + } + if (zlacpy_(interp, "U", &jw, 
&jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } + i__1 = jw - 1; + i__2 = *ldt + 1; + i__3 = *ldh + 1; + if (zcopy_(interp, &i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1], &i__3)!=TCL_OK) { return TCL_ERROR; } + if (*ns > 1 && (s.r != 0. || s.i != 0.)) { + i__1 = *lwork - jw; + if (zunmhr_(interp, "R", "N", &jw, ns, &zlaqr2_c__1, ns, &t[t_offset], ldt, &work[1], &v[v_offset], ldv, &work[jw + 1], &i__1, &info)!=TCL_OK) { return TCL_ERROR; } + } + if (*wantt) { + ltop = 1; + } else { + ltop = *ktop; + } + i__1 = kwtop - 1; + i__2 = *nv; + for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += + i__2) { + i__3 = *nv, i__4 = kwtop - krow; + kln = min(i__3,i__4); + if (zgemm_(interp, "N", "N", &kln, &jw, &jw, &zlaqr2_c_b2, &h__[krow + kwtop * h_dim1], ldh, &v[v_offset], ldv, &zlaqr2_c_b1, &wv[wv_offset], + ldwv)!=TCL_OK) { return TCL_ERROR; } + if (zlacpy_(interp, "A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } + } + if (*wantt) { + i__2 = *n; + i__1 = *nh; + for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2; + kcol += i__1) { + i__3 = *nh, i__4 = *n - kcol + 1; + kln = min(i__3,i__4); + if (zgemm_(interp, "C", "N", &jw, &kln, &jw, &zlaqr2_c_b2, &v[v_offset], ldv, & h__[kwtop + kcol * h_dim1], ldh, &zlaqr2_c_b1, &t[t_offset], + ldt)!=TCL_OK) { return TCL_ERROR; } + if (zlacpy_(interp, "A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol * h_dim1], ldh)!=TCL_OK) { return TCL_ERROR; } + } + } + if (*wantz) { + i__1 = *ihiz; + i__2 = *nv; + for (krow = *iloz; i__2 < 0 ? 
krow >= i__1 : krow <= i__1; krow += + i__2) { + i__3 = *nv, i__4 = *ihiz - krow + 1; + kln = min(i__3,i__4); + if (zgemm_(interp, "N", "N", &kln, &jw, &jw, &zlaqr2_c_b2, &z__[krow + kwtop * z_dim1], ldz, &v[v_offset], ldv, &zlaqr2_c_b1, &wv[wv_offset] +, ldwv)!=TCL_OK) { return TCL_ERROR; } + if (zlacpy_(interp, "A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow + kwtop * z_dim1], ldz)!=TCL_OK) { return TCL_ERROR; } - --z__; - if (*dmin__ <= 0.) { - *tau = -(*dmin__); - *ttype = -1; -return TCL_OK; + } + } } - nn = (*n0 << 2) + *pp; - if (*n0in == *n0) { + *nd = jw - *ns; - if (*dmin__ == *dn || *dmin__ == *dn1) { - b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]); - b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]); - a2 = z__[nn - 7] + z__[nn - 5]; + *ns -= infqr; - if (*dmin__ == *dn && *dmin1 == *dn1) { - gap2 = *dmin2 - a2 - *dmin2 * .25; - if (gap2 > 0. && gap2 > b2) { - gap1 = a2 - *dn - b2 / gap2 * b2; - } else { - gap1 = a2 - *dn - (b1 + b2); - } - if (gap1 > 0. && gap1 > b1) { - d__1 = *dn - b1 / gap1 * b1, d__2 = *dmin__ * .5; - s = max(d__1,d__2); - *ttype = -2; - } else { - s = 0.; - if (*dn > b1) { - s = *dn - b1; - } - if (a2 > b1 + b2) { - d__1 = s, d__2 = a2 - (b1 + b2); - s = min(d__1,d__2); - } - d__1 = s, d__2 = *dmin__ * .333; - s = max(d__1,d__2); - *ttype = -3; - } - } else { + d__1 = (doublereal) lwkopt; + z__1.r = d__1, z__1.i = 0.; + work[1].r = z__1.r, work[1].i = z__1.i; - *ttype = -4; - s = *dmin__ * .25; - if (*dmin__ == *dn) { - gam = *dn; - a2 = 0.; - if (z__[nn - 5] > z__[nn - 7]) { -return TCL_OK; - } - b2 = z__[nn - 5] / z__[nn - 7]; - np = nn - 9; - } else { - np = nn - (*pp << 1); - b2 = z__[np - 2]; - gam = *dn1; - if (z__[np - 4] > z__[np - 2]) { -return TCL_OK; - } - a2 = z__[np - 4] / z__[np - 2]; - if (z__[nn - 9] > z__[nn - 11]) { return TCL_OK; - } - b2 = z__[nn - 9] / z__[nn - 11]; - np = nn - 13; - } +} /* zlaqr2_ */ +static /* Subroutine */ int zlaqr1_ (Tcl_Interp *interp, integer *n, doublecomplex *h__, integer *ldh, doublecomplex 
*s1, doublecomplex *s2, doublecomplex *v) +{ + integer h_dim1, h_offset, i__1, i__2, i__3, i__4; + doublereal d__1, d__2, d__3, d__4, d__5, d__6; + doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8; + double d_imag(doublecomplex *); - a2 += b2; - i__1 = (*i0 << 2) - 1 + *pp; - for (i4 = np; i4 >= i__1; i4 += -4) { - if (b2 == 0.) { - goto L20; - } - b1 = b2; - if (z__[i4] > z__[i4 - 2]) { -return TCL_OK; - } - b2 *= z__[i4] / z__[i4 - 2]; - a2 += b2; - if (max(b2,b1) * 100. < a2 || .563 < a2) { - goto L20; - } - } -L20: - a2 *= 1.05; + doublereal s; + doublecomplex h21s, h31s; - if (a2 < .563) { - s = gam * (1. - sqrt(a2)) / (a2 + 1.); - } - } - } else if (*dmin__ == *dn2) { - *ttype = -5; - s = *dmin__ * .25; - np = nn - (*pp << 1); - b1 = z__[np - 2]; - b2 = z__[np - 6]; - gam = *dn2; - if (z__[np - 8] > b2 || z__[np - 4] > b1) { -return TCL_OK; - } - a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.); - if (*n0 - *i0 > 2) { - b2 = z__[nn - 13] / z__[nn - 15]; - a2 += b2; - i__1 = (*i0 << 2) - 1 + *pp; - for (i4 = nn - 17; i4 >= i__1; i4 += -4) { - if (b2 == 0.) { - goto L40; - } - b1 = b2; - if (z__[i4] > z__[i4 - 2]) { -return TCL_OK; - } - b2 *= z__[i4] / z__[i4 - 2]; - a2 += b2; - if (max(b2,b1) * 100. < a2 || .563 < a2) { - goto L40; - } - } -L40: - a2 *= 1.05; - } - if (a2 < .563) { - s = gam * (1. - sqrt(a2)) / (a2 + 1.); - } - } else { - if (*ttype == -6) { - *g += (1. - *g) * .333; - } else if (*ttype == -18) { - *g = .083250000000000005; - } else { - *g = .25; - } - s = *g * *dmin__; - *ttype = -6; - } - } else if (*n0in == *n0 + 1) { - if (*dmin1 == *dn1 && *dmin2 == *dn2) { - *ttype = -7; - s = *dmin1 * .333; - if (z__[nn - 5] > z__[nn - 7]) { -return TCL_OK; - } - b1 = z__[nn - 5] / z__[nn - 7]; - b2 = b1; - if (b2 == 0.) 
{ - goto L60; - } - i__1 = (*i0 << 2) - 1 + *pp; - for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) { - a2 = b1; - if (z__[i4] > z__[i4 - 2]) { -return TCL_OK; - } - b1 *= z__[i4] / z__[i4 - 2]; - b2 += b1; - if (max(b1,a2) * 100. < b2) { - goto L60; - } - } -L60: - b2 = sqrt(b2 * 1.05); - d__1 = b2; - a2 = *dmin1 / (d__1 * d__1 + 1.); - gap2 = *dmin2 * .5 - a2; - if (gap2 > 0. && gap2 > b2 * a2) { - d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2); - s = max(d__1,d__2); - } else { - d__1 = s, d__2 = a2 * (1. - b2 * 1.01); - s = max(d__1,d__2); - *ttype = -8; - } + h_dim1 = *ldh; + h_offset = 1 + h_dim1; + h__ -= h_offset; + --v; + + if (*n == 2) { + i__1 = h_dim1 + 1; + z__2.r = h__[i__1].r - s2->r, z__2.i = h__[i__1].i - s2->i; + z__1.r = z__2.r, z__1.i = z__2.i; + i__2 = h_dim1 + 2; + s = (d__1 = z__1.r, abs(d__1)) + (d__2 = d_imag(&z__1), abs(d__2)) + ( + (d__3 = h__[i__2].r, abs(d__3)) + (d__4 = d_imag(&h__[h_dim1 + + 2]), abs(d__4))); + if (s == 0.) { + v[1].r = 0., v[1].i = 0.; + v[2].r = 0., v[2].i = 0.; + } else { + i__1 = h_dim1 + 2; + z__1.r = h__[i__1].r / s, z__1.i = h__[i__1].i / s; + h21s.r = z__1.r, h21s.i = z__1.i; + i__1 = (h_dim1 << 1) + 1; + z__2.r = h21s.r * h__[i__1].r - h21s.i * h__[i__1].i, z__2.i = + h21s.r * h__[i__1].i + h21s.i * h__[i__1].r; + i__2 = h_dim1 + 1; + z__4.r = h__[i__2].r - s1->r, z__4.i = h__[i__2].i - s1->i; + i__3 = h_dim1 + 1; + z__6.r = h__[i__3].r - s2->r, z__6.i = h__[i__3].i - s2->i; + z__5.r = z__6.r / s, z__5.i = z__6.i / s; + z__3.r = z__4.r * z__5.r - z__4.i * z__5.i, z__3.i = z__4.r * + z__5.i + z__4.i * z__5.r; + z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; + v[1].r = z__1.r, v[1].i = z__1.i; + i__1 = h_dim1 + 1; + i__2 = (h_dim1 << 1) + 2; + z__4.r = h__[i__1].r + h__[i__2].r, z__4.i = h__[i__1].i + h__[ + i__2].i; + z__3.r = z__4.r - s1->r, z__3.i = z__4.i - s1->i; + z__2.r = z__3.r - s2->r, z__2.i = z__3.i - s2->i; + z__1.r = h21s.r * z__2.r - h21s.i * z__2.i, z__1.i = h21s.r * + z__2.i 
+ h21s.i * z__2.r; + v[2].r = z__1.r, v[2].i = z__1.i; + } + } else { + i__1 = h_dim1 + 1; + z__2.r = h__[i__1].r - s2->r, z__2.i = h__[i__1].i - s2->i; + z__1.r = z__2.r, z__1.i = z__2.i; + i__2 = h_dim1 + 2; + i__3 = h_dim1 + 3; + s = (d__1 = z__1.r, abs(d__1)) + (d__2 = d_imag(&z__1), abs(d__2)) + ( + (d__3 = h__[i__2].r, abs(d__3)) + (d__4 = d_imag(&h__[h_dim1 + + 2]), abs(d__4))) + ((d__5 = h__[i__3].r, abs(d__5)) + (d__6 + = d_imag(&h__[h_dim1 + 3]), abs(d__6))); + if (s == 0.) { + v[1].r = 0., v[1].i = 0.; + v[2].r = 0., v[2].i = 0.; + v[3].r = 0., v[3].i = 0.; } else { + i__1 = h_dim1 + 2; + z__1.r = h__[i__1].r / s, z__1.i = h__[i__1].i / s; + h21s.r = z__1.r, h21s.i = z__1.i; + i__1 = h_dim1 + 3; + z__1.r = h__[i__1].r / s, z__1.i = h__[i__1].i / s; + h31s.r = z__1.r, h31s.i = z__1.i; + i__1 = h_dim1 + 1; + z__4.r = h__[i__1].r - s1->r, z__4.i = h__[i__1].i - s1->i; + i__2 = h_dim1 + 1; + z__6.r = h__[i__2].r - s2->r, z__6.i = h__[i__2].i - s2->i; + z__5.r = z__6.r / s, z__5.i = z__6.i / s; + z__3.r = z__4.r * z__5.r - z__4.i * z__5.i, z__3.i = z__4.r * + z__5.i + z__4.i * z__5.r; + i__3 = (h_dim1 << 1) + 1; + z__7.r = h__[i__3].r * h21s.r - h__[i__3].i * h21s.i, z__7.i = + h__[i__3].r * h21s.i + h__[i__3].i * h21s.r; + z__2.r = z__3.r + z__7.r, z__2.i = z__3.i + z__7.i; + i__4 = h_dim1 * 3 + 1; + z__8.r = h__[i__4].r * h31s.r - h__[i__4].i * h31s.i, z__8.i = + h__[i__4].r * h31s.i + h__[i__4].i * h31s.r; + z__1.r = z__2.r + z__8.r, z__1.i = z__2.i + z__8.i; + v[1].r = z__1.r, v[1].i = z__1.i; + i__1 = h_dim1 + 1; + i__2 = (h_dim1 << 1) + 2; + z__5.r = h__[i__1].r + h__[i__2].r, z__5.i = h__[i__1].i + h__[ + i__2].i; + z__4.r = z__5.r - s1->r, z__4.i = z__5.i - s1->i; + z__3.r = z__4.r - s2->r, z__3.i = z__4.i - s2->i; + z__2.r = h21s.r * z__3.r - h21s.i * z__3.i, z__2.i = h21s.r * + z__3.i + h21s.i * z__3.r; + i__3 = h_dim1 * 3 + 2; + z__6.r = h__[i__3].r * h31s.r - h__[i__3].i * h31s.i, z__6.i = + h__[i__3].r * h31s.i + h__[i__3].i * h31s.r; + z__1.r = 
z__2.r + z__6.r, z__1.i = z__2.i + z__6.i; + v[2].r = z__1.r, v[2].i = z__1.i; + i__1 = h_dim1 + 1; + i__2 = h_dim1 * 3 + 3; + z__5.r = h__[i__1].r + h__[i__2].r, z__5.i = h__[i__1].i + h__[ + i__2].i; + z__4.r = z__5.r - s1->r, z__4.i = z__5.i - s1->i; + z__3.r = z__4.r - s2->r, z__3.i = z__4.i - s2->i; + z__2.r = h31s.r * z__3.r - h31s.i * z__3.i, z__2.i = h31s.r * + z__3.i + h31s.i * z__3.r; + i__3 = (h_dim1 << 1) + 3; + z__6.r = h21s.r * h__[i__3].r - h21s.i * h__[i__3].i, z__6.i = + h21s.r * h__[i__3].i + h21s.i * h__[i__3].r; + z__1.r = z__2.r + z__6.r, z__1.i = z__2.i + z__6.i; + v[3].r = z__1.r, v[3].i = z__1.i; + } + } +return TCL_OK; +} /* zlaqr1_ */ +static /* Subroutine */ int dlarfx_ (Tcl_Interp *interp, char *side, integer *m, integer *n, doublereal * v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work) +{ + integer c_dim1, c_offset, i__1; + integer j; + doublereal t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6, v7, + v8, v9, t10, v10, sum; - s = *dmin1 * .25; - if (*dmin1 == *dn1) { - s = *dmin1 * .5; - } - *ttype = -9; - } - } else if (*n0in == *n0 + 2) { - if (*dmin2 == *dn2 && z__[nn - 5] * 2. < z__[nn - 7]) { - *ttype = -10; - s = *dmin2 * .333; - if (z__[nn - 5] > z__[nn - 7]) { -return TCL_OK; - } - b1 = z__[nn - 5] / z__[nn - 7]; - b2 = b1; - if (b2 == 0.) { - goto L80; - } - i__1 = (*i0 << 2) - 1 + *pp; - for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) { - if (z__[i4] > z__[i4 - 2]) { -return TCL_OK; - } - b1 *= z__[i4] / z__[i4 - 2]; - b2 += b1; - if (b1 * 100. < b2) { - goto L80; - } - } -L80: - b2 = sqrt(b2 * 1.05); - d__1 = b2; - a2 = *dmin2 / (d__1 * d__1 + 1.); - gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[ - nn - 9]) - a2; - if (gap2 > 0. && gap2 > b2 * a2) { - d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2); - s = max(d__1,d__2); - } else { - d__1 = s, d__2 = a2 * (1. 
- b2 * 1.01); - s = max(d__1,d__2); - } - } else { - s = *dmin2 * .25; - *ttype = -11; - } - } else if (*n0in > *n0 + 2) { - s = 0.; - *ttype = -12; - } - *tau = s; -return TCL_OK; -} /* dlasq4_ */ -static /* Subroutine */ int dlasq5_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *tau, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2, logical *ieee) -{ - integer i__1; - doublereal d__1, d__2; - doublereal d__; - integer j4, j4p2; - doublereal emin, temp; @@ -62575,296 +65025,624 @@ static /* Subroutine */ int dlasq5_ (Tcl_Interp *interp, integer *i0, integer *n + --v; + c_dim1 = *ldc; + c_offset = 1 + c_dim1; + c__ -= c_offset; + --work; + if (*tau == 0.) { +return TCL_OK; + } + if (lsame_(side, "L")) { + switch (*m) { + case 1: goto L10; + case 2: goto L30; + case 3: goto L50; + case 4: goto L70; + case 5: goto L90; + case 6: goto L110; + case 7: goto L130; + case 8: goto L150; + case 9: goto L170; + case 10: goto L190; + } + if (dlarf_(interp, side, m, n, &v[1], &dlarfx_c__1, tau, &c__[c_offset], ldc, &work[1])!=TCL_OK) { return TCL_ERROR; } + goto L410; +L10: + t1 = 1. 
- *tau * v[1] * v[1]; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1]; + } + goto L410; +L30: + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2]; + c__[j * c_dim1 + 1] -= sum * t1; + c__[j * c_dim1 + 2] -= sum * t2; + } + goto L410; +L50: + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * + c__[j * c_dim1 + 3]; + c__[j * c_dim1 + 1] -= sum * t1; + c__[j * c_dim1 + 2] -= sum * t2; + c__[j * c_dim1 + 3] -= sum * t3; + } + goto L410; +L70: - --z__; - if (*n0 - *i0 - 1 <= 0) { -return TCL_OK; - } + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * + c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4]; + c__[j * c_dim1 + 1] -= sum * t1; + c__[j * c_dim1 + 2] -= sum * t2; + c__[j * c_dim1 + 3] -= sum * t3; + c__[j * c_dim1 + 4] -= sum * t4; + } + goto L410; +L90: - j4 = (*i0 << 2) + *pp - 3; - emin = z__[j4 + 4]; - d__ = z__[j4] - *tau; - *dmin__ = d__; - *dmin1 = -z__[j4]; - if (*ieee) { + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * + c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ + j * c_dim1 + 5]; + c__[j * c_dim1 + 1] -= sum * t1; + c__[j * c_dim1 + 2] -= sum * t2; + c__[j * c_dim1 + 3] -= sum * t3; + c__[j * c_dim1 + 4] -= sum * t4; + c__[j * c_dim1 + 5] -= sum * t5; + } + goto L410; +L110: - if (*pp == 0) { - i__1 = *n0 - 3 << 2; - for (j4 = *i0 << 2; 
j4 <= i__1; j4 += 4) { - z__[j4 - 2] = d__ + z__[j4 - 1]; - temp = z__[j4 + 1] / z__[j4 - 2]; - d__ = d__ * temp - *tau; - *dmin__ = min(*dmin__,d__); - z__[j4] = z__[j4 - 1] * temp; - d__1 = z__[j4]; - emin = min(d__1,emin); - } - } else { - i__1 = *n0 - 3 << 2; - for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { - z__[j4 - 3] = d__ + z__[j4]; - temp = z__[j4 + 2] / z__[j4 - 3]; - d__ = d__ * temp - *tau; - *dmin__ = min(*dmin__,d__); - z__[j4 - 1] = z__[j4] * temp; - d__1 = z__[j4 - 1]; - emin = min(d__1,emin); - } + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + v6 = v[6]; + t6 = *tau * v6; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * + c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ + j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6]; + c__[j * c_dim1 + 1] -= sum * t1; + c__[j * c_dim1 + 2] -= sum * t2; + c__[j * c_dim1 + 3] -= sum * t3; + c__[j * c_dim1 + 4] -= sum * t4; + c__[j * c_dim1 + 5] -= sum * t5; + c__[j * c_dim1 + 6] -= sum * t6; } + goto L410; +L130: - *dnm2 = d__; - *dmin2 = *dmin__; - j4 = (*n0 - 2 << 2) - *pp; - j4p2 = j4 + (*pp << 1) - 1; - z__[j4 - 2] = *dnm2 + z__[j4p2]; - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau; - *dmin__ = min(*dmin__,*dnm1); + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + v6 = v[6]; + t6 = *tau * v6; + v7 = v[7]; + t7 = *tau * v7; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * + c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ + j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * + c_dim1 + 7]; + c__[j * c_dim1 + 1] -= sum * t1; + c__[j * c_dim1 + 2] -= sum * t2; + c__[j * c_dim1 + 3] -= sum * t3; + 
c__[j * c_dim1 + 4] -= sum * t4; + c__[j * c_dim1 + 5] -= sum * t5; + c__[j * c_dim1 + 6] -= sum * t6; + c__[j * c_dim1 + 7] -= sum * t7; + } + goto L410; +L150: - *dmin1 = *dmin__; - j4 += 4; - j4p2 = j4 + (*pp << 1) - 1; - z__[j4 - 2] = *dnm1 + z__[j4p2]; - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau; - *dmin__ = min(*dmin__,*dn); - } else { + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + v6 = v[6]; + t6 = *tau * v6; + v7 = v[7]; + t7 = *tau * v7; + v8 = v[8]; + t8 = *tau * v8; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * + c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ + j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * + c_dim1 + 7] + v8 * c__[j * c_dim1 + 8]; + c__[j * c_dim1 + 1] -= sum * t1; + c__[j * c_dim1 + 2] -= sum * t2; + c__[j * c_dim1 + 3] -= sum * t3; + c__[j * c_dim1 + 4] -= sum * t4; + c__[j * c_dim1 + 5] -= sum * t5; + c__[j * c_dim1 + 6] -= sum * t6; + c__[j * c_dim1 + 7] -= sum * t7; + c__[j * c_dim1 + 8] -= sum * t8; + } + goto L410; +L170: - if (*pp == 0) { - i__1 = *n0 - 3 << 2; - for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { - z__[j4 - 2] = d__ + z__[j4 - 1]; - if (d__ < 0.) { -return TCL_OK; - } else { - z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]); - d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau; - } - *dmin__ = min(*dmin__,d__); - d__1 = emin, d__2 = z__[j4]; - emin = min(d__1,d__2); - } - } else { - i__1 = *n0 - 3 << 2; - for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { - z__[j4 - 3] = d__ + z__[j4]; - if (d__ < 0.) 
{ -return TCL_OK; - } else { - z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]); - d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau; - } - *dmin__ = min(*dmin__,d__); - d__1 = emin, d__2 = z__[j4 - 1]; - emin = min(d__1,d__2); - } + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + v6 = v[6]; + t6 = *tau * v6; + v7 = v[7]; + t7 = *tau * v7; + v8 = v[8]; + t8 = *tau * v8; + v9 = v[9]; + t9 = *tau * v9; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * + c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ + j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * + c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j * + c_dim1 + 9]; + c__[j * c_dim1 + 1] -= sum * t1; + c__[j * c_dim1 + 2] -= sum * t2; + c__[j * c_dim1 + 3] -= sum * t3; + c__[j * c_dim1 + 4] -= sum * t4; + c__[j * c_dim1 + 5] -= sum * t5; + c__[j * c_dim1 + 6] -= sum * t6; + c__[j * c_dim1 + 7] -= sum * t7; + c__[j * c_dim1 + 8] -= sum * t8; + c__[j * c_dim1 + 9] -= sum * t9; } + goto L410; +L190: - *dnm2 = d__; - *dmin2 = *dmin__; - j4 = (*n0 - 2 << 2) - *pp; - j4p2 = j4 + (*pp << 1) - 1; - z__[j4 - 2] = *dnm2 + z__[j4p2]; - if (*dnm2 < 0.) 
{ -return TCL_OK; - } else { - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau; + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + v6 = v[6]; + t6 = *tau * v6; + v7 = v[7]; + t7 = *tau * v7; + v8 = v[8]; + t8 = *tau * v8; + v9 = v[9]; + t9 = *tau * v9; + v10 = v[10]; + t10 = *tau * v10; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * + c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ + j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * + c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j * + c_dim1 + 9] + v10 * c__[j * c_dim1 + 10]; + c__[j * c_dim1 + 1] -= sum * t1; + c__[j * c_dim1 + 2] -= sum * t2; + c__[j * c_dim1 + 3] -= sum * t3; + c__[j * c_dim1 + 4] -= sum * t4; + c__[j * c_dim1 + 5] -= sum * t5; + c__[j * c_dim1 + 6] -= sum * t6; + c__[j * c_dim1 + 7] -= sum * t7; + c__[j * c_dim1 + 8] -= sum * t8; + c__[j * c_dim1 + 9] -= sum * t9; + c__[j * c_dim1 + 10] -= sum * t10; } - *dmin__ = min(*dmin__,*dnm1); + goto L410; + } else { - *dmin1 = *dmin__; - j4 += 4; - j4p2 = j4 + (*pp << 1) - 1; - z__[j4 - 2] = *dnm1 + z__[j4p2]; - if (*dnm1 < 0.) 
{ -return TCL_OK; - } else { - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau; + + switch (*n) { + case 1: goto L210; + case 2: goto L230; + case 3: goto L250; + case 4: goto L270; + case 5: goto L290; + case 6: goto L310; + case 7: goto L330; + case 8: goto L350; + case 9: goto L370; + case 10: goto L390; } - *dmin__ = min(*dmin__,*dn); - } - z__[j4 + 2] = *dn; - z__[(*n0 << 2) - *pp] = emin; -return TCL_OK; + if (dlarf_(interp, side, m, n, &v[1], &dlarfx_c__1, tau, &c__[c_offset], ldc, &work[1])!=TCL_OK) { return TCL_ERROR; } + goto L410; +L210: -} /* dlasq5_ */ -static /* Subroutine */ int dlasq6_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2) -{ - integer i__1; - doublereal d__1, d__2; - doublereal d__; - integer j4, j4p2; - doublereal emin, temp; - doublereal safmin; + t1 = 1. - *tau * v[1] * v[1]; + i__1 = *m; + for (j = 1; j <= i__1; ++j) { + c__[j + c_dim1] = t1 * c__[j + c_dim1]; + } + goto L410; +L230: + + + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + i__1 = *m; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)]; + c__[j + c_dim1] -= sum * t1; + c__[j + (c_dim1 << 1)] -= sum * t2; + } + goto L410; +L250: + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + i__1 = *m; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * + c__[j + c_dim1 * 3]; + c__[j + c_dim1] -= sum * t1; + c__[j + (c_dim1 << 1)] -= sum * t2; + c__[j + c_dim1 * 3] -= sum * t3; + } + goto L410; +L270: + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + i__1 = *m; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * + c__[j + c_dim1 * 
3] + v4 * c__[j + (c_dim1 << 2)]; + c__[j + c_dim1] -= sum * t1; + c__[j + (c_dim1 << 1)] -= sum * t2; + c__[j + c_dim1 * 3] -= sum * t3; + c__[j + (c_dim1 << 2)] -= sum * t4; + } + goto L410; +L290: + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + i__1 = *m; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * + c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * + c__[j + c_dim1 * 5]; + c__[j + c_dim1] -= sum * t1; + c__[j + (c_dim1 << 1)] -= sum * t2; + c__[j + c_dim1 * 3] -= sum * t3; + c__[j + (c_dim1 << 2)] -= sum * t4; + c__[j + c_dim1 * 5] -= sum * t5; + } + goto L410; +L310: + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + v6 = v[6]; + t6 = *tau * v6; + i__1 = *m; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * + c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * + c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6]; + c__[j + c_dim1] -= sum * t1; + c__[j + (c_dim1 << 1)] -= sum * t2; + c__[j + c_dim1 * 3] -= sum * t3; + c__[j + (c_dim1 << 2)] -= sum * t4; + c__[j + c_dim1 * 5] -= sum * t5; + c__[j + c_dim1 * 6] -= sum * t6; + } + goto L410; +L330: + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + v6 = v[6]; + t6 = *tau * v6; + v7 = v[7]; + t7 = *tau * v7; + i__1 = *m; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * + c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * + c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ + j + c_dim1 * 7]; + c__[j + c_dim1] -= sum * t1; + c__[j + (c_dim1 << 1)] -= sum * t2; + c__[j + c_dim1 * 3] -= sum * t3; + c__[j + (c_dim1 << 2)] -= sum * t4; 
+ c__[j + c_dim1 * 5] -= sum * t5; + c__[j + c_dim1 * 6] -= sum * t6; + c__[j + c_dim1 * 7] -= sum * t7; + } + goto L410; +L350: + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + v6 = v[6]; + t6 = *tau * v6; + v7 = v[7]; + t7 = *tau * v7; + v8 = v[8]; + t8 = *tau * v8; + i__1 = *m; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * + c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * + c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ + j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)]; + c__[j + c_dim1] -= sum * t1; + c__[j + (c_dim1 << 1)] -= sum * t2; + c__[j + c_dim1 * 3] -= sum * t3; + c__[j + (c_dim1 << 2)] -= sum * t4; + c__[j + c_dim1 * 5] -= sum * t5; + c__[j + c_dim1 * 6] -= sum * t6; + c__[j + c_dim1 * 7] -= sum * t7; + c__[j + (c_dim1 << 3)] -= sum * t8; + } + goto L410; +L370: + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + v6 = v[6]; + t6 = *tau * v6; + v7 = v[7]; + t7 = *tau * v7; + v8 = v[8]; + t8 = *tau * v8; + v9 = v[9]; + t9 = *tau * v9; + i__1 = *m; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * + c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * + c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ + j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[ + j + c_dim1 * 9]; + c__[j + c_dim1] -= sum * t1; + c__[j + (c_dim1 << 1)] -= sum * t2; + c__[j + c_dim1 * 3] -= sum * t3; + c__[j + (c_dim1 << 2)] -= sum * t4; + c__[j + c_dim1 * 5] -= sum * t5; + c__[j + c_dim1 * 6] -= sum * t6; + c__[j + c_dim1 * 7] -= sum * t7; + c__[j + (c_dim1 << 3)] -= sum * t8; + c__[j + c_dim1 * 9] -= sum * t9; + } + goto L410; +L390: + v1 = v[1]; + t1 = *tau * v1; + v2 = v[2]; + t2 = *tau * v2; + v3 = v[3]; + t3 = *tau * v3; + v4 = 
v[4]; + t4 = *tau * v4; + v5 = v[5]; + t5 = *tau * v5; + v6 = v[6]; + t6 = *tau * v6; + v7 = v[7]; + t7 = *tau * v7; + v8 = v[8]; + t8 = *tau * v8; + v9 = v[9]; + t9 = *tau * v9; + v10 = v[10]; + t10 = *tau * v10; + i__1 = *m; + for (j = 1; j <= i__1; ++j) { + sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * + c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * + c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ + j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[ + j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10]; + c__[j + c_dim1] -= sum * t1; + c__[j + (c_dim1 << 1)] -= sum * t2; + c__[j + c_dim1 * 3] -= sum * t3; + c__[j + (c_dim1 << 2)] -= sum * t4; + c__[j + c_dim1 * 5] -= sum * t5; + c__[j + c_dim1 * 6] -= sum * t6; + c__[j + c_dim1 * 7] -= sum * t7; + c__[j + (c_dim1 << 3)] -= sum * t8; + c__[j + c_dim1 * 9] -= sum * t9; + c__[j + c_dim1 * 10] -= sum * t10; + } + goto L410; + } +L410: +return TCL_OK; +} /* dlarfx_ */ +static /* Subroutine */ int dlasd4_ (Tcl_Interp *interp, integer *n, integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal * sigma, doublereal *work, integer *info) +{ + integer i__1; + doublereal d__1; + double sqrt(doublereal); + doublereal a, b, c__; + integer j; + doublereal w, dd[3]; + integer ii; + doublereal dw, zz[3]; + integer ip1; + doublereal eta, phi, eps, tau, psi; + integer iim1, iip1; + doublereal dphi, dpsi; + integer iter; + doublereal temp, prew, sg2lb, sg2ub, temp1, temp2, dtiim, delsq, dtiip; + integer niter; + doublereal dtisq; + logical swtch; + doublereal dtnsq; + doublereal delsq2, dtnsq1; + logical swtch3; + logical orgati; + doublereal erretm, dtipsq, rhoinv; - --z__; - if (*n0 - *i0 - 1 <= 0) { -return TCL_OK; - } - safmin = dlamch_("Safe minimum"); - j4 = (*i0 << 2) + *pp - 3; - emin = z__[j4 + 4]; - d__ = z__[j4]; - *dmin__ = d__; - if (*pp == 0) { - i__1 = *n0 - 3 << 2; - for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { - z__[j4 - 2] = d__ + z__[j4 
- 1]; - if (z__[j4 - 2] == 0.) { - z__[j4] = 0.; - d__ = z__[j4 + 1]; - *dmin__ = d__; - emin = 0.; - } else if (safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4 - - 2] < z__[j4 + 1]) { - temp = z__[j4 + 1] / z__[j4 - 2]; - z__[j4] = z__[j4 - 1] * temp; - d__ *= temp; - } else { - z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]); - d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]); - } - *dmin__ = min(*dmin__,d__); - d__1 = emin, d__2 = z__[j4]; - emin = min(d__1,d__2); - } - } else { - i__1 = *n0 - 3 << 2; - for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { - z__[j4 - 3] = d__ + z__[j4]; - if (z__[j4 - 3] == 0.) { - z__[j4 - 1] = 0.; - d__ = z__[j4 + 2]; - *dmin__ = d__; - emin = 0.; - } else if (safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4 - - 3] < z__[j4 + 2]) { - temp = z__[j4 + 2] / z__[j4 - 3]; - z__[j4 - 1] = z__[j4] * temp; - d__ *= temp; - } else { - z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]); - d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]); - } - *dmin__ = min(*dmin__,d__); - d__1 = emin, d__2 = z__[j4 - 1]; - emin = min(d__1,d__2); - } - } - *dnm2 = d__; - *dmin2 = *dmin__; - j4 = (*n0 - 2 << 2) - *pp; - j4p2 = j4 + (*pp << 1) - 1; - z__[j4 - 2] = *dnm2 + z__[j4p2]; - if (z__[j4 - 2] == 0.) { - z__[j4] = 0.; - *dnm1 = z__[j4p2 + 2]; - *dmin__ = *dnm1; - emin = 0.; - } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < - z__[j4p2 + 2]) { - temp = z__[j4p2 + 2] / z__[j4 - 2]; - z__[j4] = z__[j4p2] * temp; - *dnm1 = *dnm2 * temp; - } else { - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]); - } - *dmin__ = min(*dmin__,*dnm1); - *dmin1 = *dmin__; - j4 += 4; - j4p2 = j4 + (*pp << 1) - 1; - z__[j4 - 2] = *dnm1 + z__[j4p2]; - if (z__[j4 - 2] == 0.) 
{ - z__[j4] = 0.; - *dn = z__[j4p2 + 2]; - *dmin__ = *dn; - emin = 0.; - } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < - z__[j4p2 + 2]) { - temp = z__[j4p2 + 2] / z__[j4 - 2]; - z__[j4] = z__[j4p2] * temp; - *dn = *dnm1 * temp; - } else { - z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); - *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]); - } - *dmin__ = min(*dmin__,*dn); - z__[j4 + 2] = *dn; - z__[(*n0 << 2) - *pp] = emin; -return TCL_OK; -} /* dlasq6_ */ -static /* Subroutine */ int dlaexc_ (Tcl_Interp *interp, logical *wantq, integer *n, doublereal *t, integer *ldt, doublereal *q, integer *ldq, integer *j1, integer *n1, integer *n2, doublereal *work, integer *info) -{ - integer q_dim1, q_offset, t_dim1, t_offset, i__1; - doublereal d__1, d__2, d__3; - doublereal d__[16] /* was [4][4] */; - integer k; - doublereal u[3], x[4] /* was [2][2] */; - integer j2, j3, j4; - doublereal u1[3], u2[3]; - integer nd; - doublereal cs, t11, t22, t33, sn, wi1, wi2, wr1, wr2, eps, tau, tau1, - tau2; - integer ierr; - doublereal temp; - doublereal scale, dnorm, xnorm; - doublereal thresh, smlnum; @@ -62886,600 +65664,773 @@ static /* Subroutine */ int dlaexc_ (Tcl_Interp *interp, logical *wantq, integer - t_dim1 = *ldt; - t_offset = 1 + t_dim1; - t -= t_offset; - q_dim1 = *ldq; - q_offset = 1 + q_dim1; - q -= q_offset; --work; + --delta; + --z__; + --d__; *info = 0; + if (*n == 1) { - if (*n == 0 || *n1 == 0 || *n2 == 0) { + *sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]); + delta[1] = 1.; + work[1] = 1.; return TCL_OK; } - if (*j1 + *n1 > *n) { + if (*n == 2) { + if (dlasd5_(interp, i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1])!=TCL_OK) { return TCL_ERROR; } + return TCL_OK; } - j2 = *j1 + 1; - j3 = *j1 + 2; - j4 = *j1 + 3; - - if (*n1 == 1 && *n2 == 1) { + eps = dlamch_("Epsilon"); + rhoinv = 1. 
/ *rho; - t11 = t[*j1 + *j1 * t_dim1]; - t22 = t[j2 + j2 * t_dim1]; + if (*i__ == *n) { - d__1 = t22 - t11; - if (dlartg_(interp, &t[*j1 + j2 * t_dim1], &d__1, &cs, &sn, &temp)!=TCL_OK) { return TCL_ERROR; } + ii = *n - 1; + niter = 1; - if (j3 <= *n) { - i__1 = *n - *j1 - 1; - if (drot_(interp, &i__1, &t[*j1 + j3 * t_dim1], ldt, &t[j2 + j3 * t_dim1], ldt, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } + temp = *rho / 2.; + temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp)); + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + work[j] = d__[j] + d__[*n] + temp1; + delta[j] = d__[j] - d__[*n] - temp1; } - i__1 = *j1 - 1; - if (drot_(interp, &i__1, &t[*j1 * t_dim1 + 1], &dlaexc_c__1, &t[j2 * t_dim1 + 1], &dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - - - - t[*j1 + *j1 * t_dim1] = t22; - t[j2 + j2 * t_dim1] = t11; - - if (*wantq) { - - - if (drot_(interp, n, &q[*j1 * q_dim1 + 1], &dlaexc_c__1, &q[j2 * q_dim1 + 1], &dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - + psi = 0.; + i__1 = *n - 2; + for (j = 1; j <= i__1; ++j) { + psi += z__[j] * z__[j] / (delta[j] * work[j]); } - } else { + c__ = rhoinv + psi; + w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[* + n] / (delta[*n] * work[*n]); + if (w <= 0.) { + temp1 = sqrt(d__[*n] * d__[*n] + *rho); + temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[* + n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] * + z__[*n] / *rho; - nd = *n1 + *n2; - if (dlacpy_(interp, "Full", &nd, &nd, &t[*j1 + *j1 * t_dim1], ldt, d__, &dlaexc_c__4)!=TCL_OK) { return TCL_ERROR; } + if (c__ <= temp) { + tau = *rho; + } else { + delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]); + a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[* + n]; + b = z__[*n] * z__[*n] * delsq; + if (a < 0.) { + tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); + } else { + tau = (a + sqrt(a * a + b * 4. 
* c__)) / (c__ * 2.); + } + } - dnorm = dlange_("Max", &nd, &nd, d__, &dlaexc_c__4, &work[1]); + } else { + delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]); + a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]; + b = z__[*n] * z__[*n] * delsq; - eps = dlamch_("P"); - smlnum = dlamch_("S") / eps; - d__1 = eps * 10. * dnorm; - thresh = max(d__1,smlnum); + if (a < 0.) { + tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); + } else { + tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); + } - if (dlasy2_(interp, &dlaexc_c_false, &dlaexc_c_false, &dlaexc_c_n1, n1, n2, d__, &dlaexc_c__4, &d__[*n1 + 1 + (*n1 + 1 << 2) - 5], &dlaexc_c__4, &d__[(*n1 + 1 << 2) - 4], &dlaexc_c__4, & - scale, x, &dlaexc_c__2, &xnorm, &ierr)!=TCL_OK) { return TCL_ERROR; } + } + eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau)); - k = *n1 + *n1 + *n2 - 3; - switch (k) { - case 1: goto L10; - case 2: goto L20; - case 3: goto L30; + *sigma = d__[*n] + eta; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + delta[j] = d__[j] - d__[*i__] - eta; + work[j] = d__[j] + d__[*i__] + eta; } -L10: + dpsi = 0.; + psi = 0.; + erretm = 0.; + i__1 = ii; + for (j = 1; j <= i__1; ++j) { + temp = z__[j] / (delta[j] * work[j]); + psi += z__[j] * temp; + dpsi += temp * temp; + erretm += psi; + } + erretm = abs(erretm); - u[0] = scale; - u[1] = x[0]; - u[2] = x[2]; - if (dlarfg_(interp, &dlaexc_c__3, &u[2], u, &dlaexc_c__1, &tau)!=TCL_OK) { return TCL_ERROR; } + temp = z__[*n] / (delta[*n] * work[*n]); + phi = z__[*n] * temp; + dphi = temp * temp; + erretm = (-phi - psi) * 8. 
+ erretm - phi + rhoinv + abs(tau) * (dpsi + + dphi); - u[2] = 1.; - t11 = t[*j1 + *j1 * t_dim1]; + w = rhoinv + phi + psi; - if (dlarfx_(interp, "L", &dlaexc_c__3, &dlaexc_c__3, u, &tau, d__, &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } + if (abs(w) <= eps * erretm) { + goto L240; + } - if (dlarfx_(interp, "R", &dlaexc_c__3, &dlaexc_c__3, u, &tau, d__, &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } + + ++niter; + dtnsq1 = work[*n - 1] * delta[*n - 1]; + dtnsq = work[*n] * delta[*n]; + c__ = w - dtnsq1 * dpsi - dtnsq * dphi; + a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi); + b = dtnsq * dtnsq1 * w; + if (c__ < 0.) { + c__ = abs(c__); + } + if (c__ == 0.) { + eta = *rho - *sigma * *sigma; + } else if (a >= 0.) { + eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ + * 2.); + } else { + eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) + ); + } + if (w * eta > 0.) { + eta = -w / (dpsi + dphi); + } + temp = eta - dtnsq; + if (temp > *rho) { + eta = *rho + dtnsq; + } - d__2 = abs(d__[2]), d__3 = abs(d__[6]), d__2 = max(d__2,d__3), d__3 = - (d__1 = d__[10] - t11, abs(d__1)); - if (max(d__2,d__3) > thresh) { - goto L50; + tau += eta; + eta /= *sigma + sqrt(eta + *sigma * *sigma); + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + delta[j] -= eta; + work[j] += eta; } + *sigma += eta; - i__1 = *n - *j1 + 1; - if (dlarfx_(interp, "L", &dlaexc_c__3, &i__1, u, &tau, &t[*j1 + *j1 * t_dim1], ldt, & work[1])!=TCL_OK) { return TCL_ERROR; } + dpsi = 0.; + psi = 0.; + erretm = 0.; + i__1 = ii; + for (j = 1; j <= i__1; ++j) { + temp = z__[j] / (work[j] * delta[j]); + psi += z__[j] * temp; + dpsi += temp * temp; + erretm += psi; + } + erretm = abs(erretm); - if (dlarfx_(interp, "R", &j2, &dlaexc_c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1])!=TCL_OK) { return TCL_ERROR; } + temp = z__[*n] / (work[*n] * delta[*n]); + phi = z__[*n] * temp; + dphi = temp * temp; + erretm = (-phi - psi) * 8. 
+ erretm - phi + rhoinv + abs(tau) * (dpsi + + dphi); - t[j3 + *j1 * t_dim1] = 0.; - t[j3 + j2 * t_dim1] = 0.; - t[j3 + j3 * t_dim1] = t11; + w = rhoinv + phi + psi; - if (*wantq) { + iter = niter + 1; - if (dlarfx_(interp, "R", n, &dlaexc_c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[ 1])!=TCL_OK) { return TCL_ERROR; } + for (niter = iter; niter <= 20; ++niter) { - } - goto L40; + if (abs(w) <= eps * erretm) { + goto L240; + } -L20: + dtnsq1 = work[*n - 1] * delta[*n - 1]; + dtnsq = work[*n] * delta[*n]; + c__ = w - dtnsq1 * dpsi - dtnsq * dphi; + a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi); + b = dtnsq1 * dtnsq * w; + if (a >= 0.) { + eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( + c__ * 2.); + } else { + eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs( + d__1)))); + } - u[0] = -x[0]; - u[1] = -x[1]; - u[2] = scale; - if (dlarfg_(interp, &dlaexc_c__3, u, &u[1], &dlaexc_c__1, &tau)!=TCL_OK) { return TCL_ERROR; } + if (w * eta > 0.) { + eta = -w / (dpsi + dphi); + } + temp = eta - dtnsq; + if (temp <= 0.) { + eta /= 2.; + } - u[0] = 1.; - t33 = t[j3 + j3 * t_dim1]; + tau += eta; + eta /= *sigma + sqrt(eta + *sigma * *sigma); + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + delta[j] -= eta; + work[j] += eta; + } + *sigma += eta; - if (dlarfx_(interp, "L", &dlaexc_c__3, &dlaexc_c__3, u, &tau, d__, &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } - if (dlarfx_(interp, "R", &dlaexc_c__3, &dlaexc_c__3, u, &tau, d__, &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } + dpsi = 0.; + psi = 0.; + erretm = 0.; + i__1 = ii; + for (j = 1; j <= i__1; ++j) { + temp = z__[j] / (work[j] * delta[j]); + psi += z__[j] * temp; + dpsi += temp * temp; + erretm += psi; + } + erretm = abs(erretm); + temp = z__[*n] / (work[*n] * delta[*n]); + phi = z__[*n] * temp; + dphi = temp * temp; + erretm = (-phi - psi) * 8. 
+ erretm - phi + rhoinv + abs(tau) * ( + dpsi + dphi); - d__2 = abs(d__[1]), d__3 = abs(d__[2]), d__2 = max(d__2,d__3), d__3 = - (d__1 = d__[0] - t33, abs(d__1)); - if (max(d__2,d__3) > thresh) { - goto L50; + w = rhoinv + phi + psi; } - if (dlarfx_(interp, "R", &j3, &dlaexc_c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1])!=TCL_OK) { return TCL_ERROR; } - - i__1 = *n - *j1; - if (dlarfx_(interp, "L", &dlaexc_c__3, &i__1, u, &tau, &t[*j1 + j2 * t_dim1], ldt, &work[ 1])!=TCL_OK) { return TCL_ERROR; } - - + *info = 1; + goto L240; - t[*j1 + *j1 * t_dim1] = t33; - t[j2 + *j1 * t_dim1] = 0.; - t[j3 + *j1 * t_dim1] = 0.; - if (*wantq) { + } else { - if (dlarfx_(interp, "R", n, &dlaexc_c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[ 1])!=TCL_OK) { return TCL_ERROR; } + niter = 1; + ip1 = *i__ + 1; + delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]); + delsq2 = delsq / 2.; + temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2)); + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + work[j] = d__[j] + d__[*i__] + temp; + delta[j] = d__[j] - d__[*i__] - temp; } - goto L40; - -L30: - + psi = 0.; + i__1 = *i__ - 1; + for (j = 1; j <= i__1; ++j) { + psi += z__[j] * z__[j] / (work[j] * delta[j]); + } - u1[0] = -x[0]; - u1[1] = -x[1]; - u1[2] = scale; - if (dlarfg_(interp, &dlaexc_c__3, u1, &u1[1], &dlaexc_c__1, &tau1)!=TCL_OK) { return TCL_ERROR; } - - u1[0] = 1.; - - temp = -tau1 * (x[2] + u1[1] * x[3]); - u2[0] = -temp * u1[1] - x[3]; - u2[1] = -temp * u1[2]; - u2[2] = scale; - if (dlarfg_(interp, &dlaexc_c__3, u2, &u2[1], &dlaexc_c__1, &tau2)!=TCL_OK) { return TCL_ERROR; } + phi = 0.; + i__1 = *i__ + 2; + for (j = *n; j >= i__1; --j) { + phi += z__[j] * z__[j] / (work[j] * delta[j]); + } + c__ = rhoinv + psi + phi; + w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[ + ip1] * z__[ip1] / (work[ip1] * delta[ip1]); - u2[0] = 1.; + if (w > 0.) 
{ - if (dlarfx_(interp, "L", &dlaexc_c__3, &dlaexc_c__4, u1, &tau1, d__, &dlaexc_c__4, &work[1]) !=TCL_OK) { return TCL_ERROR; } + orgati = TRUE_; + sg2lb = 0.; + sg2ub = delsq2; + a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1]; + b = z__[*i__] * z__[*i__] * delsq; + if (a > 0.) { + tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( + d__1)))); + } else { + tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( + c__ * 2.); + } - if (dlarfx_(interp, "R", &dlaexc_c__4, &dlaexc_c__3, u1, &tau1, d__, &dlaexc_c__4, &work[1]) !=TCL_OK) { return TCL_ERROR; } + eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau)); + } else { - if (dlarfx_(interp, "L", &dlaexc_c__3, &dlaexc_c__4, u2, &tau2, &d__[1], &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } - if (dlarfx_(interp, "R", &dlaexc_c__4, &dlaexc_c__3, u2, &tau2, &d__[4], &dlaexc_c__4, &work[1])!=TCL_OK) { return TCL_ERROR; } + orgati = FALSE_; + sg2lb = -delsq2; + sg2ub = 0.; + a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1]; + b = z__[ip1] * z__[ip1] * delsq; + if (a < 0.) { + tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs( + d__1)))); + } else { + tau = -(a + sqrt((d__1 = a * a + b * 4. 
* c__, abs(d__1)))) / + (c__ * 2.); + } - d__1 = abs(d__[2]), d__2 = abs(d__[6]), d__1 = max(d__1,d__2), d__2 = - abs(d__[3]), d__1 = max(d__1,d__2), d__2 = abs(d__[7]); - if (max(d__1,d__2) > thresh) { - goto L50; + eta = tau / (d__[ip1] + sqrt((d__1 = d__[ip1] * d__[ip1] + tau, + abs(d__1)))); } - - i__1 = *n - *j1 + 1; - if (dlarfx_(interp, "L", &dlaexc_c__3, &i__1, u1, &tau1, &t[*j1 + *j1 * t_dim1], ldt, & work[1])!=TCL_OK) { return TCL_ERROR; } + if (orgati) { + ii = *i__; + *sigma = d__[*i__] + eta; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + work[j] = d__[j] + d__[*i__] + eta; + delta[j] = d__[j] - d__[*i__] - eta; + } + } else { + ii = *i__ + 1; + *sigma = d__[ip1] + eta; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + work[j] = d__[j] + d__[ip1] + eta; + delta[j] = d__[j] - d__[ip1] - eta; + } + } + iim1 = ii - 1; + iip1 = ii + 1; - if (dlarfx_(interp, "R", &j4, &dlaexc_c__3, u1, &tau1, &t[*j1 * t_dim1 + 1], ldt, &work[ 1])!=TCL_OK) { return TCL_ERROR; } + dpsi = 0.; + psi = 0.; + erretm = 0.; + i__1 = iim1; + for (j = 1; j <= i__1; ++j) { + temp = z__[j] / (work[j] * delta[j]); + psi += z__[j] * temp; + dpsi += temp * temp; + erretm += psi; + } + erretm = abs(erretm); - i__1 = *n - *j1 + 1; - if (dlarfx_(interp, "L", &dlaexc_c__3, &i__1, u2, &tau2, &t[j2 + *j1 * t_dim1], ldt, & work[1])!=TCL_OK) { return TCL_ERROR; } + dphi = 0.; + phi = 0.; + i__1 = iip1; + for (j = *n; j >= i__1; --j) { + temp = z__[j] / (work[j] * delta[j]); + phi += z__[j] * temp; + dphi += temp * temp; + erretm += phi; + } + w = rhoinv + phi + psi; - if (dlarfx_(interp, "R", &j4, &dlaexc_c__3, u2, &tau2, &t[j2 * t_dim1 + 1], ldt, &work[1])!=TCL_OK) { return TCL_ERROR; } + swtch3 = FALSE_; + if (orgati) { + if (w < 0.) { + swtch3 = TRUE_; + } + } else { + if (w > 0.) 
{ + swtch3 = TRUE_; + } + } + if (ii == 1 || ii == *n) { + swtch3 = FALSE_; + } + temp = z__[ii] / (work[ii] * delta[ii]); + dw = dpsi + dphi + temp * temp; + temp = z__[ii] * temp; + w += temp; + erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + + abs(tau) * dw; - t[j3 + *j1 * t_dim1] = 0.; - t[j3 + j2 * t_dim1] = 0.; - t[j4 + *j1 * t_dim1] = 0.; - t[j4 + j2 * t_dim1] = 0.; - if (*wantq) { + if (abs(w) <= eps * erretm) { + goto L240; + } + if (w <= 0.) { + sg2lb = max(sg2lb,tau); + } else { + sg2ub = min(sg2ub,tau); + } - if (dlarfx_(interp, "R", n, &dlaexc_c__3, u1, &tau1, &q[*j1 * q_dim1 + 1], ldq, & work[1])!=TCL_OK) { return TCL_ERROR; } + ++niter; + if (! swtch3) { + dtipsq = work[ip1] * delta[ip1]; + dtisq = work[*i__] * delta[*i__]; + if (orgati) { + d__1 = z__[*i__] / dtisq; + c__ = w - dtipsq * dw + delsq * (d__1 * d__1); + } else { + d__1 = z__[ip1] / dtipsq; + c__ = w - dtisq * dw - delsq * (d__1 * d__1); + } + a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw; + b = dtipsq * dtisq * w; + if (c__ == 0.) { + if (a == 0.) { + if (orgati) { + a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi + + dphi); + } else { + a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi + + dphi); + } + } + eta = b / a; + } else if (a <= 0.) { + eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( + c__ * 2.); + } else { + eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. 
* c__, abs( + d__1)))); + } + } else { - if (dlarfx_(interp, "R", n, &dlaexc_c__3, u2, &tau2, &q[j2 * q_dim1 + 1], ldq, &work[ 1])!=TCL_OK) { return TCL_ERROR; } + dtiim = work[iim1] * delta[iim1]; + dtiip = work[iip1] * delta[iip1]; + temp = rhoinv + psi + phi; + if (orgati) { + temp1 = z__[iim1] / dtiim; + temp1 *= temp1; + c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) * + (d__[iim1] + d__[iip1]) * temp1; + zz[0] = z__[iim1] * z__[iim1]; + if (dpsi < temp1) { + zz[2] = dtiip * dtiip * dphi; + } else { + zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi); + } + } else { + temp1 = z__[iip1] / dtiip; + temp1 *= temp1; + c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) * + (d__[iim1] + d__[iip1]) * temp1; + if (dphi < temp1) { + zz[0] = dtiim * dtiim * dpsi; + } else { + zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1)); + } + zz[2] = z__[iip1] * z__[iip1]; + } + zz[1] = z__[ii] * z__[ii]; + dd[0] = dtiim; + dd[1] = delta[ii] * work[ii]; + dd[2] = dtiip; + if (dlaed6_(interp, &niter, &orgati, &c__, dd, zz, &w, &eta, info)!=TCL_OK) { return TCL_ERROR; } + if (*info != 0) { + goto L240; + } } -L40: - if (*n2 == 2) { + if (w * eta >= 0.) { + eta = -w / dw; + } + if (orgati) { + temp1 = work[*i__] * delta[*i__]; + temp = eta - temp1; + } else { + temp1 = work[ip1] * delta[ip1]; + temp = eta - temp1; + } + if (temp > sg2ub || temp < sg2lb) { + if (w < 0.) 
{ + eta = (sg2ub - tau) / 2.; + } else { + eta = (sg2lb - tau) / 2.; + } + } + tau += eta; + eta /= *sigma + sqrt(*sigma * *sigma + eta); - if (dlanv2_(interp, &t[*j1 + *j1 * t_dim1], &t[*j1 + j2 * t_dim1], &t[j2 + * j1 * t_dim1], &t[j2 + j2 * t_dim1], &wr1, &wi1, &wr2, & - wi2, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } + prew = w; + *sigma += eta; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + work[j] += eta; + delta[j] -= eta; + } - i__1 = *n - *j1 - 1; - if (drot_(interp, &i__1, &t[*j1 + (*j1 + 2) * t_dim1], ldt, &t[j2 + (*j1 + 2) * t_dim1], ldt, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } + dpsi = 0.; + psi = 0.; + erretm = 0.; + i__1 = iim1; + for (j = 1; j <= i__1; ++j) { + temp = z__[j] / (work[j] * delta[j]); + psi += z__[j] * temp; + dpsi += temp * temp; + erretm += psi; + } + erretm = abs(erretm); - i__1 = *j1 - 1; - if (drot_(interp, &i__1, &t[*j1 * t_dim1 + 1], &dlaexc_c__1, &t[j2 * t_dim1 + 1], & dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } + dphi = 0.; + phi = 0.; + i__1 = iip1; + for (j = *n; j >= i__1; --j) { + temp = z__[j] / (work[j] * delta[j]); + phi += z__[j] * temp; + dphi += temp * temp; + erretm += phi; + } - if (*wantq) { - if (drot_(interp, n, &q[*j1 * q_dim1 + 1], &dlaexc_c__1, &q[j2 * q_dim1 + 1], & dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } + temp = z__[ii] / (work[ii] * delta[ii]); + dw = dpsi + dphi + temp * temp; + temp = z__[ii] * temp; + w = rhoinv + phi + psi + temp; + erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + + abs(tau) * dw; + if (w <= 0.) { + sg2lb = max(sg2lb,tau); + } else { + sg2ub = min(sg2ub,tau); + } + swtch = FALSE_; + if (orgati) { + if (-w > abs(prew) / 10.) { + swtch = TRUE_; + } + } else { + if (w > abs(prew) / 10.) 
{ + swtch = TRUE_; } } - if (*n1 == 2) { - - - j3 = *j1 + *n2; - j4 = j3 + 1; - if (dlanv2_(interp, &t[j3 + j3 * t_dim1], &t[j3 + j4 * t_dim1], &t[j4 + j3 * t_dim1], &t[j4 + j4 * t_dim1], &wr1, &wi1, &wr2, &wi2, & - cs, &sn)!=TCL_OK) { return TCL_ERROR; } + iter = niter + 1; - if (j3 + 2 <= *n) { - i__1 = *n - j3 - 1; - if (drot_(interp, &i__1, &t[j3 + (j3 + 2) * t_dim1], ldt, &t[j4 + (j3 + 2) * t_dim1], ldt, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } + for (niter = iter; niter <= 20; ++niter) { + if (abs(w) <= eps * erretm) { + goto L240; } - i__1 = j3 - 1; - if (drot_(interp, &i__1, &t[j3 * t_dim1 + 1], &dlaexc_c__1, &t[j4 * t_dim1 + 1], & dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } - if (*wantq) { - if (drot_(interp, n, &q[j3 * q_dim1 + 1], &dlaexc_c__1, &q[j4 * q_dim1 + 1], & dlaexc_c__1, &cs, &sn)!=TCL_OK) { return TCL_ERROR; } + if (! swtch3) { + dtipsq = work[ip1] * delta[ip1]; + dtisq = work[*i__] * delta[*i__]; + if (! swtch) { + if (orgati) { + d__1 = z__[*i__] / dtisq; + c__ = w - dtipsq * dw + delsq * (d__1 * d__1); + } else { + d__1 = z__[ip1] / dtipsq; + c__ = w - dtisq * dw - delsq * (d__1 * d__1); + } + } else { + temp = z__[ii] / (work[ii] * delta[ii]); + if (orgati) { + dpsi += temp * temp; + } else { + dphi += temp * temp; + } + c__ = w - dtisq * dpsi - dtipsq * dphi; + } + a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw; + b = dtipsq * dtisq * w; + if (c__ == 0.) { + if (a == 0.) { + if (! swtch) { + if (orgati) { + a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * + (dpsi + dphi); + } else { + a = z__[ip1] * z__[ip1] + dtisq * dtisq * ( + dpsi + dphi); + } + } else { + a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi; + } + } + eta = b / a; + } else if (a <= 0.) { + eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) + / (c__ * 2.); + } else { + eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. 
* c__, + abs(d__1)))); + } + } else { + dtiim = work[iim1] * delta[iim1]; + dtiip = work[iip1] * delta[iip1]; + temp = rhoinv + psi + phi; + if (swtch) { + c__ = temp - dtiim * dpsi - dtiip * dphi; + zz[0] = dtiim * dtiim * dpsi; + zz[2] = dtiip * dtiip * dphi; + } else { + if (orgati) { + temp1 = z__[iim1] / dtiim; + temp1 *= temp1; + temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[ + iip1]) * temp1; + c__ = temp - dtiip * (dpsi + dphi) - temp2; + zz[0] = z__[iim1] * z__[iim1]; + if (dpsi < temp1) { + zz[2] = dtiip * dtiip * dphi; + } else { + zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi); + } + } else { + temp1 = z__[iip1] / dtiip; + temp1 *= temp1; + temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[ + iip1]) * temp1; + c__ = temp - dtiim * (dpsi + dphi) - temp2; + if (dphi < temp1) { + zz[0] = dtiim * dtiim * dpsi; + } else { + zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1)); + } + zz[2] = z__[iip1] * z__[iip1]; + } + } + dd[0] = dtiim; + dd[1] = delta[ii] * work[ii]; + dd[2] = dtiip; + if (dlaed6_(interp, &niter, &orgati, &c__, dd, zz, &w, &eta, info)!=TCL_OK) { return TCL_ERROR; } + + if (*info != 0) { + goto L240; + } } - } - - } -return TCL_OK; - - -L50: - *info = 1; -return TCL_OK; - - -} /* dlaexc_ */ -static /* Subroutine */ int zrot_ (Tcl_Interp *interp, integer *n, doublecomplex *cx, integer *incx, doublecomplex *cy, integer *incy, doublereal *c__, doublecomplex *s) -{ - integer i__1, i__2, i__3, i__4; - doublecomplex z__1, z__2, z__3, z__4; - - void d_cnjg(doublecomplex *, doublecomplex *); - - integer i__, ix, iy; - doublecomplex stemp; - + if (w * eta >= 0.) { + eta = -w / dw; + } + if (orgati) { + temp1 = work[*i__] * delta[*i__]; + temp = eta - temp1; + } else { + temp1 = work[ip1] * delta[ip1]; + temp = eta - temp1; + } + if (temp > sg2ub || temp < sg2lb) { + if (w < 0.) 
{ + eta = (sg2ub - tau) / 2.; + } else { + eta = (sg2lb - tau) / 2.; + } + } + tau += eta; + eta /= *sigma + sqrt(*sigma * *sigma + eta); + *sigma += eta; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + work[j] += eta; + delta[j] -= eta; + } + prew = w; + dpsi = 0.; + psi = 0.; + erretm = 0.; + i__1 = iim1; + for (j = 1; j <= i__1; ++j) { + temp = z__[j] / (work[j] * delta[j]); + psi += z__[j] * temp; + dpsi += temp * temp; + erretm += psi; + } + erretm = abs(erretm); + dphi = 0.; + phi = 0.; + i__1 = iip1; + for (j = *n; j >= i__1; --j) { + temp = z__[j] / (work[j] * delta[j]); + phi += z__[j] * temp; + dphi += temp * temp; + erretm += phi; + } + temp = z__[ii] / (work[ii] * delta[ii]); + dw = dpsi + dphi + temp * temp; + temp = z__[ii] * temp; + w = rhoinv + phi + psi + temp; + erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + + abs(tau) * dw; + if (w * prew > 0. && abs(w) > abs(prew) / 10.) { + swtch = ! swtch; + } + if (w <= 0.) { + sg2lb = max(sg2lb,tau); + } else { + sg2ub = min(sg2ub,tau); + } + } - --cy; - --cx; + *info = 1; - if (*n <= 0) { -return TCL_OK; - } - if (*incx == 1 && *incy == 1) { - goto L20; } - - ix = 1; - iy = 1; - if (*incx < 0) { - ix = (-(*n) + 1) * *incx + 1; - } - if (*incy < 0) { - iy = (-(*n) + 1) * *incy + 1; - } - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = ix; - z__2.r = *c__ * cx[i__2].r, z__2.i = *c__ * cx[i__2].i; - i__3 = iy; - z__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, z__3.i = s->r * cy[ - i__3].i + s->i * cy[i__3].r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - stemp.r = z__1.r, stemp.i = z__1.i; - i__2 = iy; - i__3 = iy; - z__2.r = *c__ * cy[i__3].r, z__2.i = *c__ * cy[i__3].i; - d_cnjg(&z__4, s); - i__4 = ix; - z__3.r = z__4.r * cx[i__4].r - z__4.i * cx[i__4].i, z__3.i = z__4.r * - cx[i__4].i + z__4.i * cx[i__4].r; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i; - cy[i__2].r = z__1.r, cy[i__2].i = z__1.i; - i__2 = ix; - cx[i__2].r = stemp.r, cx[i__2].i = stemp.i; - ix 
+= *incx; - iy += *incy; - } +L240: return TCL_OK; -L20: - i__1 = *n; - for (i__ = 1; i__ <= i__1; ++i__) { - i__2 = i__; - z__2.r = *c__ * cx[i__2].r, z__2.i = *c__ * cx[i__2].i; - i__3 = i__; - z__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, z__3.i = s->r * cy[ - i__3].i + s->i * cy[i__3].r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - stemp.r = z__1.r, stemp.i = z__1.i; - i__2 = i__; - i__3 = i__; - z__2.r = *c__ * cy[i__3].r, z__2.i = *c__ * cy[i__3].i; - d_cnjg(&z__4, s); - i__4 = i__; - z__3.r = z__4.r * cx[i__4].r - z__4.i * cx[i__4].i, z__3.i = z__4.r * - cx[i__4].i + z__4.i * cx[i__4].r; - z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i; - cy[i__2].r = z__1.r, cy[i__2].i = z__1.i; - i__2 = i__; - cx[i__2].r = stemp.r, cx[i__2].i = stemp.i; - } -return TCL_OK; -} /* zrot_ */ -static /* Subroutine */ int zlartg_ (Tcl_Interp *interp, doublecomplex *f, doublecomplex *g, doublereal * cs, doublecomplex *sn, doublecomplex *r__) +} /* dlasd4_ */ +static /* Subroutine */ int dlasq4_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, integer *n0in, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *tau, integer *ttype, doublereal *g) { integer i__1; - doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10; - doublecomplex z__1, z__2, z__3; - - double log(doublereal), pow_di(doublereal *, integer *), d_imag( - doublecomplex *), sqrt(doublereal); - void d_cnjg(doublecomplex *, doublecomplex *); - - doublereal d__; - integer i__; - doublereal f2, g2; - doublecomplex ff; - doublereal di, dr; - doublecomplex fs, gs; - doublereal f2s, g2s, eps, scale; - integer count; - doublereal safmn2; - doublereal safmx2; - doublereal safmin; - - - - - - - - - - - - - + doublereal d__1, d__2; + double sqrt(doublereal); + doublereal s, a2, b1, b2; + integer i4, nn, np; + doublereal gam, gap1, gap2; - safmin = dlamch_("S"); - eps = dlamch_("E"); - d__1 = 
dlamch_("B"); - i__1 = (integer) (log(safmin / eps) / log(dlamch_("B")) / 2.); - safmn2 = pow_di(&d__1, &i__1); - safmx2 = 1. / safmn2; - d__7 = (d__1 = f->r, abs(d__1)), d__8 = (d__2 = d_imag(f), abs(d__2)); - d__9 = (d__3 = g->r, abs(d__3)), d__10 = (d__4 = d_imag(g), abs(d__4)); - d__5 = max(d__7,d__8), d__6 = max(d__9,d__10); - scale = max(d__5,d__6); - fs.r = f->r, fs.i = f->i; - gs.r = g->r, gs.i = g->i; - count = 0; - if (scale >= safmx2) { -L10: - ++count; - z__1.r = safmn2 * fs.r, z__1.i = safmn2 * fs.i; - fs.r = z__1.r, fs.i = z__1.i; - z__1.r = safmn2 * gs.r, z__1.i = safmn2 * gs.i; - gs.r = z__1.r, gs.i = z__1.i; - scale *= safmn2; - if (scale >= safmx2) { - goto L10; - } - } else if (scale <= safmn2) { - if (g->r == 0. && g->i == 0.) { - *cs = 1.; - sn->r = 0., sn->i = 0.; - r__->r = f->r, r__->i = f->i; -return TCL_OK; - } -L20: - --count; - z__1.r = safmx2 * fs.r, z__1.i = safmx2 * fs.i; - fs.r = z__1.r, fs.i = z__1.i; - z__1.r = safmx2 * gs.r, z__1.i = safmx2 * gs.i; - gs.r = z__1.r, gs.i = z__1.i; - scale *= safmx2; - if (scale <= safmn2) { - goto L20; - } - } - d__1 = fs.r; - d__2 = d_imag(&fs); - f2 = d__1 * d__1 + d__2 * d__2; - d__1 = gs.r; - d__2 = d_imag(&gs); - g2 = d__1 * d__1 + d__2 * d__2; - if (f2 <= max(g2,1.) * safmin) { - if (f->r == 0. && f->i == 0.) { - *cs = 0.; - d__2 = g->r; - d__3 = d_imag(g); - d__1 = dlapy2_(&d__2, &d__3); - r__->r = d__1, r__->i = 0.; - d__1 = gs.r; - d__2 = d_imag(&gs); - d__ = dlapy2_(&d__1, &d__2); - d__1 = gs.r / d__; - d__2 = -d_imag(&gs) / d__; - z__1.r = d__1, z__1.i = d__2; - sn->r = z__1.r, sn->i = z__1.i; -return TCL_OK; - } - d__1 = fs.r; - d__2 = d_imag(&fs); - f2s = dlapy2_(&d__1, &d__2); - g2s = sqrt(g2); - *cs = f2s / g2s; - d__3 = (d__1 = f->r, abs(d__1)), d__4 = (d__2 = d_imag(f), abs(d__2)); - if (max(d__3,d__4) > 1.) 
{ - d__1 = f->r; - d__2 = d_imag(f); - d__ = dlapy2_(&d__1, &d__2); - d__1 = f->r / d__; - d__2 = d_imag(f) / d__; - z__1.r = d__1, z__1.i = d__2; - ff.r = z__1.r, ff.i = z__1.i; - } else { - dr = safmx2 * f->r; - di = safmx2 * d_imag(f); - d__ = dlapy2_(&dr, &di); - d__1 = dr / d__; - d__2 = di / d__; - z__1.r = d__1, z__1.i = d__2; - ff.r = z__1.r, ff.i = z__1.i; - } - d__1 = gs.r / g2s; - d__2 = -d_imag(&gs) / g2s; - z__2.r = d__1, z__2.i = d__2; - z__1.r = ff.r * z__2.r - ff.i * z__2.i, z__1.i = ff.r * z__2.i + ff.i - * z__2.r; - sn->r = z__1.r, sn->i = z__1.i; - z__2.r = *cs * f->r, z__2.i = *cs * f->i; - z__3.r = sn->r * g->r - sn->i * g->i, z__3.i = sn->r * g->i + sn->i * - g->r; - z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; - r__->r = z__1.r, r__->i = z__1.i; - } else { - f2s = sqrt(g2 / f2 + 1.); - d__1 = f2s * fs.r; - d__2 = f2s * d_imag(&fs); - z__1.r = d__1, z__1.i = d__2; - r__->r = z__1.r, r__->i = z__1.i; - *cs = 1. / f2s; - d__ = f2 + g2; - d__1 = r__->r / d__; - d__2 = d_imag(r__) / d__; - z__1.r = d__1, z__1.i = d__2; - sn->r = z__1.r, sn->i = z__1.i; - d_cnjg(&z__2, &gs); - z__1.r = sn->r * z__2.r - sn->i * z__2.i, z__1.i = sn->r * z__2.i + - sn->i * z__2.r; - sn->r = z__1.r, sn->i = z__1.i; - if (count != 0) { - if (count > 0) { - i__1 = count; - for (i__ = 1; i__ <= i__1; ++i__) { - z__1.r = safmx2 * r__->r, z__1.i = safmx2 * r__->i; - r__->r = z__1.r, r__->i = z__1.i; - } - } else { - i__1 = -count; - for (i__ = 1; i__ <= i__1; ++i__) { - z__1.r = safmn2 * r__->r, z__1.i = safmn2 * r__->i; - r__->r = z__1.r, r__->i = z__1.i; - } - } - } - } -return TCL_OK; -} /* zlartg_ */ -static /* Subroutine */ int dlaed6_ (Tcl_Interp *interp, integer *kniter, logical *orgati, doublereal * rho, doublereal *d__, doublereal *z__, doublereal *finit, doublereal * tau, integer *info) -{ - integer i__1; - doublereal d__1, d__2, d__3, d__4; - double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *); - doublereal a, b, c__, f; - integer 
i__; - doublereal fc, df, ddf, lbd, eta, ubd, eps, base; - integer iter; - doublereal temp, temp1, temp2, temp3, temp4; - logical scale; - integer niter; - doublereal small1, small2, sminv1, sminv2; - doublereal dscale[3], sclfac, zscale[3], erretm, sclinv; @@ -63494,242 +66445,273 @@ static /* Subroutine */ int dlaed6_ (Tcl_Interp *interp, integer *kniter, logica + --z__; + if (*dmin__ <= 0.) { + *tau = -(*dmin__); + *ttype = -1; +return TCL_OK; + } + nn = (*n0 << 2) + *pp; + if (*n0in == *n0) { + if (*dmin__ == *dn || *dmin__ == *dn1) { + b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]); + b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]); + a2 = z__[nn - 7] + z__[nn - 5]; + if (*dmin__ == *dn && *dmin1 == *dn1) { + gap2 = *dmin2 - a2 - *dmin2 * .25; + if (gap2 > 0. && gap2 > b2) { + gap1 = a2 - *dn - b2 / gap2 * b2; + } else { + gap1 = a2 - *dn - (b1 + b2); + } + if (gap1 > 0. && gap1 > b1) { + d__1 = *dn - b1 / gap1 * b1, d__2 = *dmin__ * .5; + s = max(d__1,d__2); + *ttype = -2; + } else { + s = 0.; + if (*dn > b1) { + s = *dn - b1; + } + if (a2 > b1 + b2) { + d__1 = s, d__2 = a2 - (b1 + b2); + s = min(d__1,d__2); + } + d__1 = s, d__2 = *dmin__ * .333; + s = max(d__1,d__2); + *ttype = -3; + } + } else { + *ttype = -4; + s = *dmin__ * .25; + if (*dmin__ == *dn) { + gam = *dn; + a2 = 0.; + if (z__[nn - 5] > z__[nn - 7]) { +return TCL_OK; + } + b2 = z__[nn - 5] / z__[nn - 7]; + np = nn - 9; + } else { + np = nn - (*pp << 1); + b2 = z__[np - 2]; + gam = *dn1; + if (z__[np - 4] > z__[np - 2]) { +return TCL_OK; + } + a2 = z__[np - 4] / z__[np - 2]; + if (z__[nn - 9] > z__[nn - 11]) { +return TCL_OK; + } + b2 = z__[nn - 9] / z__[nn - 11]; + np = nn - 13; + } - --z__; - --d__; - *info = 0; + a2 += b2; + i__1 = (*i0 << 2) - 1 + *pp; + for (i4 = np; i4 >= i__1; i4 += -4) { + if (b2 == 0.) { + goto L20; + } + b1 = b2; + if (z__[i4] > z__[i4 - 2]) { +return TCL_OK; + } + b2 *= z__[i4] / z__[i4 - 2]; + a2 += b2; + if (max(b2,b1) * 100. 
< a2 || .563 < a2) { + goto L20; + } + } +L20: + a2 *= 1.05; - if (*orgati) { - lbd = d__[2]; - ubd = d__[3]; - } else { - lbd = d__[1]; - ubd = d__[2]; - } - if (*finit < 0.) { - lbd = 0.; - } else { - ubd = 0.; - } - niter = 1; - *tau = 0.; - if (*kniter == 2) { - if (*orgati) { - temp = (d__[3] - d__[2]) / 2.; - c__ = *rho + z__[1] / (d__[1] - d__[2] - temp); - a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3]; - b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2]; - } else { - temp = (d__[1] - d__[2]) / 2.; - c__ = *rho + z__[3] / (d__[3] - d__[2] - temp); - a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2]; - b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1]; - } - d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__); - temp = max(d__1,d__2); - a /= temp; - b /= temp; - c__ /= temp; - if (c__ == 0.) { - *tau = b / a; - } else if (a <= 0.) { - *tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( - c__ * 2.); - } else { - *tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)) - )); - } - if (*tau < lbd || *tau > ubd) { - *tau = (lbd + ubd) / 2.; - } - if (d__[1] == *tau || d__[2] == *tau || d__[3] == *tau) { - *tau = 0.; - } else { - temp = *finit + *tau * z__[1] / (d__[1] * (d__[1] - *tau)) + *tau - * z__[2] / (d__[2] * (d__[2] - *tau)) + *tau * z__[3] / ( - d__[3] * (d__[3] - *tau)); - if (temp <= 0.) { - lbd = *tau; - } else { - ubd = *tau; - } - if (abs(*finit) <= abs(temp)) { - *tau = 0.; + if (a2 < .563) { + s = gam * (1. - sqrt(a2)) / (a2 + 1.); + } } - } - } - - + } else if (*dmin__ == *dn2) { - eps = dlamch_("Epsilon"); - base = dlamch_("Base"); - i__1 = (integer) (log(dlamch_("SafMin")) / log(base) / 3.); - small1 = pow_di(&base, &i__1); - sminv1 = 1. 
/ small1; - small2 = small1 * small1; - sminv2 = sminv1 * sminv1; + *ttype = -5; + s = *dmin__ * .25; - if (*orgati) { - d__3 = (d__1 = d__[2] - *tau, abs(d__1)), d__4 = (d__2 = d__[3] - * - tau, abs(d__2)); - temp = min(d__3,d__4); - } else { - d__3 = (d__1 = d__[1] - *tau, abs(d__1)), d__4 = (d__2 = d__[2] - * - tau, abs(d__2)); - temp = min(d__3,d__4); - } - scale = FALSE_; - if (temp <= small1) { - scale = TRUE_; - if (temp <= small2) { + np = nn - (*pp << 1); + b1 = z__[np - 2]; + b2 = z__[np - 6]; + gam = *dn2; + if (z__[np - 8] > b2 || z__[np - 4] > b1) { +return TCL_OK; + } + a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.); - sclfac = sminv2; - sclinv = small2; - } else { + if (*n0 - *i0 > 2) { + b2 = z__[nn - 13] / z__[nn - 15]; + a2 += b2; + i__1 = (*i0 << 2) - 1 + *pp; + for (i4 = nn - 17; i4 >= i__1; i4 += -4) { + if (b2 == 0.) { + goto L40; + } + b1 = b2; + if (z__[i4] > z__[i4 - 2]) { +return TCL_OK; + } + b2 *= z__[i4] / z__[i4 - 2]; + a2 += b2; + if (max(b2,b1) * 100. < a2 || .563 < a2) { + goto L40; + } + } +L40: + a2 *= 1.05; + } - sclfac = sminv1; - sclinv = small1; - } + if (a2 < .563) { + s = gam * (1. - sqrt(a2)) / (a2 + 1.); + } + } else { - for (i__ = 1; i__ <= 3; ++i__) { - dscale[i__ - 1] = d__[i__] * sclfac; - zscale[i__ - 1] = z__[i__] * sclfac; + if (*ttype == -6) { + *g += (1. - *g) * .333; + } else if (*ttype == -18) { + *g = .083250000000000005; + } else { + *g = .25; + } + s = *g * *dmin__; + *ttype = -6; } - *tau *= sclfac; - lbd *= sclfac; - ubd *= sclfac; - } else { + } else if (*n0in == *n0 + 1) { - for (i__ = 1; i__ <= 3; ++i__) { - dscale[i__ - 1] = d__[i__]; - zscale[i__ - 1] = z__[i__]; - } - } - fc = 0.; - df = 0.; - ddf = 0.; - for (i__ = 1; i__ <= 3; ++i__) { - temp = 1. 
/ (dscale[i__ - 1] - *tau); - temp1 = zscale[i__ - 1] * temp; - temp2 = temp1 * temp; - temp3 = temp2 * temp; - fc += temp1 / dscale[i__ - 1]; - df += temp2; - ddf += temp3; - } - f = *finit + *tau * fc; + if (*dmin1 == *dn1 && *dmin2 == *dn2) { - if (abs(f) <= 0.) { - goto L60; - } - if (f <= 0.) { - lbd = *tau; - } else { - ubd = *tau; - } + *ttype = -7; + s = *dmin1 * .333; + if (z__[nn - 5] > z__[nn - 7]) { +return TCL_OK; + } + b1 = z__[nn - 5] / z__[nn - 7]; + b2 = b1; + if (b2 == 0.) { + goto L60; + } + i__1 = (*i0 << 2) - 1 + *pp; + for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) { + a2 = b1; + if (z__[i4] > z__[i4 - 2]) { +return TCL_OK; + } + b1 *= z__[i4] / z__[i4 - 2]; + b2 += b1; + if (max(b1,a2) * 100. < b2) { + goto L60; + } + } +L60: + b2 = sqrt(b2 * 1.05); + d__1 = b2; + a2 = *dmin1 / (d__1 * d__1 + 1.); + gap2 = *dmin2 * .5 - a2; + if (gap2 > 0. && gap2 > b2 * a2) { + d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2); + s = max(d__1,d__2); + } else { + d__1 = s, d__2 = a2 * (1. - b2 * 1.01); + s = max(d__1,d__2); + *ttype = -8; + } + } else { + s = *dmin1 * .25; + if (*dmin1 == *dn1) { + s = *dmin1 * .5; + } + *ttype = -9; + } + } else if (*n0in == *n0 + 2) { - iter = niter + 1; - for (niter = iter; niter <= 40; ++niter) { - if (*orgati) { - temp1 = dscale[1] - *tau; - temp2 = dscale[2] - *tau; - } else { - temp1 = dscale[0] - *tau; - temp2 = dscale[1] - *tau; - } - a = (temp1 + temp2) * f - temp1 * temp2 * df; - b = temp1 * temp2 * f; - c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf; - d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__); - temp = max(d__1,d__2); - a /= temp; - b /= temp; - c__ /= temp; - if (c__ == 0.) { - eta = b / a; - } else if (a <= 0.) { - eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ - * 2.); + if (*dmin2 == *dn2 && z__[nn - 5] * 2. 
< z__[nn - 7]) { + *ttype = -10; + s = *dmin2 * .333; + if (z__[nn - 5] > z__[nn - 7]) { +return TCL_OK; + } + b1 = z__[nn - 5] / z__[nn - 7]; + b2 = b1; + if (b2 == 0.) { + goto L80; + } + i__1 = (*i0 << 2) - 1 + *pp; + for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) { + if (z__[i4] > z__[i4 - 2]) { +return TCL_OK; + } + b1 *= z__[i4] / z__[i4 - 2]; + b2 += b1; + if (b1 * 100. < b2) { + goto L80; + } + } +L80: + b2 = sqrt(b2 * 1.05); + d__1 = b2; + a2 = *dmin2 / (d__1 * d__1 + 1.); + gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[ + nn - 9]) - a2; + if (gap2 > 0. && gap2 > b2 * a2) { + d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2); + s = max(d__1,d__2); + } else { + d__1 = s, d__2 = a2 * (1. - b2 * 1.01); + s = max(d__1,d__2); + } } else { - eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) - ); - } - if (f * eta >= 0.) { - eta = -f / df; + s = *dmin2 * .25; + *ttype = -11; } + } else if (*n0in > *n0 + 2) { - *tau += eta; - if (*tau < lbd || *tau > ubd) { - *tau = (lbd + ubd) / 2.; - } - fc = 0.; - erretm = 0.; - df = 0.; - ddf = 0.; - for (i__ = 1; i__ <= 3; ++i__) { - temp = 1. / (dscale[i__ - 1] - *tau); - temp1 = zscale[i__ - 1] * temp; - temp2 = temp1 * temp; - temp3 = temp2 * temp; - temp4 = temp1 / dscale[i__ - 1]; - fc += temp4; - erretm += abs(temp4); - df += temp2; - ddf += temp3; - } - f = *finit + *tau * fc; - erretm = (abs(*finit) + abs(*tau) * erretm) * 8. + abs(*tau) * df; - if (abs(f) <= eps * erretm) { - goto L60; - } - if (f <= 0.) 
{ - lbd = *tau; - } else { - ubd = *tau; - } + s = 0.; + *ttype = -12; } - *info = 1; -L60: - - if (scale) { - *tau *= sclinv; - } + *tau = s; return TCL_OK; -} /* dlaed6_ */ -static /* Subroutine */ int dlasd5_ (Tcl_Interp *interp, integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dsigma, doublereal * work) +} /* dlasq4_ */ +static /* Subroutine */ int dlasq5_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *tau, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2, logical *ieee) { - doublereal d__1; + integer i__1; + doublereal d__1, d__2; - double sqrt(doublereal); + doublereal d__; + integer j4, j4p2; + doublereal emin, temp; - doublereal b, c__, w, del, tau, delsq; @@ -63752,101 +66734,147 @@ static /* Subroutine */ int dlasd5_ (Tcl_Interp *interp, integer *i__, doublerea - --work; - --delta; --z__; - --d__; - - del = d__[2] - d__[1]; - delsq = del * (d__[2] + d__[1]); - if (*i__ == 1) { - w = *rho * 4. * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.) - z__[1] * - z__[1] / (d__[1] * 3. + d__[2])) / del + 1.; - if (w > 0.) { - b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); - c__ = *rho * z__[1] * z__[1] * delsq; + if (*n0 - *i0 - 1 <= 0) { +return TCL_OK; + } + j4 = (*i0 << 2) + *pp - 3; + emin = z__[j4 + 4]; + d__ = z__[j4] - *tau; + *dmin__ = d__; + *dmin1 = -z__[j4]; - tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1)))); + if (*ieee) { - tau /= d__[1] + sqrt(d__[1] * d__[1] + tau); - *dsigma = d__[1] + tau; - delta[1] = -tau; - delta[2] = del - tau; - work[1] = d__[1] * 2. 
+ tau; - work[2] = d__[1] + tau + d__[2]; + if (*pp == 0) { + i__1 = *n0 - 3 << 2; + for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { + z__[j4 - 2] = d__ + z__[j4 - 1]; + temp = z__[j4 + 1] / z__[j4 - 2]; + d__ = d__ * temp - *tau; + *dmin__ = min(*dmin__,d__); + z__[j4] = z__[j4 - 1] * temp; + d__1 = z__[j4]; + emin = min(d__1,emin); + } } else { - b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); - c__ = *rho * z__[2] * z__[2] * delsq; + i__1 = *n0 - 3 << 2; + for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { + z__[j4 - 3] = d__ + z__[j4]; + temp = z__[j4 + 2] / z__[j4 - 3]; + d__ = d__ * temp - *tau; + *dmin__ = min(*dmin__,d__); + z__[j4 - 1] = z__[j4] * temp; + d__1 = z__[j4 - 1]; + emin = min(d__1,emin); + } + } - if (b > 0.) { - tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.)); - } else { - tau = (b - sqrt(b * b + c__ * 4.)) / 2.; - } + *dnm2 = d__; + *dmin2 = *dmin__; + j4 = (*n0 - 2 << 2) - *pp; + j4p2 = j4 + (*pp << 1) - 1; + z__[j4 - 2] = *dnm2 + z__[j4p2]; + z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); + *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau; + *dmin__ = min(*dmin__,*dnm1); + *dmin1 = *dmin__; + j4 += 4; + j4p2 = j4 + (*pp << 1) - 1; + z__[j4 - 2] = *dnm1 + z__[j4p2]; + z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); + *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau; + *dmin__ = min(*dmin__,*dn); - tau /= d__[2] + sqrt((d__1 = d__[2] * d__[2] + tau, abs(d__1))); - *dsigma = d__[2] + tau; - delta[1] = -(del + tau); - delta[2] = -tau; - work[1] = d__[1] + tau + d__[2]; - work[2] = d__[2] * 2. + tau; - } } else { - b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); - c__ = *rho * z__[2] * z__[2] * delsq; + if (*pp == 0) { + i__1 = *n0 - 3 << 2; + for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { + z__[j4 - 2] = d__ + z__[j4 - 1]; + if (d__ < 0.) 
{ +return TCL_OK; + } else { + z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]); + d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau; + } + *dmin__ = min(*dmin__,d__); + d__1 = emin, d__2 = z__[j4]; + emin = min(d__1,d__2); + } + } else { + i__1 = *n0 - 3 << 2; + for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { + z__[j4 - 3] = d__ + z__[j4]; + if (d__ < 0.) { +return TCL_OK; + } else { + z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]); + d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau; + } + *dmin__ = min(*dmin__,d__); + d__1 = emin, d__2 = z__[j4 - 1]; + emin = min(d__1,d__2); + } + } - if (b > 0.) { - tau = (b + sqrt(b * b + c__ * 4.)) / 2.; + *dnm2 = d__; + *dmin2 = *dmin__; + j4 = (*n0 - 2 << 2) - *pp; + j4p2 = j4 + (*pp << 1) - 1; + z__[j4 - 2] = *dnm2 + z__[j4p2]; + if (*dnm2 < 0.) { +return TCL_OK; } else { - tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.)); + z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); + *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau; } + *dmin__ = min(*dmin__,*dnm1); + *dmin1 = *dmin__; + j4 += 4; + j4p2 = j4 + (*pp << 1) - 1; + z__[j4 - 2] = *dnm1 + z__[j4p2]; + if (*dnm1 < 0.) { +return TCL_OK; + } else { + z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); + *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau; + } + *dmin__ = min(*dmin__,*dn); - tau /= d__[2] + sqrt(d__[2] * d__[2] + tau); - *dsigma = d__[2] + tau; - delta[1] = -(del + tau); - delta[2] = -tau; - work[1] = d__[1] + tau + d__[2]; - work[2] = d__[2] * 2. 
+ tau; } + + z__[j4 + 2] = *dn; + z__[(*n0 << 2) - *pp] = emin; return TCL_OK; -} /* dlasd5_ */ -static /* Subroutine */ int dlasy2_ (Tcl_Interp *interp, logical *ltranl, logical *ltranr, integer *isgn, integer *n1, integer *n2, doublereal *tl, integer *ldtl, doublereal * tr, integer *ldtr, doublereal *b, integer *ldb, doublereal *scale, doublereal *x, integer *ldx, doublereal *xnorm, integer *info) +} /* dlasq5_ */ +static /* Subroutine */ int dlasq6_ (Tcl_Interp *interp, integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2) { + integer i__1; + doublereal d__1, d__2; + + doublereal d__; + integer j4, j4p2; + doublereal emin, temp; + doublereal safmin; + + + + + - static integer locu12[4] = { 3,4,1,2 }; - static integer locl21[4] = { 2,1,4,3 }; - static integer locu22[4] = { 4,3,2,1 }; - static logical xswpiv[4] = { FALSE_,FALSE_,TRUE_,TRUE_ }; - static logical bswpiv[4] = { FALSE_,TRUE_,FALSE_,TRUE_ }; - integer b_dim1, b_offset, tl_dim1, tl_offset, tr_dim1, tr_offset, x_dim1, - x_offset; - doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8; - integer i__, j, k; - doublereal x2[2], l21, u11, u12; - integer ip, jp; - doublereal u22, t16[16] /* was [4][4] */, gam, bet, eps, sgn, tmp[4], - tau1, btmp[4], smin; - integer ipiv; - doublereal temp; - integer jpiv[4]; - doublereal xmax; - integer ipsv, jpsv; - logical bswap; - logical xswap; - doublereal smlnum; @@ -63860,905 +66888,467 @@ static /* Subroutine */ int dlasy2_ (Tcl_Interp *interp, logical *ltranl, logica + --z__; + + if (*n0 - *i0 - 1 <= 0) { +return TCL_OK; + } + safmin = dlamch_("Safe minimum"); + j4 = (*i0 << 2) + *pp - 3; + emin = z__[j4 + 4]; + d__ = z__[j4]; + *dmin__ = d__; + if (*pp == 0) { + i__1 = *n0 - 3 << 2; + for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { + z__[j4 - 2] = d__ + z__[j4 - 1]; + if (z__[j4 - 2] == 0.) 
{ + z__[j4] = 0.; + d__ = z__[j4 + 1]; + *dmin__ = d__; + emin = 0.; + } else if (safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4 + - 2] < z__[j4 + 1]) { + temp = z__[j4 + 1] / z__[j4 - 2]; + z__[j4] = z__[j4 - 1] * temp; + d__ *= temp; + } else { + z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]); + d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]); + } + *dmin__ = min(*dmin__,d__); + d__1 = emin, d__2 = z__[j4]; + emin = min(d__1,d__2); + } + } else { + i__1 = *n0 - 3 << 2; + for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { + z__[j4 - 3] = d__ + z__[j4]; + if (z__[j4 - 3] == 0.) { + z__[j4 - 1] = 0.; + d__ = z__[j4 + 2]; + *dmin__ = d__; + emin = 0.; + } else if (safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4 + - 3] < z__[j4 + 2]) { + temp = z__[j4 + 2] / z__[j4 - 3]; + z__[j4 - 1] = z__[j4] * temp; + d__ *= temp; + } else { + z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]); + d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]); + } + *dmin__ = min(*dmin__,d__); + d__1 = emin, d__2 = z__[j4 - 1]; + emin = min(d__1,d__2); + } + } + *dnm2 = d__; + *dmin2 = *dmin__; + j4 = (*n0 - 2 << 2) - *pp; + j4p2 = j4 + (*pp << 1) - 1; + z__[j4 - 2] = *dnm2 + z__[j4p2]; + if (z__[j4 - 2] == 0.) { + z__[j4] = 0.; + *dnm1 = z__[j4p2 + 2]; + *dmin__ = *dnm1; + emin = 0.; + } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < + z__[j4p2 + 2]) { + temp = z__[j4p2 + 2] / z__[j4 - 2]; + z__[j4] = z__[j4p2] * temp; + *dnm1 = *dnm2 * temp; + } else { + z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); + *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]); + } + *dmin__ = min(*dmin__,*dnm1); + *dmin1 = *dmin__; + j4 += 4; + j4p2 = j4 + (*pp << 1) - 1; + z__[j4 - 2] = *dnm1 + z__[j4p2]; + if (z__[j4 - 2] == 0.) 
{ + z__[j4] = 0.; + *dn = z__[j4p2 + 2]; + *dmin__ = *dn; + emin = 0.; + } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < + z__[j4p2 + 2]) { + temp = z__[j4p2 + 2] / z__[j4 - 2]; + z__[j4] = z__[j4p2] * temp; + *dn = *dnm1 * temp; + } else { + z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); + *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]); + } + *dmin__ = min(*dmin__,*dn); + z__[j4 + 2] = *dn; + z__[(*n0 << 2) - *pp] = emin; +return TCL_OK; +} /* dlasq6_ */ +static /* Subroutine */ int dlaed6_ (Tcl_Interp *interp, integer *kniter, logical *orgati, doublereal * rho, doublereal *d__, doublereal *z__, doublereal *finit, doublereal * tau, integer *info) +{ + integer i__1; + doublereal d__1, d__2, d__3, d__4; + double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *); + doublereal a, b, c__, f; + integer i__; + doublereal fc, df, ddf, lbd, eta, ubd, eps, base; + integer iter; + doublereal temp, temp1, temp2, temp3, temp4; + logical scale; + integer niter; + doublereal small1, small2, sminv1, sminv2; + doublereal dscale[3], sclfac, zscale[3], erretm, sclinv; - tl_dim1 = *ldtl; - tl_offset = 1 + tl_dim1; - tl -= tl_offset; - tr_dim1 = *ldtr; - tr_offset = 1 + tr_dim1; - tr -= tr_offset; - b_dim1 = *ldb; - b_offset = 1 + b_dim1; - b -= b_offset; - x_dim1 = *ldx; - x_offset = 1 + x_dim1; - x -= x_offset; - *info = 0; - if (*n1 == 0 || *n2 == 0) { -return TCL_OK; - } - eps = dlamch_("P"); - smlnum = dlamch_("S") / eps; - sgn = (doublereal) (*isgn); - k = *n1 + *n1 + *n2 - 2; - switch (k) { - case 1: goto L10; - case 2: goto L20; - case 3: goto L30; - case 4: goto L50; - } -L10: - tau1 = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; - bet = abs(tau1); - if (bet <= smlnum) { - tau1 = smlnum; - bet = smlnum; - *info = 1; - } - *scale = 1.; - gam = (d__1 = b[b_dim1 + 1], abs(d__1)); - if (smlnum * gam > bet) { - *scale = 1. 
/ gam; - } - x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / tau1; - *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)); -return TCL_OK; -L20: - d__7 = (d__1 = tl[tl_dim1 + 1], abs(d__1)), d__8 = (d__2 = tr[tr_dim1 + 1] - , abs(d__2)), d__7 = max(d__7,d__8), d__8 = (d__3 = tr[(tr_dim1 << - 1) + 1], abs(d__3)), d__7 = max(d__7,d__8), d__8 = (d__4 = tr[ - tr_dim1 + 2], abs(d__4)), d__7 = max(d__7,d__8), d__8 = (d__5 = - tr[(tr_dim1 << 1) + 2], abs(d__5)); - d__6 = eps * max(d__7,d__8); - smin = max(d__6,smlnum); - tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; - tmp[3] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2]; - if (*ltranr) { - tmp[1] = sgn * tr[tr_dim1 + 2]; - tmp[2] = sgn * tr[(tr_dim1 << 1) + 1]; - } else { - tmp[1] = sgn * tr[(tr_dim1 << 1) + 1]; - tmp[2] = sgn * tr[tr_dim1 + 2]; - } - btmp[0] = b[b_dim1 + 1]; - btmp[1] = b[(b_dim1 << 1) + 1]; - goto L40; -L30: - d__7 = (d__1 = tr[tr_dim1 + 1], abs(d__1)), d__8 = (d__2 = tl[tl_dim1 + 1] - , abs(d__2)), d__7 = max(d__7,d__8), d__8 = (d__3 = tl[(tl_dim1 << - 1) + 1], abs(d__3)), d__7 = max(d__7,d__8), d__8 = (d__4 = tl[ - tl_dim1 + 2], abs(d__4)), d__7 = max(d__7,d__8), d__8 = (d__5 = - tl[(tl_dim1 << 1) + 2], abs(d__5)); - d__6 = eps * max(d__7,d__8); - smin = max(d__6,smlnum); - tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; - tmp[3] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1]; - if (*ltranl) { - tmp[1] = tl[(tl_dim1 << 1) + 1]; - tmp[2] = tl[tl_dim1 + 2]; - } else { - tmp[1] = tl[tl_dim1 + 2]; - tmp[2] = tl[(tl_dim1 << 1) + 1]; - } - btmp[0] = b[b_dim1 + 1]; - btmp[1] = b[b_dim1 + 2]; -L40: - ipiv = idamax_(&dlasy2_c__4, tmp, &dlasy2_c__1); - u11 = tmp[ipiv - 1]; - if (abs(u11) <= smin) { - *info = 1; - u11 = smin; - } - u12 = tmp[locu12[ipiv - 1] - 1]; - l21 = tmp[locl21[ipiv - 1] - 1] / u11; - u22 = tmp[locu22[ipiv - 1] - 1] - u12 * l21; - xswap = xswpiv[ipiv - 1]; - bswap = bswpiv[ipiv - 1]; - if (abs(u22) <= smin) { - *info = 1; - u22 = smin; - } - if (bswap) { - temp = btmp[1]; - btmp[1] = btmp[0] - 
l21 * temp; - btmp[0] = temp; - } else { - btmp[1] -= l21 * btmp[0]; - } - *scale = 1.; - if (smlnum * 2. * abs(btmp[1]) > abs(u22) || smlnum * 2. * abs(btmp[0]) > - abs(u11)) { - d__1 = abs(btmp[0]), d__2 = abs(btmp[1]); - *scale = .5 / max(d__1,d__2); - btmp[0] *= *scale; - btmp[1] *= *scale; - } - x2[1] = btmp[1] / u22; - x2[0] = btmp[0] / u11 - u12 / u11 * x2[1]; - if (xswap) { - temp = x2[1]; - x2[1] = x2[0]; - x2[0] = temp; - } - x[x_dim1 + 1] = x2[0]; - if (*n1 == 1) { - x[(x_dim1 << 1) + 1] = x2[1]; - *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[(x_dim1 << 1) - + 1], abs(d__2)); - } else { - x[x_dim1 + 2] = x2[1]; - d__3 = (d__1 = x[x_dim1 + 1], abs(d__1)), d__4 = (d__2 = x[x_dim1 + 2] - , abs(d__2)); - *xnorm = max(d__3,d__4); - } -return TCL_OK; + --z__; + --d__; -L50: - d__5 = (d__1 = tr[tr_dim1 + 1], abs(d__1)), d__6 = (d__2 = tr[(tr_dim1 << - 1) + 1], abs(d__2)), d__5 = max(d__5,d__6), d__6 = (d__3 = tr[ - tr_dim1 + 2], abs(d__3)), d__5 = max(d__5,d__6), d__6 = (d__4 = - tr[(tr_dim1 << 1) + 2], abs(d__4)); - smin = max(d__5,d__6); - d__5 = smin, d__6 = (d__1 = tl[tl_dim1 + 1], abs(d__1)), d__5 = max(d__5, - d__6), d__6 = (d__2 = tl[(tl_dim1 << 1) + 1], abs(d__2)), d__5 = - max(d__5,d__6), d__6 = (d__3 = tl[tl_dim1 + 2], abs(d__3)), d__5 = - max(d__5,d__6), d__6 = (d__4 = tl[(tl_dim1 << 1) + 2], abs(d__4)) - ; - smin = max(d__5,d__6); - d__1 = eps * smin; - smin = max(d__1,smlnum); - btmp[0] = 0.; - if (dcopy_(interp, &dlasy2_c__16, btmp, &dlasy2_c__0, t16, &dlasy2_c__1)!=TCL_OK) { return TCL_ERROR; } + *info = 0; - t16[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; - t16[5] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1]; - t16[10] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2]; - t16[15] = tl[(tl_dim1 << 1) + 2] + sgn * tr[(tr_dim1 << 1) + 2]; - if (*ltranl) { - t16[4] = tl[tl_dim1 + 2]; - t16[1] = tl[(tl_dim1 << 1) + 1]; - t16[14] = tl[tl_dim1 + 2]; - t16[11] = tl[(tl_dim1 << 1) + 1]; + if (*orgati) { + lbd = d__[2]; + ubd = d__[3]; } 
else { - t16[4] = tl[(tl_dim1 << 1) + 1]; - t16[1] = tl[tl_dim1 + 2]; - t16[14] = tl[(tl_dim1 << 1) + 1]; - t16[11] = tl[tl_dim1 + 2]; + lbd = d__[1]; + ubd = d__[2]; } - if (*ltranr) { - t16[8] = sgn * tr[(tr_dim1 << 1) + 1]; - t16[13] = sgn * tr[(tr_dim1 << 1) + 1]; - t16[2] = sgn * tr[tr_dim1 + 2]; - t16[7] = sgn * tr[tr_dim1 + 2]; + if (*finit < 0.) { + lbd = 0.; } else { - t16[8] = sgn * tr[tr_dim1 + 2]; - t16[13] = sgn * tr[tr_dim1 + 2]; - t16[2] = sgn * tr[(tr_dim1 << 1) + 1]; - t16[7] = sgn * tr[(tr_dim1 << 1) + 1]; + ubd = 0.; } - btmp[0] = b[b_dim1 + 1]; - btmp[1] = b[b_dim1 + 2]; - btmp[2] = b[(b_dim1 << 1) + 1]; - btmp[3] = b[(b_dim1 << 1) + 2]; - - - for (i__ = 1; i__ <= 3; ++i__) { - xmax = 0.; - for (ip = i__; ip <= 4; ++ip) { - for (jp = i__; jp <= 4; ++jp) { - if ((d__1 = t16[ip + (jp << 2) - 5], abs(d__1)) >= xmax) { - xmax = (d__1 = t16[ip + (jp << 2) - 5], abs(d__1)); - ipsv = ip; - jpsv = jp; - } - } - } - if (ipsv != i__) { - if (dswap_(interp, &dlasy2_c__4, &t16[ipsv - 1], &dlasy2_c__4, &t16[i__ - 1], &dlasy2_c__4)!=TCL_OK) { return TCL_ERROR; } - temp = btmp[i__ - 1]; - btmp[i__ - 1] = btmp[ipsv - 1]; - btmp[ipsv - 1] = temp; + niter = 1; + *tau = 0.; + if (*kniter == 2) { + if (*orgati) { + temp = (d__[3] - d__[2]) / 2.; + c__ = *rho + z__[1] / (d__[1] - d__[2] - temp); + a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3]; + b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2]; + } else { + temp = (d__[1] - d__[2]) / 2.; + c__ = *rho + z__[3] / (d__[3] - d__[2] - temp); + a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2]; + b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1]; } - if (jpsv != i__) { - if (dswap_(interp, &dlasy2_c__4, &t16[(jpsv << 2) - 4], &dlasy2_c__1, &t16[(i__ << 2) - 4], &dlasy2_c__1)!=TCL_OK) { return TCL_ERROR; } - - + d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__); + temp = max(d__1,d__2); + a /= temp; + b /= temp; + c__ /= temp; + if (c__ == 0.) { + *tau = b / a; + } else if (a <= 0.) 
{ + *tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( + c__ * 2.); + } else { + *tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)) + )); } - jpiv[i__ - 1] = jpsv; - if ((d__1 = t16[i__ + (i__ << 2) - 5], abs(d__1)) < smin) { - *info = 1; - t16[i__ + (i__ << 2) - 5] = smin; + if (*tau < lbd || *tau > ubd) { + *tau = (lbd + ubd) / 2.; } - for (j = i__ + 1; j <= 4; ++j) { - t16[j + (i__ << 2) - 5] /= t16[i__ + (i__ << 2) - 5]; - btmp[j - 1] -= t16[j + (i__ << 2) - 5] * btmp[i__ - 1]; - for (k = i__ + 1; k <= 4; ++k) { - t16[j + (k << 2) - 5] -= t16[j + (i__ << 2) - 5] * t16[i__ + ( - k << 2) - 5]; + if (d__[1] == *tau || d__[2] == *tau || d__[3] == *tau) { + *tau = 0.; + } else { + temp = *finit + *tau * z__[1] / (d__[1] * (d__[1] - *tau)) + *tau + * z__[2] / (d__[2] * (d__[2] - *tau)) + *tau * z__[3] / ( + d__[3] * (d__[3] - *tau)); + if (temp <= 0.) { + lbd = *tau; + } else { + ubd = *tau; + } + if (abs(*finit) <= abs(temp)) { + *tau = 0.; } } } - if (abs(t16[15]) < smin) { - t16[15] = smin; - } - *scale = 1.; - if (smlnum * 8. * abs(btmp[0]) > abs(t16[0]) || smlnum * 8. * abs(btmp[1]) - > abs(t16[5]) || smlnum * 8. * abs(btmp[2]) > abs(t16[10]) || - smlnum * 8. * abs(btmp[3]) > abs(t16[15])) { - d__1 = abs(btmp[0]), d__2 = abs(btmp[1]), d__1 = max(d__1,d__2), d__2 - = abs(btmp[2]), d__1 = max(d__1,d__2), d__2 = abs(btmp[3]); - *scale = .125 / max(d__1,d__2); - btmp[0] *= *scale; - btmp[1] *= *scale; - btmp[2] *= *scale; - btmp[3] *= *scale; - } - for (i__ = 1; i__ <= 4; ++i__) { - k = 5 - i__; - temp = 1. 
/ t16[k + (k << 2) - 5]; - tmp[k - 1] = btmp[k - 1] * temp; - for (j = k + 1; j <= 4; ++j) { - tmp[k - 1] -= temp * t16[k + (j << 2) - 5] * tmp[j - 1]; - } - } - for (i__ = 1; i__ <= 3; ++i__) { - if (jpiv[4 - i__ - 1] != 4 - i__) { - temp = tmp[4 - i__ - 1]; - tmp[4 - i__ - 1] = tmp[jpiv[4 - i__ - 1] - 1]; - tmp[jpiv[4 - i__ - 1] - 1] = temp; - } - } - x[x_dim1 + 1] = tmp[0]; - x[x_dim1 + 2] = tmp[1]; - x[(x_dim1 << 1) + 1] = tmp[2]; - x[(x_dim1 << 1) + 2] = tmp[3]; - d__1 = abs(tmp[0]) + abs(tmp[2]), d__2 = abs(tmp[1]) + abs(tmp[3]); - *xnorm = max(d__1,d__2); -return TCL_OK; - - -} /* dlasy2_ */ -static /* Subroutine */ int dlarfx_ (Tcl_Interp *interp, char *side, integer *m, integer *n, doublereal * v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work) -{ - integer c_dim1, c_offset, i__1; - - integer j; - doublereal t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6, v7, - v8, v9, t10, v10, sum; - - + eps = dlamch_("Epsilon"); + base = dlamch_("Base"); + i__1 = (integer) (log(dlamch_("SafMin")) / log(base) / 3.); + small1 = pow_di(&base, &i__1); + sminv1 = 1. / small1; + small2 = small1 * small1; + sminv2 = sminv1 * sminv1; + if (*orgati) { + d__3 = (d__1 = d__[2] - *tau, abs(d__1)), d__4 = (d__2 = d__[3] - * + tau, abs(d__2)); + temp = min(d__3,d__4); + } else { + d__3 = (d__1 = d__[1] - *tau, abs(d__1)), d__4 = (d__2 = d__[2] - * + tau, abs(d__2)); + temp = min(d__3,d__4); + } + scale = FALSE_; + if (temp <= small1) { + scale = TRUE_; + if (temp <= small2) { + sclfac = sminv2; + sclinv = small2; + } else { + sclfac = sminv1; + sclinv = small1; + } + for (i__ = 1; i__ <= 3; ++i__) { + dscale[i__ - 1] = d__[i__] * sclfac; + zscale[i__ - 1] = z__[i__] * sclfac; + } + *tau *= sclfac; + lbd *= sclfac; + ubd *= sclfac; + } else { + for (i__ = 1; i__ <= 3; ++i__) { + dscale[i__ - 1] = d__[i__]; + zscale[i__ - 1] = z__[i__]; + } + } + fc = 0.; + df = 0.; + ddf = 0.; + for (i__ = 1; i__ <= 3; ++i__) { + temp = 1. 
/ (dscale[i__ - 1] - *tau); + temp1 = zscale[i__ - 1] * temp; + temp2 = temp1 * temp; + temp3 = temp2 * temp; + fc += temp1 / dscale[i__ - 1]; + df += temp2; + ddf += temp3; + } + f = *finit + *tau * fc; + if (abs(f) <= 0.) { + goto L60; + } + if (f <= 0.) { + lbd = *tau; + } else { + ubd = *tau; + } - --v; - c_dim1 = *ldc; - c_offset = 1 + c_dim1; - c__ -= c_offset; - --work; - if (*tau == 0.) { -return TCL_OK; - } - if (lsame_(side, "L")) { + iter = niter + 1; + for (niter = iter; niter <= 40; ++niter) { - switch (*m) { - case 1: goto L10; - case 2: goto L30; - case 3: goto L50; - case 4: goto L70; - case 5: goto L90; - case 6: goto L110; - case 7: goto L130; - case 8: goto L150; - case 9: goto L170; - case 10: goto L190; + if (*orgati) { + temp1 = dscale[1] - *tau; + temp2 = dscale[2] - *tau; + } else { + temp1 = dscale[0] - *tau; + temp2 = dscale[1] - *tau; + } + a = (temp1 + temp2) * f - temp1 * temp2 * df; + b = temp1 * temp2 * f; + c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf; + d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__); + temp = max(d__1,d__2); + a /= temp; + b /= temp; + c__ /= temp; + if (c__ == 0.) { + eta = b / a; + } else if (a <= 0.) { + eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ + * 2.); + } else { + eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) + ); + } + if (f * eta >= 0.) { + eta = -f / df; } + *tau += eta; + if (*tau < lbd || *tau > ubd) { + *tau = (lbd + ubd) / 2.; + } - if (dlarf_(interp, side, m, n, &v[1], &dlarfx_c__1, tau, &c__[c_offset], ldc, &work[1])!=TCL_OK) { return TCL_ERROR; } - - goto L410; -L10: - - - t1 = 1. - *tau * v[1] * v[1]; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1]; + fc = 0.; + erretm = 0.; + df = 0.; + ddf = 0.; + for (i__ = 1; i__ <= 3; ++i__) { + temp = 1. 
/ (dscale[i__ - 1] - *tau); + temp1 = zscale[i__ - 1] * temp; + temp2 = temp1 * temp; + temp3 = temp2 * temp; + temp4 = temp1 / dscale[i__ - 1]; + fc += temp4; + erretm += abs(temp4); + df += temp2; + ddf += temp3; } - goto L410; -L30: + f = *finit + *tau * fc; + erretm = (abs(*finit) + abs(*tau) * erretm) * 8. + abs(*tau) * df; + if (abs(f) <= eps * erretm) { + goto L60; + } + if (f <= 0.) { + lbd = *tau; + } else { + ubd = *tau; + } + } + *info = 1; +L60: - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - } - goto L410; -L50: + if (scale) { + *tau *= sclinv; + } +return TCL_OK; - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - } - goto L410; -L70: +} /* dlaed6_ */ +static /* Subroutine */ int dlasd5_ (Tcl_Interp *interp, integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dsigma, doublereal * work) +{ + doublereal d__1; + + double sqrt(doublereal); + doublereal b, c__, w, del, tau, delsq; - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - } - goto L410; -L90: - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 
= v[5]; - t5 = *tau * v5; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - } - goto L410; -L110: - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - c__[j * c_dim1 + 6] -= sum * t6; - } - goto L410; -L130: - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * - c_dim1 + 7]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - c__[j * c_dim1 + 6] -= sum * t6; - c__[j * c_dim1 + 7] -= sum * t7; - } - goto L410; -L150: - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = 
*tau * v7; - v8 = v[8]; - t8 = *tau * v8; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * - c_dim1 + 7] + v8 * c__[j * c_dim1 + 8]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - c__[j * c_dim1 + 6] -= sum * t6; - c__[j * c_dim1 + 7] -= sum * t7; - c__[j * c_dim1 + 8] -= sum * t8; - } - goto L410; -L170: - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - v9 = v[9]; - t9 = *tau * v9; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * - c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j * - c_dim1 + 9]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - c__[j * c_dim1 + 6] -= sum * t6; - c__[j * c_dim1 + 7] -= sum * t7; - c__[j * c_dim1 + 8] -= sum * t8; - c__[j * c_dim1 + 9] -= sum * t9; - } - goto L410; -L190: - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - v9 = v[9]; - t9 = *tau * v9; - v10 = v[10]; - t10 = *tau * v10; - i__1 = *n; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * - c__[j * c_dim1 + 3] + v4 * 
c__[j * c_dim1 + 4] + v5 * c__[ - j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * - c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j * - c_dim1 + 9] + v10 * c__[j * c_dim1 + 10]; - c__[j * c_dim1 + 1] -= sum * t1; - c__[j * c_dim1 + 2] -= sum * t2; - c__[j * c_dim1 + 3] -= sum * t3; - c__[j * c_dim1 + 4] -= sum * t4; - c__[j * c_dim1 + 5] -= sum * t5; - c__[j * c_dim1 + 6] -= sum * t6; - c__[j * c_dim1 + 7] -= sum * t7; - c__[j * c_dim1 + 8] -= sum * t8; - c__[j * c_dim1 + 9] -= sum * t9; - c__[j * c_dim1 + 10] -= sum * t10; - } - goto L410; - } else { - switch (*n) { - case 1: goto L210; - case 2: goto L230; - case 3: goto L250; - case 4: goto L270; - case 5: goto L290; - case 6: goto L310; - case 7: goto L330; - case 8: goto L350; - case 9: goto L370; - case 10: goto L390; - } - if (dlarf_(interp, side, m, n, &v[1], &dlarfx_c__1, tau, &c__[c_offset], ldc, &work[1])!=TCL_OK) { return TCL_ERROR; } - goto L410; -L210: - t1 = 1. - *tau * v[1] * v[1]; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - c__[j + c_dim1] = t1 * c__[j + c_dim1]; - } - goto L410; -L230: - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)]; - c__[j + c_dim1] -= sum * t1; - c__[j + (c_dim1 << 1)] -= sum * t2; - } - goto L410; -L250: + --work; + --delta; + --z__; + --d__; + del = d__[2] - d__[1]; + delsq = del * (d__[2] + d__[1]); + if (*i__ == 1) { + w = *rho * 4. * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.) - z__[1] * + z__[1] / (d__[1] * 3. + d__[2])) / del + 1.; + if (w > 0.) 
{ + b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); + c__ = *rho * z__[1] * z__[1] * delsq; - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * - c__[j + c_dim1 * 3]; - c__[j + c_dim1] -= sum * t1; - c__[j + (c_dim1 << 1)] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - } - goto L410; -L270: - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * - c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)]; - c__[j + c_dim1] -= sum * t1; - c__[j + (c_dim1 << 1)] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + (c_dim1 << 2)] -= sum * t4; - } - goto L410; -L290: + tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1)))); - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * - c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * - c__[j + c_dim1 * 5]; - c__[j + c_dim1] -= sum * t1; - c__[j + (c_dim1 << 1)] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + (c_dim1 << 2)] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - } - goto L410; -L310: + tau /= d__[1] + sqrt(d__[1] * d__[1] + tau); + *dsigma = d__[1] + tau; + delta[1] = -tau; + delta[2] = del - tau; + work[1] = d__[1] * 2. 
+ tau; + work[2] = d__[1] + tau + d__[2]; + } else { + b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); + c__ = *rho * z__[2] * z__[2] * delsq; - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * - c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * - c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6]; - c__[j + c_dim1] -= sum * t1; - c__[j + (c_dim1 << 1)] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + (c_dim1 << 2)] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - c__[j + c_dim1 * 6] -= sum * t6; - } - goto L410; -L330: + if (b > 0.) { + tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.)); + } else { + tau = (b - sqrt(b * b + c__ * 4.)) / 2.; + } - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * - c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * - c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ - j + c_dim1 * 7]; - c__[j + c_dim1] -= sum * t1; - c__[j + (c_dim1 << 1)] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + (c_dim1 << 2)] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - c__[j + c_dim1 * 6] -= sum * t6; - c__[j + c_dim1 * 7] -= sum * t7; + tau /= d__[2] + sqrt((d__1 = d__[2] * d__[2] + tau, abs(d__1))); + *dsigma = d__[2] + tau; + delta[1] = -(del + tau); + delta[2] = -tau; + work[1] = d__[1] + tau + d__[2]; + work[2] = d__[2] * 2. 
+ tau; } - goto L410; -L350: + } else { - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * - c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * - c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ - j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)]; - c__[j + c_dim1] -= sum * t1; - c__[j + (c_dim1 << 1)] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + (c_dim1 << 2)] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - c__[j + c_dim1 * 6] -= sum * t6; - c__[j + c_dim1 * 7] -= sum * t7; - c__[j + (c_dim1 << 3)] -= sum * t8; - } - goto L410; -L370: + b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); + c__ = *rho * z__[2] * z__[2] * delsq; - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - v9 = v[9]; - t9 = *tau * v9; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * - c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * - c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ - j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[ - j + c_dim1 * 9]; - c__[j + c_dim1] -= sum * t1; - c__[j + (c_dim1 << 1)] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + (c_dim1 << 2)] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - c__[j + c_dim1 * 6] -= sum * t6; - c__[j + c_dim1 * 7] -= sum * t7; - c__[j + (c_dim1 << 3)] -= sum * t8; - c__[j + c_dim1 * 9] -= sum * t9; + if (b > 0.) { + tau = (b + sqrt(b * b + c__ * 4.)) / 2.; + } else { + tau = c__ * 2. 
/ (-b + sqrt(b * b + c__ * 4.)); } - goto L410; -L390: - v1 = v[1]; - t1 = *tau * v1; - v2 = v[2]; - t2 = *tau * v2; - v3 = v[3]; - t3 = *tau * v3; - v4 = v[4]; - t4 = *tau * v4; - v5 = v[5]; - t5 = *tau * v5; - v6 = v[6]; - t6 = *tau * v6; - v7 = v[7]; - t7 = *tau * v7; - v8 = v[8]; - t8 = *tau * v8; - v9 = v[9]; - t9 = *tau * v9; - v10 = v[10]; - t10 = *tau * v10; - i__1 = *m; - for (j = 1; j <= i__1; ++j) { - sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * - c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * - c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ - j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[ - j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10]; - c__[j + c_dim1] -= sum * t1; - c__[j + (c_dim1 << 1)] -= sum * t2; - c__[j + c_dim1 * 3] -= sum * t3; - c__[j + (c_dim1 << 2)] -= sum * t4; - c__[j + c_dim1 * 5] -= sum * t5; - c__[j + c_dim1 * 6] -= sum * t6; - c__[j + c_dim1 * 7] -= sum * t7; - c__[j + (c_dim1 << 3)] -= sum * t8; - c__[j + c_dim1 * 9] -= sum * t9; - c__[j + c_dim1 * 10] -= sum * t10; - } - goto L410; + tau /= d__[2] + sqrt(d__[2] * d__[2] + tau); + *dsigma = d__[2] + tau; + delta[1] = -(del + tau); + delta[2] = -tau; + work[1] = d__[1] + tau + d__[2]; + work[2] = d__[2] * 2. 
+ tau; } -L410: return TCL_OK; -} /* dlarfx_ */ +} /* dlasd5_ */ diff --git a/generic/clapack_cutdown.h b/generic/clapack_cutdown.h index cfbff17..a2ad51a 100644 --- a/generic/clapack_cutdown.h +++ b/generic/clapack_cutdown.h @@ -16,4 +16,6 @@ MODULE_SCOPE /* Subroutine */ int dgesv_ (Tcl_Interp *interp, integer *n, intege MODULE_SCOPE /* Subroutine */ int zgesv_ (Tcl_Interp *interp, integer *n, integer *nrhs, doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *b, integer *ldb, integer * info); MODULE_SCOPE /* Subroutine */ int dgesvx_ (Tcl_Interp *interp, char *fact, char *trans, integer *n, integer * nrhs, doublereal *a, integer *lda, doublereal *af, integer *ldaf, integer *ipiv, char *equed, doublereal *r__, doublereal *c__, doublereal *b, integer *ldb, doublereal *x, integer *ldx, doublereal * rcond, doublereal *ferr, doublereal *berr, doublereal *work, integer * iwork, integer *info); MODULE_SCOPE /* Subroutine */ int zgesvx_ (Tcl_Interp *interp, char *fact, char *trans, integer *n, integer * nrhs, doublecomplex *a, integer *lda, doublecomplex *af, integer * ldaf, integer *ipiv, char *equed, doublereal *r__, doublereal *c__, doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx, doublereal *rcond, doublereal *ferr, doublereal *berr, doublecomplex * work, doublereal *rwork, integer *info); +MODULE_SCOPE /* Subroutine */ int dgees_ (Tcl_Interp *interp, char *jobvs, char *sort, L_fp select, integer *n, doublereal *a, integer *lda, integer *sdim, doublereal *wr, doublereal *wi, doublereal *vs, integer *ldvs, doublereal *work, integer *lwork, logical *bwork, integer *info); +MODULE_SCOPE /* Subroutine */ int zgees_ (Tcl_Interp *interp, char *jobvs, char *sort, L_fp select, integer *n, doublecomplex *a, integer *lda, integer *sdim, doublecomplex *w, doublecomplex *vs, integer *ldvs, doublecomplex *work, integer *lwork, doublereal *rwork, logical *bwork, integer *info); diff --git a/generic/nacomplex.c b/generic/nacomplex.c index 5af97b4..90b28eb 
100644 --- a/generic/nacomplex.c +++ b/generic/nacomplex.c @@ -2,8 +2,6 @@ #include #include -/* TODO: These are just stubs to test the different types */ - /* * Functions hndling the Tcl_ObjType Complex */ diff --git a/generic/schur.c b/generic/schur.c new file mode 100644 index 0000000..8bc11a2 --- /dev/null +++ b/generic/schur.c @@ -0,0 +1,182 @@ +#include "schur.h" +#include "clapack_cutdown.h" + +#define MIN(X, Y) ((X)<(Y) ? X : Y) +#define MAX(X, Y) ((X)>(Y) ? X : Y) + +static int doSchur(Tcl_Interp *interp, Tcl_Obj *matrix, Tcl_Obj **Z, Tcl_Obj **T) { + /* Compute Schur decomposition of a matrix. + * Return Schur vectors in Z and Schur form in T, + */ + + /* Convert matrix to VecTcl object */ + NumArrayInfo *info = NumArrayGetInfoFromObj(interp, matrix); + if (!info) { return TCL_ERROR; } + + /* Check that it is a square matrix */ + if (info->nDim != 2) { + /* Could be a scalar. In this case return the trivial + * decomposition */ + if (ISSCALARINFO(info)) { + *T = Tcl_DuplicateObj(matrix); + *Z = Tcl_NewDoubleObj(1.0); + return TCL_OK; + } + + Tcl_SetResult(interp, "Schur decomposition is only defined for square matrix", NULL); + return TCL_ERROR; + } + + + /* get matrix dimensions */ + long int m = info->dims[0]; + long int n = info->dims[1]; + + if (m != n) { + Tcl_SetResult(interp, "Schur decomposition is only defined for square matrix", NULL); + return TCL_ERROR; + } + + char *jobvs = "V"; + char *sort = "N"; + + if (info->type != NumArray_Complex128) { + /* Real-valued matrix, prepare for dgees */ + /* create a column-major copy of matrix + * This also converts an integer matrix to double */ + *T = NumArrayNewMatrixColMaj(NumArray_Float64, m, n); + NumArrayObjCopy(interp, matrix, *T); + + *Z = NumArrayNewMatrixColMaj(NumArray_Float64, m, m); + + /* Extract the raw pointers from the VecTcl objects */ + double *Tptr = NumArrayGetPtrFromObj(interp, *T); + double *Zptr = NumArrayGetPtrFromObj(interp, *Z); + + /* Space to store the eigenvalues */ + 
doublereal *wr = ckalloc(sizeof(doublereal)*n); + doublereal *wi = ckalloc(sizeof(doublereal)*n); + + /* setup workspace arrays */ + integer lwork = 3*n; + doublereal* work=ckalloc(sizeof(doublereal)*lwork); + logical *bwork = NULL; + integer sdim=0; + + /* Leading dimensions of T and Z + * (the Schur vectors are returned in Z) */ + integer ldt = n; + integer ldz = n; + integer info; + +/* Subroutine int dgees_(char *jobvs, char *sort, L_fp select, + * integer *n, doublereal *a, integer *lda, integer *sdim, + * doublereal *wr, doublereal *wi, doublereal *vs, integer *ldvs, + * doublereal *work, integer *lwork, logical *bwork, integer *info) */ + + + /* call out to dgees */ + int errcode=dgees_(interp, jobvs, sort, NULL, + &n, Tptr, &ldt, &sdim, + wr, wi, Zptr, &ldz, + work, &lwork, bwork, &info); + + /* free workspace */ + ckfree(work); + ckfree(wr); ckfree(wi); + + if (errcode != TCL_OK) { + /* release temporary storage for result */ + Tcl_DecrRefCount(*Z); + Tcl_DecrRefCount(*T); + if (errcode > 0) { + RESULTPRINTF(("DGEES failed to converge at eigenvector %d ", info)); + } + return TCL_ERROR; + } + + return TCL_OK; + + } else { + /* Complex matrix, prepare for zgees */ + /* create a column-major copy of matrix + * This also converts an integer matrix to double */ + *T = NumArrayNewMatrixColMaj(NumArray_Complex128, m, n); + NumArrayObjCopy(interp, matrix, *T); + + *Z = NumArrayNewMatrixColMaj(NumArray_Complex128, m, m); + + /* Extract the raw pointers from the VecTcl objects */ + doublecomplex *Tptr = NumArrayGetPtrFromObj(interp, *T); + doublecomplex *Zptr = NumArrayGetPtrFromObj(interp, *Z); + + /* Space to store the eigenvalues */ + doublecomplex *w = ckalloc(sizeof(doublecomplex)*n); + + /* setup workspace arrays */ + integer lwork = 2*n; + doublecomplex *work=ckalloc(sizeof(doublecomplex)*lwork); + doublereal *rwork=ckalloc(sizeof(doublereal)*n); + logical *bwork = NULL; + integer sdim=0; + + /* Leading dimensions of T and Z + * (the Schur vectors are returned in Z) 
*/ + integer ldt = n; + integer ldz = n; + integer info; + + /* Subroutine int zgees_(char *jobvs, char *sort, L_fp select, + * integer *n, doublecomplex *a, integer *lda, integer *sdim, + * doublecomplex *w, doublecomplex *vs, integer *ldvs, + * doublecomplex *work, integer *lwork, doublereal *rwork, logical *bwork, integer *info) */ + + /* call out to zgees */ + int errcode=zgees_(interp, jobvs, sort, NULL, + &n, Tptr, &ldt, &sdim, + w, Zptr, &ldz, + work, &lwork, rwork, bwork, &info); + + /* free workspace */ + ckfree(work); + ckfree(rwork); + ckfree(w); + + if (errcode != TCL_OK) { + /* release temporary storage for result */ + Tcl_DecrRefCount(*Z); + Tcl_DecrRefCount(*T); + if (errcode > 0) { + RESULTPRINTF(("ZGEES failed to converge at eigenvector %d ", info)); + } + return TCL_ERROR; + } + + return TCL_OK; + + } +} + +int NumArraySchurCmd(ClientData dummy, Tcl_Interp *interp, int objc, Tcl_Obj *const *objv) { + if (objc != 2) { + Tcl_WrongNumArgs(interp, 1, objv, "matrix"); + return TCL_ERROR; + } + + Tcl_Obj *matrix = objv[1]; + + Tcl_Obj *Z, *T; + + if (doSchur(interp, matrix, &Z, &T) != TCL_OK) { + return TCL_ERROR; + } + + /* return as list */ + Tcl_Obj *result=Tcl_NewObj(); + Tcl_ListObjAppendElement(interp, result, Z); + Tcl_ListObjAppendElement(interp, result, T); + + Tcl_SetObjResult(interp, result); + return TCL_OK; +} + diff --git a/generic/schur.h b/generic/schur.h new file mode 100644 index 0000000..4750242 --- /dev/null +++ b/generic/schur.h @@ -0,0 +1,12 @@ +/* function declarations for the Schur decomposition + * (reduction to (quasi-)triangular form T with unitary Z) */ + +#ifndef SCHUR_H +#define SCHUR_H +#include "vectclInt.h" + +/* Compute the Schur form */ +int NumArraySchurCmd(ClientData dummy, Tcl_Interp *interp, + int objc, Tcl_Obj *const *objv); + +#endif diff --git a/generic/vectcl.c b/generic/vectcl.c index 16a82bf..de5417e 100644 --- a/generic/vectcl.c +++ b/generic/vectcl.c @@ -8,6 +8,7 @@ #include "fft.h" #include "svd.h" #include "eig.h" 
+#include "schur.h" #include "bcexecute.h" #include "vmparser.h" @@ -245,6 +246,7 @@ static const EnsembleMap implementationMap[] = { {"eig", NumArrayEigCmd, NULL}, {"svd1", NumArraySVD1Cmd, NULL}, {"svd", NumArraySVDCmd, NULL}, + {"schur", NumArraySchurCmd, NULL}, /* Reductions */ {"sum", NumArraySumCmd, NULL}, {"axismin", NumArrayAxisMinCmd, NULL}, diff --git a/tools/cherrypick_clapack.tcl b/tools/cherrypick_clapack.tcl index 9bacaba..f42e732 100644 --- a/tools/cherrypick_clapack.tcl +++ b/tools/cherrypick_clapack.tcl @@ -4,8 +4,8 @@ set CLAPACK_DIR /Users/chris/Sources/CLAPACK-3.2.1/ -set tooldir [file dirname [info script]] -set destfile [file join $tooldir ../../generic/clackap_cutdown.c] +set tooldir [file normalize [file dirname [info script]]] +set destfile [file normalize [file join $tooldir ../generic/clapack_cutdown.c]] # helper procs for parsing proc shift {} { @@ -422,5 +422,6 @@ proc run_generator {} { dgesdd_ zgesdd_ dgemm_ zgemm_ \ dsyevr_ zheevr_ dgeev_ zgeev_ \ dgelss_ zgelss_ dgelsy_ zgelsy_ \ - dgesv_ zgesv_ dgesvx_ zgesvx_ + dgesv_ zgesv_ dgesvx_ zgesvx_ \ + dgees_ zgees_ }