path: root/test/monniaux/glpk-4.65/src/amd/amd_1.c
/* ========================================================================= */
/* === AMD_1 =============================================================== */
/* ========================================================================= */

/* ------------------------------------------------------------------------- */
/* AMD, Copyright (c) Timothy A. Davis,                                      */
/* Patrick R. Amestoy, and Iain S. Duff.  See ../README.txt for License.     */
/* email: davis at cise.ufl.edu    CISE Department, Univ. of Florida.        */
/* web: http://www.cise.ufl.edu/research/sparse/amd                          */
/* ------------------------------------------------------------------------- */

/* AMD_1: Construct A+A' for a sparse matrix A and perform the AMD ordering.
 *
 * The n-by-n sparse matrix A can be unsymmetric.  It is stored in MATLAB-style
 * compressed-column form, with sorted row indices in each column, and no
 * duplicate entries.  Diagonal entries may be present, but they are ignored.
 * Row indices of column j of A are stored in Ai [Ap [j] ... Ap [j+1]-1].
 * Ap [0] must be zero, and nz = Ap [n] is the number of entries in A.  The
 * size of the matrix, n, must be greater than or equal to zero.
 *
 * This routine must be preceded by a call to AMD_aat, which computes the
 * number of entries in each row/column in A+A', excluding the diagonal.
 * Len [j], on input, is the number of entries in row/column j of A+A'.  This
 * routine constructs the matrix A+A' and then calls AMD_2.  No error checking
 * is performed (this was done in AMD_valid).
 */
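
/* Illustrative example (an assumption for exposition, not part of the
 * original AMD sources): the 3-by-3 matrix with pattern
 *
 *      column 0: rows { 0, 1 }
 *      column 1: rows { 2 }
 *      column 2: rows { 0, 2 }
 *
 * is passed in as Ap = { 0, 2, 3, 5 } and Ai = { 0, 1, 2, 0, 2 }, with
 * nz = Ap [3] = 5.  AMD_aat, called beforehand, would report the
 * off-diagonal counts of A+A' as Len = { 2, 2, 2 }, since each row/column
 * of A+A' for this pattern has exactly two off-diagonal entries.  This
 * example is reused in comments below. */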

#include "amd_internal.h"

GLOBAL void AMD_1
(
    Int n,              /* n > 0 */
    const Int Ap [ ],   /* input of size n+1, not modified */
    const Int Ai [ ],   /* input of size nz = Ap [n], not modified */
    Int P [ ],          /* size n output permutation */
    Int Pinv [ ],       /* size n output inverse permutation */
    Int Len [ ],        /* size n input, undefined on output */
    Int slen,           /* slen >= sum (Len [0..n-1]) + 7n,
                         * ideally slen = 1.2 * sum (Len) + 8n */
    Int S [ ],          /* size slen workspace */
    double Control [ ], /* input array of size AMD_CONTROL */
    double Info [ ]     /* output array of size AMD_INFO */
)
{
    Int i, j, k, p, pfree, iwlen, pj, p1, p2, pj2, *Iw, *Pe, *Nv, *Head,
        *Elen, *Degree, *s, *W, *Sp, *Tp ;

    /* --------------------------------------------------------------------- */
    /* construct the matrix for AMD_2 */
    /* --------------------------------------------------------------------- */

    ASSERT (n > 0) ;

    iwlen = slen - 6*n ;
    s = S ;
    Pe = s ;        s += n ;
    Nv = s ;        s += n ;
    Head = s ;      s += n ;
    Elen = s ;      s += n ;
    Degree = s ;    s += n ;
    W = s ;         s += n ;
    Iw = s ;        s += iwlen ;
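
    /* At this point all slen entries of S are accounted for: six size-n
     * arrays (Pe, Nv, Head, Elen, Degree, W) followed by Iw of size
     * iwlen = slen - 6*n. */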

    ASSERT (AMD_valid (n, n, Ap, Ai) == AMD_OK) ;

    /* construct the pointers for A+A' */
    Sp = Nv ;                   /* use Nv and W as workspace for Sp and Tp [ */
    Tp = W ;
    pfree = 0 ;
    for (j = 0 ; j < n ; j++)
    {
        Pe [j] = pfree ;
        Sp [j] = pfree ;
        pfree += Len [j] ;
    }
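
    /* With the illustrative 3-by-3 example above (Len = { 2, 2, 2 }), this
     * loop produces Pe = Sp = { 0, 2, 4 } and pfree = 6, so column j of
     * A+A' will be assembled starting at Iw [Pe [j]] below. */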

    /* Note that this restriction on iwlen is slightly more restrictive than
     * what is strictly required in AMD_2.  AMD_2 can operate with no elbow
     * room at all, but it will be very slow.  For better performance, at
     * least size-n elbow room is enforced. */
    ASSERT (iwlen >= pfree + n) ;

#ifndef NDEBUG
    for (p = 0 ; p < iwlen ; p++) Iw [p] = EMPTY ;
#endif

    for (k = 0 ; k < n ; k++)
    {
        AMD_DEBUG1 (("Construct row/column k= "ID" of A+A'\n", k))  ;
        p1 = Ap [k] ;
        p2 = Ap [k+1] ;

        /* construct A+A' */
        for (p = p1 ; p < p2 ; )
        {
            /* scan the upper triangular part of A */
            j = Ai [p] ;
            ASSERT (j >= 0 && j < n) ;
            if (j < k)
            {
                /* entry A (j,k) in the strictly upper triangular part */
                ASSERT (Sp [j] < (j == n-1 ? pfree : Pe [j+1])) ;
                ASSERT (Sp [k] < (k == n-1 ? pfree : Pe [k+1])) ;
                Iw [Sp [j]++] = k ;
                Iw [Sp [k]++] = j ;
                p++ ;
            }
            else if (j == k)
            {
                /* skip the diagonal */
                p++ ;
                break ;
            }
            else /* j > k */
            {
                /* first entry below the diagonal */
                break ;
            }
            /* scan lower triangular part of A, in column j until reaching
             * row k.  Start where last scan left off. */
            ASSERT (Ap [j] <= Tp [j] && Tp [j] <= Ap [j+1]) ;
            pj2 = Ap [j+1] ;
            for (pj = Tp [j] ; pj < pj2 ; )
            {
                i = Ai [pj] ;
                ASSERT (i >= 0 && i < n) ;
                if (i < k)
                {
                    /* A (i,j) is only in the lower part, not in upper */
                    ASSERT (Sp [i] < (i == n-1 ? pfree : Pe [i+1])) ;
                    ASSERT (Sp [j] < (j == n-1 ? pfree : Pe [j+1])) ;
                    Iw [Sp [i]++] = j ;
                    Iw [Sp [j]++] = i ;
                    pj++ ;
                }
                else if (i == k)
                {
                    /* entry A (k,j) in lower part and A (j,k) in upper */
                    pj++ ;
                    break ;
                }
                else /* i > k */
                {
                    /* consider this entry later, when k advances to i */
                    break ;
                }
            }
            Tp [j] = pj ;
        }
        Tp [k] = p ;
    }

    /* clean up, for remaining mismatched entries */
    for (j = 0 ; j < n ; j++)
    {
        for (pj = Tp [j] ; pj < Ap [j+1] ; pj++)
        {
            i = Ai [pj] ;
            ASSERT (i >= 0 && i < n) ;
            /* A (i,j) is only in the lower part, not in upper */
            ASSERT (Sp [i] < (i == n-1 ? pfree : Pe [i+1])) ;
            ASSERT (Sp [j] < (j == n-1 ? pfree : Pe [j+1])) ;
            Iw [Sp [i]++] = j ;
            Iw [Sp [j]++] = i ;
        }
    }

#ifndef NDEBUG
    for (j = 0 ; j < n-1 ; j++) ASSERT (Sp [j] == Pe [j+1]) ;
    ASSERT (Sp [n-1] == pfree) ;
#endif
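
    /* Row/column j of A+A' is now stored in
     * Iw [Pe [j] ... Pe [j] + Len [j] - 1]; the assertions above verify
     * this by checking that Sp [j] has advanced exactly to the start of
     * the next column (Pe [j+1], or pfree for the last column). */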

    /* Tp and Sp no longer needed ] */

    /* --------------------------------------------------------------------- */
    /* order the matrix */
    /* --------------------------------------------------------------------- */

    AMD_2 (n, Pe, Iw, Len, iwlen, pfree,
        Nv, Pinv, P, Head, Elen, Degree, W, Control, Info) ;
}
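
/* Illustrative caller sketch (hypothetical helper, not part of the original
 * AMD sources; the actual driver is AMD_order in amd_order.c, which computes
 * Len with AMD_aat).  It orders the 3-by-3 example from the header comment,
 * hard-coding Len and using the simple bound slen = sum (Len) + 8*n, which
 * satisfies the requirement slen >= sum (Len) + 7*n stated above:
 *
 *      #include "amd_internal.h"
 *
 *      void example_amd_1 (Int P [3], Int Pinv [3])
 *      {
 *          Int Ap [4]  = { 0, 2, 3, 5 } ;
 *          Int Ai [5]  = { 0, 1,   2,   0, 2 } ;
 *          Int Len [3] = { 2, 2, 2 } ;
 *          Int n = 3, slen = 6 + 8*3 ;
 *          Int S [6 + 8*3] ;
 *          double Control [AMD_CONTROL], Info [AMD_INFO] ;
 *          AMD_defaults (Control) ;
 *          AMD_1 (n, Ap, Ai, P, Pinv, Len, slen, S, Control, Info) ;
 *      }
 */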