GNU Octave 4.0.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
liboctave/operators/Sparse-op-defs.h
1
/*
2
3
Copyright (C) 2004-2015 David Bateman
4
Copyright (C) 1998-2004 Andy Adler
5
Copyright (C) 2008 Jaroslav Hajek
6
7
This file is part of Octave.
8
9
Octave is free software; you can redistribute it and/or modify it
10
under the terms of the GNU General Public License as published by the
11
Free Software Foundation; either version 3 of the License, or (at your
12
option) any later version.
13
14
Octave is distributed in the hope that it will be useful, but WITHOUT
15
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17
for more details.
18
19
You should have received a copy of the GNU General Public License
20
along with Octave; see the file COPYING. If not, see
21
<http://www.gnu.org/licenses/>.
22
23
*/
24
25
#if !defined (octave_Sparse_op_defs_h)
26
#define octave_Sparse_op_defs_h 1
27
28
#include "
Array-util.h
"
29
#include "
oct-locbuf.h
"
30
#include "
mx-inlines.cc
"
31
32
// sparse matrix by scalar operations.
33
34
#define SPARSE_SMS_BIN_OP_1(R, F, OP, M, S) \
35
R \
36
F (const M& m, const S& s) \
37
{ \
38
octave_idx_type nr = m.rows (); \
39
octave_idx_type nc = m.cols (); \
40
\
41
R r (nr, nc, (0.0 OP s)); \
42
\
43
for (octave_idx_type j = 0; j < nc; j++) \
44
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
45
r.xelem (m.ridx (i), j) = m.data (i) OP s; \
46
return r; \
47
}
48
49
#define SPARSE_SMS_BIN_OP_2(R, F, OP, M, S) \
50
R \
51
F (const M& m, const S& s) \
52
{ \
53
octave_idx_type nr = m.rows (); \
54
octave_idx_type nc = m.cols (); \
55
octave_idx_type nz = m.nnz (); \
56
\
57
R r (nr, nc, nz); \
58
\
59
for (octave_idx_type i = 0; i < nz; i++) \
60
{ \
61
r.xdata (i) = m.data (i) OP s; \
62
r.xridx (i) = m.ridx (i); \
63
} \
64
for (octave_idx_type i = 0; i < nc + 1; i++) \
65
r.xcidx (i) = m.cidx (i); \
66
\
67
r.maybe_compress (true); \
68
return r; \
69
}
70
71
#define SPARSE_SMS_BIN_OPS(R1, R2, M, S) \
72
SPARSE_SMS_BIN_OP_1 (R1, operator +, +, M, S) \
73
SPARSE_SMS_BIN_OP_1 (R1, operator -, -, M, S) \
74
SPARSE_SMS_BIN_OP_2 (R2, operator *, *, M, S) \
75
SPARSE_SMS_BIN_OP_2 (R2, operator /, /, M, S)
76
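// Illustrative sketch (added commentary, not part of the original header):
// the collector macro SPARSE_SMS_BIN_OPS above is expected to be expanded
// once per sparse type in the corresponding instantiation file, roughly like
//
//   SPARSE_SMS_BIN_OPS (SparseMatrix, SparseMatrix, SparseMatrix, double)
//
// which would emit operator +, operator -, operator * and operator /
// taking a const SparseMatrix& and a const double&.  Addition and
// subtraction use *_BIN_OP_1 (every element of the result starts out as
// 0.0 OP s, so the result is effectively dense), while multiplication and
// division use *_BIN_OP_2 and keep the sparsity pattern of m.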
77
#define SPARSE_SMS_CMP_OP(F, OP, M, MZ, MC, S, SZ, SC) \
78
SparseBoolMatrix \
79
F (const M& m, const S& s) \
80
{ \
81
octave_idx_type nr = m.rows (); \
82
octave_idx_type nc = m.cols (); \
83
SparseBoolMatrix r; \
84
\
85
if (MC (MZ) OP SC (s)) \
86
{ \
87
r = SparseBoolMatrix (nr, nc, true); \
88
for (octave_idx_type j = 0; j < nc; j++) \
89
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
90
if (! (MC (m.data (i)) OP SC (s))) \
91
r.data (m.ridx (i) + j * nr) = false; \
92
r.maybe_compress (true); \
93
} \
94
else \
95
{ \
96
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
97
r.cidx (0) = static_cast<octave_idx_type> (0); \
98
octave_idx_type nel = 0; \
99
for (octave_idx_type j = 0; j < nc; j++) \
100
{ \
101
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
102
if (MC (m.data (i)) OP SC (s)) \
103
{ \
104
r.ridx (nel) = m.ridx (i); \
105
r.data (nel++) = true; \
106
} \
107
r.cidx (j + 1) = nel; \
108
} \
109
r.maybe_compress (false); \
110
} \
111
return r; \
112
}
113
114
#define SPARSE_SMS_CMP_OPS(M, MZ, CM, S, SZ, CS) \
115
SPARSE_SMS_CMP_OP (mx_el_lt, <, M, MZ, , S, SZ, ) \
116
SPARSE_SMS_CMP_OP (mx_el_le, <=, M, MZ, , S, SZ, ) \
117
SPARSE_SMS_CMP_OP (mx_el_ge, >=, M, MZ, , S, SZ, ) \
118
SPARSE_SMS_CMP_OP (mx_el_gt, >, M, MZ, , S, SZ, ) \
119
SPARSE_SMS_CMP_OP (mx_el_eq, ==, M, MZ, , S, SZ, ) \
120
SPARSE_SMS_CMP_OP (mx_el_ne, !=, M, MZ, , S, SZ, )
121
122
#define SPARSE_SMS_EQNE_OPS(M, MZ, CM, S, SZ, CS) \
123
SPARSE_SMS_CMP_OP (mx_el_eq, ==, M, MZ, , S, SZ, ) \
124
SPARSE_SMS_CMP_OP (mx_el_ne, !=, M, MZ, , S, SZ, )
125
126
#define SPARSE_SMS_BOOL_OP(F, OP, M, S, LHS_ZERO, RHS_ZERO) \
127
SparseBoolMatrix \
128
F (const M& m, const S& s) \
129
{ \
130
octave_idx_type nr = m.rows (); \
131
octave_idx_type nc = m.cols (); \
132
SparseBoolMatrix r; \
133
\
134
if (nr > 0 && nc > 0) \
135
{ \
136
if (LHS_ZERO OP (s != RHS_ZERO)) \
137
{ \
138
r = SparseBoolMatrix (nr, nc, true); \
139
for (octave_idx_type j = 0; j < nc; j++) \
140
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
141
if (! ((m.data (i) != LHS_ZERO) OP (s != RHS_ZERO))) \
142
r.data (m.ridx (i) + j * nr) = false; \
143
r.maybe_compress (true); \
144
} \
145
else \
146
{ \
147
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
148
r.cidx (0) = static_cast<octave_idx_type> (0); \
149
octave_idx_type nel = 0; \
150
for (octave_idx_type j = 0; j < nc; j++) \
151
{ \
152
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
153
if ((m.data (i) != LHS_ZERO) OP (s != RHS_ZERO)) \
154
{ \
155
r.ridx (nel) = m.ridx (i); \
156
r.data (nel++) = true; \
157
} \
158
r.cidx (j + 1) = nel; \
159
} \
160
r.maybe_compress (false); \
161
} \
162
} \
163
return r; \
164
}
165
166
#define SPARSE_SMS_BOOL_OPS2(M, S, LHS_ZERO, RHS_ZERO) \
167
SPARSE_SMS_BOOL_OP (mx_el_and, &&, M, S, LHS_ZERO, RHS_ZERO) \
168
SPARSE_SMS_BOOL_OP (mx_el_or, ||, M, S, LHS_ZERO, RHS_ZERO)
169
170
#define SPARSE_SMS_BOOL_OPS(M, S, ZERO) \
171
SPARSE_SMS_BOOL_OPS2(M, S, ZERO, ZERO)
172
173
// scalar by sparse matrix operations.
174
175
#define SPARSE_SSM_BIN_OP_1(R, F, OP, S, M) \
176
R \
177
F (const S& s, const M& m) \
178
{ \
179
octave_idx_type nr = m.rows (); \
180
octave_idx_type nc = m.cols (); \
181
\
182
R r (nr, nc, (s OP 0.0)); \
183
\
184
for (octave_idx_type j = 0; j < nc; j++) \
185
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
186
r.xelem (m.ridx (i), j) = s OP m.data (i); \
187
\
188
return r; \
189
}
190
191
#define SPARSE_SSM_BIN_OP_2(R, F, OP, S, M) \
192
R \
193
F (const S& s, const M& m) \
194
{ \
195
octave_idx_type nr = m.rows (); \
196
octave_idx_type nc = m.cols (); \
197
octave_idx_type nz = m.nnz (); \
198
\
199
R r (nr, nc, nz); \
200
\
201
for (octave_idx_type i = 0; i < nz; i++) \
202
{ \
203
r.xdata (i) = s OP m.data (i); \
204
r.xridx (i) = m.ridx (i); \
205
} \
206
for (octave_idx_type i = 0; i < nc + 1; i++) \
207
r.xcidx (i) = m.cidx (i); \
208
\
209
r.maybe_compress(true); \
210
return r; \
211
}
212
213
#define SPARSE_SSM_BIN_OPS(R1, R2, S, M) \
214
SPARSE_SSM_BIN_OP_1 (R1, operator +, +, S, M) \
215
SPARSE_SSM_BIN_OP_1 (R1, operator -, -, S, M) \
216
SPARSE_SSM_BIN_OP_2 (R2, operator *, *, S, M) \
217
SPARSE_SSM_BIN_OP_2 (R2, operator /, /, S, M)
218
219
#define SPARSE_SSM_CMP_OP(F, OP, S, SZ, SC, M, MZ, MC) \
220
SparseBoolMatrix \
221
F (const S& s, const M& m) \
222
{ \
223
octave_idx_type nr = m.rows (); \
224
octave_idx_type nc = m.cols (); \
225
SparseBoolMatrix r; \
226
\
227
if (SC (s) OP SC (MZ)) \
228
{ \
229
r = SparseBoolMatrix (nr, nc, true); \
230
for (octave_idx_type j = 0; j < nc; j++) \
231
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
232
if (! (SC (s) OP MC (m.data (i)))) \
233
r.data (m.ridx (i) + j * nr) = false; \
234
r.maybe_compress (true); \
235
} \
236
else \
237
{ \
238
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
239
r.cidx (0) = static_cast<octave_idx_type> (0); \
240
octave_idx_type nel = 0; \
241
for (octave_idx_type j = 0; j < nc; j++) \
242
{ \
243
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
244
if (SC (s) OP MC (m.data (i))) \
245
{ \
246
r.ridx (nel) = m.ridx (i); \
247
r.data (nel++) = true; \
248
} \
249
r.cidx (j + 1) = nel; \
250
} \
251
r.maybe_compress (false); \
252
} \
253
return r; \
254
}
255
256
#define SPARSE_SSM_CMP_OPS(S, SZ, SC, M, MZ, MC) \
257
SPARSE_SSM_CMP_OP (mx_el_lt, <, S, SZ, , M, MZ, ) \
258
SPARSE_SSM_CMP_OP (mx_el_le, <=, S, SZ, , M, MZ, ) \
259
SPARSE_SSM_CMP_OP (mx_el_ge, >=, S, SZ, , M, MZ, ) \
260
SPARSE_SSM_CMP_OP (mx_el_gt, >, S, SZ, , M, MZ, ) \
261
SPARSE_SSM_CMP_OP (mx_el_eq, ==, S, SZ, , M, MZ, ) \
262
SPARSE_SSM_CMP_OP (mx_el_ne, !=, S, SZ, , M, MZ, )
263
264
#define SPARSE_SSM_EQNE_OPS(S, SZ, SC, M, MZ, MC) \
265
SPARSE_SSM_CMP_OP (mx_el_eq, ==, S, SZ, , M, MZ, ) \
266
SPARSE_SSM_CMP_OP (mx_el_ne, !=, S, SZ, , M, MZ, )
267
268
#define SPARSE_SSM_BOOL_OP(F, OP, S, M, LHS_ZERO, RHS_ZERO) \
269
SparseBoolMatrix \
270
F (const S& s, const M& m) \
271
{ \
272
octave_idx_type nr = m.rows (); \
273
octave_idx_type nc = m.cols (); \
274
SparseBoolMatrix r; \
275
\
276
if (nr > 0 && nc > 0) \
277
{ \
278
if ((s != LHS_ZERO) OP RHS_ZERO) \
279
{ \
280
r = SparseBoolMatrix (nr, nc, true); \
281
for (octave_idx_type j = 0; j < nc; j++) \
282
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
283
if (! ((s != LHS_ZERO) OP (m.data (i) != RHS_ZERO))) \
284
r.data (m.ridx (i) + j * nr) = false; \
285
r.maybe_compress (true); \
286
} \
287
else \
288
{ \
289
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
290
r.cidx (0) = static_cast<octave_idx_type> (0); \
291
octave_idx_type nel = 0; \
292
for (octave_idx_type j = 0; j < nc; j++) \
293
{ \
294
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
295
if ((s != LHS_ZERO) OP (m.data (i) != RHS_ZERO)) \
296
{ \
297
r.ridx (nel) = m.ridx (i); \
298
r.data (nel++) = true; \
299
} \
300
r.cidx (j + 1) = nel; \
301
} \
302
r.maybe_compress (false); \
303
} \
304
} \
305
return r; \
306
}
307
308
#define SPARSE_SSM_BOOL_OPS2(S, M, LHS_ZERO, RHS_ZERO) \
309
SPARSE_SSM_BOOL_OP (mx_el_and, &&, S, M, LHS_ZERO, RHS_ZERO) \
310
SPARSE_SSM_BOOL_OP (mx_el_or, ||, S, M, LHS_ZERO, RHS_ZERO)
311
312
#define SPARSE_SSM_BOOL_OPS(S, M, ZERO) \
313
SPARSE_SSM_BOOL_OPS2(S, M, ZERO, ZERO)
314
315
// sparse matrix by sparse matrix operations.
316
317
#define SPARSE_SMSM_BIN_OP_1(R, F, OP, M1, M2) \
318
R \
319
F (const M1& m1, const M2& m2) \
320
{ \
321
R r; \
322
\
323
octave_idx_type m1_nr = m1.rows (); \
324
octave_idx_type m1_nc = m1.cols (); \
325
\
326
octave_idx_type m2_nr = m2.rows (); \
327
octave_idx_type m2_nc = m2.cols (); \
328
\
329
if (m1_nr == 1 && m1_nc == 1) \
330
{ \
331
if (m1.elem (0,0) == 0.) \
332
r = OP R (m2); \
333
else \
334
{ \
335
r = R (m2_nr, m2_nc, m1.data (0) OP 0.); \
336
\
337
for (octave_idx_type j = 0 ; j < m2_nc ; j++) \
338
{ \
339
octave_quit (); \
340
octave_idx_type idxj = j * m2_nr; \
341
for (octave_idx_type i = m2.cidx (j) ; i < m2.cidx (j+1) ; i++) \
342
{ \
343
octave_quit (); \
344
r.data (idxj + m2.ridx (i)) = m1.data (0) OP m2.data (i); \
345
} \
346
} \
347
r.maybe_compress (); \
348
} \
349
} \
350
else if (m2_nr == 1 && m2_nc == 1) \
351
{ \
352
if (m2.elem (0,0) == 0.) \
353
r = R (m1); \
354
else \
355
{ \
356
r = R (m1_nr, m1_nc, 0. OP m2.data (0)); \
357
\
358
for (octave_idx_type j = 0 ; j < m1_nc ; j++) \
359
{ \
360
octave_quit (); \
361
octave_idx_type idxj = j * m1_nr; \
362
for (octave_idx_type i = m1.cidx (j) ; i < m1.cidx (j+1) ; i++) \
363
{ \
364
octave_quit (); \
365
r.data (idxj + m1.ridx (i)) = m1.data (i) OP m2.data (0); \
366
} \
367
} \
368
r.maybe_compress (); \
369
} \
370
} \
371
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
372
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
373
else \
374
{ \
375
r = R (m1_nr, m1_nc, (m1.nnz () + m2.nnz ())); \
376
\
377
octave_idx_type jx = 0; \
378
r.cidx (0) = 0; \
379
for (octave_idx_type i = 0 ; i < m1_nc ; i++) \
380
{ \
381
octave_idx_type ja = m1.cidx (i); \
382
octave_idx_type ja_max = m1.cidx (i+1); \
383
bool ja_lt_max= ja < ja_max; \
384
\
385
octave_idx_type jb = m2.cidx (i); \
386
octave_idx_type jb_max = m2.cidx (i+1); \
387
bool jb_lt_max = jb < jb_max; \
388
\
389
while (ja_lt_max || jb_lt_max) \
390
{ \
391
octave_quit (); \
392
if ((! jb_lt_max) || \
393
(ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \
394
{ \
395
r.ridx (jx) = m1.ridx (ja); \
396
r.data (jx) = m1.data (ja) OP 0.; \
397
jx++; \
398
ja++; \
399
ja_lt_max= ja < ja_max; \
400
} \
401
else if ((! ja_lt_max) || \
402
(jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \
403
{ \
404
r.ridx (jx) = m2.ridx (jb); \
405
r.data (jx) = 0. OP m2.data (jb); \
406
jx++; \
407
jb++; \
408
jb_lt_max= jb < jb_max; \
409
} \
410
else \
411
{ \
412
if ((m1.data (ja) OP m2.data (jb)) != 0.) \
413
{ \
414
r.data (jx) = m1.data (ja) OP m2.data (jb); \
415
r.ridx (jx) = m1.ridx (ja); \
416
jx++; \
417
} \
418
ja++; \
419
ja_lt_max= ja < ja_max; \
420
jb++; \
421
jb_lt_max= jb < jb_max; \
422
} \
423
} \
424
r.cidx (i+1) = jx; \
425
} \
426
\
427
r.maybe_compress (); \
428
} \
429
\
430
return r; \
431
}
432
433
#define SPARSE_SMSM_BIN_OP_2(R, F, OP, M1, M2) \
434
R \
435
F (const M1& m1, const M2& m2) \
436
{ \
437
R r; \
438
\
439
octave_idx_type m1_nr = m1.rows (); \
440
octave_idx_type m1_nc = m1.cols (); \
441
\
442
octave_idx_type m2_nr = m2.rows (); \
443
octave_idx_type m2_nc = m2.cols (); \
444
\
445
if (m1_nr == 1 && m1_nc == 1) \
446
{ \
447
if (m1.elem (0,0) == 0.) \
448
r = R (m2_nr, m2_nc); \
449
else \
450
{ \
451
r = R (m2); \
452
octave_idx_type m2_nnz = m2.nnz (); \
453
\
454
for (octave_idx_type i = 0 ; i < m2_nnz ; i++) \
455
{ \
456
octave_quit (); \
457
r.data (i) = m1.data (0) OP r.data (i); \
458
} \
459
r.maybe_compress (); \
460
} \
461
} \
462
else if (m2_nr == 1 && m2_nc == 1) \
463
{ \
464
if (m2.elem (0,0) == 0.) \
465
r = R (m1_nr, m1_nc); \
466
else \
467
{ \
468
r = R (m1); \
469
octave_idx_type m1_nnz = m1.nnz (); \
470
\
471
for (octave_idx_type i = 0 ; i < m1_nnz ; i++) \
472
{ \
473
octave_quit (); \
474
r.data (i) = r.data (i) OP m2.data (0); \
475
} \
476
r.maybe_compress (); \
477
} \
478
} \
479
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
480
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
481
else \
482
{ \
483
r = R (m1_nr, m1_nc, (m1.nnz () > m2.nnz () ? m1.nnz () : m2.nnz ())); \
484
\
485
octave_idx_type jx = 0; \
486
r.cidx (0) = 0; \
487
for (octave_idx_type i = 0 ; i < m1_nc ; i++) \
488
{ \
489
octave_idx_type ja = m1.cidx (i); \
490
octave_idx_type ja_max = m1.cidx (i+1); \
491
bool ja_lt_max= ja < ja_max; \
492
\
493
octave_idx_type jb = m2.cidx (i); \
494
octave_idx_type jb_max = m2.cidx (i+1); \
495
bool jb_lt_max = jb < jb_max; \
496
\
497
while (ja_lt_max || jb_lt_max) \
498
{ \
499
octave_quit (); \
500
if ((! jb_lt_max) || \
501
(ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \
502
{ \
503
ja++; ja_lt_max= ja < ja_max; \
504
} \
505
else if ((! ja_lt_max) || \
506
(jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \
507
{ \
508
jb++; jb_lt_max= jb < jb_max; \
509
} \
510
else \
511
{ \
512
if ((m1.data (ja) OP m2.data (jb)) != 0.) \
513
{ \
514
r.data (jx) = m1.data (ja) OP m2.data (jb); \
515
r.ridx (jx) = m1.ridx (ja); \
516
jx++; \
517
} \
518
ja++; ja_lt_max= ja < ja_max; \
519
jb++; jb_lt_max= jb < jb_max; \
520
} \
521
} \
522
r.cidx (i+1) = jx; \
523
} \
524
\
525
r.maybe_compress (); \
526
} \
527
\
528
return r; \
529
}
530
531
#define SPARSE_SMSM_BIN_OP_3(R, F, OP, M1, M2) \
532
R \
533
F (const M1& m1, const M2& m2) \
534
{ \
535
R r; \
536
\
537
octave_idx_type m1_nr = m1.rows (); \
538
octave_idx_type m1_nc = m1.cols (); \
539
\
540
octave_idx_type m2_nr = m2.rows (); \
541
octave_idx_type m2_nc = m2.cols (); \
542
\
543
if (m1_nr == 1 && m1_nc == 1) \
544
{ \
545
if ((m1.elem (0,0) OP Complex ()) == Complex ()) \
546
{ \
547
octave_idx_type m2_nnz = m2.nnz (); \
548
r = R (m2); \
549
for (octave_idx_type i = 0 ; i < m2_nnz ; i++) \
550
r.data (i) = m1.elem (0,0) OP r.data (i); \
551
r.maybe_compress (); \
552
} \
553
else \
554
{ \
555
r = R (m2_nr, m2_nc, m1.elem (0,0) OP Complex ()); \
556
for (octave_idx_type j = 0 ; j < m2_nc ; j++) \
557
{ \
558
octave_quit (); \
559
octave_idx_type idxj = j * m2_nr; \
560
for (octave_idx_type i = m2.cidx (j) ; i < m2.cidx (j+1) ; i++) \
561
{ \
562
octave_quit (); \
563
r.data (idxj + m2.ridx (i)) = m1.elem (0,0) OP m2.data (i); \
564
} \
565
} \
566
r.maybe_compress (); \
567
} \
568
} \
569
else if (m2_nr == 1 && m2_nc == 1) \
570
{ \
571
if ((Complex () OP m1.elem (0,0)) == Complex ()) \
572
{ \
573
octave_idx_type m1_nnz = m1.nnz (); \
574
r = R (m1); \
575
for (octave_idx_type i = 0 ; i < m1_nnz ; i++) \
576
r.data (i) = r.data (i) OP m2.elem (0,0); \
577
r.maybe_compress (); \
578
} \
579
else \
580
{ \
581
r = R (m1_nr, m1_nc, Complex () OP m2.elem (0,0)); \
582
for (octave_idx_type j = 0 ; j < m1_nc ; j++) \
583
{ \
584
octave_quit (); \
585
octave_idx_type idxj = j * m1_nr; \
586
for (octave_idx_type i = m1.cidx (j) ; i < m1.cidx (j+1) ; i++) \
587
{ \
588
octave_quit (); \
589
r.data (idxj + m1.ridx (i)) = m1.data (i) OP m2.elem (0,0); \
590
} \
591
} \
592
r.maybe_compress (); \
593
} \
594
} \
595
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
596
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
597
else \
598
{ \
599
\
600
/* FIXME: Kludge... Always double/Complex, so Complex () */ \
601
r = R (m1_nr, m1_nc, (Complex () OP Complex ())); \
602
\
603
for (octave_idx_type i = 0 ; i < m1_nc ; i++) \
604
{ \
605
octave_idx_type ja = m1.cidx (i); \
606
octave_idx_type ja_max = m1.cidx (i+1); \
607
bool ja_lt_max= ja < ja_max; \
608
\
609
octave_idx_type jb = m2.cidx (i); \
610
octave_idx_type jb_max = m2.cidx (i+1); \
611
bool jb_lt_max = jb < jb_max; \
612
\
613
while (ja_lt_max || jb_lt_max) \
614
{ \
615
octave_quit (); \
616
if ((! jb_lt_max) || \
617
(ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \
618
{ \
619
/* keep those kludges coming */ \
620
r.elem (m1.ridx (ja),i) = m1.data (ja) OP Complex (); \
621
ja++; \
622
ja_lt_max= ja < ja_max; \
623
} \
624
else if ((! ja_lt_max) || \
625
(jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \
626
{ \
627
/* keep those kludges coming */ \
628
r.elem (m2.ridx (jb),i) = Complex () OP m2.data (jb); \
629
jb++; \
630
jb_lt_max= jb < jb_max; \
631
} \
632
else \
633
{ \
634
r.elem (m1.ridx (ja),i) = m1.data (ja) OP m2.data (jb); \
635
ja++; \
636
ja_lt_max= ja < ja_max; \
637
jb++; \
638
jb_lt_max= jb < jb_max; \
639
} \
640
} \
641
} \
642
r.maybe_compress (true); \
643
} \
644
\
645
return r; \
646
}
647
648
// Note that SM ./ SM needs to take into account the NaN and Inf values
649
// implied by the division by zero.
650
// FIXME: Are the NaNs double(NaN) or Complex(NaN,NaN) in the complex case?
651
#define SPARSE_SMSM_BIN_OPS(R1, R2, M1, M2) \
652
SPARSE_SMSM_BIN_OP_1 (R1, operator +, +, M1, M2) \
653
SPARSE_SMSM_BIN_OP_1 (R1, operator -, -, M1, M2) \
654
SPARSE_SMSM_BIN_OP_2 (R2, product, *, M1, M2) \
655
SPARSE_SMSM_BIN_OP_3 (R2, quotient, /, M1, M2)
656
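// Illustrative note (added commentary, not in the original header):
// element-wise quotient of two sparse matrices goes through *_BIN_OP_3
// because a zero denominator produces Inf or NaN, so the result is
// generally dense; *_BIN_OP_3 therefore starts from a matrix filled with
// (Complex () OP Complex ()) and then patches the positions where either
// operand has a stored element.  A typical expansion is expected to look
// roughly like
//
//   SPARSE_SMSM_BIN_OPS (SparseMatrix, SparseMatrix, SparseMatrix, SparseMatrix)
//
// which gives operator +, operator -, product () and quotient () for the
// real sparse-by-sparse case.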
657
// FIXME: this macro duplicates the bodies of the template functions
658
// defined in the SPARSE_SSM_CMP_OP and SPARSE_SMS_CMP_OP macros.
659
660
#define SPARSE_SMSM_CMP_OP(F, OP, M1, Z1, C1, M2, Z2, C2) \
661
SparseBoolMatrix \
662
F (const M1& m1, const M2& m2) \
663
{ \
664
SparseBoolMatrix r; \
665
\
666
octave_idx_type m1_nr = m1.rows (); \
667
octave_idx_type m1_nc = m1.cols (); \
668
\
669
octave_idx_type m2_nr = m2.rows (); \
670
octave_idx_type m2_nc = m2.cols (); \
671
\
672
if (m1_nr == 1 && m1_nc == 1) \
673
{ \
674
if (C1 (m1.elem (0,0)) OP C2 (Z2)) \
675
{ \
676
r = SparseBoolMatrix (m2_nr, m2_nc, true); \
677
for (octave_idx_type j = 0; j < m2_nc; j++) \
678
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
679
if (! (C1 (m1.elem (0,0)) OP C2 (m2.data (i)))) \
680
r.data (m2.ridx (i) + j * m2_nr) = false; \
681
r.maybe_compress (true); \
682
} \
683
else \
684
{ \
685
r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ()); \
686
r.cidx (0) = static_cast<octave_idx_type> (0); \
687
octave_idx_type nel = 0; \
688
for (octave_idx_type j = 0; j < m2_nc; j++) \
689
{ \
690
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
691
if (C1 (m1.elem (0,0)) OP C2 (m2.data (i))) \
692
{ \
693
r.ridx (nel) = m2.ridx (i); \
694
r.data (nel++) = true; \
695
} \
696
r.cidx (j + 1) = nel; \
697
} \
698
r.maybe_compress (false); \
699
} \
700
} \
701
else if (m2_nr == 1 && m2_nc == 1) \
702
{ \
703
if (C1 (Z1) OP C2 (m2.elem (0,0))) \
704
{ \
705
r = SparseBoolMatrix (m1_nr, m1_nc, true); \
706
for (octave_idx_type j = 0; j < m1_nc; j++) \
707
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
708
if (! (C1 (m1.data (i)) OP C2 (m2.elem (0,0)))) \
709
r.data (m1.ridx (i) + j * m1_nr) = false; \
710
r.maybe_compress (true); \
711
} \
712
else \
713
{ \
714
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ()); \
715
r.cidx (0) = static_cast<octave_idx_type> (0); \
716
octave_idx_type nel = 0; \
717
for (octave_idx_type j = 0; j < m1_nc; j++) \
718
{ \
719
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
720
if (C1 (m1.data (i)) OP C2 (m2.elem (0,0))) \
721
{ \
722
r.ridx (nel) = m1.ridx (i); \
723
r.data (nel++) = true; \
724
} \
725
r.cidx (j + 1) = nel; \
726
} \
727
r.maybe_compress (false); \
728
} \
729
} \
730
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
731
{ \
732
if (m1_nr != 0 || m1_nc != 0) \
733
{ \
734
if (C1 (Z1) OP C2 (Z2)) \
735
{ \
736
r = SparseBoolMatrix (m1_nr, m1_nc, true); \
737
for (octave_idx_type j = 0; j < m1_nc; j++) \
738
{ \
739
octave_idx_type i1 = m1.cidx (j); \
740
octave_idx_type e1 = m1.cidx (j+1); \
741
octave_idx_type i2 = m2.cidx (j); \
742
octave_idx_type e2 = m2.cidx (j+1); \
743
while (i1 < e1 || i2 < e2) \
744
{ \
745
if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \
746
{ \
747
if (! (C1 (Z1) OP C2 (m2.data (i2)))) \
748
r.data (m2.ridx (i2) + j * m1_nr) = false; \
749
i2++; \
750
} \
751
else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \
752
{ \
753
if (! (C1 (m1.data (i1)) OP C2 (Z2))) \
754
r.data (m1.ridx (i1) + j * m1_nr) = false; \
755
i1++; \
756
} \
757
else \
758
{ \
759
if (! (C1 (m1.data (i1)) OP C2 (m2.data (i2)))) \
760
r.data (m1.ridx (i1) + j * m1_nr) = false; \
761
i1++; \
762
i2++; \
763
} \
764
} \
765
} \
766
r.maybe_compress (true); \
767
} \
768
else \
769
{ \
770
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ()); \
771
r.cidx (0) = static_cast<octave_idx_type> (0); \
772
octave_idx_type nel = 0; \
773
for (octave_idx_type j = 0; j < m1_nc; j++) \
774
{ \
775
octave_idx_type i1 = m1.cidx (j); \
776
octave_idx_type e1 = m1.cidx (j+1); \
777
octave_idx_type i2 = m2.cidx (j); \
778
octave_idx_type e2 = m2.cidx (j+1); \
779
while (i1 < e1 || i2 < e2) \
780
{ \
781
if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \
782
{ \
783
if (C1 (Z1) OP C2 (m2.data (i2))) \
784
{ \
785
r.ridx (nel) = m2.ridx (i2); \
786
r.data (nel++) = true; \
787
} \
788
i2++; \
789
} \
790
else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \
791
{ \
792
if (C1 (m1.data (i1)) OP C2 (Z2)) \
793
{ \
794
r.ridx (nel) = m1.ridx (i1); \
795
r.data (nel++) = true; \
796
} \
797
i1++; \
798
} \
799
else \
800
{ \
801
if (C1 (m1.data (i1)) OP C2 (m2.data (i2))) \
802
{ \
803
r.ridx (nel) = m1.ridx (i1); \
804
r.data (nel++) = true; \
805
} \
806
i1++; \
807
i2++; \
808
} \
809
} \
810
r.cidx (j + 1) = nel; \
811
} \
812
r.maybe_compress (false); \
813
} \
814
} \
815
} \
816
else \
817
{ \
818
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
819
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
820
} \
821
return r; \
822
}
823
824
#define SPARSE_SMSM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \
825
SPARSE_SMSM_CMP_OP (mx_el_lt, <, M1, Z1, , M2, Z2, ) \
826
SPARSE_SMSM_CMP_OP (mx_el_le, <=, M1, Z1, , M2, Z2, ) \
827
SPARSE_SMSM_CMP_OP (mx_el_ge, >=, M1, Z1, , M2, Z2, ) \
828
SPARSE_SMSM_CMP_OP (mx_el_gt, >, M1, Z1, , M2, Z2, ) \
829
SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, Z1, , M2, Z2, ) \
830
SPARSE_SMSM_CMP_OP (mx_el_ne, !=, M1, Z1, , M2, Z2, )
831
832
#define SPARSE_SMSM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \
833
SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, Z1, , M2, Z2, ) \
834
SPARSE_SMSM_CMP_OP (mx_el_ne, !=, M1, Z1, , M2, Z2, )
835
836
// FIXME: this macro duplicates the bodies of the template functions
837
// defined in the SPARSE_SSM_BOOL_OP and SPARSE_SMS_BOOL_OP macros.
838
839
#define SPARSE_SMSM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \
840
SparseBoolMatrix \
841
F (const M1& m1, const M2& m2) \
842
{ \
843
SparseBoolMatrix r; \
844
\
845
octave_idx_type m1_nr = m1.rows (); \
846
octave_idx_type m1_nc = m1.cols (); \
847
\
848
octave_idx_type m2_nr = m2.rows (); \
849
octave_idx_type m2_nc = m2.cols (); \
850
\
851
if (m1_nr == 1 && m1_nc == 1) \
852
{ \
853
if (m2_nr > 0 && m2_nc > 0) \
854
{ \
855
if ((m1.elem (0,0) != LHS_ZERO) OP RHS_ZERO) \
856
{ \
857
r = SparseBoolMatrix (m2_nr, m2_nc, true); \
858
for (octave_idx_type j = 0; j < m2_nc; j++) \
859
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
860
if (! ((m1.elem (0,0) != LHS_ZERO) OP (m2.data (i) != RHS_ZERO))) \
861
r.data (m2.ridx (i) + j * m2_nr) = false; \
862
r.maybe_compress (true); \
863
} \
864
else \
865
{ \
866
r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ()); \
867
r.cidx (0) = static_cast<octave_idx_type> (0); \
868
octave_idx_type nel = 0; \
869
for (octave_idx_type j = 0; j < m2_nc; j++) \
870
{ \
871
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
872
if ((m1.elem (0,0) != LHS_ZERO) OP (m2.data (i) != RHS_ZERO)) \
873
{ \
874
r.ridx (nel) = m2.ridx (i); \
875
r.data (nel++) = true; \
876
} \
877
r.cidx (j + 1) = nel; \
878
} \
879
r.maybe_compress (false); \
880
} \
881
} \
882
} \
883
else if (m2_nr == 1 && m2_nc == 1) \
884
{ \
885
if (m1_nr > 0 && m1_nc > 0) \
886
{ \
887
if (LHS_ZERO OP (m2.elem (0,0) != RHS_ZERO)) \
888
{ \
889
r = SparseBoolMatrix (m1_nr, m1_nc, true); \
890
for (octave_idx_type j = 0; j < m1_nc; j++) \
891
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
892
if (! ((m1.data (i) != LHS_ZERO) OP (m2.elem (0,0) != RHS_ZERO))) \
893
r.data (m1.ridx (i) + j * m1_nr) = false; \
894
r.maybe_compress (true); \
895
} \
896
else \
897
{ \
898
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ()); \
899
r.cidx (0) = static_cast<octave_idx_type> (0); \
900
octave_idx_type nel = 0; \
901
for (octave_idx_type j = 0; j < m1_nc; j++) \
902
{ \
903
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
904
if ((m1.data (i) != LHS_ZERO) OP (m2.elem (0,0) != RHS_ZERO)) \
905
{ \
906
r.ridx (nel) = m1.ridx (i); \
907
r.data (nel++) = true; \
908
} \
909
r.cidx (j + 1) = nel; \
910
} \
911
r.maybe_compress (false); \
912
} \
913
} \
914
} \
915
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
916
{ \
917
if (m1_nr != 0 || m1_nc != 0) \
918
{ \
919
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ()); \
920
r.cidx (0) = static_cast<octave_idx_type> (0); \
921
octave_idx_type nel = 0; \
922
for (octave_idx_type j = 0; j < m1_nc; j++) \
923
{ \
924
octave_idx_type i1 = m1.cidx (j); \
925
octave_idx_type e1 = m1.cidx (j+1); \
926
octave_idx_type i2 = m2.cidx (j); \
927
octave_idx_type e2 = m2.cidx (j+1); \
928
while (i1 < e1 || i2 < e2) \
929
{ \
930
if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \
931
{ \
932
if (LHS_ZERO OP m2.data (i2) != RHS_ZERO) \
933
{ \
934
r.ridx (nel) = m2.ridx (i2); \
935
r.data (nel++) = true; \
936
} \
937
i2++; \
938
} \
939
else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \
940
{ \
941
if (m1.data (i1) != LHS_ZERO OP RHS_ZERO) \
942
{ \
943
r.ridx (nel) = m1.ridx (i1); \
944
r.data (nel++) = true; \
945
} \
946
i1++; \
947
} \
948
else \
949
{ \
950
if (m1.data (i1) != LHS_ZERO OP m2.data (i2) != RHS_ZERO) \
951
{ \
952
r.ridx (nel) = m1.ridx (i1); \
953
r.data (nel++) = true; \
954
} \
955
i1++; \
956
i2++; \
957
} \
958
} \
959
r.cidx (j + 1) = nel; \
960
} \
961
r.maybe_compress (false); \
962
} \
963
} \
964
else \
965
{ \
966
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
967
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
968
} \
969
return r; \
970
}
971
972
#define SPARSE_SMSM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \
973
SPARSE_SMSM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \
974
SPARSE_SMSM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO)
975
976
#define SPARSE_SMSM_BOOL_OPS(M1, M2, ZERO) \
977
SPARSE_SMSM_BOOL_OPS2(M1, M2, ZERO, ZERO)
978
979
// matrix by sparse matrix operations.
980
981
#define SPARSE_MSM_BIN_OP_1(R, F, OP, M1, M2) \
982
R \
983
F (const M1& m1, const M2& m2) \
984
{ \
985
R r; \
986
\
987
octave_idx_type m1_nr = m1.rows (); \
988
octave_idx_type m1_nc = m1.cols (); \
989
\
990
octave_idx_type m2_nr = m2.rows (); \
991
octave_idx_type m2_nc = m2.cols (); \
992
\
993
if (m2_nr == 1 && m2_nc == 1) \
994
r = R (m1 OP m2.elem (0,0)); \
995
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
996
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
997
else \
998
{ \
999
r = R (F (m1, m2.matrix_value ())); \
1000
} \
1001
return r; \
1002
}
1003
1004
#define SPARSE_MSM_BIN_OP_2(R, F, OP, M1, M2) \
1005
R \
1006
F (const M1& m1, const M2& m2) \
1007
{ \
1008
R r; \
1009
\
1010
octave_idx_type m1_nr = m1.rows (); \
1011
octave_idx_type m1_nc = m1.cols (); \
1012
\
1013
octave_idx_type m2_nr = m2.rows (); \
1014
octave_idx_type m2_nc = m2.cols (); \
1015
\
1016
if (m2_nr == 1 && m2_nc == 1) \
1017
r = R (m1 OP m2.elem (0,0)); \
1018
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
1019
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1020
else \
1021
{ \
1022
if (do_mx_check (m1, mx_inline_all_finite<M1::element_type>)) \
1023
{ \
1024
/* Sparsity pattern is preserved. */ \
1025
octave_idx_type m2_nz = m2.nnz (); \
1026
r = R (m2_nr, m2_nc, m2_nz); \
1027
for (octave_idx_type j = 0, k = 0; j < m2_nc; j++) \
1028
{ \
1029
octave_quit (); \
1030
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
1031
{ \
1032
octave_idx_type mri = m2.ridx (i); \
1033
R::element_type x = m1(mri, j) OP m2.data (i); \
1034
if (x != 0.0) \
1035
{ \
1036
r.xdata (k) = x; \
1037
r.xridx (k) = m2.ridx (i); \
1038
k++; \
1039
} \
1040
} \
1041
r.xcidx (j+1) = k; \
1042
} \
1043
r.maybe_compress (false); \
1044
return r; \
1045
} \
1046
else \
1047
r = R (F (m1, m2.matrix_value ())); \
1048
} \
1049
\
1050
return r; \
1051
}
1052
1053
// FIXME: Pass a specific ZERO value
1054
#define SPARSE_MSM_BIN_OPS(R1, R2, M1, M2) \
1055
SPARSE_MSM_BIN_OP_1 (R1, operator +, +, M1, M2) \
1056
SPARSE_MSM_BIN_OP_1 (R1, operator -, -, M1, M2) \
1057
SPARSE_MSM_BIN_OP_2 (R2, product, *, M1, M2) \
1058
SPARSE_MSM_BIN_OP_1 (R2, quotient, /, M1, M2)
1059
1060
#define SPARSE_MSM_CMP_OP(F, OP, M1, C1, M2, C2) \
1061
SparseBoolMatrix \
1062
F (const M1& m1, const M2& m2) \
1063
{ \
1064
SparseBoolMatrix r; \
1065
\
1066
octave_idx_type m1_nr = m1.rows (); \
1067
octave_idx_type m1_nc = m1.cols (); \
1068
\
1069
octave_idx_type m2_nr = m2.rows (); \
1070
octave_idx_type m2_nc = m2.cols (); \
1071
\
1072
if (m2_nr == 1 && m2_nc == 1) \
1073
r = SparseBoolMatrix (F (m1, m2.elem (0,0))); \
1074
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1075
{ \
1076
if (m1_nr != 0 || m1_nc != 0) \
1077
{ \
1078
/* Count num of nonzero elements */ \
1079
octave_idx_type nel = 0; \
1080
for (octave_idx_type j = 0; j < m1_nc; j++) \
1081
for (octave_idx_type i = 0; i < m1_nr; i++) \
1082
if (C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j))) \
1083
nel++; \
1084
\
1085
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1086
\
1087
octave_idx_type ii = 0; \
1088
r.cidx (0) = 0; \
1089
for (octave_idx_type j = 0; j < m1_nc; j++) \
1090
{ \
1091
for (octave_idx_type i = 0; i < m1_nr; i++) \
1092
{ \
1093
bool el = C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j)); \
1094
if (el) \
1095
{ \
1096
r.data (ii) = el; \
1097
r.ridx (ii++) = i; \
1098
} \
1099
} \
1100
r.cidx (j+1) = ii; \
1101
} \
1102
} \
1103
} \
1104
else \
1105
{ \
1106
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1107
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1108
} \
1109
return r; \
1110
}
1111
1112
#define SPARSE_MSM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \
1113
SPARSE_MSM_CMP_OP (mx_el_lt, <, M1, , M2, ) \
1114
SPARSE_MSM_CMP_OP (mx_el_le, <=, M1, , M2, ) \
1115
SPARSE_MSM_CMP_OP (mx_el_ge, >=, M1, , M2, ) \
1116
SPARSE_MSM_CMP_OP (mx_el_gt, >, M1, , M2, ) \
1117
SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1118
SPARSE_MSM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1119
1120
#define SPARSE_MSM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \
1121
SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1122
SPARSE_MSM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1123
1124
#define SPARSE_MSM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \
1125
SparseBoolMatrix \
1126
F (const M1& m1, const M2& m2) \
1127
{ \
1128
SparseBoolMatrix r; \
1129
\
1130
octave_idx_type m1_nr = m1.rows (); \
1131
octave_idx_type m1_nc = m1.cols (); \
1132
\
1133
octave_idx_type m2_nr = m2.rows (); \
1134
octave_idx_type m2_nc = m2.cols (); \
1135
\
1136
if (m2_nr == 1 && m2_nc == 1) \
1137
r = SparseBoolMatrix (F (m1, m2.elem (0,0))); \
1138
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1139
{ \
1140
if (m1_nr != 0 || m1_nc != 0) \
1141
{ \
1142
/* Count num of nonzero elements */ \
1143
octave_idx_type nel = 0; \
1144
for (octave_idx_type j = 0; j < m1_nc; j++) \
1145
for (octave_idx_type i = 0; i < m1_nr; i++) \
1146
if ((m1.elem (i, j) != LHS_ZERO) \
1147
OP (m2.elem (i, j) != RHS_ZERO)) \
1148
nel++; \
1149
\
1150
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1151
\
1152
octave_idx_type ii = 0; \
1153
r.cidx (0) = 0; \
1154
for (octave_idx_type j = 0; j < m1_nc; j++) \
1155
{ \
1156
for (octave_idx_type i = 0; i < m1_nr; i++) \
1157
{ \
1158
bool el = (m1.elem (i, j) != LHS_ZERO) \
1159
OP (m2.elem (i, j) != RHS_ZERO); \
1160
if (el) \
1161
{ \
1162
r.data (ii) = el; \
1163
r.ridx (ii++) = i; \
1164
} \
1165
} \
1166
r.cidx (j+1) = ii; \
1167
} \
1168
} \
1169
} \
1170
else \
1171
{ \
1172
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1173
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1174
} \
1175
return r; \
1176
}
1177
1178
#define SPARSE_MSM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \
1179
SPARSE_MSM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \
1180
SPARSE_MSM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO)
1181
1182
#define SPARSE_MSM_BOOL_OPS(M1, M2, ZERO) \
1183
SPARSE_MSM_BOOL_OPS2(M1, M2, ZERO, ZERO)
1184
1185
// sparse matrix by matrix operations.
1186
1187
#define SPARSE_SMM_BIN_OP_1(R, F, OP, M1, M2) \
1188
R \
1189
F (const M1& m1, const M2& m2) \
1190
{ \
1191
R r; \
1192
\
1193
octave_idx_type m1_nr = m1.rows (); \
1194
octave_idx_type m1_nc = m1.cols (); \
1195
\
1196
octave_idx_type m2_nr = m2.rows (); \
1197
octave_idx_type m2_nc = m2.cols (); \
1198
\
1199
if (m1_nr == 1 && m1_nc == 1) \
1200
r = R (m1.elem (0,0) OP m2); \
1201
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
1202
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1203
else \
1204
{ \
1205
r = R (m1.matrix_value () OP m2); \
1206
} \
1207
return r; \
1208
}
1209
1210
// sm .* m preserves sparsity if m contains no Infs or NaNs.
1211
#define SPARSE_SMM_BIN_OP_2_CHECK_product(ET) \
1212
do_mx_check (m2, mx_inline_all_finite<ET>)
1213
1214
// sm ./ m preserves sparsity if m contains no NaNs or zeros.
1215
#define SPARSE_SMM_BIN_OP_2_CHECK_quotient(ET) \
1216
! do_mx_check (m2, mx_inline_any_nan<ET>) && m2.nnz () == m2.numel ()
1217
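// Illustrative note (added commentary, not in the original header):
// SPARSE_SMM_BIN_OP_2 below selects its sparsity check by token-pasting
// the function name, so for F == product the condition
//
//   SPARSE_SMM_BIN_OP_2_CHECK_ ## F(M2::element_type)
//
// expands to do_mx_check (m2, mx_inline_all_finite<M2::element_type>),
// i.e. "the dense operand is entirely finite", while for F == quotient it
// additionally requires that m2 has no NaNs and no zeros
// (m2.nnz () == m2.numel ()).  When the check fails, the operation falls
// back to the dense code path via m1.matrix_value ().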
1218
#define SPARSE_SMM_BIN_OP_2(R, F, OP, M1, M2) \
1219
R \
1220
F (const M1& m1, const M2& m2) \
1221
{ \
1222
R r; \
1223
\
1224
octave_idx_type m1_nr = m1.rows (); \
1225
octave_idx_type m1_nc = m1.cols (); \
1226
\
1227
octave_idx_type m2_nr = m2.rows (); \
1228
octave_idx_type m2_nc = m2.cols (); \
1229
\
1230
if (m1_nr == 1 && m1_nc == 1) \
1231
r = R (m1.elem (0,0) OP m2); \
1232
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
1233
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1234
else \
1235
{ \
1236
if (SPARSE_SMM_BIN_OP_2_CHECK_ ## F(M2::element_type)) \
1237
{ \
1238
/* Sparsity pattern is preserved. */ \
1239
octave_idx_type m1_nz = m1.nnz (); \
1240
r = R (m1_nr, m1_nc, m1_nz); \
1241
for (octave_idx_type j = 0, k = 0; j < m1_nc; j++) \
1242
{ \
1243
octave_quit (); \
1244
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
1245
{ \
1246
octave_idx_type mri = m1.ridx (i); \
1247
R::element_type x = m1.data (i) OP m2 (mri, j); \
1248
if (x != 0.0) \
1249
{ \
1250
r.xdata (k) = x; \
1251
r.xridx (k) = m1.ridx (i); \
1252
k++; \
1253
} \
1254
} \
1255
r.xcidx (j+1) = k; \
1256
} \
1257
r.maybe_compress (false); \
1258
return r; \
1259
} \
1260
else \
1261
r = R (F (m1.matrix_value (), m2)); \
1262
} \
1263
\
1264
return r; \
1265
}
1266
1267
#define SPARSE_SMM_BIN_OPS(R1, R2, M1, M2) \
1268
SPARSE_SMM_BIN_OP_1 (R1, operator +, +, M1, M2) \
1269
SPARSE_SMM_BIN_OP_1 (R1, operator -, -, M1, M2) \
1270
SPARSE_SMM_BIN_OP_2 (R2, product, *, M1, M2) \
1271
SPARSE_SMM_BIN_OP_2 (R2, quotient, /, M1, M2)
1272
1273
#define SPARSE_SMM_CMP_OP(F, OP, M1, C1, M2, C2) \
1274
SparseBoolMatrix \
1275
F (const M1& m1, const M2& m2) \
1276
{ \
1277
SparseBoolMatrix r; \
1278
\
1279
octave_idx_type m1_nr = m1.rows (); \
1280
octave_idx_type m1_nc = m1.cols (); \
1281
\
1282
octave_idx_type m2_nr = m2.rows (); \
1283
octave_idx_type m2_nc = m2.cols (); \
1284
\
1285
if (m1_nr == 1 && m1_nc == 1) \
1286
r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \
1287
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1288
{ \
1289
if (m1_nr != 0 || m1_nc != 0) \
1290
{ \
1291
/* Count num of nonzero elements */ \
1292
octave_idx_type nel = 0; \
1293
for (octave_idx_type j = 0; j < m1_nc; j++) \
1294
for (octave_idx_type i = 0; i < m1_nr; i++) \
1295
if (C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j))) \
1296
nel++; \
1297
\
1298
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1299
\
1300
octave_idx_type ii = 0; \
1301
r.cidx (0) = 0; \
1302
for (octave_idx_type j = 0; j < m1_nc; j++) \
1303
{ \
1304
for (octave_idx_type i = 0; i < m1_nr; i++) \
1305
{ \
1306
bool el = C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j)); \
1307
if (el) \
1308
{ \
1309
r.data (ii) = el; \
1310
r.ridx (ii++) = i; \
1311
} \
1312
} \
1313
r.cidx (j+1) = ii; \
1314
} \
1315
} \
1316
} \
1317
else \
1318
{ \
1319
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1320
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1321
} \
1322
return r; \
1323
}
1324
1325
#define SPARSE_SMM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \
1326
SPARSE_SMM_CMP_OP (mx_el_lt, <, M1, , M2, ) \
1327
SPARSE_SMM_CMP_OP (mx_el_le, <=, M1, , M2, ) \
1328
SPARSE_SMM_CMP_OP (mx_el_ge, >=, M1, , M2, ) \
1329
SPARSE_SMM_CMP_OP (mx_el_gt, >, M1, , M2, ) \
1330
SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1331
SPARSE_SMM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1332
1333
#define SPARSE_SMM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \
1334
SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1335
SPARSE_SMM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1336
1337
#define SPARSE_SMM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \
1338
SparseBoolMatrix \
1339
F (const M1& m1, const M2& m2) \
1340
{ \
1341
SparseBoolMatrix r; \
1342
\
1343
octave_idx_type m1_nr = m1.rows (); \
1344
octave_idx_type m1_nc = m1.cols (); \
1345
\
1346
octave_idx_type m2_nr = m2.rows (); \
1347
octave_idx_type m2_nc = m2.cols (); \
1348
\
1349
if (m1_nr == 1 && m1_nc == 1) \
1350
r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \
1351
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1352
{ \
1353
if (m1_nr != 0 || m1_nc != 0) \
1354
{ \
1355
/* Count num of nonzero elements */ \
1356
octave_idx_type nel = 0; \
1357
for (octave_idx_type j = 0; j < m1_nc; j++) \
1358
for (octave_idx_type i = 0; i < m1_nr; i++) \
1359
if ((m1.elem (i, j) != LHS_ZERO) \
1360
OP (m2.elem (i, j) != RHS_ZERO)) \
1361
nel++; \
1362
\
1363
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1364
\
1365
octave_idx_type ii = 0; \
1366
r.cidx (0) = 0; \
1367
for (octave_idx_type j = 0; j < m1_nc; j++) \
1368
{ \
1369
for (octave_idx_type i = 0; i < m1_nr; i++) \
1370
{ \
1371
bool el = (m1.elem (i, j) != LHS_ZERO) \
1372
OP (m2.elem (i, j) != RHS_ZERO); \
1373
if (el) \
1374
{ \
1375
r.data (ii) = el; \
1376
r.ridx (ii++) = i; \
1377
} \
1378
} \
1379
r.cidx (j+1) = ii; \
1380
} \
1381
} \
1382
} \
1383
else \
1384
{ \
1385
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1386
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1387
} \
1388
return r; \
1389
}
1390
1391
#define SPARSE_SMM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \
1392
SPARSE_SMM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \
1393
SPARSE_SMM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO)
1394
1395
#define SPARSE_SMM_BOOL_OPS(M1, M2, ZERO) \
1396
SPARSE_SMM_BOOL_OPS2(M1, M2, ZERO, ZERO)
1397
1398
// Avoid some code duplication. Maybe we should use templates.
1399
1400
#define SPARSE_CUMSUM(RET_TYPE, ELT_TYPE, FCN) \
1401
\
1402
octave_idx_type nr = rows (); \
1403
octave_idx_type nc = cols (); \
1404
\
1405
RET_TYPE retval; \
1406
\
1407
if (nr > 0 && nc > 0) \
1408
{ \
1409
if ((nr == 1 && dim == -1) || dim == 1) \
1410
/* Ugly!! Is there a better way? */ \
1411
retval = transpose (). FCN (0) .transpose (); \
1412
else \
1413
{ \
1414
octave_idx_type nel = 0; \
1415
for (octave_idx_type i = 0; i < nc; i++) \
1416
{ \
1417
ELT_TYPE t = ELT_TYPE (); \
1418
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1419
{ \
1420
t += data (j); \
1421
if (t != ELT_TYPE ()) \
1422
{ \
1423
if (j == cidx (i+1) - 1) \
1424
nel += nr - ridx (j); \
1425
else \
1426
nel += ridx (j+1) - ridx (j); \
1427
} \
1428
} \
1429
} \
1430
retval = RET_TYPE (nr, nc, nel); \
1431
retval.cidx (0) = 0; \
1432
octave_idx_type ii = 0; \
1433
for (octave_idx_type i = 0; i < nc; i++) \
1434
{ \
1435
ELT_TYPE t = ELT_TYPE (); \
1436
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1437
{ \
1438
t += data (j); \
1439
if (t != ELT_TYPE ()) \
1440
{ \
1441
if (j == cidx (i+1) - 1) \
1442
{ \
1443
for (octave_idx_type k = ridx (j); k < nr; k++) \
1444
{ \
1445
retval.data (ii) = t; \
1446
retval.ridx (ii++) = k; \
1447
} \
1448
} \
1449
else \
1450
{ \
1451
for (octave_idx_type k = ridx (j); k < ridx (j+1); k++) \
1452
{ \
1453
retval.data (ii) = t; \
1454
retval.ridx (ii++) = k; \
1455
} \
1456
} \
1457
} \
1458
} \
1459
retval.cidx (i+1) = ii; \
1460
} \
1461
} \
1462
} \
1463
else \
1464
retval = RET_TYPE (nr,nc); \
1465
\
1466
return retval
1467
1468
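// Illustrative note (added commentary, not in the original header):
// SPARSE_CUMSUM above works column-wise in two passes.  The first pass
// walks each column, keeps a running total t of the stored elements and
// counts how many output entries will be nonzero (once t becomes nonzero,
// every row from the current stored element down to the next stored
// element, or to the bottom of the column, contributes an entry).  The
// second pass repeats the walk and actually fills data, ridx and cidx.
// Cumulation along the other dimension is delegated to
// transpose ().FCN (0).transpose (), which is what the "Ugly!!" comment
// refers to.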
1469
#define SPARSE_CUMPROD(RET_TYPE, ELT_TYPE, FCN) \
1470
\
1471
octave_idx_type nr = rows (); \
1472
octave_idx_type nc = cols (); \
1473
\
1474
RET_TYPE retval; \
1475
\
1476
if (nr > 0 && nc > 0) \
1477
{ \
1478
if ((nr == 1 && dim == -1) || dim == 1) \
1479
/* Ugly!! Is there a better way? */ \
1480
retval = transpose (). FCN (0) .transpose (); \
1481
else \
1482
{ \
1483
octave_idx_type nel = 0; \
1484
for (octave_idx_type i = 0; i < nc; i++) \
1485
{ \
1486
octave_idx_type jj = 0; \
1487
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1488
{ \
1489
if (jj == ridx (j)) \
1490
{ \
1491
nel++; \
1492
jj++; \
1493
} \
1494
else \
1495
break; \
1496
} \
1497
} \
1498
retval = RET_TYPE (nr, nc, nel); \
1499
retval.cidx (0) = 0; \
1500
octave_idx_type ii = 0; \
1501
for (octave_idx_type i = 0; i < nc; i++) \
1502
{ \
1503
ELT_TYPE t = ELT_TYPE (1.); \
1504
octave_idx_type jj = 0; \
1505
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1506
{ \
1507
if (jj == ridx (j)) \
1508
{ \
1509
t *= data (j); \
1510
retval.data (ii) = t; \
1511
retval.ridx (ii++) = jj++; \
1512
} \
1513
else \
1514
break; \
1515
} \
1516
retval.cidx (i+1) = ii; \
1517
} \
1518
} \
1519
} \
1520
else \
1521
retval = RET_TYPE (nr,nc); \
1522
\
1523
return retval
1524
1525
#define SPARSE_BASE_REDUCTION_OP(RET_TYPE, EL_TYPE, ROW_EXPR, COL_EXPR, \
1526
INIT_VAL, MT_RESULT) \
1527
\
1528
octave_idx_type nr = rows (); \
1529
octave_idx_type nc = cols (); \
1530
\
1531
RET_TYPE retval; \
1532
\
1533
if (nr > 0 && nc > 0) \
1534
{ \
1535
if ((nr == 1 && dim == -1) || dim == 1) \
1536
{ \
1537
/* Define j here to allow fancy definition for prod method */ \
1538
octave_idx_type j = 0; \
1539
OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nr); \
1540
\
1541
for (octave_idx_type i = 0; i < nr; i++) \
1542
tmp[i] = INIT_VAL; \
1543
for (j = 0; j < nc; j++) \
1544
{ \
1545
for (octave_idx_type i = cidx (j); i < cidx (j + 1); i++) \
1546
{ \
1547
ROW_EXPR; \
1548
} \
1549
} \
1550
octave_idx_type nel = 0; \
1551
for (octave_idx_type i = 0; i < nr; i++) \
1552
if (tmp[i] != EL_TYPE ()) \
1553
nel++ ; \
1554
retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), nel); \
1555
retval.cidx (0) = 0; \
1556
retval.cidx (1) = nel; \
1557
nel = 0; \
1558
for (octave_idx_type i = 0; i < nr; i++) \
1559
if (tmp[i] != EL_TYPE ()) \
1560
{ \
1561
retval.data (nel) = tmp[i]; \
1562
retval.ridx (nel++) = i; \
1563
} \
1564
} \
1565
else \
1566
{ \
1567
OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nc); \
1568
\
1569
for (octave_idx_type j = 0; j < nc; j++) \
1570
{ \
1571
tmp[j] = INIT_VAL; \
1572
for (octave_idx_type i = cidx (j); i < cidx (j + 1); i++) \
1573
{ \
1574
COL_EXPR; \
1575
} \
1576
} \
1577
octave_idx_type nel = 0; \
1578
for (octave_idx_type i = 0; i < nc; i++) \
1579
if (tmp[i] != EL_TYPE ()) \
1580
nel++ ; \
1581
retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, nel); \
1582
retval.cidx (0) = 0; \
1583
nel = 0; \
1584
for (octave_idx_type i = 0; i < nc; i++) \
1585
if (tmp[i] != EL_TYPE ()) \
1586
{ \
1587
retval.data (nel) = tmp[i]; \
1588
retval.ridx (nel++) = 0; \
1589
retval.cidx (i+1) = retval.cidx (i) + 1; \
1590
} \
1591
else \
1592
retval.cidx (i+1) = retval.cidx (i); \
1593
} \
1594
} \
1595
else if (nc == 0 && (nr == 0 || (nr == 1 && dim == -1))) \
1596
{ \
1597
if (MT_RESULT) \
1598
{ \
1599
retval = RET_TYPE (static_cast<octave_idx_type> (1), \
1600
static_cast<octave_idx_type> (1), \
1601
static_cast<octave_idx_type> (1)); \
1602
retval.cidx (0) = 0; \
1603
retval.cidx (1) = 1; \
1604
retval.ridx (0) = 0; \
1605
retval.data (0) = MT_RESULT; \
1606
} \
1607
else \
1608
retval = RET_TYPE (static_cast<octave_idx_type> (1), \
1609
static_cast<octave_idx_type> (1), \
1610
static_cast<octave_idx_type> (0)); \
1611
} \
1612
else if (nr == 0 && (dim == 0 || dim == -1)) \
1613
{ \
1614
if (MT_RESULT) \
1615
{ \
1616
retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, nc); \
1617
retval.cidx (0) = 0; \
1618
for (octave_idx_type i = 0; i < nc ; i++) \
1619
{ \
1620
retval.ridx (i) = 0; \
1621
retval.cidx (i+1) = i+1; \
1622
retval.data (i) = MT_RESULT; \
1623
} \
1624
} \
1625
else \
1626
retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, \
1627
static_cast<octave_idx_type> (0)); \
1628
} \
1629
else if (nc == 0 && dim == 1) \
1630
{ \
1631
if (MT_RESULT) \
1632
{ \
1633
retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), nr); \
1634
retval.cidx (0) = 0; \
1635
retval.cidx (1) = nr; \
1636
for (octave_idx_type i = 0; i < nr; i++) \
1637
{ \
1638
retval.ridx (i) = i; \
1639
retval.data (i) = MT_RESULT; \
1640
} \
1641
} \
1642
else \
1643
retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), \
1644
static_cast<octave_idx_type> (0)); \
1645
} \
1646
else \
1647
retval.resize (nr > 0, nc > 0); \
1648
\
1649
return retval
1650
1651
#define SPARSE_REDUCTION_OP_ROW_EXPR(OP) \
1652
tmp[ridx (i)] OP data (i)
1653
1654
#define SPARSE_REDUCTION_OP_COL_EXPR(OP) \
1655
tmp[j] OP data (i)
1656
1657
#define SPARSE_REDUCTION_OP(RET_TYPE, EL_TYPE, OP, INIT_VAL, MT_RESULT) \
1658
SPARSE_BASE_REDUCTION_OP (RET_TYPE, EL_TYPE, \
1659
SPARSE_REDUCTION_OP_ROW_EXPR (OP), \
1660
SPARSE_REDUCTION_OP_COL_EXPR (OP), \
1661
INIT_VAL, MT_RESULT)
1662
1663
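// Illustrative sketch (added commentary, not part of the original header):
// a reduction such as sum () is expected to be implemented by plugging an
// accumulation expression into the base macro, roughly
//
//   SPARSE_REDUCTION_OP (SparseMatrix, double, +=, 0.0, 0.0)
//
// which accumulates tmp[ridx (i)] += data (i) when reducing along rows and
// tmp[j] += data (i) when reducing along columns, then packs the nonzero
// accumulator entries into the sparse result.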
1664
// Don't break from this loop if the test succeeds because
1665
// we are looping over the rows and not the columns in the inner loop.
1666
#define SPARSE_ANY_ALL_OP_ROW_CODE(TEST_OP, TEST_TRUE_VAL) \
1667
if (data (i) TEST_OP 0.0) \
1668
tmp[ridx (i)] = TEST_TRUE_VAL;
1669
1670
#define SPARSE_ANY_ALL_OP_COL_CODE(TEST_OP, TEST_TRUE_VAL) \
1671
if (data (i) TEST_OP 0.0) \
1672
{ \
1673
tmp[j] = TEST_TRUE_VAL; \
1674
break; \
1675
}
1676
1677
#define SPARSE_ANY_ALL_OP(DIM, INIT_VAL, MT_RESULT, TEST_OP, TEST_TRUE_VAL) \
1678
SPARSE_BASE_REDUCTION_OP (SparseBoolMatrix, char, \
1679
SPARSE_ANY_ALL_OP_ROW_CODE (TEST_OP, TEST_TRUE_VAL), \
1680
SPARSE_ANY_ALL_OP_COL_CODE (TEST_OP, TEST_TRUE_VAL), \
1681
INIT_VAL, MT_RESULT)
1682
1683
#define SPARSE_ALL_OP(DIM) \
1684
if ((rows () == 1 && dim == -1) || dim == 1) \
1685
return transpose (). all (0). transpose (); \
1686
else \
1687
{ \
1688
SPARSE_ANY_ALL_OP (DIM, (cidx (j+1) - cidx (j) < nr ? false : true), \
1689
true, ==, false); \
1690
}
1691
1692
#define SPARSE_ANY_OP(DIM) SPARSE_ANY_ALL_OP (DIM, false, false, !=, true)
1693
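// Illustrative note (added commentary, not in the original header):
// for all (), the INIT_VAL expression (cidx (j+1) - cidx (j) < nr ? false : true)
// encodes the fact that elements not stored in a column are implicit
// zeros: a column with fewer than nr stored entries already contains a
// zero, so its accumulator starts out false, and the test "== 0.0" clears
// the accumulator for explicitly stored zeros as well.  any () simply
// starts from false and sets true on the first nonzero stored element.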
1694
#define SPARSE_SPARSE_MUL(RET_TYPE, RET_EL_TYPE, EL_TYPE) \
1695
octave_idx_type nr = m.rows (); \
1696
octave_idx_type nc = m.cols (); \
1697
\
1698
octave_idx_type a_nr = a.rows (); \
1699
octave_idx_type a_nc = a.cols (); \
1700
\
1701
if (nr == 1 && nc == 1) \
1702
{ \
1703
RET_EL_TYPE s = m.elem (0,0); \
1704
octave_idx_type nz = a.nnz (); \
1705
RET_TYPE r (a_nr, a_nc, nz); \
1706
\
1707
for (octave_idx_type i = 0; i < nz; i++) \
1708
{ \
1709
octave_quit (); \
1710
r.data (i) = s * a.data (i); \
1711
r.ridx (i) = a.ridx (i); \
1712
} \
1713
for (octave_idx_type i = 0; i < a_nc + 1; i++) \
1714
{ \
1715
octave_quit (); \
1716
r.cidx (i) = a.cidx (i); \
1717
} \
1718
\
1719
r.maybe_compress (true); \
1720
return r; \
1721
} \
1722
else if (a_nr == 1 && a_nc == 1) \
1723
{ \
1724
RET_EL_TYPE s = a.elem (0,0); \
1725
octave_idx_type nz = m.nnz (); \
1726
RET_TYPE r (nr, nc, nz); \
1727
\
1728
for (octave_idx_type i = 0; i < nz; i++) \
1729
{ \
1730
octave_quit (); \
1731
r.data (i) = m.data (i) * s; \
1732
r.ridx (i) = m.ridx (i); \
1733
} \
1734
for (octave_idx_type i = 0; i < nc + 1; i++) \
1735
{ \
1736
octave_quit (); \
1737
r.cidx (i) = m.cidx (i); \
1738
} \
1739
\
1740
r.maybe_compress (true); \
1741
return r; \
1742
} \
1743
else if (nc != a_nr) \
1744
{ \
1745
gripe_nonconformant ("operator *", nr, nc, a_nr, a_nc); \
1746
return RET_TYPE (); \
1747
} \
1748
else \
1749
{ \
1750
OCTAVE_LOCAL_BUFFER (octave_idx_type, w, nr); \
1751
RET_TYPE retval (nr, a_nc, static_cast<octave_idx_type> (0)); \
1752
for (octave_idx_type i = 0; i < nr; i++) \
1753
w[i] = 0; \
1754
retval.xcidx (0) = 0; \
1755
\
1756
octave_idx_type nel = 0; \
1757
\
1758
for (octave_idx_type i = 0; i < a_nc; i++) \
1759
{ \
1760
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1761
{ \
1762
octave_idx_type col = a.ridx (j); \
1763
for (octave_idx_type k = m.cidx (col) ; k < m.cidx (col+1); k++) \
1764
{ \
1765
if (w[m.ridx (k)] < i + 1) \
1766
{ \
1767
w[m.ridx (k)] = i + 1; \
1768
nel++; \
1769
} \
1770
octave_quit (); \
1771
} \
1772
} \
1773
retval.xcidx (i+1) = nel; \
1774
} \
1775
\
1776
if (nel == 0) \
1777
return RET_TYPE (nr, a_nc); \
1778
else \
1779
{ \
1780
for (octave_idx_type i = 0; i < nr; i++) \
1781
w[i] = 0; \
1782
\
1783
OCTAVE_LOCAL_BUFFER (RET_EL_TYPE, Xcol, nr); \
1784
\
1785
retval.change_capacity (nel); \
1786
/* The optimal break-point as estimated from simulations */ \
1787
/* Note that Mergesort is O(nz log(nz)) while searching all */ \
1788
/* values is O(nr), where nz here is nonzero per row of */ \
1789
/* length nr. The test itself was then derived from the */ \
1790
/* simulation with random square matrices and the observation */ \
1791
/* of the number of nonzero elements in the output matrix */ \
1792
/* it was found that the breakpoints were */ \
1793
/* nr: 500 1000 2000 5000 10000 */ \
1794
/* nz: 6 25 97 585 2202 */ \
1795
/* The below is a simplification of the 'polyfit'-ed parameters */ \
1796
/* to these breakpoints */ \
1797
octave_idx_type n_per_col = (a_nc > 43000 ? 43000 : \
1798
(a_nc * a_nc) / 43000); \
1799
octave_idx_type ii = 0; \
1800
octave_idx_type *ri = retval.xridx (); \
1801
octave_sort<octave_idx_type> sort; \
1802
\
1803
for (octave_idx_type i = 0; i < a_nc ; i++) \
1804
{ \
1805
if (retval.xcidx (i+1) - retval.xcidx (i) > n_per_col) \
1806
{ \
1807
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1808
{ \
1809
octave_idx_type col = a.ridx (j); \
1810
EL_TYPE tmpval = a.data (j); \
1811
for (octave_idx_type k = m.cidx (col) ; \
1812
k < m.cidx (col+1); k++) \
1813
{ \
1814
octave_quit (); \
1815
octave_idx_type row = m.ridx (k); \
1816
if (w[row] < i + 1) \
1817
{ \
1818
w[row] = i + 1; \
1819
Xcol[row] = tmpval * m.data (k); \
1820
} \
1821
else \
1822
Xcol[row] += tmpval * m.data (k); \
1823
} \
1824
} \
1825
for (octave_idx_type k = 0; k < nr; k++) \
1826
if (w[k] == i + 1) \
1827
{ \
1828
retval.xdata (ii) = Xcol[k]; \
1829
retval.xridx (ii++) = k; \
1830
} \
1831
} \
1832
else \
1833
{ \
1834
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1835
{ \
1836
octave_idx_type col = a.ridx (j); \
1837
EL_TYPE tmpval = a.data (j); \
1838
for (octave_idx_type k = m.cidx (col) ; \
1839
k < m.cidx (col+1); k++) \
1840
{ \
1841
octave_quit (); \
1842
octave_idx_type row = m.ridx (k); \
1843
if (w[row] < i + 1) \
1844
{ \
1845
w[row] = i + 1; \
1846
retval.xridx (ii++) = row;\
1847
Xcol[row] = tmpval * m.data (k); \
1848
} \
1849
else \
1850
Xcol[row] += tmpval * m.data (k); \
1851
} \
1852
} \
1853
sort.sort (ri + retval.xcidx (i), ii - retval.xcidx (i)); \
1854
for (octave_idx_type k = retval.xcidx (i); k < ii; k++) \
1855
retval.xdata (k) = Xcol[retval.xridx (k)]; \
1856
} \
1857
} \
1858
retval.maybe_compress (true);\
1859
return retval; \
1860
} \
1861
}
1862
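// Illustrative note (added commentary, not in the original header):
// SPARSE_SPARSE_MUL above is a classic two-phase sparse matrix product.
// A symbolic phase uses the workspace w to count, column by column of a,
// how many distinct result rows are touched, which fixes cidx and the
// total storage.  The numeric phase then scatters partial products into
// the dense accumulator Xcol; depending on the estimated fill per column
// it either rescans all nr rows in order or records the touched row
// indices and sorts them (the break-point heuristic documented above).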
1863
#define SPARSE_FULL_MUL(RET_TYPE, EL_TYPE, ZERO) \
1864
octave_idx_type nr = m.rows (); \
1865
octave_idx_type nc = m.cols (); \
1866
\
1867
octave_idx_type a_nr = a.rows (); \
1868
octave_idx_type a_nc = a.cols (); \
1869
\
1870
if (nr == 1 && nc == 1) \
1871
{ \
1872
RET_TYPE retval = m.elem (0,0) * a; \
1873
return retval; \
1874
} \
1875
else if (nc != a_nr) \
1876
{ \
1877
gripe_nonconformant ("operator *", nr, nc, a_nr, a_nc); \
1878
return RET_TYPE (); \
1879
} \
1880
else \
1881
{ \
1882
RET_TYPE retval (nr, a_nc, ZERO); \
1883
\
1884
for (octave_idx_type i = 0; i < a_nc ; i++) \
1885
{ \
1886
for (octave_idx_type j = 0; j < a_nr; j++) \
1887
{ \
1888
octave_quit (); \
1889
\
1890
EL_TYPE tmpval = a.elem (j,i); \
1891
for (octave_idx_type k = m.cidx (j) ; k < m.cidx (j+1); k++) \
1892
retval.elem (m.ridx (k),i) += tmpval * m.data (k); \
1893
} \
1894
} \
1895
return retval; \
1896
}
1897
1898
#define SPARSE_FULL_TRANS_MUL(RET_TYPE, EL_TYPE, ZERO, CONJ_OP) \
1899
octave_idx_type nr = m.rows (); \
1900
octave_idx_type nc = m.cols (); \
1901
\
1902
octave_idx_type a_nr = a.rows (); \
1903
octave_idx_type a_nc = a.cols (); \
1904
\
1905
if (nr == 1 && nc == 1) \
1906
{ \
1907
RET_TYPE retval = CONJ_OP (m.elem (0,0)) * a; \
1908
return retval; \
1909
} \
1910
else if (nr != a_nr) \
1911
{ \
1912
gripe_nonconformant ("operator *", nc, nr, a_nr, a_nc); \
1913
return RET_TYPE (); \
1914
} \
1915
else \
1916
{ \
1917
RET_TYPE retval (nc, a_nc); \
1918
\
1919
for (octave_idx_type i = 0; i < a_nc ; i++) \
1920
{ \
1921
for (octave_idx_type j = 0; j < nc; j++) \
1922
{ \
1923
octave_quit (); \
1924
\
1925
EL_TYPE acc = ZERO; \
1926
for (octave_idx_type k = m.cidx (j) ; k < m.cidx (j+1); k++) \
1927
acc += a.elem (m.ridx (k),i) * CONJ_OP (m.data (k)); \
1928
retval.xelem (j,i) = acc; \
1929
} \
1930
} \
1931
return retval; \
1932
}
1933
1934
#define FULL_SPARSE_MUL(RET_TYPE, EL_TYPE, ZERO) \
1935
octave_idx_type nr = m.rows (); \
1936
octave_idx_type nc = m.cols (); \
1937
\
1938
octave_idx_type a_nr = a.rows (); \
1939
octave_idx_type a_nc = a.cols (); \
1940
\
1941
if (a_nr == 1 && a_nc == 1) \
1942
{ \
1943
RET_TYPE retval = m * a.elem (0,0); \
1944
return retval; \
1945
} \
1946
else if (nc != a_nr) \
1947
{ \
1948
gripe_nonconformant ("operator *", nr, nc, a_nr, a_nc); \
1949
return RET_TYPE (); \
1950
} \
1951
else \
1952
{ \
1953
RET_TYPE retval (nr, a_nc, ZERO); \
1954
\
1955
for (octave_idx_type i = 0; i < a_nc ; i++) \
1956
{ \
1957
octave_quit (); \
1958
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1959
{ \
1960
octave_idx_type col = a.ridx (j); \
1961
EL_TYPE tmpval = a.data (j); \
1962
\
1963
for (octave_idx_type k = 0 ; k < nr; k++) \
1964
retval.xelem (k,i) += tmpval * m.elem (k,col); \
1965
} \
1966
} \
1967
return retval; \
1968
}
1969
1970
#define FULL_SPARSE_MUL_TRANS(RET_TYPE, EL_TYPE, ZERO, CONJ_OP) \
1971
octave_idx_type nr = m.rows (); \
1972
octave_idx_type nc = m.cols (); \
1973
\
1974
octave_idx_type a_nr = a.rows (); \
1975
octave_idx_type a_nc = a.cols (); \
1976
\
1977
if (a_nr == 1 && a_nc == 1) \
1978
{ \
1979
RET_TYPE retval = m * CONJ_OP (a.elem (0,0)); \
1980
return retval; \
1981
} \
1982
else if (nc != a_nc) \
1983
{ \
1984
gripe_nonconformant ("operator *", nr, nc, a_nc, a_nr); \
1985
return RET_TYPE (); \
1986
} \
1987
else \
1988
{ \
1989
RET_TYPE retval (nr, a_nr, ZERO); \
1990
\
1991
for (octave_idx_type i = 0; i < a_nc ; i++) \
1992
{ \
1993
octave_quit (); \
1994
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1995
{ \
1996
octave_idx_type col = a.ridx (j); \
1997
EL_TYPE tmpval = CONJ_OP (a.data (j)); \
1998
for (octave_idx_type k = 0 ; k < nr; k++) \
1999
retval.xelem (k,col) += tmpval * m.elem (k,i); \
2000
} \
2001
} \
2002
return retval; \
2003
}
2004
2005
#endif