#include <sys/types.h>
#include <sys/times.h>

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif

#if ! defined (CLOCKS_PER_SEC)
#if defined (CLK_TCK)
#define CLOCKS_PER_SEC CLK_TCK
#else
#error "no definition for CLOCKS_PER_SEC!"
#endif
#endif

#if ! defined (HAVE_HYPOTF) && defined (HAVE__HYPOTF)
#define hypotf _hypotf
#endif
#define ANY_ALL(FCN) \
  octave_value retval; \
  int nargin = args.length (); \
  if (nargin == 1 || nargin == 2) \
      int dim = (nargin == 1 ? -1 : args(1).int_value (true) - 1); \
        retval = args(0).FCN (dim); \
        error (#FCN ": invalid dimension argument = %d", dim + 1); \
    error (#FCN ": expecting dimension argument to be an integer"); \
@deftypefn  {Built-in Function} {} all (@var{x})\n\
@deftypefnx {Built-in Function} {} all (@var{x}, @var{dim})\n\
For a vector argument, return true (logical 1) if all elements of the vector\n\
are nonzero.\n\
For a matrix argument, return a row vector of logical ones and\n\
zeros with each element indicating whether all of the elements of the\n\
corresponding column of the matrix are nonzero.  For example:\n\
all ([2, 3; 1, 0])\n\
    @result{} [ 1, 0 ]\n\
If the optional argument @var{dim} is supplied, work along dimension\n\
@var{dim}.\n\
@deftypefn  {Built-in Function} {} any (@var{x})\n\
@deftypefnx {Built-in Function} {} any (@var{x}, @var{dim})\n\
For a vector argument, return true (logical 1) if any element of the vector\n\
is nonzero.\n\
For a matrix argument, return a row vector of logical ones and\n\
zeros with each element indicating whether any of the elements of the\n\
corresponding column of the matrix are nonzero.  For example:\n\
    @result{} [ 1, 1, 0, 0 ]\n\
If the optional argument @var{dim} is supplied, work along dimension\n\
@var{dim}.  For example:\n\
any (eye (2, 4), 2)\n\
    @result{} [ 1; 1 ]\n\
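// Illustrative sketch (for exposition only; names and the flat row-major
// layout are assumptions, not Octave's internal representation): the
// ANY_ALL dispatch above ultimately reduces a logical array along one
// dimension.  The idea of such a reduction on a plain buffer looks like:

#include <cstddef>
#include <vector>

static std::vector<bool>
all_along_dim_sketch (const std::vector<bool>& data,
                      const std::vector<std::size_t>& dims, std::size_t dim)
{
  // Split the index space into [outer][dims[dim]][inner] blocks.
  std::size_t inner = 1, outer = 1;
  for (std::size_t k = dim + 1; k < dims.size (); k++)
    inner *= dims[k];
  for (std::size_t k = 0; k < dim; k++)
    outer *= dims[k];

  std::vector<bool> result (outer * inner, true);

  for (std::size_t o = 0; o < outer; o++)
    for (std::size_t d = 0; d < dims[dim]; d++)
      for (std::size_t i = 0; i < inner; i++)
        {
          std::size_t src = (o * dims[dim] + d) * inner + i;
          if (! data[src])
            result[o * inner + i] = false;   // one zero element clears "all"
        }

  return result;
}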
@deftypefn {Mapping Function} {} atan2 (@var{y}, @var{x})\n\
Compute atan (@var{y} / @var{x}) for corresponding elements of @var{y}\n\
and @var{x}.\n\
@var{y} and @var{x} must match in size and orientation.\n\
@seealso{tan, tand, tanh, atanh}\n\
  int nargin = args.length ();

  if (! args(0).is_numeric_type ())
  else if (! args(1).is_numeric_type ())
  else if (args(0).is_complex_type () || args(1).is_complex_type ())
    error ("atan2: not defined for complex numbers");
  else if (args(0).is_single_type () || args(1).is_single_type ())
      if (args(0).is_scalar_type () && args(1).is_scalar_type ())
        retval = atan2f (args(0).float_value (), args(1).float_value ());
        retval = binmap<float> (a0, a1, ::atan2f, "atan2");
      if (args(0).is_scalar_type () && args(1).is_scalar_type ())
        retval = atan2 (args(0).scalar_value (), args(1).scalar_value ());
      else if (args(0).is_sparse_type ())
          retval = binmap<double> (m0, m1, ::atan2, "atan2");
          NDArray a0 = args(0).array_value ();
          NDArray a1 = args(1).array_value ();
          retval = binmap<double> (a0, a1, ::atan2, "atan2");
      retval = binmap<float> (a0, a1, ::hypotf, "hypot");
      retval = binmap<double> (m0, m1, ::hypot, "hypot");
      retval = binmap<double> (a0, a1, ::hypot, "hypot");
DEFUN (hypot, args, ,
@deftypefn  {Built-in Function} {} hypot (@var{x}, @var{y})\n\
@deftypefnx {Built-in Function} {} hypot (@var{x}, @var{y}, @var{z}, @dots{})\n\
Compute the element-by-element square root of the sum of the squares of\n\
@var{x} and @var{y}.\n\
This is equivalent to\n\
@code{sqrt (@var{x}.^2 + @var{y}.^2)}, but is calculated in a manner that\n\
avoids overflows for large values of @var{x} or @var{y}.\n\
@code{hypot} can also be called with more than 2 arguments; in this case,\n\
the arguments are accumulated from left to right:\n\
hypot (hypot (@var{x}, @var{y}), @var{z})\n\
hypot (hypot (hypot (@var{x}, @var{y}), @var{z}), @var{w}), etc.\n\
  int nargin = args.length ();

      retval = do_hypot (args(0), args(1));
  else if (nargin >= 3)
        retval = do_hypot (retval, args(i));
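// Illustrative sketch (for exposition only): the docstring above notes that
// hypot is computed "in a manner that avoids overflows".  One standard way
// to do that is to factor out the larger magnitude before squaring; the
// code in this file simply maps ::hypot / ::hypotf over the arguments.

#include <cmath>
#include <utility>

static double
hypot_sketch (double x, double y)
{
  x = std::fabs (x);
  y = std::fabs (y);

  if (x < y)
    std::swap (x, y);          // ensure x >= y >= 0

  if (x == 0.0)
    return 0.0;                // both arguments are zero

  double r = y / x;            // r <= 1, so r*r cannot overflow
  return x * std::sqrt (1.0 + r * r);
}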
template <typename T, typename ET>
DEFUN (log2, args, nargout,
@deftypefn  {Mapping Function} {} log2 (@var{x})\n\
@deftypefnx {Mapping Function} {[@var{f}, @var{e}] =} log2 (@var{x})\n\
Compute the base-2 logarithm of each element of @var{x}.\n\
If called with two output arguments, split @var{x} into\n\
binary mantissa and exponent so that\n\
${1 \\over 2} \\le \\left| f \\right| < 1$\n\
@code{1/2 <= abs(f) < 1}\n\
and @var{e} is an integer.  If\n\
$x = 0$, $f = e = 0$.\n\
@code{x = 0}, @code{f = e = 0}.\n\
@seealso{pow2, log, log10, exp}\n\
  if (args.length () == 1)
        retval(0) = args(0).log2 ();
      else if (args(0).is_single_type ())
          if (args(0).is_real_type ())
          else if (args(0).is_complex_type ())
      else if (args(0).is_real_type ())
      else if (args(0).is_complex_type ())
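// Illustrative sketch (for exposition only): the two-output form of log2
// documented above splits x into f and e with x = f * 2^e and
// 1/2 <= |f| < 1, which for real scalars is exactly what std::frexp
// computes (and frexp already yields f = 0, e = 0 when x is zero).

#include <cmath>

static void
log2_split_sketch (double x, double& f, int& e)
{
  f = std::frexp (x, &e);   // x == f * 2^e, |f| in [0.5, 1) for nonzero x
}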
@deftypefn {Mapping Function} {} rem (@var{x}, @var{y})\n\
Return the remainder of the division @code{@var{x} / @var{y}}.\n\
The remainder is computed using the expression\n\
x - y .* fix (x ./ y)\n\
An error message is printed if the dimensions of the arguments do not agree,\n\
or if either of the arguments is complex.\n\
  int nargin = args.length ();

  if (! args(0).is_numeric_type ())
  else if (! args(1).is_numeric_type ())
  else if (args(0).is_complex_type () || args(1).is_complex_type ())
    error ("rem: not defined for complex numbers");
  else if (args(0).is_integer_type () || args(1).is_integer_type ())

#define MAKE_INT_BRANCH(X) \
          X##NDArray a0 = args(0).X##_array_value (); \
          X##NDArray a1 = args(1).X##_array_value (); \
          retval = binmap<octave_##X,octave_##X,octave_##X> (a0, a1, rem, "rem"); \

#undef MAKE_INT_BRANCH

        error ("rem: cannot combine %s and %s",
               args(0).class_name ().c_str (),
               args(1).class_name ().c_str ());
  else if (args(0).is_single_type () || args(1).is_single_type ())
      if (args(0).is_scalar_type () && args(1).is_scalar_type ())
        retval = xrem (args(0).float_value (), args(1).float_value ());
        retval = binmap<float> (a0, a1, xrem<float>, "rem");
      if (args(0).is_scalar_type () && args(1).is_scalar_type ())
        retval = xrem (args(0).scalar_value (), args(1).scalar_value ());
      else if (args(0).is_sparse_type () || args(1).is_sparse_type ())
          retval = binmap<double> (m0, m1, xrem<double>, "rem");
          NDArray a0 = args(0).array_value ();
          NDArray a1 = args(1).array_value ();
          retval = binmap<double> (a0, a1, xrem<double>, "rem");
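// Illustrative sketch (for exposition only; Octave's xrem helper may handle
// edge cases differently): the rem docstring defines the result as
// x - y .* fix (x ./ y), where fix truncates toward zero.  A scalar version
// of that formula is simply:

#include <cmath>

static double
rem_sketch (double x, double y)
{
  // With y == 0 this yields NaN in IEEE arithmetic (x - 0 * Inf).
  return x - y * std::trunc (x / y);
}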
@deftypefn {Mapping Function} {} mod (@var{x}, @var{y})\n\
Compute the modulo of @var{x} and @var{y}.\n\
Conceptually this is given by\n\
x - y .* floor (x ./ y)\n\
and is written such that the correct modulus is returned for integer types.\n\
This function handles negative values correctly.  That is,\n\
@code{mod (-1, 3)} is 2, not -1, as @code{rem (-1, 3)} returns.\n\
@code{mod (@var{x}, 0)} returns @var{x}.\n\
An error results if the dimensions of the arguments do not agree, or if\n\
either of the arguments is complex.\n\
  int nargin = args.length ();

  if (! args(0).is_numeric_type ())
  else if (! args(1).is_numeric_type ())
  else if (args(0).is_complex_type () || args(1).is_complex_type ())
    error ("mod: not defined for complex numbers");
  else if (args(0).is_integer_type () || args(1).is_integer_type ())

#define MAKE_INT_BRANCH(X) \
          X##NDArray a0 = args(0).X##_array_value (); \
          X##NDArray a1 = args(1).X##_array_value (); \
          retval = binmap<octave_##X,octave_##X,octave_##X> (a0, a1, mod, "mod"); \

#undef MAKE_INT_BRANCH

        error ("mod: cannot combine %s and %s",
               args(0).class_name ().c_str (),
               args(1).class_name ().c_str ());
  else if (args(0).is_single_type () || args(1).is_single_type ())
      if (args(0).is_scalar_type () && args(1).is_scalar_type ())
        retval = xmod (args(0).float_value (), args(1).float_value ());
        retval = binmap<float> (a0, a1, xmod<float>, "mod");
      if (args(0).is_scalar_type () && args(1).is_scalar_type ())
        retval = xmod (args(0).scalar_value (), args(1).scalar_value ());
      else if (args(0).is_sparse_type () || args(1).is_sparse_type ())
          retval = binmap<double> (m0, m1, xmod<double>, "mod");
          NDArray a0 = args(0).array_value ();
          NDArray a1 = args(1).array_value ();
          retval = binmap<double> (a0, a1, xmod<double>, "mod");
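// Illustrative sketch (for exposition only; Octave's xmod helper may cover
// further edge cases): per the docstring, mod is x - y .* floor (x ./ y),
// with the documented special case mod (x, 0) == x.  This is what makes
// mod (-1, 3) equal to 2 while rem (-1, 3) is -1.

#include <cmath>

static double
mod_sketch (double x, double y)
{
  if (y == 0.0)
    return x;                          // documented: mod (x, 0) returns x
  return x - y * std::floor (x / y);
}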
#define NATIVE_REDUCTION_1(FCN, TYPE, DIM) \
  (arg.is_ ## TYPE ## _type ()) \
      TYPE ## NDArray tmp = arg. TYPE ##_array_value (); \
      retval = tmp.FCN (DIM); \
#define NATIVE_REDUCTION(FCN, BOOL_FCN) \
  octave_value retval; \
  int nargin = args.length (); \
  bool isnative = false; \
  bool isdouble = false; \
  if (nargin > 1 && args(nargin - 1).is_string ()) \
      std::string str = args(nargin - 1).string_value (); \
      if (str == "native") \
      else if (str == "double") \
        error ("sum: unrecognized string argument"); \
  if (nargin == 1 || nargin == 2) \
      octave_value arg = args(0); \
      int dim = (nargin == 1 ? -1 : args(1).int_value (true) - 1); \
          if (arg.is_sparse_type ()) \
              if (arg.is_real_type ()) \
                  SparseMatrix tmp = arg.sparse_matrix_value (); \
                    retval = tmp.FCN (dim); \
                  SparseComplexMatrix tmp \
                    = arg.sparse_complex_matrix_value (); \
                    retval = tmp.FCN (dim); \
              if NATIVE_REDUCTION_1 (FCN, uint8, dim) \
              else if NATIVE_REDUCTION_1 (FCN, uint16, dim) \
              else if NATIVE_REDUCTION_1 (FCN, uint32, dim) \
              else if NATIVE_REDUCTION_1 (FCN, uint64, dim) \
              else if NATIVE_REDUCTION_1 (FCN, int8, dim) \
              else if NATIVE_REDUCTION_1 (FCN, int16, dim) \
              else if NATIVE_REDUCTION_1 (FCN, int32, dim) \
              else if NATIVE_REDUCTION_1 (FCN, int64, dim) \
              else if (arg.is_bool_type ()) \
                  boolNDArray tmp = arg.bool_array_value (); \
                    retval = boolNDArray (tmp.BOOL_FCN (dim)); \
              else if (arg.is_char_matrix ()) \
                  error (#FCN ": invalid char type"); \
              else if (!isdouble && arg.is_single_type ()) \
                  if (arg.is_complex_type ()) \
                      FloatComplexNDArray tmp = \
                        arg.float_complex_array_value (); \
                        retval = tmp.FCN (dim); \
                  else if (arg.is_real_type ()) \
                      FloatNDArray tmp = arg.float_array_value (); \
                        retval = tmp.FCN (dim); \
              else if (arg.is_complex_type ()) \
                  ComplexNDArray tmp = arg.complex_array_value (); \
                    retval = tmp.FCN (dim); \
              else if (arg.is_real_type ()) \
                  NDArray tmp = arg.array_value (); \
                  if (! error_state) \
                    retval = tmp.FCN (dim); \
                gripe_wrong_type_arg (#FCN, arg); \
          else if (arg.is_bool_type ()) \
              boolNDArray tmp = arg.bool_array_value (); \
              if (! error_state) \
                retval = tmp.FCN (dim); \
          else if (!isdouble && arg.is_single_type ()) \
              if (arg.is_real_type ()) \
                  FloatNDArray tmp = arg.float_array_value (); \
                  if (! error_state) \
                    retval = tmp.FCN (dim); \
              else if (arg.is_complex_type ()) \
                  FloatComplexNDArray tmp = \
                    arg.float_complex_array_value (); \
                  if (! error_state) \
                    retval = tmp.FCN (dim); \
          else if (arg.is_real_type ()) \
              NDArray tmp = arg.array_value (); \
              if (! error_state) \
                retval = tmp.FCN (dim); \
          else if (arg.is_complex_type ()) \
              ComplexNDArray tmp = arg.complex_array_value (); \
              if (! error_state) \
                retval = tmp.FCN (dim); \
            gripe_wrong_type_arg (#FCN, arg); \
    error (#FCN ": invalid dimension argument = %d", dim + 1); \
#define DATA_REDUCTION(FCN) \
  octave_value retval; \
  int nargin = args.length (); \
  if (nargin == 1 || nargin == 2) \
      octave_value arg = args(0); \
      int dim = (nargin == 1 ? -1 : args(1).int_value (true) - 1); \
      if (! error_state) \
          if (arg.is_real_type ()) \
              if (arg.is_sparse_type ()) \
                  SparseMatrix tmp = arg.sparse_matrix_value (); \
                  if (! error_state) \
                    retval = tmp.FCN (dim); \
              else if (arg.is_single_type ()) \
                  FloatNDArray tmp = arg.float_array_value (); \
                  if (! error_state) \
                    retval = tmp.FCN (dim); \
                  NDArray tmp = arg.array_value (); \
                  if (! error_state) \
                    retval = tmp.FCN (dim); \
          else if (arg.is_complex_type ()) \
              if (arg.is_sparse_type ()) \
                  SparseComplexMatrix tmp = arg.sparse_complex_matrix_value (); \
                  if (! error_state) \
                    retval = tmp.FCN (dim); \
              else if (arg.is_single_type ()) \
                  FloatComplexNDArray tmp \
                    = arg.float_complex_array_value (); \
                  if (! error_state) \
                    retval = tmp.FCN (dim); \
                  ComplexNDArray tmp = arg.complex_array_value (); \
                  if (! error_state) \
                    retval = tmp.FCN (dim); \
            gripe_wrong_type_arg (#FCN, arg); \
    error (#FCN ": invalid dimension argument = %d", dim + 1); \
DEFUN (cumprod, args, ,
@deftypefn  {Built-in Function} {} cumprod (@var{x})\n\
@deftypefnx {Built-in Function} {} cumprod (@var{x}, @var{dim})\n\
Cumulative product of elements along dimension @var{dim}.\n\
If @var{dim} is omitted, it defaults to the first non-singleton dimension.\n\
@seealso{prod, cumsum}\n\
DEFUN (cumsum, args, ,
@deftypefn  {Built-in Function} {} cumsum (@var{x})\n\
@deftypefnx {Built-in Function} {} cumsum (@var{x}, @var{dim})\n\
@deftypefnx {Built-in Function} {} cumsum (@dots{}, \"native\")\n\
@deftypefnx {Built-in Function} {} cumsum (@dots{}, \"double\")\n\
@deftypefnx {Built-in Function} {} cumsum (@dots{}, \"extra\")\n\
Cumulative sum of elements along dimension @var{dim}.\n\
If @var{dim} is omitted, it defaults to the first non-singleton dimension.\n\
See @code{sum} for an explanation of the optional parameters\n\
@qcode{\"native\"}, @qcode{\"double\"}, and @qcode{\"extra\"}.\n\
@seealso{sum, cumprod}\n\

  int nargin = args.length ();

  bool isnative = false;
  bool isdouble = false;

  if (nargin > 1 && args(nargin - 1).is_string ())
      std::string str = args(nargin - 1).string_value ();
      if (str == "native")
      else if (str == "double")
        error ("cumsum: unrecognized string argument");

  if (nargin == 1 || nargin == 2)
      dim = args(1).int_value () - 1;
        error ("cumsum: invalid dimension argument = %d", dim + 1);

#define MAKE_INT_BRANCH(X) \
      retval = arg.X ## _array_value ().cumsum (dim); \
      retval = arg.array_value ().cumsum (dim); \

#undef MAKE_INT_BRANCH
DEFUN (diag, args, ,
@deftypefn  {Built-in Function} {@var{M} =} diag (@var{v})\n\
@deftypefnx {Built-in Function} {@var{M} =} diag (@var{v}, @var{k})\n\
@deftypefnx {Built-in Function} {@var{M} =} diag (@var{v}, @var{m}, @var{n})\n\
@deftypefnx {Built-in Function} {@var{v} =} diag (@var{M})\n\
@deftypefnx {Built-in Function} {@var{v} =} diag (@var{M}, @var{k})\n\
Return a diagonal matrix with vector @var{v} on diagonal @var{k}.\n\
The second argument is optional.  If it is positive, the vector is placed on\n\
the @var{k}-th superdiagonal.  If it is negative, it is placed on the\n\
@var{-k}-th subdiagonal.  The default value of @var{k} is 0, and the vector\n\
is placed on the main diagonal.  For example:\n\
diag ([1, 2, 3], 1)\n\
   @result{}  0  1  0  0\n\
The 3-input form returns a diagonal matrix with vector @var{v} on the main\n\
diagonal and the resulting matrix being of size @var{m} rows x @var{n}\n\
columns.\n\
Given a matrix argument, instead of a vector, @code{diag} extracts the\n\
@var{k}-th diagonal of the matrix.\n\

  int nargin = args.length ();

  if (nargin == 1 && args(0).is_defined ())
    retval = args(0).diag ();
  else if (nargin == 2 && args(0).is_defined () && args(1).is_defined ())
        error ("diag: invalid argument K");
        retval = args(0).diag (k);
  else if (nargin == 3)
            retval = arg0.diag (m, n);
            error ("diag: invalid dimensions");
        error ("diag: V must be a vector");
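// Illustrative sketch (for exposition only; the real code delegates to
// octave_value::diag): conceptually, diag (v, k) places v on the k-th
// diagonal of an otherwise zero square matrix (k > 0 selects a
// superdiagonal, k < 0 a subdiagonal), which is just index arithmetic:

#include <cstddef>
#include <vector>

static std::vector<double>
diag_sketch (const std::vector<double>& v, int k)
{
  std::size_t shift = static_cast<std::size_t> (k < 0 ? -k : k);
  std::size_t n = v.size () + shift;       // result is n-by-n
  std::vector<double> m (n * n, 0.0);      // row-major zero matrix

  for (std::size_t i = 0; i < v.size (); i++)
    {
      std::size_t r = (k < 0 ? i + shift : i);
      std::size_t c = (k > 0 ? i + shift : i);
      m[r * n + c] = v[i];                 // e.g. diag ([1,2,3], 1) puts 1 at (0,1)
    }

  return m;
}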
DEFUN (prod, args, ,
@deftypefn  {Built-in Function} {} prod (@var{x})\n\
@deftypefnx {Built-in Function} {} prod (@var{x}, @var{dim})\n\
@deftypefnx {Built-in Function} {} prod (@dots{}, \"native\")\n\
@deftypefnx {Built-in Function} {} prod (@dots{}, \"double\")\n\
Product of elements along dimension @var{dim}.\n\
If @var{dim} is omitted, it defaults to the first non-singleton dimension.\n\
The optional @qcode{\"type\"} input determines the class of the variable\n\
used for calculations.  If the argument @qcode{\"native\"} is given, then\n\
the operation is performed in the same type as the original argument, rather\n\
than the default double type.\n\
prod ([true, true])\n\
prod ([true, true], \"native\")\n\
On the contrary, if @qcode{\"double\"} is given, the operation is performed\n\
in double precision even for single precision inputs.\n\
@seealso{cumprod, sum}\n\

  int nargin = args.length ();

  bool isnative = false;
  bool isdouble = false;

  if (nargin > 1 && args(nargin - 1).is_string ())
      std::string str = args(nargin - 1).string_value ();
      if (str == "native")
      else if (str == "double")
        error ("prod: unrecognized type argument '%s'", str.c_str ());

  if (nargin == 1 || nargin == 2)
      dim = args(1).int_value () - 1;
        error ("prod: invalid dimension DIM = %d", dim + 1);

#define MAKE_INT_BRANCH(X) \
      retval = arg.X ## _array_value ().prod (dim); \
      retval = arg.array_value ().prod (dim); \

#undef MAKE_INT_BRANCH
  int n_args = args.length ();
  for (int i = 0; i < n_args; i++)
    if (args(i).numel () != 1)

template <class TYPE, class T>
  int n_args = args.length ();
  if (dim == -1 || dim == -2)
        result(j) = octave_value_extract<T> (args(j));
        array_list[j] = octave_value_extract<TYPE> (args(j));

template <class TYPE, class T>
  int n_args = args.length ();
      sparse_list[j] = octave_value_extract<TYPE> (args(j));

template <class TYPE>
  single_type_concat<TYPE, typename TYPE::element_type> (result, args, dim);

  int n_args = args.length ();
      map_list[j] = octave_value_extract<MAP> (args(j));

    single_type_concat_map<octave_scalar_map> (result, args, dim);
    single_type_concat_map<octave_map> (result, args, dim);
        error ("conversion from %s to %s failed", dtype.c_str (),
        error ("%s constructor failed for %s argument", dtype.c_str (),
    error ("no constructor for %s!", dtype.c_str ());
        error ("%s/%s method did not return a value",
               dtype.c_str (), cattype.c_str ());
  if (t1_type == dtype)
      std::string cname = tmp(0).class_name ();
      std::list<std::string> parents = tmp(0).parent_class_name_list ();

  int n_args = args.length ();
  else if (n_args == 1)
  else if (n_args > 1)
      std::string result_type;

      bool all_strings_p = true;
      bool all_sq_strings_p = true;
      bool all_dq_strings_p = true;
      bool all_real_p = true;
      bool all_cmplx_p = true;
      bool any_sparse_p = false;
      bool any_cell_p = false;
      bool any_class_p = false;

      bool first_elem_is_struct = false;

      for (int i = 0; i < n_args; i++)
              result_type = args(i).class_name ();
              first_elem_is_struct = args(i).is_map ();
          if (all_strings_p && ! args(i).is_string ())
            all_strings_p = false;
          if (all_sq_strings_p && ! args(i).is_sq_string ())
            all_sq_strings_p = false;
          if (all_dq_strings_p && ! args(i).is_dq_string ())
            all_dq_strings_p = false;
          if (all_real_p && ! args(i).is_real_type ())
          if (all_cmplx_p && ! (args(i).is_complex_type ()
                                || args(i).is_real_type ()))
            all_cmplx_p = false;
          if (!any_sparse_p && args(i).is_sparse_type ())
            any_sparse_p = true;
          if (!any_cell_p && args(i).is_cell ())
          if (!any_class_p && args(i).is_object ())

      if (any_cell_p && ! any_class_p && ! first_elem_is_struct)
          for (int i = 0; i < n_args; i++)
              if (! args(i).is_cell ())
                args(i) = Cell (args(i));

      else if (result_type == "double")
              retval = do_single_type_concat<SparseMatrix> (args, dim);
              retval = do_single_type_concat<SparseComplexMatrix> (args, dim);
              retval = do_single_type_concat<NDArray> (args, dim);
              retval = do_single_type_concat<ComplexNDArray> (args, dim);
      else if (result_type == "single")
          retval = do_single_type_concat<FloatNDArray> (args, dim);
          retval = do_single_type_concat<FloatComplexNDArray> (args, dim);
      else if (result_type == "char")
          char type = all_dq_strings_p ? '"' : '\'';

          if (! all_strings_p)
                                       "numeric", result_type);

          charNDArray result = do_single_type_concat<charNDArray> (args, dim);
      else if (result_type == "logical")
            retval = do_single_type_concat<SparseBoolMatrix> (args, dim);
            retval = do_single_type_concat<boolNDArray> (args, dim);
      else if (result_type == "int8")
        retval = do_single_type_concat<int8NDArray> (args, dim);
      else if (result_type == "int16")
        retval = do_single_type_concat<int16NDArray> (args, dim);
      else if (result_type == "int32")
        retval = do_single_type_concat<int32NDArray> (args, dim);
      else if (result_type == "int64")
        retval = do_single_type_concat<int64NDArray> (args, dim);
      else if (result_type == "uint8")
        retval = do_single_type_concat<uint8NDArray> (args, dim);
      else if (result_type == "uint16")
        retval = do_single_type_concat<uint16NDArray> (args, dim);
      else if (result_type == "uint32")
        retval = do_single_type_concat<uint32NDArray> (args, dim);
      else if (result_type == "uint64")
        retval = do_single_type_concat<uint64NDArray> (args, dim);
      else if (result_type == "cell")
        retval = do_single_type_concat<Cell> (args, dim);
      else if (result_type == "struct")
  if (dim == -1 || dim == -2)
      for (int i = 1; i < args.length (); i++)
          if (! (dv.*concat_rule) (args(i).dims (), dim))
            error ("cat: dimension mismatch");

  int dv_len = dv.length ();

  for (int j = 0; j < n_args; j++)
            error ("%s: indexing error", fname.c_str ());
DEFUN (horzcat, args, ,
@deftypefn {Built-in Function} {} horzcat (@var{array1}, @var{array2}, @dots{}, @var{arrayN})\n\
Return the horizontal concatenation of N-D array objects, @var{array1},\n\
@var{array2}, @dots{}, @var{arrayN} along dimension 2.\n\
Arrays may also be concatenated horizontally using the syntax for creating\n\
new matrices.  For example:\n\
@var{hcat} = [ @var{array1}, @var{array2}, @dots{} ]\n\
@seealso{cat, vertcat}\n\
  return do_cat (args, -2, "horzcat");

DEFUN (vertcat, args, ,
@deftypefn {Built-in Function} {} vertcat (@var{array1}, @var{array2}, @dots{}, @var{arrayN})\n\
Return the vertical concatenation of N-D array objects, @var{array1},\n\
@var{array2}, @dots{}, @var{arrayN} along dimension 1.\n\
Arrays may also be concatenated vertically using the syntax for creating\n\
new matrices.  For example:\n\
@var{vcat} = [ @var{array1}; @var{array2}; @dots{} ]\n\
@seealso{cat, horzcat}\n\
  return do_cat (args, -1, "vertcat");
@deftypefn {Built-in Function} {} cat (@var{dim}, @var{array1}, @var{array2}, @dots{}, @var{arrayN})\n\
Return the concatenation of N-D array objects, @var{array1},\n\
@var{array2}, @dots{}, @var{arrayN} along dimension @var{dim}.\n\
B = zeros (2, 2);\n\
   @result{} 1 1 0 0\n\
Alternatively, we can concatenate @var{A} and @var{B} along the\n\
second dimension in the following way:\n\
@var{dim} can be larger than the dimensions of the N-D array objects\n\
and the result will thus have @var{dim} dimensions as the\n\
following example shows:\n\
cat (4, ones (2, 2), zeros (2, 2))\n\
   @result{} ans(:,:,1,1) =\n\
@seealso{horzcat, vertcat}\n\

  if (args.length () > 0)
          retval = do_cat (args.slice (1, args.length () - 1), dim, "cat");
          error ("cat: DIM must be a valid dimension");
        error ("cat: DIM must be an integer");
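// Illustrative sketch (for exposition only; the real code uses dim_vector's
// concatenation rules): when concatenating along dimension dim, every other
// dimension must agree (missing trailing dimensions count as 1) and the
// dim-th sizes add up.  As the docstring notes, dim may exceed the number
// of dimensions of the inputs.  Size bookkeeping only, zero-based dim:

#include <algorithm>
#include <cstddef>
#include <stdexcept>
#include <vector>

static std::vector<std::size_t>
cat_dims_sketch (std::vector<std::size_t> a, std::vector<std::size_t> b,
                 std::size_t dim)
{
  std::size_t nd = std::max (std::max (a.size (), b.size ()), dim + 1);
  a.resize (nd, 1);                    // pad with trailing singletons
  b.resize (nd, 1);

  for (std::size_t k = 0; k < nd; k++)
    if (k != dim && a[k] != b[k])
      throw std::invalid_argument ("cat: dimension mismatch");

  a[dim] += b[dim];                    // sizes along dim accumulate
  return a;
}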
  if (args.length () == 2 && args(1).length () >= args(1).ndims ())
      Array<int> vec = args(1).int_vector_value ();
      for (int i = 0; i < n; i++)
DEFUN (permute, args, ,
@deftypefn {Built-in Function} {} permute (@var{A}, @var{perm})\n\
Return the generalized transpose for an N-D array object @var{A}.\n\
The permutation vector @var{perm} must contain the elements\n\
@code{1:ndims (A)} (in any order, but each element must appear only once).\n\
The @var{N}th dimension of @var{A} gets remapped to dimension\n\
@code{@var{PERM}(@var{N})}.  For example:\n\
@var{x} = zeros ([2, 3, 5, 7]);\n\
   @result{}  2  3  5  7\n\
size (permute (@var{x}, [2, 1, 3, 4]))\n\
   @result{}  3  2  5  7\n\
size (permute (@var{x}, [1, 3, 4, 2]))\n\
   @result{}  2  5  7  3\n\
## The identity permutation\n\
size (permute (@var{x}, [1, 2, 3, 4]))\n\
   @result{}  2  3  5  7\n\
@seealso{ipermute}\n\

DEFUN (ipermute, args, ,
@deftypefn {Built-in Function} {} ipermute (@var{A}, @var{iperm})\n\
The inverse of the @code{permute} function.\n\
ipermute (permute (A, perm), perm)\n\
returns the original array @var{A}.\n\
@seealso{permute}\n\
@deftypefn {Built-in Function} {} length (@var{a})\n\
Return the length of the object @var{a}.\n\
The length is 0 for empty objects, 1 for scalars, and the number of elements\n\
for vectors.  For matrix objects, the length is the number of rows or\n\
columns, whichever is greater (this odd definition is used for compatibility\n\
with @sc{matlab}).\n\
@seealso{numel, size}\n\

  if (args.length () == 1)
    retval = args(0).length ();

DEFUN (ndims, args, ,
@deftypefn {Built-in Function} {} ndims (@var{a})\n\
Return the number of dimensions of @var{a}.\n\
For any array, the result will always be greater than or equal to 2.\n\
Trailing singleton dimensions are not counted.\n\
ndims (ones (4, 1, 2, 1))\n\

  if (args.length () == 1)
    retval = args(0).ndims ();
DEFUN (numel, args, ,
@deftypefn  {Built-in Function} {} numel (@var{a})\n\
@deftypefnx {Built-in Function} {} numel (@var{a}, @var{idx1}, @var{idx2}, @dots{})\n\
Return the number of elements in the object @var{a}.\n\
Optionally, if indices @var{idx1}, @var{idx2}, @dots{} are supplied,\n\
return the number of elements that would result from the indexing\n\
@var{a}(@var{idx1}, @var{idx2}, @dots{})\n\
Note that the indices do not have to be numerical.  For example,\n\
@var{b} = ones (2, 3);\n\
numel (@var{a}, @var{b})\n\
will return 6, as this is the number of ways to index with @var{b}.\n\
This method is also called when an object appears as lvalue with cs-list\n\
indexing, i.e., @code{object@{@dots{}@}} or @code{object(@dots{}).field}.\n\

    retval = args(0).numel ();
  else if (nargin > 1)
      retval = dims_to_numel (args(0).dims (), args.slice (1, nargin-1));
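// Illustrative sketch (for exposition only): as the numel docstring above
// explains, numel (a, idx1, idx2, ...) counts the elements that the
// indexing expression a(idx1, idx2, ...) would produce, which for plain
// index arguments is the product of their element counts (ones (2, 3)
// used as an index contributes 6).  The real dims_to_numel also handles
// cases such as the magic colon; this sketch covers only the simple case.

#include <cstddef>
#include <vector>

static std::size_t
indexed_numel_sketch (const std::vector<std::size_t>& index_counts)
{
  std::size_t n = 1;
  for (std::size_t k = 0; k < index_counts.size (); k++)
    n *= index_counts[k];       // e.g. {6} -> 6, {2, 3} -> 6
  return n;
}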
@deftypefn  {Built-in Function} {} size (@var{a})\n\
@deftypefnx {Built-in Function} {} size (@var{a}, @var{dim})\n\
Return the number of rows and columns of @var{a}.\n\
With one input argument and one output argument, the result is returned\n\
in a row vector.  If there are multiple output arguments, the number of\n\
rows is assigned to the first, and the number of columns to the second,\n\
etc.  For example:\n\
size ([1, 2; 3, 4; 5, 6])\n\
   @result{} [ 3, 2 ]\n\
[nr, nc] = size ([1, 2; 3, 4; 5, 6])\n\
If given a second argument, @code{size} will return the size of the\n\
corresponding dimension.  For example,\n\
size ([1, 2; 3, 4; 5, 6], 2)\n\
returns the number of columns in the given matrix.\n\
@seealso{numel, ndims, length, rows, columns}\n\

  int nargin = args.length ();

      const dim_vector dimensions = args(0).dims ();
          for (int i = 0; i < nargout; i++)
            retval(i) = rdims(i);
          int ndims = dimensions.length ();
          for (int i = 0; i < ndims; i++)
            m(i) = dimensions(i);
  else if (nargin == 2 && nargout < 2)
        error ("size: DIM must be a scalar");
            retval(0) = dv(nd-1);
            error ("size: requested dimension DIM (= %d) out of range", nd);
DEFUN (size_equal, args, ,
@deftypefn {Built-in Function} {} size_equal (@var{a}, @var{b}, @dots{})\n\
Return true if the dimensions of all arguments agree.\n\
Trailing singleton dimensions are ignored.\n\
When called with a single or no argument @code{size_equal} returns true.\n\
@seealso{size, numel, ndims}\n\

  int nargin = args.length ();

      for (int i = 1; i < nargin; ++i)
          if (a_dims != b_dims)

@deftypefn {Built-in Function} {@var{n} =} nnz (@var{a})\n\
Return the number of nonzero elements in @var{a}.\n\
@seealso{nzmax, nonzeros, find}\n\
  if (args.length () == 1)
    retval = args(0).nnz ();

DEFUN (nzmax, args, ,
@deftypefn {Built-in Function} {@var{n} =} nzmax (@var{SM})\n\
Return the amount of storage allocated to the sparse matrix @var{SM}.\n\
Note that Octave tends to crop unused memory at the first opportunity\n\
for sparse objects.  Thus, in general the value of @code{nzmax} will be the\n\
same as @code{nnz} except for some cases of user-created sparse objects.\n\
@seealso{nnz, spalloc, sparse}\n\
  if (args.length () == 1)
    retval = args(0).nzmax ();
DEFUN (rows, args, ,
@deftypefn {Built-in Function} {} rows (@var{a})\n\
Return the number of rows of @var{a}.\n\
@seealso{columns, size, length, numel, isscalar, isvector, ismatrix}\n\
  if (args.length () == 1)
    retval = args(0).rows ();

DEFUN (columns, args, ,
@deftypefn {Built-in Function} {} columns (@var{a})\n\
Return the number of columns of @var{a}.\n\
@seealso{rows, size, length, numel, isscalar, isvector, ismatrix}\n\
  if (args.length () == 1)
@deftypefn  {Built-in Function} {} sum (@var{x})\n\
@deftypefnx {Built-in Function} {} sum (@var{x}, @var{dim})\n\
@deftypefnx {Built-in Function} {} sum (@dots{}, \"native\")\n\
@deftypefnx {Built-in Function} {} sum (@dots{}, \"double\")\n\
@deftypefnx {Built-in Function} {} sum (@dots{}, \"extra\")\n\
Sum of elements along dimension @var{dim}.\n\
If @var{dim} is omitted, it defaults to the first non-singleton dimension.\n\
The optional @qcode{\"type\"} input determines the class of the variable\n\
used for calculations.  If the argument @qcode{\"native\"} is given, then\n\
the operation is performed in the same type as the original argument, rather\n\
than the default double type.\n\
sum ([true, true])\n\
sum ([true, true], \"native\")\n\
On the contrary, if @qcode{\"double\"} is given, the sum is performed in\n\
double precision even for single precision inputs.\n\
For double precision inputs, the @qcode{\"extra\"} option will use a more\n\
accurate algorithm than straightforward summation.  For single precision\n\
inputs, @qcode{\"extra\"} is the same as @qcode{\"double\"}.  Otherwise,\n\
@qcode{\"extra\"} has no effect.\n\
@seealso{cumsum, sumsq, prod}\n\

  int nargin = args.length ();

  bool isnative = false;
  bool isdouble = false;
  bool isextra = false;

  if (nargin > 1 && args(nargin - 1).is_string ())
      std::string str = args(nargin - 1).string_value ();
      if (str == "native")
      else if (str == "double")
      else if (str == "extra")
        error ("sum: unrecognized type argument '%s'", str.c_str ());

  if (nargin == 1 || nargin == 2)
      dim = args(1).int_value () - 1;
        error ("sum: invalid dimension DIM = %d", dim + 1);

            warning ("sum: 'extra' not yet implemented for sparse matrices");
            warning ("sum: 'extra' not yet implemented for sparse matrices");
          if (isdouble || isextra)
          if (isdouble || isextra)

#define MAKE_INT_BRANCH(X) \
      retval = arg.X ## _array_value ().sum (dim); \
      retval = arg.X ## _array_value ().dsum (dim); \

#undef MAKE_INT_BRANCH
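// Illustrative sketch (for exposition only): the "extra" option documented
// for sum requests a more accurate algorithm than naive left-to-right
// accumulation.  Compensated (Kahan) summation is one such scheme; Octave's
// actual "extra" implementation may use a different algorithm, so this is
// only meant to show the idea.

#include <cstddef>
#include <vector>

static double
kahan_sum_sketch (const std::vector<double>& x)
{
  double s = 0.0;   // running sum
  double c = 0.0;   // running compensation for lost low-order bits

  for (std::size_t i = 0; i < x.size (); i++)
    {
      double y = x[i] - c;
      double t = s + y;
      c = (t - s) - y;   // what was lost when adding y to s
      s = t;
    }

  return s;
}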
DEFUN (sumsq, args, ,
@deftypefn  {Built-in Function} {} sumsq (@var{x})\n\
@deftypefnx {Built-in Function} {} sumsq (@var{x}, @var{dim})\n\
Sum of squares of elements along dimension @var{dim}.\n\
If @var{dim} is omitted, it defaults to the first non-singleton dimension.\n\
This function is conceptually equivalent to computing\n\
sum (x .* conj (x), dim)\n\
but it uses less memory and avoids calling @code{conj} if @var{x} is real.\n\
@seealso{sum, prod}\n\

DEFUN (islogical, args, ,
@deftypefn  {Built-in Function} {} islogical (@var{x})\n\
@deftypefnx {Built-in Function} {} isbool (@var{x})\n\
Return true if @var{x} is a logical object.\n\
@seealso{isfloat, isinteger, ischar, isnumeric, isa}\n\
  if (args.length () == 1)

DEFUN (isinteger, args, ,
@deftypefn {Built-in Function} {} isinteger (@var{x})\n\
Return true if @var{x} is an integer object (int8, uint8, int16, etc.).\n\
Note that @w{@code{isinteger (14)}} is false because numeric constants in\n\
Octave are double precision floating point values.\n\
@seealso{isfloat, ischar, islogical, isnumeric, isa}\n\
  if (args.length () == 1)

DEFUN (iscomplex, args, ,
@deftypefn {Built-in Function} {} iscomplex (@var{x})\n\
Return true if @var{x} is a complex-valued numeric object.\n\
@seealso{isreal, isnumeric, islogical, ischar, isfloat, isa}\n\
  if (args.length () == 1)

DEFUN (isfloat, args, ,
@deftypefn {Built-in Function} {} isfloat (@var{x})\n\
Return true if @var{x} is a floating-point numeric object.\n\
Objects of class double or single are floating-point objects.\n\
@seealso{isinteger, ischar, islogical, isnumeric, isa}\n\
  if (args.length () == 1)
DEFUN (complex, args, ,
@deftypefn  {Built-in Function} {} complex (@var{x})\n\
@deftypefnx {Built-in Function} {} complex (@var{re}, @var{im})\n\
Return a complex value from real arguments.\n\
With 1 real argument @var{x}, return the complex result @code{@var{x} + 0i}.\n\
With 2 real arguments, return the complex result @code{@var{re} + @var{im}i}.\n\
@code{complex} can often be more convenient than expressions such as\n\
complex ([1, 2], [3, 4])\n\
   @result{} [ 1 + 3i   2 + 4i ]\n\
@seealso{real, imag, iscomplex, abs, arg}\n\

  int nargin = args.length ();

      if (arg.numel () == 1)
      if (arg.numel () == 1)
        error ("complex: invalid conversion");
  else if (nargin == 2)
      if (re.numel () == 1)
              if (re_val.nnz () == 0)
                         i < im_val.cidx (j + 1); i++)
                      result.data (im_val.ridx (i) + off) +=
      else if (im.numel () == 1)
              if (im_val.nnz () == 0)
                         i < re_val.cidx (j + 1); i++)
                      result.data (re_val.ridx (i) + off) +=
          if (re_val.dims () == im_val.dims ())
            error ("complex: dimension mismatch");
          if (re.numel () == 1)
              if (im.numel () == 1)
          if (im.numel () == 1)
              if (re_val.dims () == im_val.dims ())
                error ("complex: dimension mismatch");
      else if (re.numel () == 1)
          if (im.numel () == 1)
      if (im.numel () == 1)
          if (re_val.dims () == im_val.dims ())
            error ("complex: dimension mismatch");
    error ("complex: invalid conversion");
DEFUN (isreal, args, ,
@deftypefn {Built-in Function} {} isreal (@var{x})\n\
Return true if @var{x} is a non-complex matrix or scalar.\n\
For compatibility with @sc{matlab}, this includes logical and character\n\
@seealso{iscomplex, isnumeric, isa}\n\
  if (args.length () == 1)

DEFUN (isempty, args, ,
@deftypefn {Built-in Function} {} isempty (@var{a})\n\
Return true if @var{a} is an empty matrix (any one of its dimensions is\n\
zero).\n\
@seealso{isnull, isa}\n\
  if (args.length () == 1)

DEFUN (isnumeric, args, ,
@deftypefn {Built-in Function} {} isnumeric (@var{x})\n\
Return true if @var{x} is a numeric object, i.e., an integer, real, or\n\
complex array.\n\
Logical and character arrays are not considered to be numeric.\n\
@seealso{isinteger, isfloat, isreal, iscomplex, islogical, ischar, iscell, isstruct, isa}\n\
  if (args.length () == 1)
DEFUN (isscalar, args, ,
@deftypefn {Built-in Function} {} isscalar (@var{x})\n\
Return true if @var{x} is a scalar.\n\
@seealso{isvector, ismatrix}\n\
  if (args.length () == 1)
    retval = args(0).numel () == 1;

@deftypefn {Function File} {} isvector (@var{x})\n\
Return true if @var{x} is a vector.\n\
A vector is a 2-D array where one of the dimensions is equal to 1.  As a\n\
consequence a 1x1 array, or scalar, is also a vector.\n\
@seealso{isscalar, ismatrix, size, rows, columns, length}\n\
  if (args.length () == 1)
      retval = sz.length () == 2 && (sz(0) == 1 || sz(1) == 1);
DEFUN (isrow, args, ,
@deftypefn {Function File} {} isrow (@var{x})\n\
Return true if @var{x} is a row vector 1xN with non-negative N.\n\
@seealso{iscolumn, isscalar, isvector, ismatrix}\n\
  if (args.length () == 1)
      retval = sz.length () == 2 && sz(0) == 1;

DEFUN (iscolumn, args, ,
@deftypefn {Function File} {} iscolumn (@var{x})\n\
Return true if @var{x} is a column vector Nx1 with non-negative N.\n\
@seealso{isrow, isscalar, isvector, ismatrix}\n\
  if (args.length () == 1)
      retval = sz.length () == 2 && sz(1) == 1;

DEFUN (ismatrix, args, ,
@deftypefn {Built-in Function} {} ismatrix (@var{a})\n\
Return true if @var{a} is a 2-D array.\n\
@seealso{isscalar, isvector, iscell, isstruct, issparse, isa}\n\
  if (args.length () == 1)
      retval = (sz.length () == 2) && (sz(0) >= 0) && (sz(1) >= 0);

DEFUN (issquare, args, ,
@deftypefn {Function File} {} issquare (@var{x})\n\
Return true if @var{x} is a square matrix.\n\
@seealso{isscalar, isvector, ismatrix, size}\n\
  if (args.length () == 1)
      retval = sz.length () == 2 && sz(0) == sz(1);
  int nargin = args.length ();

  if (nargin > 0 && args(nargin-1).is_string ())
      std::string nm = args(nargin-1).string_value ();
        for (int i = 0; i < nargin; i++)
            dims(i) = args(i).is_empty () ? 0 : args(i).idx_type_value ();
                error ("%s: expecting scalar integer arguments", fcn);
        if (val == 1 && dims.length () == 2 && dims (0) == 1)
          retval = Range (1.0, 0.0, dims (1));
      error ("%s: invalid class name", fcn);

  int nargin = args.length ();

  if (nargin > 0 && args(nargin-1).is_string ())
      std::string nm = args(nargin-1).string_value ();
        for (int i = 0; i < nargin; i++)
            dims(i) = args(i).is_empty () ? 0 : args(i).idx_type_value ();
                error ("%s: expecting scalar integer arguments", fcn);
      error ("%s: invalid class name", fcn);

  int nargin = args.length ();

  if (nargin > 0 && args(nargin-1).is_string ())
      std::string nm = args(nargin-1).string_value ();
        for (int i = 0; i < nargin; i++)
            dims(i) = args(i).is_empty () ? 0 : args(i).idx_type_value ();
                error ("%s: expecting scalar integer arguments", fcn);
        retval = FloatNDArray (dims, static_cast<float> (val));
      error ("%s: invalid class name", fcn);

  int nargin = args.length ();

  if (nargin > 0 && args(nargin-1).is_string ())
      std::string nm = args(nargin-1).string_value ();
        for (int i = 0; i < nargin; i++)
            dims(i) = args(i).is_empty () ? 0 : args(i).idx_type_value ();
                error ("%s: expecting scalar integer arguments", fcn);
                                  static_cast<FloatComplex> (val));
      error ("%s: invalid class name", fcn);

  int nargin = args.length ();

        dims.resize (nargin);

        for (int i = 0; i < nargin; i++)
            dims(i) = args(i).is_empty () ? 0 : args(i).idx_type_value ();
                error ("%s: expecting scalar integer arguments", fcn);

  dims.chop_trailing_singletons ();
DEFUN (ones, args, ,
@deftypefn  {Built-in Function} {} ones (@var{n})\n\
@deftypefnx {Built-in Function} {} ones (@var{m}, @var{n})\n\
@deftypefnx {Built-in Function} {} ones (@var{m}, @var{n}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} ones ([@var{m} @var{n} @dots{}])\n\
@deftypefnx {Built-in Function} {} ones (@dots{}, @var{class})\n\
Return a matrix or N-dimensional array whose elements are all 1.\n\
If invoked with a single scalar integer argument @var{n}, return a square\n\
@nospell{NxN} matrix.\n\
If invoked with two or more scalar integer arguments, or a vector of integer\n\
values, return an array with the given dimensions.\n\
To create a constant matrix whose values are all the same use an expression\n\
val_matrix = val * ones (m, n)\n\
The optional argument @var{class} specifies the class of the return array\n\
and defaults to double.  For example:\n\
val = ones (m,n, \"uint8\")\n\

DEFUN (zeros, args, ,
@deftypefn  {Built-in Function} {} zeros (@var{n})\n\
@deftypefnx {Built-in Function} {} zeros (@var{m}, @var{n})\n\
@deftypefnx {Built-in Function} {} zeros (@var{m}, @var{n}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} zeros ([@var{m} @var{n} @dots{}])\n\
@deftypefnx {Built-in Function} {} zeros (@dots{}, @var{class})\n\
Return a matrix or N-dimensional array whose elements are all 0.\n\
If invoked with a single scalar integer argument, return a square\n\
@nospell{NxN} matrix.\n\
If invoked with two or more scalar integer arguments, or a vector of integer\n\
values, return an array with the given dimensions.\n\
The optional argument @var{class} specifies the class of the return array\n\
and defaults to double.  For example:\n\
val = zeros (m,n, \"uint8\")\n\
@c List other form of function in documentation index\n\
@deftypefn  {Built-in Function} {} Inf\n\
@deftypefnx {Built-in Function} {} Inf (@var{n})\n\
@deftypefnx {Built-in Function} {} Inf (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} Inf (@var{n}, @var{m}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} Inf (@dots{}, @var{class})\n\
Return a scalar, matrix or N-dimensional array whose elements are all equal\n\
to the IEEE representation for positive infinity.\n\
Infinity is produced when results are too large to be represented using the\n\
IEEE floating point format for numbers.  Two common examples which produce\n\
infinity are division by zero and overflow.\n\
   @result{} Inf   Inf\n\
When called with no arguments, return a scalar with the value @samp{Inf}.\n\
When called with a single argument, return a square matrix with the dimension\n\
specified.\n\
When called with more than one scalar argument the first two arguments are\n\
taken as the number of rows and columns and any further arguments specify\n\
additional matrix dimensions.\n\
The optional argument @var{class} specifies the return type and may be\n\
either @qcode{\"double\"} or @qcode{\"single\"}.\n\
@seealso{isinf, NaN}\n\
@c List other form of function in documentation index\n\
@deftypefn  {Built-in Function} {} NaN\n\
@deftypefnx {Built-in Function} {} NaN (@var{n})\n\
@deftypefnx {Built-in Function} {} NaN (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} NaN (@var{n}, @var{m}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} NaN (@dots{}, @var{class})\n\
Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
to the IEEE symbol NaN (Not a Number).\n\
NaN is the result of operations which do not produce a well defined numerical\n\
result.  Common operations which produce a NaN are arithmetic with infinity\n\
($\\infty - \\infty$), zero divided by zero ($0/0$),\n\
(Inf - Inf), zero divided by zero (0/0),\n\
and any operation involving another NaN value (5 + NaN).\n\
Note that NaN always compares not equal to NaN (NaN != NaN).  This behavior\n\
is specified by the IEEE standard for floating point arithmetic.  To find\n\
NaN values, use the @code{isnan} function.\n\
When called with no arguments, return a scalar with the value @samp{NaN}.\n\
When called with a single argument, return a square matrix with the dimension\n\
specified.\n\
When called with more than one scalar argument the first two arguments are\n\
taken as the number of rows and columns and any further arguments specify\n\
additional matrix dimensions.\n\
The optional argument @var{class} specifies the return type and may be\n\
either @qcode{\"double\"} or @qcode{\"single\"}.\n\
@seealso{isnan, Inf}\n\
@deftypefn  {Built-in Function} {} e\n\
@deftypefnx {Built-in Function} {} e (@var{n})\n\
@deftypefnx {Built-in Function} {} e (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} e (@var{n}, @var{m}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} e (@dots{}, @var{class})\n\
Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
to the base of natural logarithms.\n\
$e$ satisfies the equation $\\log (e) = 1$.\n\
@samp{e} satisfies the equation @code{log} (e) = 1.\n\
When called with no arguments, return a scalar with the value @math{e}.\n\
When called with a single argument, return a square matrix with the dimension\n\
specified.\n\
When called with more than one scalar argument the first two arguments are\n\
taken as the number of rows and columns and any further arguments specify\n\
additional matrix dimensions.\n\
The optional argument @var{class} specifies the return type and may be\n\
either @qcode{\"double\"} or @qcode{\"single\"}.\n\
@seealso{log, exp, pi, I}\n\

  double e_val = exp (1.0);
@deftypefn  {Built-in Function} {} eps\n\
@deftypefnx {Built-in Function} {} eps (@var{x})\n\
@deftypefnx {Built-in Function} {} eps (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} eps (@var{n}, @var{m}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} eps (@dots{}, @var{class})\n\
Return a scalar, matrix or N-dimensional array whose elements are all eps,\n\
the machine precision.\n\
More precisely, @code{eps} is the relative spacing between any two adjacent\n\
numbers in the machine's floating point system.  This number is obviously\n\
system dependent.  On machines that support IEEE floating point arithmetic,\n\
@code{eps} is approximately\n\
$2.2204\\times10^{-16}$ for double precision and $1.1921\\times10^{-7}$\n\
2.2204e-16 for double precision and 1.1921e-07\n\
for single precision.\n\
When called with no arguments, return a scalar with the value\n\
@code{eps (1.0)}.\n\
Given a single argument @var{x}, return the distance between @var{x} and the\n\
next largest value.\n\
When called with more than one argument the first two arguments are taken as\n\
the number of rows and columns and any further arguments specify additional\n\
matrix dimensions.  The optional argument @var{class} specifies the return\n\
type and may be either @qcode{\"double\"} or @qcode{\"single\"}.\n\
@seealso{realmax, realmin, intmax, bitmax}\n\

  int nargin = args.length ();

  if (nargin == 1 && ! args(0).is_string ())
      if (args(0).is_single_type ())
              float val = ::fabsf (x(i));
                epsval(i) = powf (2.0, -149e0);
                  gnulib::frexpf (val, &expon);
                                    static_cast<float> (expon - 24));
              double val = ::fabs (x(i));
                epsval(i) = pow (2.0, -1074e0);
                  gnulib::frexp (val, &expon);
                                   static_cast<double> (expon - 53));
    retval = fill_matrix (args, std::numeric_limits<double>::epsilon (),
                          std::numeric_limits<float>::epsilon (), "eps");
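// Illustrative sketch (for exposition only; non-finite inputs are not
// handled here): the eps (x) branch above computes the spacing at x by
// extracting the binary exponent with frexp and scaling 2^(expon - 53)
// for doubles (2^(expon - 24) for singles), falling back to the smallest
// subnormal when x is zero.  A standalone double-precision version:

#include <cmath>

static double
eps_at_sketch (double x)
{
  double val = std::fabs (x);

  if (val == 0.0)
    return std::pow (2.0, -1074.0);   // smallest subnormal double

  int expon;
  std::frexp (val, &expon);           // val = f * 2^expon, f in [0.5, 1)
  return std::pow (2.0, static_cast<double> (expon - 53));
}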
@deftypefn  {Built-in Function} {} pi\n\
@deftypefnx {Built-in Function} {} pi (@var{n})\n\
@deftypefnx {Built-in Function} {} pi (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} pi (@var{n}, @var{m}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} pi (@dots{}, @var{class})\n\
Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
to the ratio of the circumference of a circle to its\n\
diameter ($\\pi$).\n\
Internally, @code{pi} is computed as @samp{4.0 * atan (1.0)}.\n\
When called with no arguments, return a scalar with the value of\n\
When called with a single argument, return a square matrix with the dimension\n\
specified.\n\
When called with more than one scalar argument the first two arguments are\n\
taken as the number of rows and columns and any further arguments specify\n\
additional matrix dimensions.\n\
The optional argument @var{class} specifies the return type and may be\n\
either @qcode{\"double\"} or @qcode{\"single\"}.\n\

  double pi_val = M_PI;
  double pi_val = 4.0 * atan (1.0);
DEFUN (realmax, args, ,
@deftypefn  {Built-in Function} {} realmax\n\
@deftypefnx {Built-in Function} {} realmax (@var{n})\n\
@deftypefnx {Built-in Function} {} realmax (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} realmax (@var{n}, @var{m}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} realmax (@dots{}, @var{class})\n\
Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
to the largest floating point number that is representable.\n\
The actual value is system dependent.  On machines that support IEEE\n\
floating point arithmetic, @code{realmax} is approximately\n\
$1.7977\\times10^{308}$ for double precision and $3.4028\\times10^{38}$\n\
1.7977e+308 for double precision and 3.4028e+38\n\
for single precision.\n\
When called with no arguments, return a scalar with the value\n\
@code{realmax (@qcode{\"double\"})}.\n\
When called with a single argument, return a square matrix with the dimension\n\
specified.\n\
When called with more than one scalar argument the first two arguments are\n\
taken as the number of rows and columns and any further arguments specify\n\
additional matrix dimensions.\n\
The optional argument @var{class} specifies the return type and may be\n\
either @qcode{\"double\"} or @qcode{\"single\"}.\n\
@seealso{realmin, intmax, bitmax, eps}\n\

DEFUN (realmin, args, ,
@deftypefn  {Built-in Function} {} realmin\n\
@deftypefnx {Built-in Function} {} realmin (@var{n})\n\
@deftypefnx {Built-in Function} {} realmin (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} realmin (@var{n}, @var{m}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} realmin (@dots{}, @var{class})\n\
Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
to the smallest normalized floating point number that is representable.\n\
The actual value is system dependent.  On machines that support\n\
IEEE floating point arithmetic, @code{realmin} is approximately\n\
$2.2251\\times10^{-308}$ for double precision and $1.1755\\times10^{-38}$\n\
2.2251e-308 for double precision and 1.1755e-38\n\
for single precision.\n\
When called with no arguments, return a scalar with the value\n\
@code{realmin (@qcode{\"double\"})}.\n\
When called with a single argument, return a square matrix with the dimension\n\
specified.\n\
When called with more than one scalar argument the first two arguments are\n\
taken as the number of rows and columns and any further arguments specify\n\
additional matrix dimensions.\n\
The optional argument @var{class} specifies the return type and may be\n\
either @qcode{\"double\"} or @qcode{\"single\"}.\n\
@seealso{realmax, intmin, eps}\n\
@c List other forms of function in documentation index\n\
@deftypefn  {Built-in Function} {} I\n\
@deftypefnx {Built-in Function} {} I (@var{n})\n\
@deftypefnx {Built-in Function} {} I (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} I (@var{n}, @var{m}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} I (@dots{}, @var{class})\n\
Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
to the pure imaginary unit, defined as\n\
@code{sqrt (-1)}.\n\
I, and its equivalents i, j, and J, are functions so any of the names may\n\
be reused for other purposes (such as i for a counter variable).\n\
When called with no arguments, return a scalar with the value @math{i}.\n\
When called with a single argument, return a square matrix with the dimension\n\
specified.\n\
When called with more than one scalar argument the first two arguments are\n\
taken as the number of rows and columns and any further arguments specify\n\
additional matrix dimensions.\n\
The optional argument @var{class} specifies the return type and may be\n\
either @qcode{\"double\"} or @qcode{\"single\"}.\n\
@seealso{e, pi, log, exp}\n\

@deftypefn  {Built-in Function} {} NA\n\
@deftypefnx {Built-in Function} {} NA (@var{n})\n\
@deftypefnx {Built-in Function} {} NA (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} NA (@var{n}, @var{m}, @var{k}, @dots{})\n\
@deftypefnx {Built-in Function} {} NA (@dots{}, @var{class})\n\
Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
to the special constant used to designate missing values.\n\
Note that NA always compares not equal to NA (NA != NA).\n\
To find NA values, use the @code{isna} function.\n\
When called with no arguments, return a scalar with the value @samp{NA}.\n\
When called with a single argument, return a square matrix with the dimension\n\
specified.\n\
When called with more than one scalar argument the first two arguments are\n\
taken as the number of rows and columns and any further arguments specify\n\
additional matrix dimensions.\n\
The optional argument @var{class} specifies the return type and may be\n\
either @qcode{\"double\"} or @qcode{\"single\"}.\n\
DEFUN (false, args, ,
@deftypefn  {Built-in Function} {} false (@var{x})\n\
@deftypefnx {Built-in Function} {} false (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} false (@var{n}, @var{m}, @var{k}, @dots{})\n\
Return a matrix or N-dimensional array whose elements are all logical 0.\n\
If invoked with a single scalar integer argument, return a square\n\
matrix of the specified size.\n\
If invoked with two or more scalar integer arguments, or a vector of integer\n\
values, return an array with given dimensions.\n\

DEFUN (true, args, ,
@deftypefn  {Built-in Function} {} true (@var{x})\n\
@deftypefnx {Built-in Function} {} true (@var{n}, @var{m})\n\
@deftypefnx {Built-in Function} {} true (@var{n}, @var{m}, @var{k}, @dots{})\n\
Return a matrix or N-dimensional array whose elements are all logical 1.\n\
If invoked with a single scalar integer argument, return a square\n\
matrix of the specified size.\n\
If invoked with two or more scalar integer arguments, or a vector of integer\n\
values, return an array with given dimensions.\n\
  typename MT::element_type one (1);
  if (nr == 1 && nc == 1)
      typename MT::element_type zero (0);
      if (nr > 0 && nc > 0)
          for (int i = 0; i < n; i++)

#define INSTANTIATE_EYE(T) \
  template octave_value identity_matrix<T> (int, int)

      error ("eye: invalid class name");

#undef INT_EYE_MATRIX
5131 @deftypefn {Built-in Function} {} eye (@var{n})\n\
5132 @deftypefnx {Built-in Function} {} eye (@var{m}, @var{n})\n\
5133 @deftypefnx {Built-in Function} {} eye ([@var{m} @var{n}])\n\
5134 @deftypefnx {Built-in Function} {} eye (@dots{}, @var{class})\n\
5135 Return an identity matrix.\n\
5137 If invoked with a single scalar argument @var{n}, return a square\n\
5138 @nospell{NxN} identity matrix.\n\
5140 If supplied two scalar arguments (@var{m}, @var{n}), @code{eye} takes them\n\
5141 to be the number of rows and columns. If given a vector with two elements,\n\
5142 @code{eye} uses the values of the elements as the number of rows and\n\
5143 columns, respectively. For example:\n\
5154 The following expressions all produce the same result:\n\
5162 eye (size ([1, 2; 3, 4]))\n\
5166 The optional argument @var{class}, allows @code{eye} to return an array of\n\
5167 the specified type, like\n\
5170 val = zeros (n,m, \"uint8\")\n\
5173 Calling @code{eye} with no arguments is equivalent to calling it with an\n\
5174 argument of 1. Any negative dimensions are treated as zero. These odd\n\
5175 definitions are for compatibility with @sc{matlab}.\n\
5176 @seealso{speye, ones, zeros}\n\
5181 int nargin = args.length ();
5187 if (nargin > 0 && args(nargin-1).is_string ())
5189 std::string nm = args(nargin-1).string_value ();
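// A minimal sketch, using only the standard library, of what the
// identity_matrix<MT> helper above amounts to for eye: allocate an nr-by-nc
// block of zeros (column-major, as Octave stores matrices) and walk the main
// diagonal setting ones.  Negative sizes are clamped to zero, matching the
// "negative dimensions are treated as zero" rule in the docstring; Octave's
// actual return type depends on the requested class and is not modeled here.

#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<double> identity_sketch (int nr, int nc)
{
  nr = std::max (nr, 0);
  nc = std::max (nc, 0);
  std::vector<double> m (static_cast<std::size_t> (nr) * nc, 0.0);
  int n = std::min (nr, nc);
  for (int i = 0; i < n; i++)
    m[static_cast<std::size_t> (i) * nr + i] = 1.0;   // m(i,i) = 1
  return m;
}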
5250 typedef typename MT::column_vector_type CVT;
5251 typedef typename MT::element_type T;
5257 T bs = octave_value_extract<T> (base);
5260 T ls = octave_value_extract<T> (limit);
5265 CVT lv = octave_value_extract<CVT> (limit);
5266 CVT bv (lv.length (), bs);
5272 CVT bv = octave_value_extract<CVT> (base);
5275 T ls = octave_value_extract<T> (limit);
5276 CVT lv (bv.length (), ls);
5281 CVT lv = octave_value_extract<CVT> (limit);
5291 @deftypefn {Built-in Function} {} linspace (@var{base}, @var{limit})\n\
5292 @deftypefnx {Built-in Function} {} linspace (@var{base}, @var{limit}, @var{n})\n\
5293 Return a row vector with @var{n} linearly spaced elements between\n\
5294 @var{base} and @var{limit}.\n\
5296 If the number of elements is greater than one, then the endpoints @var{base}\n\
5297 and @var{limit} are always included in the range. If @var{base} is greater\n\
5298 than @var{limit}, the elements are stored in decreasing order. If the\n\
5299 number of points is not specified, a value of 100 is used.\n\
5301 The @code{linspace} function always returns a row vector if both @var{base}\n\
5302 and @var{limit} are scalars. If one, or both, of them are column vectors,\n\
5303 @code{linspace} returns a matrix.\n\
5305 For compatibility with @sc{matlab}, return the second argument (@var{limit})\n\
5306 if fewer than two values are requested.\n\
5307 @seealso{logspace}\n\
5312 int nargin = args.length ();
5316 if (nargin != 2 && nargin != 3)
5343 retval = do_linspace<FloatComplexMatrix> (arg_1, arg_2, npoints);
5345 retval = do_linspace<FloatMatrix> (arg_1, arg_2, npoints);
5351 retval = do_linspace<ComplexMatrix> (arg_1, arg_2, npoints);
5353 retval = do_linspace<Matrix> (arg_1, arg_2, npoints);
5357 error ("linspace: N must be an integer");
5383 DEFUN (resize, args, ,
5385 @deftypefn {Built-in Function} {} resize (@var{x}, @var{m})\n\
5386 @deftypefnx {Built-in Function} {} resize (@var{x}, @var{m}, @var{n}, @dots{})\n\
5387 @deftypefnx {Built-in Function} {} resize (@var{x}, [@var{m} @var{n} @dots{}])\n\
5388 Resize @var{x}, cutting off elements as necessary.\n\
5390 In the result, an element is equal to the corresponding element of\n\
5391 @var{x} if its indices are within the bounds of @var{x};\n\
5392 otherwise, the element is set to zero.\n\
5394 In other words, the statement\n\
5397 y = resize (x, dv)\n\
5401 is equivalent to the following code:\n\
5405 y = zeros (dv, class (x));\n\
5406 sz = min (dv, size (x));\n\
5407 for i = 1:length (sz)\n\
5408 idx@{i@} = 1:sz(i);\n\
5410 y(idx@{:@}) = x(idx@{:@});\n\
5415 but is performed more efficiently.\n\
5417 If only @var{m} is supplied, and it is a scalar, the dimension of the\n\
5418 result is @var{m}-by-@var{m}.\n\
5419 If @var{m}, @var{n}, @dots{} are all scalars, then the dimensions of\n\
5420 the result are @var{m}-by-@var{n}-by-@dots{}.\n\
5421 If given a vector as input, then the\n\
5422 dimensions of the result are given by the elements of that vector.\n\
5424 An object can be resized to more dimensions than it has;\n\
5425 in that case the missing dimensions are assumed to be 1.\n\
5426 Resizing an object to fewer dimensions is not possible.\n\
5427 @seealso{reshape, postpad, prepad, cat}\n\
5431 int nargin = args.length ();
5436 int ndim = vec.length ();
5447 for (int i = 0; i < ndim; i++)
5450 retval = retval.resize (dv, true);
5453 else if (nargin > 2)
5462 retval = retval.resize (dv, true);
5473 DEFUN (reshape, args, ,
5475 @deftypefn {Built-in Function} {} reshape (@var{A}, @var{m}, @var{n}, @dots{})\n\
5476 @deftypefnx {Built-in Function} {} reshape (@var{A}, [@var{m} @var{n} @dots{}])\n\
5477 @deftypefnx {Built-in Function} {} reshape (@var{A}, @dots{}, [], @dots{})\n\
5478 @deftypefnx {Built-in Function} {} reshape (@var{A}, @var{size})\n\
5479 Return a matrix with the specified dimensions (@var{m}, @var{n}, @dots{})\n\
5480 whose elements are taken from the matrix @var{A}.\n\
5482 The elements of the matrix are accessed in column-major order (like Fortran\n\
5483 arrays are stored).\n\
5485 The following code demonstrates reshaping a 1x4 row vector into a 2x2 square matrix:\n\
5490 reshape ([1, 2, 3, 4], 2, 2)\n\
5497 Note that the total number of elements in the original matrix\n\
5498 (@code{prod (size (@var{A}))}) must match the total number of elements\n\
5499 in the new matrix (@code{prod ([@var{m} @var{n} @dots{}])}).\n\
5501 A single dimension of the return matrix may be left unspecified and Octave\n\
5502 will determine its size automatically. An empty matrix ([]) is used to flag\n\
5503 the unspecified dimension.\n\
5504 @seealso{resize, vec, postpad, cat, squeeze}\n\
5509 int nargin = args.length ();
5517 if (new_size.length () < 2)
5519 error ("reshape: SIZE must have 2 or more dimensions");
5527 if (new_size(i) < 0)
5529 error ("reshape: SIZE must be non-negative");
5533 new_dims(i) = new_size(i);
5536 else if (nargin > 2)
5541 for (int i = 1; i < nargin; i++)
5543 if (args(i).is_empty ())
5547 error ("reshape: only a single dimension can be unknown");
5558 new_dims(i-1) = args(i).idx_type_value ();
5562 else if (new_dims(i-1) < 0)
5564 error ("reshape: SIZE must be non-negative");
5575 new_dims(empty_dim-1) = 0;
5581 if (a_nel != size_empty_dim * nel)
5582 error ("reshape: SIZE is not divisible by the product of known dimensions (= %d)", nel);
5585 new_dims(empty_dim-1) = size_empty_dim;
5596 retval = args(0).reshape (new_dims);
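// A minimal sketch of the bookkeeping performed above when one dimension is
// given as []: the unknown size is the total element count divided by the
// product of the known sizes, and that product must divide the total exactly.
// The names used here are illustrative, not Octave's.

#include <cstddef>
#include <stdexcept>
#include <vector>

std::vector<long> resolve_reshape_dims (long total_elems,
                                        std::vector<long> dims,
                                        std::size_t empty_dim)  // index of the []
{
  long known = 1;
  for (std::size_t i = 0; i < dims.size (); i++)
    if (i != empty_dim)
      known *= dims[i];

  if (known == 0 || total_elems % known != 0)
    throw std::invalid_argument
      ("reshape: SIZE is not divisible by the product of known dimensions");

  dims[empty_dim] = total_elems / known;
  return dims;
}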
5627 @deftypefn {Built-in Function} {@var{v} =} vec (@var{x})\n\
5628 @deftypefnx {Built-in Function} {@var{v} =} vec (@var{x}, @var{dim})\n\
5629 Return the vector obtained by stacking the columns of the matrix @var{x}\n\
5630 one above the other.\n\
5632 Without @var{dim} this is equivalent to @code{@var{x}(:)}.\n\
5634 If @var{dim} is supplied, the dimensions of @var{v} are set to @var{dim}\n\
5635 with all elements along the last dimension. This is equivalent to\n\
5636 @code{shiftdim (@var{x}(:), 1-@var{dim})}.\n\
5637 @seealso{vech, resize, cat}\n\
5643 int nargin = args.length ();
5645 if (nargin < 1 || nargin > 2)
5650 dim = args(1).idx_type_value ();
5653 error ("vec: DIM must be greater than zero");
5667 for (int i = 0; i < dim-1; i++)
5670 new_dims(dim-1) = retval.numel ();
5672 retval = retval.reshape (new_dims);
5694 DEFUN (squeeze, args, ,
5696 @deftypefn {Built-in Function} {} squeeze (@var{x})\n\
5697 Remove singleton dimensions from @var{x} and return the result.\n\
5699 Note that for compatibility with @sc{matlab}, all objects have\n\
5700 a minimum of two dimensions and row vectors are left unchanged.\n\
5701 @seealso{reshape}\n\
5707 retval = args(0).squeeze ();
5714 DEFUN (full, args, ,
5716 @deftypefn {Built-in Function} {@var{FM} =} full (@var{SM})\n\
5717 Return a full storage matrix from a sparse, diagonal, or permutation matrix, or a range.\n\
5719 @seealso{sparse, issparse}\n\
5725 retval = args(0).full_value ();
5736 @deftypefn {Built-in Function} {} norm (@var{A})\n\
5737 @deftypefnx {Built-in Function} {} norm (@var{A}, @var{p})\n\
5738 @deftypefnx {Built-in Function} {} norm (@var{A}, @var{p}, @var{opt})\n\
5739 Compute the p-norm of the matrix @var{A}.\n\
5741 If the second argument is missing, @code{p = 2} is assumed.\n\
5743 If @var{A} is a matrix (or sparse matrix):\n\
5746 @item @var{p} = @code{1}\n\
5747 1-norm, the largest column sum of the absolute values of @var{A}.\n\
5749 @item @var{p} = @code{2}\n\
5750 Largest singular value of @var{A}.\n\
5752 @item @var{p} = @code{Inf} or @qcode{\"inf\"}\n\
5753 @cindex infinity norm\n\
5754 Infinity norm, the largest row sum of the absolute values of @var{A}.\n\
5756 @item @var{p} = @qcode{\"fro\"}\n\
5757 @cindex Frobenius norm\n\
5758 Frobenius norm of @var{A}, @code{sqrt (sum (diag (@var{A}' * @var{A})))}.\n\
5760 @item other @var{p}, @code{@var{p} > 1}\n\
5761 @cindex general p-norm\n\
5762 Maximum @code{norm (A*x, p)} such that @code{norm (x, p) == 1}.\n\
5765 If @var{A} is a vector or a scalar:\n\
5768 @item @var{p} = @code{Inf} or @qcode{\"inf\"}\n\
5769 @code{max (abs (@var{A}))}.\n\
5771 @item @var{p} = @code{-Inf}\n\
5772 @code{min (abs (@var{A}))}.\n\
5774 @item @var{p} = @qcode{\"fro\"}\n\
5775 Frobenius norm of @var{A}, @code{sqrt (sumsq (abs (A)))}.\n\
5777 @item @var{p} = 0\n\
5778 Hamming norm - the number of nonzero elements.\n\
5780 @item other @var{p}, @code{@var{p} > 1}\n\
5781 p-norm of @var{A}, @code{(sum (abs (@var{A}) .^ @var{p})) ^ (1/@var{p})}.\n\
5783 @item other @var{p}, @code{@var{p} < 1}\n\
5784 The p-pseudonorm defined as above.\n\
5787 If @var{opt} is the value @qcode{\"rows\"}, treat each row as a vector and\n\
5788 compute its norm. The result is returned as a column vector.\n\
5789 Similarly, if @var{opt} is @qcode{\"columns\"} or @qcode{\"cols\"} then\n\
5790 compute the norms of each column and return a row vector.\n\
5791 @seealso{cond, svd}\n\
5796 int nargin = args.length ();
5798 if (nargin >= 1 && nargin <= 3)
5802 if (x_arg.ndims () == 2)
5804 enum { sfmatrix, sfcols, sfrows, sffrob, sfinf } strflag = sfmatrix;
5805 if (nargin > 1 && args(nargin-1).is_string ())
5807 std::string str = args(nargin-1).string_value ();
5808 if (str == "cols" || str == "columns")
5810 else if (str == "rows")
5812 else if (str == "fro")
5814 else if (str == "inf")
5817 error ("norm: unrecognized option: %s", str.c_str ());
5831 if (strflag == sfcols || strflag == sfrows)
5833 if (str == "cols" || str == "columns" || str == "rows")
5834 error ("norm: invalid combination of options");
5835 else if (str == "fro")
5837 else if (str == "inf")
5840 error ("norm: unrecognized option: %s", str.c_str ());
5843 error ("norm: invalid combination of options");
5853 retval(0) = xnorm (x_arg, p_arg);
5872 error ("norm: only valid for 2-D objects");
5965 @deftypefn {Built-in Function} {@var{z} =} not (@var{x})\n\
5966 Return the logical NOT of @var{x}.\n\
5968 This function is equivalent to the operator syntax @w{@code{! x}}.\n\
5969 @seealso{and, or, xor}\n\
5975 DEFUN (uplus, args, ,
5977 @deftypefn {Built-in Function} {} uplus (@var{x})\n\
5978 This function and @w{@tcode{+ x}} are equivalent.\n\
5979 @seealso{uminus, plus, minus}\n\
5985 DEFUN (uminus, args, ,
5987 @deftypefn {Built-in Function} {} uminus (@var{x})\n\
5988 This function and @w{@tcode{- x}} are equivalent.\n\
5989 @seealso{uplus, minus}\n\
5997 @deftypefn {Built-in Function} {} transpose (@var{x})\n\
5998 Return the transpose of @var{x}.\n\
6000 This function and @tcode{x.'} are equivalent.\n\
6001 @seealso{ctranspose}\n\
6027 DEFUN (ctranspose, args, ,
6029 @deftypefn {Built-in Function} {} ctranspose (@var{x})\n\
6030 Return the complex conjugate transpose of @var{x}.\n\
6032 This function and @tcode{x'} are equivalent.\n\
6033 @seealso{transpose}\n\
6079 int nargin = args.length ();
6094 for (int i = 2; i < nargin; i++)
6095 retval.assign (aop, args(i));
6102 DEFUN (plus, args, ,
6104 @deftypefn {Built-in Function} {} plus (@var{x}, @var{y})\n\
6105 @deftypefnx {Built-in Function} {} plus (@var{x1}, @var{x2}, @dots{})\n\
6106 This function and @w{@tcode{x + y}} are equivalent.\n\
6108 If more arguments are given, the summation is applied\n\
6109 cumulatively from left to right:\n\
6112 (@dots{}((x1 + x2) + x3) + @dots{})\n\
6115 At least one argument is required.\n\
6116 @seealso{minus, uplus}\n\
6123 DEFUN (minus, args, ,
6125 @deftypefn {Built-in Function} {} minus (@var{x}, @var{y})\n\
6126 This function and @w{@tcode{x - y}} are equivalent.\n\
6127 @seealso{plus, uminus}\n\
6133 DEFUN (mtimes, args, ,
6135 @deftypefn {Built-in Function} {} mtimes (@var{x}, @var{y})\n\
6136 @deftypefnx {Built-in Function} {} mtimes (@var{x1}, @var{x2}, @dots{})\n\
6137 Return the matrix multiplication product of inputs.\n\
6139 This function and @w{@tcode{x * y}} are equivalent.\n\
6140 If more arguments are given, the multiplication is applied\n\
6141 cumulatively from left to right:\n\
6144 (@dots{}((x1 * x2) * x3) * @dots{})\n\
6147 At least one argument is required.\n\
6148 @seealso{times, plus, minus, rdivide, mrdivide, mldivide, mpower}\n\
6155 DEFUN (mrdivide, args, ,
6157 @deftypefn {Built-in Function} {} mrdivide (@var{x}, @var{y})\n\
6158 Return the matrix right division of @var{x} and @var{y}.\n\
6160 This function and @w{@tcode{x / y}} are equivalent.\n\
6161 @seealso{mldivide, rdivide, plus, minus}\n\
6167 DEFUN (mpower, args, ,
6169 @deftypefn {Built-in Function} {} mpower (@var{x}, @var{y})\n\
6170 Return the matrix power operation of @var{x} raised to the @var{y} power.\n\
6172 This function and @w{@tcode{x ^ y}} are equivalent.\n\
6173 @seealso{power, mtimes, plus, minus}\n\
6179 DEFUN (mldivide, args, ,
6181 @deftypefn {Built-in Function} {} mldivide (@var{x}, @var{y})\n\
6182 Return the matrix left division of @var{x} and @var{y}.\n\
6184 This function and @w{@tcode{x @xbackslashchar{} y}} are equivalent.\n\
6185 @seealso{mrdivide, ldivide, rdivide}\n\
6193 @deftypefn {Built-in Function} {} lt (@var{x}, @var{y})\n\
6194 This function is equivalent to @w{@code{x < y}}.\n\
6195 @seealso{le, eq, ge, gt, ne}\n\
6203 @deftypefn {Built-in Function} {} le (@var{x}, @var{y})\n\
6204 This function is equivalent to @w{@code{x <= y}}.\n\
6205 @seealso{eq, ge, gt, ne, lt}\n\
6213 @deftypefn {Built-in Function} {} eq (@var{x}, @var{y})\n\
6214 Return true if the two inputs are equal.\n\
6216 This function is equivalent to @w{@code{x == y}}.\n\
6217 @seealso{ne, isequal, le, ge, gt, ne, lt}\n\
6225 @deftypefn {Built-in Function} {} ge (@var{x}, @var{y})\n\
6226 This function is equivalent to @w{@code{x >= y}}.\n\
6227 @seealso{le, eq, gt, ne, lt}\n\
6235 @deftypefn {Built-in Function} {} gt (@var{x}, @var{y})\n\
6236 This function is equivalent to @w{@code{x > y}}.\n\
6237 @seealso{le, eq, ge, ne, lt}\n\
6245 @deftypefn {Built-in Function} {} ne (@var{x}, @var{y})\n\
6246 Return true if the two inputs are not equal.\n\
6248 This function is equivalent to @w{@code{x != y}}.\n\
6249 @seealso{eq, isequal, le, ge, lt}\n\
6255 DEFUN (times, args, ,
6257 @deftypefn {Built-in Function} {} times (@var{x}, @var{y})\n\
6258 @deftypefnx {Built-in Function} {} times (@var{x1}, @var{x2}, @dots{})\n\
6259 Return the element-by-element multiplication product of inputs.\n\
6261 This function and @w{@tcode{x .* y}} are equivalent.\n\
6262 If more arguments are given, the multiplication is applied\n\
6263 cumulatively from left to right:\n\
6266 (@dots{}((x1 .* x2) .* x3) .* @dots{})\n\
6269 At least one argument is required.\n\
6270 @seealso{mtimes, rdivide}\n\
6277 DEFUN (rdivide, args, ,
6279 @deftypefn {Built-in Function} {} rdivide (@var{x}, @var{y})\n\
6280 Return the element-by-element right division of @var{x} and @var{y}.\n\
6282 This function and @w{@tcode{x ./ y}} are equivalent.\n\
6283 @seealso{ldivide, mrdivide, times, plus}\n\
6289 DEFUN (power, args, ,
6291 @deftypefn {Built-in Function} {} power (@var{x}, @var{y})\n\
6292 Return the element-by-element operation of @var{x} raised to the @var{y} power.\n\
6295 This function and @w{@tcode{x .^ y}} are equivalent.\n\
6297 If several complex results are possible, returns the one with smallest\n\
6298 non-negative argument (angle). Use @code{realpow}, @code{realsqrt},\n\
6299 @code{cbrt}, or @code{nthroot} if a real result is preferred.\n\
6301 @seealso{mpower, realpow, realsqrt, cbrt, nthroot}\n\
6307 DEFUN (ldivide, args, ,
6309 @deftypefn {Built-in Function} {} ldivide (@var{x}, @var{y})\n\
6310 Return the element-by-element left division of @var{x} and @var{y}.\n\
6312 This function and @w{@tcode{x .@xbackslashchar{} y}} are equivalent.\n\
6313 @seealso{rdivide, mldivide, times, plus}\n\
6321 @deftypefn {Built-in Function} {@var{z} =} and (@var{x}, @var{y})\n\
6322 @deftypefnx {Built-in Function} {@var{z} =} and (@var{x1}, @var{x2}, @dots{})\n\
6323 Return the logical AND of @var{x} and @var{y}.\n\
6325 This function is equivalent to the operator syntax @w{@code{x & y}}. If\n\
6326 more than two arguments are given, the logical AND is applied cumulatively\n\
6327 from left to right:\n\
6330 (@dots{}((x1 & x2) & x3) & @dots{})\n\
6333 At least one argument is required.\n\
6334 @seealso{or, not, xor}\n\
6343 @deftypefn {Built-in Function} {@var{z} =} or (@var{x}, @var{y})\n\
6344 @deftypefnx {Built-in Function} {@var{z} =} or (@var{x1}, @var{x2}, @dots{})\n\
6345 Return the logical OR of @var{x} and @var{y}.\n\
6347 This function is equivalent to the operator syntax @w{@code{x | y}}. If\n\
6348 more than two arguments are given, the logical OR is applied cumulatively\n\
6349 from left to right:\n\
6352 (@dots{}((x1 | x2) | x3) | @dots{})\n\
6355 At least one argument is required.\n\
6356 @seealso{and, not, xor}\n\
6363 DEFUN (colon, args, ,
6365 @deftypefn {Built-in Function} {@var{r} =} colon (@var{base}, @var{limit})\n\
6366 @deftypefnx {Built-in Function} {@var{r} =} colon (@var{base}, @var{increment}, @var{limit})\n\
6367 Return the result of the colon expression corresponding to @var{base},\n\
6368 @var{limit}, and optionally, @var{increment}.\n\
6370 This function is equivalent to the operator syntax @w{@code{base : limit}}\n\
6371 or @w{@code{base : increment : limit}}.\n\
6375 int nargin = args.length ();
6384 retval = do_colon_op (args(0), args(1), args(2));
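// A minimal sketch of the sequence a colon expression denotes: start at base,
// step by increment, and stop once the next value would pass limit in either
// direction.  The real implementation builds an Octave Range object and is
// more careful about floating-point rounding than this stand-alone version.

#include <cmath>
#include <vector>

std::vector<double> colon_sketch (double base, double increment, double limit)
{
  std::vector<double> r;
  if (increment == 0)
    return r;                                    // base:0:limit is empty
  const long n = static_cast<long> (std::floor ((limit - base) / increment));
  for (long i = 0; i <= n; i++)                  // n < 0 gives an empty range
    r.push_back (base + i * increment);
  return r;
}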
6397 DEFUN (tic, args, nargout,
6399 @deftypefn {Built-in Function} {} tic ()\n\
6400 @deftypefnx {Built-in Function} {@var{id} =} tic ()\n\
6401 @deftypefnx {Built-in Function} {} toc ()\n\
6402 @deftypefnx {Built-in Function} {} toc (@var{id})\n\
6403 @deftypefnx {Built-in Function} {@var{val} =} toc (@dots{})\n\
6404 Set or check a wall-clock timer.\n\
6406 Calling @code{tic} without an output argument sets the internal timer state.\n\
6407 Subsequent calls to @code{toc} return the number of seconds since the timer was set.\n\
6414 # many computations later@dots{}\n\
6415 elapsed_time = toc ();\n\
6420 will set the variable @code{elapsed_time} to the number of seconds since\n\
6421 the most recent call to the function @code{tic}.\n\
6423 If called with one output argument, @code{tic} returns a scalar\n\
6424 of type @code{uint64} that may be later passed to @code{toc}.\n\
6428 id = tic; sleep (5); toc (id)\n\
6433 Calling @code{tic} and @code{toc} this way allows nested timing calls.\n\
6435 If you are more interested in the CPU time that your process used, you\n\
6436 should use the @code{cputime} function instead. The @code{tic} and\n\
6437 @code{toc} functions report the actual wall clock time that elapsed\n\
6438 between the calls. This may include time spent processing other jobs or\n\
6439 doing nothing at all.\n\
6440 @seealso{toc, cputime}\n\
6445 int nargin = args.length ();
6448 warning ("tic: ignoring extra arguments");
6457 double frac = modf (tmp, &ip);
6458 uint64_t microsecs = static_cast<uint64_t> (CLOCKS_PER_SEC * frac);
6459 microsecs += CLOCKS_PER_SEC * static_cast<uint64_t> (ip);
6463 tic_toc_timestamp = tmp;
6468 DEFUN (toc, args, nargout,
6470 @deftypefn {Built-in Function} {} toc ()\n\
6471 @deftypefnx {Built-in Function} {} toc (@var{id})\n\
6472 @deftypefnx {Built-in Function} {@var{val} =} toc (@dots{})\n\
6473 @seealso{tic, cputime}\n\
6478 int nargin = args.length ();
6492 uint64_t val = id.value ();
6495 = (static_cast<double> (val / CLOCKS_PER_SEC)
6496 + static_cast<double> (val % CLOCKS_PER_SEC)
6503 error ("toc: invalid ID");
6509 error ("toc called before timer set");
6519 octave_stdout << "Elapsed time is " << tmp << " seconds.\n";
6534 DEFUN (cputime, args, ,
6536 @deftypefn {Built-in Function} {[@var{total}, @var{user}, @var{system}] =} cputime ();\n\
6537 Return the CPU time used by your Octave session.\n\
6539 The first output is the total time spent executing your process and is equal\n\
6540 to the sum of second and third outputs, which are the number of CPU seconds\n\
6541 spent executing in user mode and the number of CPU seconds spent executing\n\
6542 in system mode, respectively.\n\
6544 If your system does not have a way to report CPU time usage, @code{cputime}\n\
6545 returns 0 for each of its output values.\n\
6547 Note that because Octave itself uses some CPU time to start, it is\n\
6548 reasonable to verify that @code{cputime} works by checking whether the\n\
6549 total CPU time used is nonzero.\n\
6550 @seealso{tic, toc}\n\
6554 int nargin = args.length ();
6559 warning ("cputime: ignoring extra arguments");
6561 #if defined (HAVE_GETRUSAGE)
6567 usr = static_cast<double> (ru.ru_utime.tv_sec) +
6568 static_cast<double> (ru.ru_utime.tv_usec) * 1e-6;
6570 sys = static_cast<double> (ru.ru_stime.tv_sec) +
6571 static_cast<double> (ru.ru_stime.tv_usec) * 1e-6;
6579 unsigned long ticks;
6580 unsigned long seconds;
6581 unsigned long fraction;
6583 ticks = t.tms_utime + t.tms_cutime;
6584 fraction = ticks % CLOCKS_PER_SEC;
6585 seconds = ticks / CLOCKS_PER_SEC;
6587 usr = static_cast<double> (seconds) + static_cast<double> (fraction) /
6588 static_cast<double> (CLOCKS_PER_SEC);
6590 ticks = t.tms_stime + t.tms_cstime;
6591 fraction = ticks % CLOCKS_PER_SEC;
6592 seconds = ticks / CLOCKS_PER_SEC;
6594 sys = static_cast<double> (seconds) + static_cast<double> (fraction) /
6595 static_cast<double> (CLOCKS_PER_SEC);
6601 retval(0) = sys + usr;
6606 DEFUN (sort, args, nargout,
6608 @deftypefn {Built-in Function} {[@var{s}, @var{i}] =} sort (@var{x})\n\
6609 @deftypefnx {Built-in Function} {[@var{s}, @var{i}] =} sort (@var{x}, @var{dim})\n\
6610 @deftypefnx {Built-in Function} {[@var{s}, @var{i}] =} sort (@var{x}, @var{mode})\n\
6611 @deftypefnx {Built-in Function} {[@var{s}, @var{i}] =} sort (@var{x}, @var{dim}, @var{mode})\n\
6612 Return a copy of @var{x} with the elements arranged in increasing order.\n\
6614 For matrices, @code{sort} orders the elements within columns\n\
6620 sort ([1, 2; 2, 3; 3, 1])\n\
6627 If the optional argument @var{dim} is given, then the matrix is sorted\n\
6628 along the dimension defined by @var{dim}. The optional argument @code{mode}\n\
6629 defines the order in which the values will be sorted. Valid values of\n\
6630 @code{mode} are @qcode{\"ascend\"} or @qcode{\"descend\"}.\n\
6632 The @code{sort} function may also be used to produce a matrix\n\
6633 containing the original row indices of the elements in the sorted\n\
6634 matrix. For example:\n\
6638 [s, i] = sort ([1, 2; 2, 3; 3, 1])\n\
6639 @result{} s = 1 1\n\
6642 @result{} i = 1 3\n\
6648 For equal elements, the indices are such that equal elements are listed\n\
6649 in the order in which they appeared in the original list.\n\
6651 Sorting of complex entries is done first by magnitude (@code{abs (@var{z})})\n\
6652 and for any ties by phase angle (@code{angle (@var{z})}). For example:\n\
6656 sort ([1+i; 1; 1-i])\n\
6663 NaN values are treated as being greater than any other value and are sorted\n\
6664 to the end of the list.\n\
6666 The @code{sort} function may also be used to sort strings and cell arrays\n\
6667 of strings, in which case ASCII dictionary order (uppercase 'A' precedes\n\
6668 lowercase 'a') of the strings is used.\n\
6670 The algorithm used in @code{sort} is optimized for the sorting of partially ordered lists.\n\
6672 @seealso{sortrows, issorted}\n\
6677 int nargin = args.length ();
6680 if (nargin < 1 || nargin > 3)
6686 bool return_idx = nargout > 1;
6693 if (args(1).is_string ())
6695 std::string mode = args(1).string_value ();
6696 if (mode == "ascend")
6698 else if (mode == "descend")
6702 error ("sort: MODE must be either \"ascend\" or \"descend\"");
6707 dim = args(1).nint_value () - 1;
6712 if (args(1).is_string ())
6718 if (! args(2).is_string ())
6720 error ("sort: MODE must be a string");
6723 std::string mode = args(2).string_value ();
6724 if (mode == "ascend")
6726 else if (mode == "descend")
6730 error ("sort: MODE must be either \"ascend\" or \"descend\"");
6736 if (nargin == 1 || args(1).is_string ())
6745 error ("sort: DIM must be a valid dimension");
6756 retval(0) = arg.sort (sidx, dim, smode);
6760 retval(0) = arg.sort (dim, smode);
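// A minimal sketch of the complex ordering rule in the docstring: compare by
// magnitude first and break ties by phase angle.  std::stable_sort keeps equal
// elements in their original order, matching the stability guarantee above;
// the separate rule that moves NaNs to the end is not reproduced here.

#include <algorithm>
#include <complex>
#include <vector>

void sort_complex_sketch (std::vector<std::complex<double> >& v)
{
  std::stable_sort (v.begin (), v.end (),
                    [] (const std::complex<double>& a,
                        const std::complex<double>& b)
                    {
                      if (std::abs (a) != std::abs (b))
                        return std::abs (a) < std::abs (b);
                      return std::arg (a) < std::arg (b);
                    });
}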
6946 DEFUN (__sort_rows_idx__, args, ,
6948 @deftypefn {Built-in Function} {} __sort_rows_idx__ (@var{a}, @var{mode})\n\
6949 Undocumented internal function.\n\
6954 int nargin = args.length ();
6957 if (nargin < 1 || nargin > 2 || (nargin == 2 && ! args(1).is_string ()))
6965 std::string mode = args(1).string_value ();
6966 if (mode == "ascend")
6968 else if (mode == "descend")
6972 error ("__sort_rows_idx__: MODE must be either \"ascend\" or \"descend\"");
6980 error ("__sort_rows_idx__: sparse matrices not yet supported");
6981 if (arg.ndims () == 2)
6988 error ("__sort_rows_idx__: needs a 2-dimensional object");
7004 if (mode == "ascending")
7006 else if (mode == "descending")
7008 else if (mode == "either")
7011 error ("issorted: MODE must be \"ascending\", \"descending\", or \"either\"");
7014 error ("issorted: expecting %s argument to be a string", argn);
7019 DEFUN (issorted, args, ,
7021 @deftypefn {Built-in Function} {} issorted (@var{a})\n\
7022 @deftypefnx {Built-in Function} {} issorted (@var{a}, @var{mode})\n\
7023 @deftypefnx {Built-in Function} {} issorted (@var{a}, \"rows\", @var{mode})\n\
7024 Return true if the array is sorted according to @var{mode}, which\n\
7025 may be either @qcode{\"ascending\"}, @qcode{\"descending\"}, or\n\
7026 @qcode{\"either\"}.\n\
7028 By default, @var{mode} is @qcode{\"ascending\"}. NaNs are treated in the\n\
7029 same manner as @code{sort}.\n\
7031 If the optional argument @qcode{\"rows\"} is supplied, check whether\n\
7032 the array is sorted by rows as output by the function @code{sortrows}\n\
7033 (with no options).\n\
7035 This function does not support sparse matrices.\n\
7036 @seealso{sort, sortrows}\n\
7041 int nargin = args.length ();
7043 if (nargin < 1 || nargin > 3)
7049 bool by_rows = false;
7060 if (args(1).is_string ())
7062 std::string tmp = args(1).string_value ();
7069 error ("issorted: second argument must be a string");
7080 error ("issorted: sparse matrices not yet supported");
7081 if (arg.ndims () == 2)
7084 error ("issorted: A must be a 2-dimensional object");
7089 retval = args(0).is_sorted (smode) != UNSORTED;
7091 error ("issorted: needs a vector");
7131 DEFUN (nth_element, args, ,
7133 @deftypefn {Built-in Function} {} nth_element (@var{x}, @var{n})\n\
7134 @deftypefnx {Built-in Function} {} nth_element (@var{x}, @var{n}, @var{dim})\n\
7135 Select the n-th smallest element of a vector, using the ordering defined by @code{sort}.\n\
7138 The result is equivalent to @code{sort(@var{x})(@var{n})}.\n\
7140 @var{n} can also be a contiguous range, either ascending @code{l:u}\n\
7141 or descending @code{u:-1:l}, in which case a range of elements is returned.\n\
7143 If @var{x} is an array, @code{nth_element} operates along the dimension\n\
7144 defined by @var{dim}, or the first non-singleton dimension if @var{dim} is\n\
7147 Programming Note: nth_element encapsulates the C++ standard library\n\
7148 algorithms nth_element and partial_sort. On average, the complexity of the\n\
7149 operation is O(M*log(K)), where @w{@code{M = size (@var{x}, @var{dim})}} and\n\
7150 @w{@code{K = length (@var{n})}}. This function is intended for cases where\n\
7151 the ratio K/M is small; otherwise, it may be better to use @code{sort}.\n\
7152 @seealso{sort, min, max}\n\
7156 int nargin = args.length ();
7158 if (nargin == 2 || nargin == 3)
7165 dim = args(2).int_value (true) - 1;
7167 error ("nth_element: DIM must be a valid dimension");
7191 #define MAKE_INT_BRANCH(X) \
7193 retval = argx.X ## _array_value ().nth_element (n, dim); \
7204 #undef MAKE_INT_BRANCH
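// A minimal sketch of the selection described in the nth_element docstring:
// obtain the values that would occupy positions l..u of the sorted vector
// without fully sorting it.  std::nth_element pins position u, and sorting
// only the prefix then orders the range; indices here are 0-based (the Octave
// N is 1-based) and the caller must ensure l <= u < x.size ().

#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<double> nth_element_sketch (std::vector<double> x,
                                        std::size_t l, std::size_t u)
{
  std::nth_element (x.begin (), x.begin () + u, x.end ());  // pin position u
  std::sort (x.begin (), x.begin () + u);                   // order 0 .. u-1
  return std::vector<double> (x.begin () + l, x.begin () + u + 1);
}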
7218 template <class NDT>
7223 typedef typename NDT::element_type T;
7226 else if (idx.extent (n) > n)
7227 error ("accumarray: index out of range");
7231 if (vals.numel () == 1)
7232 retval.idx_add (idx, vals (0));
7233 else if (vals.numel () == idx.length (n))
7234 retval.idx_add (idx, vals);
7236 error ("accumarray: dimensions mismatch");
7241 DEFUN (__accumarray_sum__, args, ,
7243 @deftypefn {Built-in Function} {} __accumarray_sum__ (@var{idx}, @var{vals}, @var{n})\n\
7244 Undocumented internal function.\n\
7248 int nargin = args.length ();
7249 if (nargin >= 2 && nargin <= 3 && args(0).is_numeric_type ())
7254 n = args(2).idx_type_value (true);
7294 template <class NDT>
7298 const typename NDT::element_type& zero_val)
7300 typedef typename NDT::element_type T;
7303 else if (idx.extent (n) > n)
7304 error ("accumarray: index out of range");
7313 if (vals.numel () == 1)
7314 (retval.*op) (idx, NDT (dim_vector (l, 1), vals(0)));
7315 else if (vals.numel () == l)
7316 (retval.*op) (idx, vals);
7318 error ("accumarray: dimensions mismatch");
7328 int nargin = args.length ();
7329 if (nargin >= 3 && nargin <= 4 && args(0).is_numeric_type ())
7334 n = args(3).idx_type_value (true);
7361 #define MAKE_INT_BRANCH(X) \
7363 retval = do_accumarray_minmax (idx, vals.X ## _array_value (), \
7365 zero.X ## _scalar_value ()); \
7376 #undef MAKE_INT_BRANCH
7392 DEFUN (__accumarray_min__, args, ,
7394 @deftypefn {Built-in Function} {} __accumarray_min__ (@var{idx}, @var{vals}, @var{zero}, @var{n})\n\
7395 Undocumented internal function.\n\
7401 DEFUN (__accumarray_max__, args, ,
7403 @deftypefn {Built-in Function} {} __accumarray_max__ (@var{idx}, @var{vals}, @var{zero}, @var{n})\n\
7404 Undocumented internal function.\n\
7410 template <class NDT>
7415 typedef typename NDT::element_type T;
7418 else if (idx.extent (n) > n)
7419 error ("accumdim: index out of range");
7426 else if (dim >= rdv.length ())
7431 NDT retval (rdv, T ());
7433 if (idx.length () != vals_dim(dim))
7434 error ("accumdim: dimension mismatch");
7436 retval.idx_add_nd (idx, vals, dim);
7441 DEFUN (__accumdim_sum__, args, ,
7443 @deftypefn {Built-in Function} {} __accumdim_sum__ (@var{idx}, @var{vals}, @var{dim}, @var{n})\n\
7444 Undocumented internal function.\n\
7448 int nargin = args.length ();
7449 if (nargin >= 2 && nargin <= 4 && args(0).is_numeric_type ())
7454 dim = args(2).int_value () - 1;
7458 n = args(3).idx_type_value (true);
7492 template <class NDT>
7495 const NDT& tval, const NDT& fval)
7497 typedef typename NDT::element_type T;
7501 bool tscl = tval.numel () == 1;
7502 bool fscl = fval.numel () == 1;
7504 if ((! tscl && tval.dims () != dv)
7505 || (! fscl && fval.dims () != dv))
7506 error ("merge: MASK, TVAL, and FVAL dimensions must match");
7509 T *rv = retval.fortran_vec ();
7512 const T *tv = tval.data ();
7513 const T *fv = fval.data ();
7514 const bool *mv = mask.data ();
7523 rv[i] = mv[i] ? ts : fs;
7529 rv[i] = mv[i] ? ts : fv[i];
7538 rv[i] = mv[i] ? tv[i] : fs;
7543 rv[i] = mv[i] ? tv[i] : fv[i];
7551 #define MAKE_INT_BRANCH(INTX) \
7552 else if (tval.is_ ## INTX ## _type () && fval.is_ ## INTX ## _type ()) \
7554 retval = do_merge (mask, \
7555 tval.INTX ## _array_value (), \
7556 fval.INTX ## _array_value ()); \
7559 DEFUN (merge, args, ,
7561 @deftypefn {Built-in Function} {} merge (@var{mask}, @var{tval}, @var{fval})\n\
7562 @deftypefnx {Built-in Function} {} ifelse (@var{mask}, @var{tval}, @var{fval})\n\
7563 Merge elements of @var{tval} and @var{fval}, depending on the\n\
7564 value of @var{mask}.\n\
7566 If @var{mask} is a logical scalar, the other two arguments can be arbitrary\n\
7567 values. Otherwise, @var{mask} must be a logical array, and @var{tval},\n\
7568 @var{fval} should be arrays of matching class, or cell arrays. In the\n\
7569 scalar mask case, @var{tval} is returned if @var{mask} is true, otherwise\n\
7570 @var{fval} is returned.\n\
7572 In the array mask case, both @var{tval} and @var{fval} must be either\n\
7573 scalars or arrays with dimensions equal to @var{mask}. The result is\n\
7574 constructed as follows:\n\
7578 result(mask) = tval(mask);\n\
7579 result(! mask) = fval(! mask);\n\
7583 @var{mask} can also be an arbitrary numeric type, in which case it is first\n\
7584 converted to logical.\n\
7585 @seealso{logical, diff}\n\
7588 int nargin = args.length ();
7591 if (nargin == 3 && (args(0).is_bool_type () || args(0).is_numeric_type ()))
7596 retval = mask_val.is_true () ? args(1) : args(2);
7630 sq_string ? '\'' : '"');
7649 error ("merge: cannot merge %s with %s with array mask",
7662 #undef MAKE_INT_BRANCH
7664 template <class SparseT>
7669 SparseT retval = array;
7673 while (order > 0 && k > 0)
7675 idx_vector col1 (':'), col2 (':'), sl1 (1, k), sl2 (0, k-1);
7676 retval = SparseT (retval.index (col1, sl1))
7677 - SparseT (retval.index (col2, sl2));
7678 assert (retval.columns () == k-1);
7686 while (order > 0 && k > 0)
7688 idx_vector col1 (':'), col2 (':'), sl1 (1, k), sl2 (0, k-1);
7689 retval = SparseT (retval.index (sl1, col1))
7690 - SparseT (retval.index (sl2, col2));
7691 assert (retval.rows () == k-1);
7712 if (dv(dim) <= order)
7723 retval = do_diff (array, order, dim - 1);
7726 else if (dv(dim) == 1)
7730 retval = do_diff (array, dv(dim) - 1, dim);
7731 order -= dv(dim) - 1;
7788 DEFUN (diff, args, ,
7790 @deftypefn {Built-in Function} {} diff (@var{x})\n\
7791 @deftypefnx {Built-in Function} {} diff (@var{x}, @var{k})\n\
7792 @deftypefnx {Built-in Function} {} diff (@var{x}, @var{k}, @var{dim})\n\
7793 If @var{x} is a vector of length @math{n}, @code{diff (@var{x})} is the\n\
7794 vector of first differences\n\
7796 $x_2 - x_1, \\ldots{}, x_n - x_{n-1}$.\n\
7799 @var{x}(2) - @var{x}(1), @dots{}, @var{x}(n) - @var{x}(n-1).\n\
7802 If @var{x} is a matrix, @code{diff (@var{x})} is the matrix of column\n\
7803 differences along the first non-singleton dimension.\n\
7805 The second argument is optional. If supplied, @code{diff (@var{x},\n\
7806 @var{k})}, where @var{k} is a non-negative integer, returns the\n\
7807 @var{k}-th differences. It is possible that @var{k} is larger than\n\
7808 the first non-singleton dimension of the matrix. In this case,\n\
7809 @code{diff} continues to take the differences along the next\n\
7810 non-singleton dimension.\n\
7812 The dimension along which to take the difference can be explicitly\n\
7813 stated with the optional variable @var{dim}. In this case the\n\
7814 @var{k}-th order differences are calculated along this dimension.\n\
7815 In the case where @var{k} exceeds @code{size (@var{x}, @var{dim})}\n\
7816 an empty matrix is returned.\n\
7817 @seealso{sort, merge}\n\
7820 int nargin = args.length ();
7823 if (nargin < 1 || nargin > 3)
7825 else if (! (args(0).is_numeric_type () || args(0).is_bool_type ()))
7826 error ("diff: X must be numeric or logical");
7834 if (args(1).is_scalar_type ())
7835 order = args(1).idx_type_value (true, false);
7836 else if (! args(1).is_zero_by_zero ())
7837 error ("order K must be a scalar or []");
7839 error ("order K must be non-negative");
7844 dim = args(2).int_value (true, false);
7845 if (! error_state && (dim < 1 || dim > args(0).ndims ()))
7846 error ("DIM must be a valid dimension");
7852 retval = do_diff (args(0), order, dim);
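// A minimal sketch of the k-th difference along a vector: one pass of
// adjacent differences shortens the vector by one element, and applying it k
// times gives diff (x, k); once the vector is exhausted the result is empty,
// matching the "K exceeds the dimension" rule above.  Dimension handling and
// the carry-over to the next non-singleton dimension are omitted.

#include <cstddef>
#include <vector>

std::vector<double> diff_sketch (std::vector<double> x, int k)
{
  for (int j = 0; j < k; j++)
    {
      if (x.empty ())
        break;                          // k >= length (x): empty result
      for (std::size_t i = 0; i + 1 < x.size (); i++)
        x[i] = x[i + 1] - x[i];
      x.pop_back ();                    // each pass drops one element
    }
  return x;
}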
7877 assert (rep.ndims () == 2 && rep.rows () == 2);
7886 error ("repelems: second row must contain non-negative numbers");
7893 retval.clear (1, l);
7899 std::fill_n (dest, k, src.checkelem (rep(0, i) - 1));
7906 DEFUN (repelems, args, ,
7908 @deftypefn {Built-in Function} {} repelems (@var{x}, @var{r})\n\
7909 Construct a vector of repeated elements from @var{x}.\n\
7911 @var{r} is a 2x@var{N} integer matrix specifying which elements to repeat and\n\
7912 how often to repeat each element. Entries in the first row, @var{r}(1,j),\n\
7913 select an element to repeat. The corresponding entry in the second row,\n\
7914 @var{r}(2,j), specifies the repeat count. If @var{x} is a matrix then the\n\
7915 columns of @var{x} are imagined to be stacked on top of each other for\n\
7916 purposes of the selection index. A row vector is always returned.\n\
7918 Conceptually the result is calculated as follows:\n\
7923 for i = 1:columns (@var{r})\n\
7924 y = [y, @var{x}(@var{r}(1,i)*ones(1, @var{r}(2,i)))];\n\
7928 @seealso{repmat, cat}\n\
7937 const Matrix rm = args(1).matrix_value ();
7940 else if (rm.rows () != 2 || rm.ndims () != 2)
7942 error ("repelems: R must be a matrix with two rows");
7952 if (static_cast<double> (rx) != rm(i))
7954 error ("repelems: R must be a matrix of integers");
7963 #define BTYP_BRANCH(X, EX) \
7965 retval = do_repelems (x.EX ## _value (), r); \
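// A minimal sketch of the conceptual loop in the repelems docstring: walk the
// columns of the 2xN matrix R and, for column j, append R(2,j) copies of
// x(R(1,j)).  The two rows of R are passed as separate 0-based vectors here,
// and the caller must ensure every selection index is within x.

#include <cstddef>
#include <vector>

std::vector<double> repelems_sketch (const std::vector<double>& x,
                                     const std::vector<std::size_t>& sel,    // R(1,:)
                                     const std::vector<std::size_t>& count)  // R(2,:)
{
  std::vector<double> y;
  for (std::size_t j = 0; j < sel.size () && j < count.size (); j++)
    y.insert (y.end (), count[j], x[sel[j]]);   // repeat x[sel[j]] count[j] times
  return y;
}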
7999 DEFUN (base64_encode, args, ,
8001 @deftypefn {Built-in Function} {@var{s} =} base64_encode (@var{x})\n\
8002 Encode a double matrix or array @var{x} into the base64 format string @var{s}.\n\
8005 @seealso{base64_decode}\n\
8009 int nargin = args.length ();
8015 if (! args(0).is_numeric_type ())
8016 error ("base64_encode: encoding is supported only for numeric arrays");
8017 else if (args(0).is_complex_type ()
8018 || args(0).is_sparse_type ())
8019 error ("base64_encode: encoding complex or sparse data is not supported");
8020 else if (args(0).is_integer_type ())
8022 #define MAKE_INT_BRANCH(X) \
8023 if (args(0).is_ ## X ## _type ()) \
8025 const X##NDArray in = args(0). X## _array_value (); \
8027 in.numel () * sizeof (X## _t) / sizeof (char); \
8029 reinterpret_cast<const char*> (in.data ()); \
8032 && octave_base64_encode (inc, inlen, &out)) \
8034 retval(0) = octave_value (out); \
8047 #undef MAKE_INT_BRANCH
8052 else if (args(0).is_single_type ())
8056 inlen = in.numel () * sizeof (float) / sizeof (char);
8058 inc = reinterpret_cast<const char*> (in.data ());
8071 inlen = in.numel () * sizeof (double) / sizeof (char);
8073 inc = reinterpret_cast<const char*> (in.data ());
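// A minimal sketch of the byte view built in the branches above before the
// data reaches octave_base64_encode: the array's elements are reinterpreted
// as a flat character buffer of numel * sizeof (element) bytes.  Only the
// pointer/length bookkeeping for the double case is reproduced here.

#include <cstddef>
#include <vector>

void byte_view_sketch (const std::vector<double>& in,
                       const char*& inc, std::size_t& inlen)
{
  inlen = in.size () * sizeof (double) / sizeof (char);  // total byte count
  inc = reinterpret_cast<const char*> (in.data ());      // raw byte pointer
}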
8101 DEFUN (base64_decode, args, ,
8103 @deftypefn {Built-in Function} {@var{x} =} base64_decode (@var{s})\n\
8104 @deftypefnx {Built-in Function} {@var{x} =} base64_decode (@var{s}, @var{dims})\n\
8105 Decode the double matrix or array @var{x} from the base64 encoded string @var{s}.\n\
8108 The optional input parameter @var{dims} should be a vector containing the\n\
8109 dimensions of the decoded array.\n\
8110 @seealso{base64_encode}\n\
8115 int nargin = args.length ();
8117 if (nargin < 1 || nargin > 2)
8126 args(1).octave_idx_type_vector_value ();
8136 const std::string str = args(0).string_value ();