
Commit 7719c80

Add global settings class
1 parent a450d3f commit 7719c80

15 files changed, +209 −24 lines

README.md

Lines changed: 4 additions & 2 deletions
@@ -2,7 +2,7 @@
 
 [![PHP from Packagist](https://img.shields.io/packagist/php-v/rubix/tensor.svg?style=flat&colorB=8892BF)](https://www.php.net/) [![Latest Stable Version](https://img.shields.io/packagist/v/rubix/tensor.svg?style=flat&colorB=orange)](https://packagist.org/packages/rubix/tensor) [![Build](https://github.com/RubixML/Tensor/workflows/Build/badge.svg)](https://github.com/RubixML/Tensor/actions) [![Downloads from Packagist](https://img.shields.io/packagist/dt/rubix/tensor.svg?style=flat&colorB=red)](https://packagist.org/packages/rubix/tensor) [![GitHub](https://img.shields.io/github/license/RubixML/Tensor)](https://github.com/RubixML/Tensor/blob/master/LICENSE.md)
 
-Tensor is a library and extension that provides objects for scientific computing in [PHP](https://php.net). It is used by libraries such as [Rubix ML](https://rubixml.com) to build and accelerate machine learning algorithms such as neural networks.
+Tensor is a library and extension that provides objects for scientific computing in [PHP](https://php.net). The multithreaded extension is especially suited for applications where efficient computing of large sets of numbers is required. In some cases, the extension is 340X faster than the same operation in PHPland. Tensor is used by libraries such as [Rubix ML](https://rubixml.com) to build and accelerate machine learning algorithms such as neural networks and linear regressions.
 
 ## Installation
 Install Tensor into your project with [Composer](https://getcomposer.org/):
@@ -61,7 +61,9 @@ php -m | grep tensor
 
 ## Performance Comparison
 
-![Tensor Performance](https://raw.githubusercontent.com/RubixML/Tensor/master/docs/images/tensor-performance-mnist.png)
+![Tensor Performance MNIST](https://raw.githubusercontent.com/RubixML/Tensor/master/docs/images/tensor-performance-mnist.png)
+
+![Tensor Performance Benchmarks](https://raw.githubusercontent.com/RubixML/Tensor/master/docs/images/tensor-performance-benchmarks.png)
 
 ## Funding
 Rubix ML is funded by donations from the community. You can become a sponsor by making a contribution to one of our funding sources below.
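For context, the same Tensor\Matrix API is meant to be available whether or not the C extension is loaded; the extension accelerates it transparently. A minimal sketch of the kind of workload behind the performance comparison, using only calls that appear elsewhere in this commit (Matrix::uniform() and matmul()) plus PHP's built-in extension_loaded():

<?php

use Tensor\Matrix;

// True when the C extension from this repository is loaded (see `php -m | grep tensor` above).
var_dump(extension_loaded('tensor'));

// Build two 500 x 500 matrices of uniform random values, as in MatmulBench::setUp() below.
$a = Matrix::uniform(500, 500);
$b = Matrix::uniform(500, 500);

// Matrix-matrix product, one of the operations covered by the performance comparison.
$c = $a->matmul($b);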

benchmarks/LinearAlgebra/MatmulBench.php

Lines changed: 3 additions & 3 deletions
@@ -22,15 +22,15 @@ class MatmulBench
 
     public function setUp() : void
     {
-        $this->a = Matrix::uniform(1000, 1000);
+        $this->a = Matrix::uniform(500, 500);
 
-        $this->b = Matrix::uniform(1000, 1000);
+        $this->b = Matrix::uniform(500, 500);
     }
 
     /**
      * @Subject
      * @Iterations(5)
-     * @OutputTimeUnit("milliseconds", precision=3)
+     * @OutputTimeUnit("seconds", precision=3)
      */
     public function matmul() : void
     {

benchmarks/LinearAlgebra/VectorDotProductBench.php

Lines changed: 3 additions & 3 deletions
@@ -22,15 +22,15 @@ class VectorDotProductBench
 
     public function setUp() : void
    {
-        $this->a = Vector::uniform(100000);
+        $this->a = Vector::uniform(10000000);
 
-        $this->b = Vector::uniform(100000);
+        $this->b = Vector::uniform(10000000);
     }
 
     /**
      * @Subject
      * @Iterations(5)
-     * @OutputTimeUnit("milliseconds", precision=3)
+     * @OutputTimeUnit("seconds", precision=3)
      */
     public function dot() : void
     {
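Both files are phpbench subjects: setUp() builds the fixtures and the annotated methods time a single operation. This commit resizes the fixtures and switches the reported unit from milliseconds to seconds. The subject bodies fall outside these hunks; a rough standalone equivalent, assuming they call the matmul() and dot() instance methods (an assumption, since the bodies are not shown), looks like:

<?php

use Tensor\Matrix;
use Tensor\Vector;

// Fixtures matching the updated setUp() methods above.
$a = Matrix::uniform(500, 500);
$b = Matrix::uniform(500, 500);

$u = Vector::uniform(10000000);
$v = Vector::uniform(10000000);

// Time each operation once, in seconds, using PHP's nanosecond clock.
$t = hrtime(true);
$a->matmul($b);
printf("matmul: %.3f s\n", (hrtime(true) - $t) / 1e9);

$t = hrtime(true);
$u->dot($v);
printf("dot: %.3f s\n", (hrtime(true) - $t) / 1e9);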

config.json

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
         "include/comparison.c",
         "include/linear_algebra.c"
     ],
-    "extra-libs": "-lopenblas -llapacke -lgfortran",
+    "extra-libs": "-lopenblas -llapacke -lgfortran -lpthread",
     "globals": {
         "num_threads": {
             "type": "int",

ext/config.m4

Lines changed: 4 additions & 3 deletions
@@ -4,8 +4,8 @@ if test "$PHP_TENSOR" = "yes"; then
 
 
 
-  if ! test "x-lopenblas -llapacke -lgfortran" = "x"; then
-    PHP_EVAL_LIBLINE(-lopenblas -llapacke -lgfortran, TENSOR_SHARED_LIBADD)
+  if ! test "x-lopenblas -llapacke -lgfortran -lpthread" = "x"; then
+    PHP_EVAL_LIBLINE(-lopenblas -llapacke -lgfortran -lpthread, TENSOR_SHARED_LIBADD)
   fi
 
   AC_DEFINE(HAVE_TENSOR, 1, [Whether you have Tensor])
@@ -22,7 +22,8 @@ if test "$PHP_TENSOR" = "yes"; then
   tensor/decompositions/lu.zep.c
   tensor/matrix.zep.c
   tensor/reductions/ref.zep.c
-  tensor/reductions/rref.zep.c include/indexing.c
+  tensor/reductions/rref.zep.c
+  tensor/settings.zep.c include/indexing.c
   include/arithmetic.c
   include/comparison.c
   include/linear_algebra.c"

ext/config.w32

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@ if (PHP_TENSOR != "no") {
         AC_DEFINE("ZEPHIR_USE_PHP_JSON", 1, "Whether PHP json extension is present at compile time");
     }
     ADD_SOURCES(configure_module_dirname + "/include", "indexing.c arithmetic.c comparison.c linear_algebra.c", "tensor");
-    ADD_SOURCES(configure_module_dirname + "/tensor", "arithmetic.zep.c arraylike.zep.c comparable.zep.c functional.zep.c statistical.zep.c trigonometric.zep.c tensor.zep.c vector.zep.c columnvector.zep.c matrix.zep.c", "tensor");
+    ADD_SOURCES(configure_module_dirname + "/tensor", "arithmetic.zep.c arraylike.zep.c comparable.zep.c functional.zep.c statistical.zep.c trigonometric.zep.c tensor.zep.c vector.zep.c columnvector.zep.c matrix.zep.c settings.zep.c", "tensor");
     ADD_SOURCES(configure_module_dirname + "/tensor/decompositions", "cholesky.zep.c lu.zep.c", "tensor");
     ADD_SOURCES(configure_module_dirname + "/tensor/reductions", "ref.zep.c rref.zep.c", "tensor");
     ADD_FLAG("CFLAGS_TENSOR", "/D ZEPHIR_RELEASE /Oi /Ot /Oy /Ob2 /Gs /GF /Gy /GL");

ext/include/arithmetic.h

Lines changed: 3 additions & 3 deletions
@@ -1,5 +1,5 @@
-#ifndef TENSOR_TENSOR_ARITHMETIC_H
-#define TENSOR_TENSOR_ARITHMETIC_H
+#ifndef TENSOR_ARITHMETIC_H
+#define TENSOR_ARITHMETIC_H
 
 #include <Zend/zend.h>
 
@@ -17,4 +17,4 @@ void tensor_subtract_scalar(zval * return_value, zval * a, zval * b);
 void tensor_pow_scalar(zval * return_value, zval * a, zval * b);
 void tensor_mod_scalar(zval * return_value, zval * a, zval * b);
 
-#endif
+#endif

ext/include/comparison.h

Lines changed: 2 additions & 2 deletions
@@ -1,5 +1,5 @@
-#ifndef TENSOR_ARRAY_COMPARISON_H
-#define TENSOR_ARRAY_COMPARISON_H
+#ifndef TENSOR_COMPARISON_H
+#define TENSOR_COMPARISON_H
 
 #include <Zend/zend.h>
 

ext/include/linear_algebra.c

Lines changed: 7 additions & 6 deletions
@@ -96,7 +96,6 @@ void tensor_inverse(zval * return_value, zval * a)
     unsigned int i, j;
     Bucket * row;
     zval rowB, b;
-    lapack_int status;
 
     zend_zephir_globals_def * zephir_globals = ZEPHIR_VGLOBAL;
 
@@ -110,15 +109,17 @@ void tensor_inverse(zval * return_value, zval * a)
 
     double * va = emalloc(n * n * sizeof(double));
 
-    for (i = 0; i < n; i++) {
+    for (i = 0; i < n; ++i) {
         row = Z_ARR(ba[i].val)->arData;
 
-        for (j = 0; j < n; j++) {
+        for (j = 0; j < n; ++j) {
             va[i * n + j] = zephir_get_doubleval(&row[j].val);
         }
     }
 
-    int pivots[n + 1];
+    lapack_int status;
+
+    int pivots[n];
 
     status = LAPACKE_dgetrf(LAPACK_ROW_MAJOR, n, n, va, n, pivots);
 
@@ -134,10 +135,10 @@ void tensor_inverse(zval * return_value, zval * a)
 
     array_init_size(&b, n);
 
-    for (i = 0; i < n; i++) {
+    for (i = 0; i < n; ++i) {
         array_init_size(&rowB, n);
 
-        for (j = 0; j < n; j++) {
+        for (j = 0; j < n; ++j) {
             add_next_index_double(&rowB, va[i * n + j]);
         }
 