Skip to content

Commit 7641787

Browse files
committed
deepxIR:支持简化联合IR
1 parent c143a25 commit 7641787

13 files changed

Lines changed: 113 additions & 80 deletions

File tree

excuter/common/src/stdutil/num.cpp

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
#include "num.hpp"
2+
#include <string>
3+
#include <cctype>
4+
5+
// Returns true iff `str` is a base-10 positive-integer literal (> 0):
// an optional leading '+', then digits only.
//
// The previous std::stoi try/catch wrongly accepted negatives ("-5"),
// zero, leading whitespace, and strings with trailing garbage ("12abc"
// parses as 12) — all of which contradict the function's name and break
// callers that use it to tell numeric literals apart from identifiers.
bool is_positive_integer(const std::string& str) {
    std::size_t i = (!str.empty() && str[0] == '+') ? 1 : 0;
    if (i >= str.size()) {
        return false;  // empty string or a lone "+"
    }
    bool nonzero = false;
    for (; i < str.size(); ++i) {
        const unsigned char c = static_cast<unsigned char>(str[i]);
        if (!std::isdigit(c)) {
            return false;  // rejects "-5", "1.5", "12abc", " 7"
        }
        if (c != '0') {
            nonzero = true;
        }
    }
    return nonzero;  // "0", "00", ... are not positive
}
13+
14+
// Returns true iff the whole of `str` parses as a floating-point
// literal (e.g. "1", "-2.5", "3e4").
//
// The previous version only checked that std::stof did not throw, so a
// numeric prefix was enough: "1.5x" or "7abc" were classified as float
// literals. Requiring that the parse consume the entire string fixes
// that, which matters for callers that use this to decide whether an
// argument is a literal or a variable name.
bool is_float(const std::string& str) {
    try {
        std::size_t consumed = 0;
        std::stof(str, &consumed);
        return consumed == str.size();  // reject trailing garbage like "1.5x"
    } catch (...) {  // std::invalid_argument or std::out_of_range
        return false;
    }
}
22+

excuter/common/src/stdutil/num.hpp

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
#ifndef STDUTIL_NUM_HPP
#define STDUTIL_NUM_HPP

#include <string>

// Numeric-string classification helpers. Used by op implementations to
// decide whether an IR argument string is a numeric literal or the name
// of a variable to be looked up in memory.

// True iff `str` is a base-10 positive-integer literal.
bool is_positive_integer(const std::string& str);

// True iff `str` parses as a floating-point literal.
bool is_float(const std::string& str);

#endif // STDUTIL_NUM_HPP

excuter/common/src/stdutil/vector.hpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
#ifndef DEEPX_VECTOR_HPP
2-
#define DEEPX_VECTOR_HPP
1+
#ifndef STDUTIL_VECTOR_HPP
2+
#define STDUTIL_VECTOR_HPP
33

44
#include <vector>
55
#include <ostream>
@@ -17,4 +17,4 @@ std::ostream& operator<<(std::ostream& os, const std::vector<T>& vec) {
1717
os << "]";
1818
return os;
1919
}
20-
#endif // DEEPX_VECTOR_HPP
20+
#endif // STDUTIL_VECTOR_HPP

excuter/op-mem-ompsimd/src/deepx/op/init.hpp

Lines changed: 26 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33

44
#include "deepx/op/op.hpp"
55
#include "deepx/tensorfunc/init.hpp"
6+
#include "stdutil/num.hpp"
67
namespace deepx::op{
78
template<typename T>
89
class Uniform : public OpT<T>{
@@ -18,9 +19,15 @@ namespace deepx::op{
1819
}
1920
// Fills returns[0] with uniform random values in [low, high].
// args[0]/args[1] may be numeric literals ("0.5") or names of scalar
// variables stored in mem; is_float() on args[0] selects which.
// NOTE(review): only args[0] is tested — a call mixing a literal low
// with a variable-name high would stof() the name; confirm callers
// never mix the two forms.
// NOTE(review): std::stof yields float even when T is double, so
// literal precision is truncated for double tensors — TODO confirm
// acceptable.
void forward(mem::Mem &mem) override{
    auto output = mem.gettensor<T>(this->returns[0]).get();
    if (is_float(this->args[0])){
        // Literal form: parse both bounds directly from the IR strings.
        T low = std::stof(this->args[0]);
        T high = std::stof(this->args[1]);
        tensorfunc::uniform(*output,low,high);
    }else{
        // Variable form: resolve both bounds from memory by name.
        T low = mem.getarg<T>(this->args[0]);
        T high = mem.getarg<T>(this->args[1]);
        tensorfunc::uniform(*output,low,high);
    }
}
2532
void backward(mem::Mem &mem) override{
2633
throw std::runtime_error("Uniform op does not support backward");
@@ -41,8 +48,13 @@ namespace deepx::op{
4148
}
4249
// Fills returns[0] with a single constant value.
// args[0] may be a numeric literal ("3.14") or the name of a scalar
// variable stored in mem; is_float() selects the interpretation.
// NOTE(review): std::stof yields float even when T is double, so
// literal precision is truncated for double tensors — TODO confirm
// acceptable.
void forward(mem::Mem &mem) override{
    auto output = mem.gettensor<T>(this->returns[0]).get();
    if (is_float(this->args[0])){
        // Literal form: parse the fill value from the IR string.
        T value = std::stof(this->args[0]);
        tensorfunc::constant(*output,value);
    }else{
        // Variable form: resolve the fill value from memory by name.
        T value = mem.getarg<T>(this->args[0]);
        tensorfunc::constant(*output,value);
    }
}
4759
void backward(mem::Mem &mem) override{
4860
throw std::runtime_error("Constant op does not support backward");
@@ -63,9 +75,15 @@ namespace deepx::op{
6375
}
6476
// Fills returns[0] with an arithmetic sequence: start, start+step, ...
// args[0]/args[1] may be numeric literals or names of scalar variables
// stored in mem; is_float() on args[0] selects which.
// NOTE(review): only args[0] is tested — a call mixing a literal start
// with a variable-name step would stof() the name; confirm callers
// never mix the two forms.
// NOTE(review): std::stof yields float even when T is double, so
// literal precision is truncated for double tensors — TODO confirm
// acceptable.
void forward(mem::Mem &mem) override{
    auto output = mem.gettensor<T>(this->returns[0]).get();
    if (is_float(this->args[0])){
        // Literal form: parse both parameters from the IR strings.
        T start = std::stof(this->args[0]);
        T step = std::stof(this->args[1]);
        tensorfunc::arange(*output,start,step);
    }else{
        // Variable form: resolve both parameters from memory by name.
        T start = mem.getarg<T>(this->args[0]);
        T step = mem.getarg<T>(this->args[1]);
        tensorfunc::arange(*output,start,step);
    }
}
7088
void backward(mem::Mem &mem) override{
7189
throw std::runtime_error("Arange op does not support backward");

excuter/op-mem-ompsimd/src/deepx/op/new.hpp

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@
44
#include "deepx/op/op.hpp"
55
#include "deepx/mem/mem.hpp"
66
#include "deepx/tensorfunc/new.hpp"
7+
#include "stdutil/num.hpp"
8+
79
namespace deepx::op{
810
template<typename T>
911
class NewTensor : public OpT<T>{
@@ -19,9 +21,18 @@ namespace deepx::op{
1921
}
2022
// Allocates a new tensor named returns[0], with its shape taken either
// from a shape vector stored in mem (single-arg form) or from literal
// integer args (multi-arg form, e.g. args = ["2","3","4"]).
// NOTE(review): the single-arg branch fires when args[0] IS a positive
// integer yet then uses it as a key into mem.getvector — the opposite
// of the multi-arg branch, where integers are literals. Looks inverted
// unless vector keys are numeric ids; confirm against the IR producer.
// NOTE(review): atoi() returns 0 on non-numeric input, so a malformed
// dim silently becomes 0 — consider validating.
// NOTE(review): when args.size()==1 and args[0] is not a positive
// integer, neither branch runs and no tensor is created — silent no-op.
void forward(mem::Mem &mem) override{
    string name= this->returns[0];
    if (this->args.size()==1&&is_positive_integer(this->args[0])){
        // Shape is resolved from memory by the single argument.
        vector<int> shape=mem.getvector<int32_t>(this->args[0]);
        Tensor<T> t=tensorfunc::New<T>(shape);
        mem.addtensor(name,t);
    }else if (this->args.size()>1){
        // Each argument is one literal dimension of the shape.
        vector<int> shape;
        for (int i = 0; i < this->args.size(); i++) {
            shape.push_back(atoi(this->args[i].c_str()));
        }
        Tensor<T> t=tensorfunc::New<T>(shape);
        mem.addtensor(name,t);
    }
}
2637
void backward(mem::Mem &mem) override{
2738
throw std::runtime_error("New op does not support backward");

front/py/deepx/autograd/graph.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,11 +56,11 @@ def add_vector(self, name,data,inputs=[]):
5656
self.nodes.append(node)
5757
return node
5858

59-
def add_tensor(self, name,data,inputs=[]):
59+
def add_tensor(self, name,t,inputs=[]):
6060
self.tensor_counter += 1
6161
if name == "":
6262
name = f"tensor_{self.tensor_counter}"
63-
node=DataNode(name, "tensor", data)
63+
node=DataNode(name, "tensor", t)
6464
for input in inputs:
6565
node.add_input(input)
6666
self.nodes.append(node)

front/py/deepx/tensor/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
'Device','DeviceType',
1818
'Shape',
1919
'Tensor',
20-
'zeros', 'ones', 'arange','rand','randn','eye',
20+
'full','zeros', 'ones', 'arange','rand','randn','eye',
2121
'add', 'mul',
2222
# 'mul', 'div',
2323
# 'matmul', 'dot',

front/py/deepx/tensor/dtype.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
'int32': np.int32,
1111
'int64': np.int64,
1212
}
13+
default_dtype = 'float32'
1314

1415
def infer_dtype(data: Any) -> str:
1516
"""

front/py/deepx/tensor/elementwise.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,7 @@ def add(a:Tensor,b:Tensor,out:Tensor):
1414

1515
@tensor_method
def add_(self, other):
    # Element-wise add of `self` and `other` into a freshly allocated
    # tensor of self's dtype/shape.
    # NOTE(review): the trailing-underscore name follows the in-place
    # convention, yet this allocates and returns a new tensor and does
    # not visibly mutate `self` — confirm intended semantics.
    result = Tensor(dtype=self.dtype,shape=self.shape)
    add(self,other,result)
    return result
2120

front/py/deepx/tensor/init.py

Lines changed: 13 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,21 @@
11
from .tensor import Tensor, tensor_method
22
import numpy as np
3+
from .deepxir import DeepxIR
34

4-
def zeros(*size, dtype=None, device=None):
5-
"""创建指定大小的全0张量
6-
7-
参数:
8-
*size: 张量形状,可以是多个整数或单个形状元组
9-
dtype: 数据类型
10-
device: 设备类型,如'cpu'或'cuda'
11-
"""
12-
if len(size) == 1 and isinstance(size[0], (tuple, list)):
13-
size = size[0]
14-
data = np.zeros(size)
15-
return Tensor(data=data, dtype=dtype, device=device)
5+
def full(*shape, fill_value=0, dtype=None, device=None):
    """Create a tensor of the given shape filled with ``fill_value``.

    Accepts either separate dims (``full(2, 3)``) or a single
    tuple/list (``full((2, 3))``). In eager-graph mode a "constant"
    DeepxIR instruction is built for the executor.
    """
    if len(shape) == 1 and isinstance(shape[0], (tuple, list)):
        shape = shape[0]  # unwrap full((2, 3)) into the varargs form
    t=Tensor(data=None, shape=shape, dtype=dtype, device=device)
    if t.graph.eager:
        ir=DeepxIR("constant", t.dtype, [fill_value], [t.node.name])
        # NOTE(review): the IR is only printed — presumably a stub until
        # it is dispatched to the executor; confirm delivery path.
        print(ir)
    return t
13+
14+
def zeros(*shape, dtype=None, device=None):
    """Create a tensor of the given shape filled with zeros.

    Thin wrapper over ``full`` with ``fill_value=0``; accepts the same
    varargs or single tuple/list shape forms.
    """
    return full(*shape, fill_value=0, dtype=dtype, device=device)
1616

1717
def ones(*size, dtype=None, device=None):
    """Create a tensor of the given shape filled with ones.

    Thin wrapper over ``full`` with ``fill_value=1``; accepts the same
    varargs or single tuple/list shape forms.
    """
    return full(*size, fill_value=1, dtype=dtype, device=device)
2319

2420
def rand(*size, dtype=None, device=None):
2521
"""创建指定大小的[0,1)均匀分布随机张量"""

0 commit comments

Comments
 (0)