LibTorch vs. PyTorch: common usage compared

#include <iostream>
#include <torch/torch.h>
#include <torch/script.h>

int main()
{
    torch::Tensor t1 = torch::rand({4, 5});

    // print shape
    t1.print();
    std::cout << "t1.sizes = " << t1.sizes() << std::endl;
    // print the tensor
    std::cout << "t1 = " << t1 << std::endl << std::endl;
    float a1 = t1[1][2].item().toFloat();
    std::cout << "t1[1][2] = " << a1 << std::endl << std::endl;

    // select a single row: select(0, 3) picks index 3 along dim 0 (the 4th row);
    // select(1, j) would pick column j instead
    torch::Tensor t2 = t1.select(0, 3);
    std::cout << "t2 = " << t2 << std::endl << std::endl;

    // element-wise comparison
    torch::Tensor t3 = torch::rand({ 3, 5 });
    std::cout << "t3 = " << t3 << std::endl << std::endl;
    torch::Tensor t4 = t3.ge(0.5);
    std::cout << "t4 = " << t4 << std::endl << std::endl;
    torch::Tensor t5 = (t3 > 0.5); // boolean mask with the same shape as t3
    std::cout << "t5 = " << t5 << std::endl << std::endl;

    torch::Tensor t6 = torch::masked_select(t3, t5);
    std::cout << "t6 = " << t6 << std::endl << std::endl;

    torch::Tensor t7 = t3.masked_select(t5); // member-function form, same result
    std::cout << "t7 = " << t7 << std::endl << std::endl;

    // build a mask to filter rows
    // torch::from_blob with torch::kBool did not work here; note the buffer
    // below holds 4-byte ints, while kBool expects 1-byte elements
    /*std::vector<int> v4{1, 0, 1};
    torch::Tensor mask4 = torch::from_blob(v4.data(), { 3, 1 }, torch::kBool);
    std::cout << "mask4 = " << mask4 << std::endl;*/
    torch::Tensor mask5 = torch::tensor({ {1}, {0}, {1} }, torch::kBool);
    std::cout << "mask5 = " << mask5 << std::endl << std::endl;
    std::cout << "mask5.sizes() = " << mask5.sizes() << std::endl << std::endl;

    torch::Tensor t5_ = torch::rand({3, 6});
    std::cout << "t5_ = " << t5_ << std::endl << std::endl;
    // masked_select returns a flat 1-D tensor, so the view back to {-1, 6} is required
    auto t6_ = torch::masked_select(t5_, mask5).view({-1, 6});
    std::cout << "t6_ = " << t6_ << std::endl << std::endl;

    // slicing rows
    torch::Tensor t8 = torch::rand({6, 6});
    std::cout << "t8 = " << t8 << std::endl << std::endl;
    std::cout << "t8.slice(0, 2, 4) = " << t8.slice(0, 2, 4) << std::endl << std::endl;

    // row/column maxima
    torch::Tensor t9 = torch::rand({ 3, 4 });
    std::cout << "t9 = " << t9 << std::endl << std::endl;
    // max(t9, 0) reduces along dim 0, i.e. the maximum of each column
    std::tuple<torch::Tensor, torch::Tensor> max_info = torch::max(t9, 0);
    // unpack the tuple
    torch::Tensor max_vals = std::get<0>(max_info);
    torch::Tensor idxs = std::get<1>(max_info);
    std::cout << "max_vals = " << max_vals << std::endl << std::endl;
    std::cout << "idxs = " << idxs << std::endl << std::endl;

    // concatenation (t3 is 3x5, t9 is 3x4; cat along dim 1 gives 3x9)
    torch::Tensor t10 = torch::cat({ t3, t9 }, 1);
    std::cout << "t10 = " << t10 << std::endl << std::endl;
    system("pause");
    return 0;
}
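A side note on the commented-out from_blob snippet above: one workaround for the kBool mismatch is to keep the flags in a 1-byte buffer, wrap it as kUInt8, and cast afterwards. Below is a minimal, standalone sketch of that idea (illustrative names, not part of the original post):

#include <torch/torch.h>
#include <cstdint>
#include <vector>
#include <iostream>

int main()
{
    // flags stored as 1-byte integers, matching the dtype passed to from_blob
    std::vector<uint8_t> flags{ 1, 0, 1 };
    // from_blob wraps the buffer without copying; clone() takes ownership,
    // and to(torch::kBool) converts the result into a proper boolean mask
    torch::Tensor mask = torch::from_blob(flags.data(), { 3, 1 }, torch::kUInt8)
                             .clone()
                             .to(torch::kBool);
    std::cout << "mask = " << mask << std::endl;
    return 0;
}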
LearningPytorch.py
import numpy as np
import torch

if __name__ == "__main__":
    ## e.g. 2.4
    # declare a tensor
    t1 = torch.tensor([[1, 2, 3], [2, 3, 4]])
    print('t1.dtype = ', t1.dtype)
    print('t1.shape = ', t1.shape)
    t1 = torch.tensor(range(10))  # convert an iterable to a tensor
    # numpy -> tensor
    t1 = torch.tensor(np.array([1, 2, 3]))      # copies the data
    t2 = torch.from_numpy(np.array([1, 2, 3]))  # shares memory with the array
    # tensor -> numpy
    t3 = t2.numpy()

    # random tensors
    t1 = torch.randn(3, 3) * 10  # randn: normal distribution; rand: uniform
    t2 = t1.to(torch.int8)

    ## e.g. 2.5 random numbers
    t1 = torch.rand(3, 3)      # 3x3 matrix, elements uniform on [0, 1)
    t1 = torch.randn(2, 3, 4)  # 2x3x4, standard normal
    t1 = torch.zeros(2, 2, 2)
    t1 = torch.ones(3, 4, 5) * 4
    t1 = torch.eye(4)
    t1 = torch.randint(1, 5, (3, 3))  # 3x3 matrix of integers uniform on [1, 5)

    ## e.g. 2.6 random numbers
    t1 = torch.randn(3, 3)

    # copy the shape of t1
    t2 = torch.zeros_like(t1)  # all-zero tensor with the same shape as t1
    t2 = torch.ones_like(t1)
    t2 = torch.randn_like(t1)  # normal distribution

    # copy the dtype of t1
    t3 = t1.new_tensor([1, 2, 3])  # build from a Python list; keeps t1's dtype (float32 here)
    t3 = t1.new_zeros(3, 3)  # all-zero tensor with the same dtype
    t3 = t1.new_ones(3, 3)   # all-one tensor with the same dtype

    # e.g. 2.8 devices
    t1 = torch.randn(3, 3, device="cpu")     # tensor stored on the CPU
    t1 = torch.randn(3, 3, device="cuda:0")  # tensor stored on GPU 0
    t1 = torch.randn(3, 3, device="cuda:0").device  # query the tensor's device
    t1 = torch.randn(3, 3, device="cuda:0").cpu().device  # move from GPU 0 to the CPU
    t1 = torch.randn(3, 3, device="cuda:0").cuda(0).device  # device stays the same

    # e.g. 2.9 pointers and dimensions
    t1 = torch.randn(3, 4, 5)
    nd = t1.ndimension()  # number of dimensions, 3 here
    ne = t1.nelement()    # total number of elements, 3*4*5 = 60
    sz = t1.size()        # torch.Size([3, 4, 5])
    c = t1.size(0)        # size of dimension 0
    t = torch.randn(12)   # vector of length 12
    t1 = t.view(3, 4)     # reshape the vector into a 3x4 matrix
    t1 = t1.view(-1, 4)   # -1 lets PyTorch infer that dimension
    t1.view(4, 3)[0, 0] = 99.0  # views share storage, so this writes into t1
    ptr = t1.data_ptr()   # raw data pointer of the tensor
    t1.view(3, 4).data_ptr()  # same storage, same pointer
    t1.view(4, 3).contiguous().data_ptr()  # already contiguous: unchanged
    t1.view(3, 4).transpose(0, 1).data_ptr()  # transpose swaps two dimensions (still a view)
    t1.view(3, 4).transpose(0, 1).contiguous().data_ptr()  # strides no longer match: a new tensor is materialized (memory is reallocated)

    # e.g. 2.10 mask
    t1 = torch.randn(2, 3, 4)
    t2 = t1[1, 2, 3]
    t2 = t1[:, 1:, 1:3]
    mask = t1 > 0  # mask of elements > 0; mask.shape equals t1.shape
    t2 = t1[mask]  # 1-D vector whose length equals the number of True entries

    # e.g. 2.11 sqrt && sum
    t1 = torch.randint(1, 9, (3, 3))
    t1 = t1.to(torch.float)
    t2 = t1.sqrt()       # leaves t1 unchanged
    t3 = torch.sqrt(t1)  # leaves t1 unchanged
    t1.sqrt_()           # in-place square root: modifies t1
    sum1 = torch.sum(t1)          # sums over all elements by default
    sum2 = torch.sum(t1, 0)       # sum along dimension 0
    sum3 = torch.sum(t1, [0, 1])  # sum along dimensions 0 and 1

    mean1 = t1.mean()   # mean over all elements; torch.mean works too
    mean2 = t1.mean(0)  # mean along dimension 0
    mean3 = torch.mean(t1, [0, 1])  # mean along dimensions 0 and 1; a scalar (0-dim tensor)

    # e.g. 2.12 add/sub/mul/div (the operators are overloaded as well -- try them)
    t1 = torch.rand(2, 3)
    t2 = torch.rand(2, 3)
    t3 = t1.add(t2)
    t4 = t1.sub(t2)
    t5 = t1.mul(t2)  # element-wise product, not matrix multiplication
    t6 = t1 * t2
    t1.add_(t2)  # in-place add: modifies t1, not t2

    var = torch.__version__
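One behavior worth knowing when porting the numpy interop above to C++: torch.tensor(np_array) copies the data while torch.from_numpy shares it, and LibTorch has the same split between torch::tensor / clone() (copy) and torch::from_blob (zero-copy). A minimal sketch of the C++ side, with illustrative names:

#include <torch/torch.h>
#include <iostream>

int main()
{
    float data[6] = { 1, 2, 3, 4, 5, 6 };
    // zero-copy view over the buffer; the caller must keep `data` alive
    torch::Tensor shared = torch::from_blob(data, { 2, 3 });
    // independent copy that owns its own storage
    torch::Tensor owned = shared.clone();

    data[0] = 99.0f; // visible through `shared`, not through `owned`
    std::cout << "shared[0][0] = " << shared[0][0].item().toFloat() << std::endl; // 99
    std::cout << "owned[0][0]  = " << owned[0][0].item().toFloat() << std::endl;  // 1
    return 0;
}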
LearningPytorch.cpp
#include <torch/torch.h>
#include <torch/script.h>
#include <iostream>
using namespace std; // not recommended in real projects

void printTitle(const string& title)
{
    cout << endl;
    cout << "******【" << title << "】******" << endl;
}

int main()
{
    // e.g. 2.4
    {
        printTitle("e.g. 2.4");
        // declare a tensor
        torch::Tensor t1 = torch::tensor({ { 1, 2, 3 }, { 2, 3, 4 } }, torch::kByte);
        cout << "t1.dtype() = " << t1.dtype() << endl; // unsigned char (Byte), as requested
        t1.print();
        cout << "t1 = " << t1 << endl;
        t1 = torch::range(1, 10, torch::kByte); // deprecated; torch::arange is the modern spelling

        // random tensor
        t1 = torch::randn({ 3, 3 }, torch::kFloat) * 10;
        cout << "t1 = " << t1 << endl;
        torch::Tensor t2 = t1.to(torch::kInt8);
        cout << "t2 = " << t2 << endl;
    }
    // e.g. 2.5 random numbers
    {
        printTitle("e.g. 2.5 random numbers");
        torch::Tensor t1 = torch::rand({ 3, 3 }, torch::kFloat32);
        t1 = torch::randn({ 2, 3, 4 });
        t1 = torch::zeros({ 2, 2, 2 }, torch::kUInt8);
        t1 = torch::ones({ 3, 4 }) * 9;
        t1 = torch::eye(3, torch::kFloat);
        t1 = torch::randint(0, 4, { 3, 3 }); // integers uniform on [0, 4)
        cout << "t1 = " << t1 << endl;
    }
    // e.g. 2.6 random numbers
    {
        printTitle("e.g. 2.6 random numbers");
        torch::Tensor t1 = torch::rand({ 3, 3 }, torch::kFloat32);
        // copy the shape of t1
        torch::Tensor t2 = torch::zeros_like(t1);
        t2 = torch::ones_like(t1);
        t2 = torch::randn_like(t1);

        // copy the dtype of t1
        torch::Tensor t3 = t1.new_zeros({ 3, 3 }); // same dtype, all zeros
        t3 = torch::ones(t1.sizes(), t1.dtype());  // same pattern as in OpenCV
        t3 = torch::zeros(t1.sizes(), t1.dtype());

        cout << "t2 = " << t2 << endl;
        cout << "t3 = " << t3 << endl;
    }
    // e.g. 2.8 devices
    {
        printTitle("e.g. 2.8 devices");
        torch::Tensor t1 = torch::randn({ 3, 3 }, torch::Device("cpu"));
        cout << "t1 = " << t1 << endl;
        auto device = torch::Device("cuda:0");
        torch::Tensor t2 = torch::randn({ 3, 3 }, torch::kF32).to(device);
        cout << "t2 = " << t2 << endl;
        cout << "t2.device = " << t2.device() << endl;
    }
    // e.g. 2.9 pointers
    {
        printTitle("e.g. 2.9 pointers");
        torch::Tensor t1 = torch::randn({ 3, 4, 5 });
        cout << t1 << endl;
        int nd = t1.ndimension(); // number of dimensions, 3 here
        int nc = t1.size(0); // c
        int nw = t1.size(1); // w
        int nh = t1.size(2); // h
        cout << nd << " " << nc << endl;
        auto sz = t1.sizes(); // [c, w, h]
        cout << "sz = " << sz << endl;
        t1 = torch::randn({ 12 });
        torch::Tensor t2 = t1.view({ -1, 3 }); // second dimension becomes 3; the first is inferred (it must divide evenly, or view throws)
        t2[0][0] = 99; // element access
        cout << "t2 = " << t2 << endl;
        float* t2_ptr = (float*)t2.data_ptr(); // raw data pointer
        cout << "t2_ptr = " << t2_ptr << endl;
        void* t22_ptr = (void*)t2.data_ptr(); // same address, different pointer type
        cout << "t22_ptr = " << t22_ptr << endl;
        auto t222_ptr = t2.contiguous().data_ptr(); // already contiguous: same address
        cout << "t222_ptr = " << t222_ptr << endl;
        auto t2222_ptr = t2.transpose(0, 1).contiguous().data_ptr(); // strides no longer match the layout, so a new tensor is materialized (memory is reallocated)
        cout << "t2222_ptr = " << t2222_ptr << endl;
    }
    // e.g. 2.10 mask
    {
        printTitle("e.g. 2.10 mask");
        torch::Tensor t1 = torch::randn({ 2, 3, 4 });
        cout << "t1 = " << t1 << endl;
        torch::Tensor ele = t1[1][2][3];
        cout << "ele = " << ele << endl;
        double ele_ = ele.item().toDouble(); // tensor -> double
        cout << "ele_ = " << ele_ << endl;
        torch::Tensor mask = t1.ge(0);
        cout << "mask = " << mask << endl;
        torch::Tensor t2 = t1.masked_select(mask); // t2 is a flat 1-D vector
        cout << "t2 = " << t2 << endl;
    }
    // e.g. 2.11 sqrt && sum
    {
        printTitle("e.g. 2.11 sqrt");
        torch::Tensor t1 = torch::randint(1, 9, { 3, 3 });
        cout << "t1 = " << t1 << endl;
        torch::Tensor t2 = t1.to(torch::kFloat32);
        torch::Tensor t3 = t2.sqrt(); // leaves t2 unchanged
        t3 = torch::sqrt(t2);         // leaves t2 unchanged
        cout << "t3 = " << t3 << endl;
        t2.sqrt_(); // in-place square root: modifies t2
        cout << "t2 = " << t2 << endl;

        // the sum() member function works as well
        cout << "t1 = " << t1 << endl;
        torch::Tensor sum1 = torch::sum(t1);    // sums over all elements by default
        torch::Tensor sum2 = torch::sum(t1, 0); // sum along dimension 0, i.e. per column
        torch::Tensor sum3 = torch::sum(t1, { 1, 0 }); // writing { 0, 1 } triggered a compile error in this setup
        cout << "sum3 = " << sum3.item().toFloat() << endl;

        torch::Tensor mean1 = t1.mean();  // mean over all elements; torch::mean works too
        torch::Tensor mean2 = t1.mean(0); // mean along dimension 0
        // writing { 0, 1 } triggered the same compile error as above
        torch::Tensor mean3 = torch::mean(t1, { 1, 0 }); // mean along dimensions 0 and 1; the result is a scalar
        cout << "mean1 = " << mean1.item().toFloat() << endl;
        cout << "mean2 = " << mean2 << endl;
        cout << "mean3 = " << mean3 << endl;
    }
    // e.g. 2.12 element-wise add, subtract, multiply, divide (the operators are overloaded as well -- try them)
    {
        printTitle("e.g. 2.12");
        torch::Tensor t1 = torch::rand({ 2, 3 });
        torch::Tensor t2 = torch::rand({ 2, 3 });
        torch::Tensor t3 = t1 + t2;
        torch::Tensor t4 = t1.sub(t2);
        torch::Tensor t5 = t1.mul(t2); // element-wise product, not matrix multiplication
        torch::Tensor t6 = t1.div(2);
        cout << "t1 = " << t1 << endl;
        cout << "t2 = " << t2 << endl;
        cout << "t3 = " << t3 << endl;
        cout << "t4 = " << t4 << endl;
        cout << "t5 = " << t5 << endl;
        cout << "t6 = " << t6 << endl;
        t6.add_(1); // in-place: modifies t6
        cout << "t6 = " << t6 << endl;
    }
    // e.g. 2.13 min max argmax
    {
        printTitle("e.g. 2.13 min max argmax");
        torch::Tensor t1 = torch::randn({ 3, 4 }, torch::kFloat64);
        cout << "t1 = " << t1 << endl;
        torch::Tensor mask_argmax = torch::argmax(t1, 0); // positions of the maxima along dimension 0
        cout << "mask_argmax = " << mask_argmax << endl;
        // max
        std::tuple<torch::Tensor, torch::Tensor> maxVals = torch::max(t1, -1); // free-function form; returns (values, indices) of the maxima along the last dimension
        torch::Tensor mask_max = std::get<0>(maxVals); // max values
        torch::Tensor mask_max_idx = std::get<1>(maxVals); // indices of the maxima
        cout << "mask_max = " << mask_max << endl;
        cout << "mask_max_idx = " << mask_max_idx << endl;
        // min
        std::tuple<torch::Tensor, torch::Tensor> minVals = t1.min(0); // member-function form; returns (values, indices) of the minima along dimension 0
        torch::Tensor mask_min = std::get<0>(minVals); // min values
        torch::Tensor mask_min_idx = std::get<1>(minVals); // indices of the minima
        cout << "mask_min = " << mask_min << endl;
        cout << "mask_min_idx = " << mask_min_idx << endl;
        // sort
        std::tuple<torch::Tensor, torch::Tensor> sortVals = t1.sort(-1); // sort along the last dimension; returns the sorted values and their original indices
        torch::Tensor tensorVal = std::get<0>(sortVals);
        torch::Tensor tensorValIdx = std::get<1>(sortVals);
        cout << "tensorVal = " << tensorVal << endl;
        cout << "tensorValIdx = " << tensorValIdx << endl;
    }
    // e.g. 2.14 matrix multiplication
    {
        printTitle("e.g. 2.14 matrix multiplication");
        torch::Tensor t1 = torch::tensor({ {1, 2}, {3, 4} }, torch::kFloat64); // 2x2
        torch::Tensor t2 = torch::tensor({ {1, 1, 1}, {2, 3, 1} }, torch::kFloat64); // 2x3
        auto t3 = t1.mm(t2); // matrix multiplication; torch::mm is the free-function form
        cout << "t1 = " << t1 << endl;
        cout << "t2 = " << t2 << endl;
        cout << "t3 = " << t3 << endl;

        t1 = torch::randn({ 2, 3, 4 });
        t2 = torch::randn({ 2, 4, 3 });
        torch::Tensor t4 = t1.bmm(t2); // (mini-)batch matrix multiplication; the result is 2x3x3
        cout << "t1 = " << t1 << endl;
        cout << "t2 = " << t2 << endl;
        cout << "t4 = " << t4 << endl;
    }
    // e.g. 2.16 stacking and concatenating tensors
    {
        printTitle("e.g. 2.16 stacking and concatenating tensors");
        auto t1 = torch::randn({ 2, 3 });
        auto t2 = torch::randn({ 2, 3 });
        auto t3 = torch::stack({ t1, t2 }, -1); // stack along a new last dimension: two 2x3 tensors give a 2x3x2 tensor
        cout << "t1.sizes() = " << t1.sizes() << endl;
        cout << "t2.sizes() = " << t2.sizes() << endl;
        cout << "t3.sizes() = " << t3.sizes() << endl;
    }
    // e.g. 2.17 2.18 expanding and squeezing dimensions
    {
        printTitle("e.g. 2.17 2.18 expanding dimensions");
        torch::Tensor t1 = torch::rand({ 3, 4 });
        cout << "t1.sizes() = " << t1.sizes() << endl;
        auto t11 = t1.unsqueeze(-1); // append a trailing dimension -> 3x4x1
        cout << "t11.sizes() = " << t11.sizes() << endl;
        auto t12 = t1.unsqueeze(-1).unsqueeze(-1); // append another trailing dimension -> 3x4x1x1
        cout << "t12.sizes() = " << t12.sizes() << endl;
        auto t13 = t1.unsqueeze(1); // insert a new dimension at position 1 -> 3x1x4
        cout << "t13.sizes() = " << t13.sizes() << endl;

        auto t2 = torch::rand({ 1, 3, 4, 1 });
        cout << "t2.sizes() = " << t2.sizes() << endl;
        auto t21 = t2.squeeze(); // drop all size-1 dimensions -> 3x4
        cout << "t21.sizes() = " << t21.sizes() << endl;
    }
    return 0;
}
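A closing note on e.g. 2.9: indexing with t[i][j] materializes a temporary tensor per access, so it is slow inside loops. For walking elements on the CPU, `accessor` is the usual alternative; below is a minimal sketch (illustrative names, not from the original post):

#include <torch/torch.h>
#include <iostream>

int main()
{
    torch::Tensor t = torch::rand({ 3, 4 });
    // accessor<float, 2> does stride-aware indexing on a CPU tensor without
    // creating temporaries; dtype and rank must match the template arguments
    auto acc = t.accessor<float, 2>();
    float sum = 0.0f;
    for (int64_t i = 0; i < t.size(0); ++i)
        for (int64_t j = 0; j < t.size(1); ++j)
            sum += acc[i][j];
    std::cout << "sum = " << sum << " vs " << t.sum().item().toFloat() << std::endl;
    return 0;
}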


Original post: https://www.cnblogs.com/winslam/p/14664042.html