svMultiPhysics
Tensor4.h
// SPDX-FileCopyrightText: Copyright (c) Stanford University, The Regents of the University of California, and others.
// SPDX-License-Identifier: BSD-3-Clause

#ifndef TENSOR4_H
#define TENSOR4_H

#include <cstdio>
#include <cstring>
#include <iostream>
#include <stdexcept>
#include <string>

#ifdef ENABLE_ARRAY_INDEX_CHECKING
#define Tensor4_check_enabled
#endif
/// @brief The Tensor4 template class implements a simple interface to 4th order tensors.
//
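// Illustrative usage (a minimal sketch, not part of the original header; the value
// type and dimensions below are arbitrary):
//
//   Tensor4<double> CC(3, 3, 3, 3);   // zero-initialized 3x3x3x3 tensor
//   CC(0, 1, 2, 0) = 4.2;             // element access via operator()
//   CC = 0.0;                         // set every entry to a scalar value
//   Tensor4<double> DD = 2.0 * CC;    // scalar multiplication returns a new tensor
//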
template<typename T>
class Tensor4
{
  static bool show_index_check_message;

  public:
    std::string name_ = "";
    int ni_ = 0;
    int nj_ = 0;
    int nk_ = 0;
    int nl_ = 0;
    int p1_ = 0;
    int p2_ = 0;
    int size_ = 0;
    T *data_ = nullptr;

    // Default construction; members use their in-class initializers.
    Tensor4() = default;

    Tensor4(const int num_i, const int num_j, const int num_k, const int num_l)
    {
      allocate(num_i, num_j, num_k, num_l);
    }

    ~Tensor4()
    {
      //std::cout << "- - - - - Tensor4 dtor - - - - - " << std::endl;
      if (data_ != nullptr) {
        //std::cout << "[Tensor4 dtor] delete[] data: " << data_ << std::endl;
        delete [] data_;
        data_ = nullptr;
      }
    }

    // Copy
    Tensor4(const Tensor4& rhs)
    {
      if (rhs.ni_ <= 0 || rhs.nj_ <= 0 || rhs.nk_ <= 0 || rhs.nl_ <= 0) {
        return;
      }
      allocate(rhs.ni_, rhs.nj_, rhs.nk_, rhs.nl_);
      memcpy(data_, rhs.data_, size_*sizeof(T));
    }

    int num_i() { return ni_; }
    int num_j() { return nj_; }
    int num_k() { return nk_; }
    int num_l() { return nl_; }

    // Tensor4 assignment.
    //
    Tensor4& operator=(const Tensor4& rhs)
    {
      if (this == &rhs) {
        return *this;
      }

      if (rhs.ni_ <= 0 || rhs.nj_ <= 0 || rhs.nk_ <= 0 || rhs.nl_ <= 0) {
        return *this;
      }

      // Reallocate storage if the dimensions do not match the source tensor.
      if (ni_ != rhs.ni_ || nj_ != rhs.nj_ || nk_ != rhs.nk_ || nl_ != rhs.nl_) {
        clear();
        //delete [] data_;
        //data_ = nullptr;
        allocate(rhs.ni_, rhs.nj_, rhs.nk_, rhs.nl_);
      }

      memcpy(data_, rhs.data_, sizeof(T) * size_);
      return *this;
    }
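    // Assignment sketch (illustrative; A and B are hypothetical tensors): when the
    // destination dimensions differ from the source, the storage is released and
    // reallocated before the copy, e.g.
    //
    //   Tensor4<double> A(2, 2, 2, 2), B(3, 3, 3, 3);
    //   A = B;                         // A now holds a fresh 3x3x3x3 copy of B's data
    //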

    Tensor4& operator=(const double value)
    {
      for (int i = 0; i < size_; i++) {
        data_[i] = value;
      }
      return *this;
    }

    void write(const std::string& label, bool memory=true, T offset={}) const
    {
      // Build a file name prefix from the label: drop '[' and spaces, map ']' and ':' to '_'.
      int n = 0;
      char file_prefix[1000];
      for (auto c : label) {
        if (c == '[') {
          continue;
        }
        if (c == ']') {
          file_prefix[n] = '_';
          n += 1;
          continue;
        }
        if (c == ' ') {
          continue;
        }
        if (c == ':') {
          c = '_';
        }
        file_prefix[n] = c;
        n += 1;
      }
      file_prefix[n] = '\0';

      FILE* fp;
      std::string file_name;

      // Write binary file.
      file_name = std::string(file_prefix) + "_cm.bin";
      std::cout << "[Tensor4::write] file_name: " << file_name << std::endl;
      fp = fopen(file_name.c_str(), "wb");
      if (fp == nullptr) {
        throw std::runtime_error("Failed to open the file '" + file_name + "' for writing.");
      }
      fwrite(&size_, sizeof(int), 1, fp);
      fwrite(data_, sizeof(T), size_, fp);
      fclose(fp);
    }
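    // File layout sketch (assuming the fwrite calls above): the "_cm.bin" file holds
    // the element count as a single int followed by size_ values of T, so it can be
    // read back with one fread of an int and one fread of that many values.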

    friend std::ostream& operator << (std::ostream& out, const Tensor4<T>& lhs)
    {
      for (int i = 0; i < lhs.size_; i++) {
        out << lhs.data_[i];
        if (i != lhs.size_-1) {
          out << ", ";
        }
      }
      return out;
    }

    //----------
    // allocate
    //----------
    //
    void allocate(const int num_i, const int num_j, const int num_k, const int num_l)
    {
      //std::cout << "----- Tensor4::allocate -----" << std::endl;
      ni_ = num_i;
      nj_ = num_j;
      nk_ = num_k;
      nl_ = num_l;

      // Strides for the linear index i + j*ni_ + k*p1_ + l*p2_.
      p1_ = num_i * num_j;
      p2_ = p1_ * num_k;

      size_ = ni_ * nj_ * nk_ * nl_;
      data_ = new T [size_];
      memset(data_, 0, sizeof(T)*size_);
      //std::cout << "[Tensor4::allocate] data_: " << data_ << std::endl;
    }
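    // Memory layout sketch (assuming the strides set in allocate()): element (i,j,k,l)
    // lives at linear offset i + j*ni_ + k*ni_*nj_ + l*ni_*nj_*nk_, so i varies fastest.
    // For example, in a 2x2x2x2 tensor the element (1,0,1,0) maps to offset
    // 1 + 0*2 + 1*4 + 0*8 = 5.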

    //-------------
    // check_index
    //-------------
    //
    void check_index(const int i, const int j, const int k, const int l) const
    {
      if (show_index_check_message) {
        std::cout << "[Tensor4] **********************************" << std::endl;
        std::cout << "[Tensor4] WARNING: Index checking is enabled " << std::endl;
        std::cout << "[Tensor4] **********************************" << std::endl;
        show_index_check_message = false;
      }

      if (data_ == nullptr) {
        throw std::runtime_error(name_+"Accessing null data in Tensor4.");
      }
      if ((i < 0) || (i >= ni_) || (j < 0) || (j >= nj_) || (k < 0) || (k >= nk_) || (l < 0) || (l >= nl_)) {
        auto i_str = std::to_string(ni_);
        auto j_str = std::to_string(nj_);
        auto k_str = std::to_string(nk_);
        auto l_str = std::to_string(nl_);
        auto dims = i_str + " x " + j_str + " x " + k_str + " x " + l_str;
        auto index_str = " " + std::to_string(i) + "," + std::to_string(j) + "," + std::to_string(k) + "," + std::to_string(l);
        throw std::runtime_error("Index (i,j,k,l)=" + index_str + " is out of bounds for " + dims + " array.");
      }
    }

    //-------
    // clear
    //-------
    // Free the array data.
    //
    // This is to replicate the Fortran DEALLOCATE().
    //
    void clear()
    {
      //std::cout << "----- Tensor4::erase -----" << std::endl;
      if (data_ != nullptr) {
        //std::cout << "[Tensor4::erase] data_: " << data_ << std::endl;
        delete [] data_;
      }

      ni_ = 0;
      nj_ = 0;
      nk_ = 0;
      nl_ = 0;
      p1_ = 0;
      p2_ = 0;
      size_ = 0;
      data_ = nullptr;
    }
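    // Lifetime sketch (illustrative; CC is a hypothetical tensor): after clear() the
    // tensor owns no storage and all dimensions are zero; a later resize() makes it
    // usable again, e.g.
    //
    //   CC.clear();
    //   CC.resize(6, 6, 6, 6);       // fresh zero-initialized 6x6x6x6 storage
    //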

    //--------
    // resize
    //--------
    // Resize the array.
    //
    // Note that the existing contents are not preserved; the new storage is zero-initialized.
    //
    void resize(const int num_i, const int num_j, const int num_k, const int num_l)
    {
      if (data_ != nullptr) {
        //std::cout << "[Tensor4::resize] data_: " << data_ << std::endl;
        delete [] data_;
        data_ = nullptr;
      }

      allocate(num_i, num_j, num_k, num_l);
    }

    /////////////////////////
    //  O p e r a t o r s  //
    /////////////////////////

    const T& operator()(const int i, const int j, const int k, const int l) const
    {
      #ifdef Tensor4_check_enabled
      check_index(i, j, k, l);
      #endif
      return data_[i + j*ni_ + p1_*k + p2_*l];
    }

    T& operator()(const int i, const int j, const int k, const int l)
    {
      #ifdef Tensor4_check_enabled
      check_index(i, j, k, l);
      #endif
      return data_[i + j*ni_ + p1_*k + p2_*l];
    }
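    // Bounds-checking sketch (illustrative): when ENABLE_ARRAY_INDEX_CHECKING is
    // defined at compile time, an out-of-range access such as CC(3, 0, 0, 0) on a
    // 3x3x3x3 tensor throws std::runtime_error from check_index() instead of
    // reading past the end of data_.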

    ///////////////////////////////////////////////////
    //  M a t h e m a t i c a l   o p e r a t o r s  //
    ///////////////////////////////////////////////////

    // Multiply by a scalar.
    //
    Tensor4<T> operator*(const T value) const
    {
      Tensor4<T> result(ni_, nj_, nk_, nl_);
      for (int i = 0; i < size_; i++) {
        result.data_[i] = value * data_[i];
      }
      return result;
    }

    friend const Tensor4<T> operator * (const T value, const Tensor4& rhs)
    {
      if (rhs.data_ == nullptr) {
        throw std::runtime_error("Null data for rhs Tensor4.");
      }
      Tensor4<T> result(rhs.ni_, rhs.nj_, rhs.nk_, rhs.nl_);
      for (int i = 0; i < rhs.size_; i++) {
        result.data_[i] = value * rhs.data_[i];
      }
      return result;
    }

    // Compound add assignment.
    //
    Tensor4<T>& operator+=(const Tensor4<T>& rhs)
    {
      for (int i = 0; i < size_; i++) {
        data_[i] += rhs.data_[i];
      }
      return *this;
    }

    // Compound subtract assignment.
    //
    Tensor4<T>& operator-=(const Tensor4<T>& rhs)
    {
      for (int i = 0; i < size_; i++) {
        data_[i] -= rhs.data_[i];
      }
      return *this;
    }

    // Add and subtract arrays.
    //
    Tensor4<T> operator+(const Tensor4<T>& rhs) const
    {
      Tensor4<T> result(rhs.ni_, rhs.nj_, rhs.nk_, rhs.nl_);
      for (int i = 0; i < size_; i++) {
        result.data_[i] = data_[i] + rhs.data_[i];
      }
      return result;
    }

    Tensor4<T> operator-(const Tensor4<T>& rhs) const
    {
      Tensor4<T> result(rhs.ni_, rhs.nj_, rhs.nk_, rhs.nl_);
      for (int i = 0; i < size_; i++) {
        result.data_[i] = data_[i] - rhs.data_[i];
      }
      return result;
    }
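    // Arithmetic sketch (illustrative; A and B are hypothetical Tensor4<double> objects
    // with matching dimensions):
    //
    //   Tensor4<double> S = A + B;     // element-wise sum into a new tensor
    //   Tensor4<double> D = A - B;     // element-wise difference
    //   A += B;                        // in-place accumulation
    //   Tensor4<double> W = 0.5 * A;   // scalar pre-multiplication via the friend operator*
    //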

};

// Definition of the static flag used by check_index() to print its warning once.
template<typename T>
bool Tensor4<T>::show_index_check_message = true;

#endif