@@ -54,7 +54,8 @@ class Memory : public Extern {
         uint64_t sizeInByte;
     };
 
-    static Memory* createMemory(Store* store, uint64_t initialSizeInByte, uint64_t maximumSizeInByte, bool isShared);
+    static Memory* createMemory(Store* store, uint64_t initialSizeInByte, uint64_t maximumSizeInByte,
+                                bool isShared, bool is64);
 
     ~Memory();
 
@@ -88,6 +89,11 @@ class Memory : public Extern {
         return m_isShared;
     }
 
+    bool is64() const
+    {
+        return m_is64;
+    }
+
     bool grow(uint64_t growSizeInByte);
 
     template <typename T>
@@ -98,6 +104,14 @@ class Memory : public Extern {
         memcpyEndianAware(out, m_buffer, sizeof(T), m_sizeInByte, 0, offset + addend, sizeof(T));
     }
 
+    template <typename T>
+    void loadM64(ExecutionState& state, uint64_t offset, uint64_t addend, T* out) const
+    {
+        checkAccessM64(state, offset, sizeof(T), addend);
+
+        memcpyEndianAware(out, m_buffer, sizeof(T), m_sizeInByte, 0, static_cast<size_t>(offset + addend), sizeof(T));
+    }
+
     template <typename T>
     void load(ExecutionState& state, uint32_t offset, T* out) const
     {
@@ -109,14 +123,31 @@ class Memory : public Extern {
 #endif
     }
 
+    template <typename T>
+    void loadM64(ExecutionState& state, uint64_t offset, T* out) const
+    {
+        checkAccessM64(state, offset, sizeof(T));
+#if defined(WALRUS_BIG_ENDIAN)
+        *out = *(reinterpret_cast<T*>(&m_buffer[m_sizeInByte - sizeof(T) - offset]));
+#else
+        *out = *(reinterpret_cast<T*>(&m_buffer[offset]));
+#endif
+    }
+
 #ifdef CPU_ARM32
 
-#define defineUnalignedLoad(TYPE)                                                              \
-    template <typename T = TYPE>                                                               \
-    void load(ExecutionState& state, uint32_t offset, TYPE* out) const                         \
-    {                                                                                          \
-        checkAccess(state, offset, sizeof(TYPE));                                              \
-        memcpyEndianAware(out, m_buffer, sizeof(TYPE), m_sizeInByte, 0, offset, sizeof(TYPE)); \
+#define defineUnalignedLoad(TYPE)                                                                                   \
+    template <typename T = TYPE>                                                                                    \
+    void load(ExecutionState& state, uint32_t offset, TYPE* out) const                                              \
+    {                                                                                                               \
+        checkAccess(state, offset, sizeof(TYPE));                                                                   \
+        memcpyEndianAware(out, m_buffer, sizeof(TYPE), m_sizeInByte, 0, offset, sizeof(TYPE));                      \
+    }                                                                                                               \
+    template <typename T = TYPE>                                                                                    \
+    void loadM64(ExecutionState& state, uint64_t offset, TYPE* out) const                                           \
+    {                                                                                                               \
+        checkAccessM64(state, offset, sizeof(TYPE));                                                                \
+        memcpyEndianAware(out, m_buffer, sizeof(TYPE), m_sizeInByte, 0, static_cast<size_t>(offset), sizeof(TYPE)); \
     }
     defineUnalignedLoad(uint64_t);
     defineUnalignedLoad(int64_t);
@@ -133,6 +164,14 @@ class Memory : public Extern {
         memcpyEndianAware(m_buffer, &val, m_sizeInByte, sizeof(T), offset + addend, 0, sizeof(T));
     }
 
+    template <typename T>
+    void storeM64(ExecutionState& state, uint64_t offset, uint64_t addend, const T& val) const
+    {
+        checkAccessM64(state, offset, sizeof(T), addend);
+
+        memcpyEndianAware(m_buffer, &val, m_sizeInByte, sizeof(T), static_cast<size_t>(offset + addend), 0, sizeof(T));
+    }
+
     template <typename T>
     void store(ExecutionState& state, uint32_t offset, const T& val) const
     {
@@ -144,6 +183,17 @@ class Memory : public Extern {
 #endif
     }
 
+    template <typename T>
+    void storeM64(ExecutionState& state, uint64_t offset, const T& val) const
+    {
+        checkAccessM64(state, offset, sizeof(T));
+#if defined(WALRUS_BIG_ENDIAN)
+        *(reinterpret_cast<T*>(&m_buffer[m_sizeInByte - sizeof(T) - offset])) = val;
+#else
+        *(reinterpret_cast<T*>(&m_buffer[offset])) = val;
+#endif
+    }
+
     enum AtomicRmwOp {
         Add,
         Sub,
@@ -161,6 +211,14 @@ class Memory : public Extern {
         *out = shared->load(std::memory_order_relaxed);
     }
 
+    template <typename T>
+    void atomicLoadM64(ExecutionState& state, uint64_t offset, uint64_t addend, T* out) const
+    {
+        checkAtomicAccessM64(state, offset, sizeof(T), addend);
+        std::atomic<T>* shared = reinterpret_cast<std::atomic<T>*>(m_buffer + (offset + addend));
+        *out = shared->load(std::memory_order_relaxed);
+    }
+
     template <typename T>
     void atomicStore(ExecutionState& state, uint32_t offset, uint32_t addend, const T& val) const
     {
@@ -169,31 +227,28 @@ class Memory : public Extern {
         shared->store(val);
     }
 
+    template <typename T>
+    void atomicStoreM64(ExecutionState& state, uint64_t offset, uint64_t addend, const T& val) const
+    {
+        checkAtomicAccessM64(state, offset, sizeof(T), addend);
+        std::atomic<T>* shared = reinterpret_cast<std::atomic<T>*>(m_buffer + (offset + addend));
+        shared->store(val);
+    }
+
     template <typename T>
     void atomicRmw(ExecutionState& state, uint32_t offset, uint32_t addend, const T& val, T* out, AtomicRmwOp operation) const
     {
         checkAtomicAccess(state, offset, sizeof(T), addend);
         std::atomic<T>* shared = reinterpret_cast<std::atomic<T>*>(m_buffer + (offset + addend));
-        switch (operation) {
-        case Add:
-            *out = shared->fetch_add(val);
-            break;
-        case Sub:
-            *out = shared->fetch_sub(val);
-            break;
-        case And:
-            *out = shared->fetch_and(val);
-            break;
-        case Or:
-            *out = shared->fetch_or(val);
-            break;
-        case Xor:
-            *out = shared->fetch_xor(val);
-            break;
-        case Xchg:
-            *out = shared->exchange(val);
-            break;
-        }
+        doAtomicRmw(shared, val, out, operation);
+    }
+
+    template <typename T>
+    void atomicRmwM64(ExecutionState& state, uint64_t offset, uint64_t addend, const T& val, T* out, AtomicRmwOp operation) const
+    {
+        checkAtomicAccessM64(state, offset, sizeof(T), addend);
+        std::atomic<T>* shared = reinterpret_cast<std::atomic<T>*>(m_buffer + (offset + addend));
+        doAtomicRmw(shared, val, out, operation);
     }
 
     template <typename T>
@@ -205,6 +260,15 @@ class Memory : public Extern {
         *out = expect;
     }
 
+    template <typename T>
+    void atomicRmwCmpxchgM64(ExecutionState& state, uint64_t offset, uint64_t addend, T expect, const T& replace, T* out) const
+    {
+        checkAtomicAccessM64(state, offset, sizeof(T), addend);
+        std::atomic<T>* shared = reinterpret_cast<std::atomic<T>*>(m_buffer + (offset + addend));
+        shared->compare_exchange_weak(expect, replace);
+        *out = expect;
+    }
+
     template <typename T>
     void atomicWait(ExecutionState& state, Store* store, uint32_t offset, uint32_t addend, const T& expect, int64_t timeOut, uint32_t* out) const
     {
@@ -216,6 +280,17 @@ class Memory : public Extern {
         atomicWait(state, store, m_buffer + (offset + addend), expect, timeOut, out);
     }
 
+    template <typename T>
+    void atomicWaitM64(ExecutionState& state, Store* store, uint64_t offset, uint64_t addend, const T& expect, int64_t timeOut, uint32_t* out) const
+    {
+        checkAtomicAccessM64(state, offset, sizeof(T), addend);
+        if (UNLIKELY(!m_isShared)) {
+            throwUnsharedMemoryException(state);
+        }
+
+        atomicWait(state, store, m_buffer + (offset + addend), expect, timeOut, out);
+    }
+
     template <typename T>
     void atomicWait(ExecutionState& state, Store* store, uint8_t* absoluteAddress, const T& expect, int64_t timeOut, uint32_t* out) const
     {
@@ -270,6 +345,17 @@ class Memory : public Extern {
         atomicNotify(store, m_buffer + (offset + addend), count, out);
     }
 
+    void atomicNotifyM64(ExecutionState& state, Store* store, uint64_t offset, uint64_t addend, const uint32_t& count, uint32_t* out) const
+    {
+        checkAtomicAccessM64(state, offset, 4, addend);
+        if (UNLIKELY(!m_isShared)) {
+            *out = 0;
+            return;
+        }
+
+        atomicNotify(store, m_buffer + (offset + addend), count, out);
+    }
+
     void atomicNotify(Store* store, uint8_t* absoluteAddress, const uint32_t& count, uint32_t* out) const
     {
         Waiter* waiter = store->getWaiter(static_cast<void*>(absoluteAddress));
@@ -287,12 +373,18 @@ class Memory : public Extern {
 
 #ifdef CPU_ARM32
 
-#define defineUnalignedStore(TYPE)                                                              \
-    template <typename T = TYPE>                                                                \
-    void store(ExecutionState& state, uint32_t offset, const TYPE& val) const                   \
-    {                                                                                           \
-        checkAccess(state, offset, sizeof(TYPE));                                               \
-        memcpyEndianAware(m_buffer, &val, m_sizeInByte, sizeof(TYPE), offset, 0, sizeof(TYPE)); \
+#define defineUnalignedStore(TYPE)                                                                                   \
+    template <typename T = TYPE>                                                                                     \
+    void store(ExecutionState& state, uint32_t offset, const TYPE& val) const                                        \
+    {                                                                                                                \
+        checkAccess(state, offset, sizeof(TYPE));                                                                    \
+        memcpyEndianAware(m_buffer, &val, m_sizeInByte, sizeof(TYPE), offset, 0, sizeof(TYPE));                      \
+    }                                                                                                                \
+    template <typename T = TYPE>                                                                                     \
+    void storeM64(ExecutionState& state, uint64_t offset, const TYPE& val) const                                     \
+    {                                                                                                                \
+        checkAccessM64(state, offset, sizeof(TYPE));                                                                 \
+        memcpyEndianAware(m_buffer, &val, m_sizeInByte, sizeof(TYPE), static_cast<size_t>(offset), 0, sizeof(TYPE)); \
     }
     defineUnalignedStore(uint64_t);
     defineUnalignedStore(int64_t);
@@ -307,20 +399,31 @@ class Memory : public Extern {
 
     inline bool checkAccess(uint32_t offset, uint32_t size, uint32_t addend = 0, Memory* dstMem = nullptr) const
    {
+        ASSERT(!is64());
         if (dstMem == nullptr) {
             return !UNLIKELY(!((uint64_t)offset + (uint64_t)addend + (uint64_t)size <= m_sizeInByte));
         } else {
             return !UNLIKELY(!((uint64_t)offset + (uint64_t)addend + (uint64_t)size <= dstMem->m_sizeInByte));
         }
     }
 
+    inline bool checkAccessM64(uint64_t offset, uint64_t size, uint64_t addend = 0, Memory* dstMem = nullptr) const
+    {
+        ASSERT(is64());
+        if (dstMem == nullptr) {
+            return !UNLIKELY(!(offset + addend + size <= m_sizeInByte));
+        } else {
+            return !UNLIKELY(!(offset + addend + size <= dstMem->m_sizeInByte));
+        }
+    }
+
     void initMemory(DataSegment* source, uint32_t dstStart, uint32_t srcStart, uint32_t srcSize);
     void copyMemory(uint32_t dstStart, uint32_t srcStart, uint32_t size);
     void copyMemory(Memory* dstMemory, uint32_t dstStart, uint32_t srcStart, uint32_t size);
     void fillMemory(uint32_t start, uint8_t value, uint32_t size);
 
 private:
-    Memory(uint64_t initialSizeInByte, uint64_t maximumSizeInByte, bool isShared);
+    Memory(uint64_t initialSizeInByte, uint64_t maximumSizeInByte, bool isShared, bool is64);
 
     void throwRangeException(ExecutionState& state, uint32_t offset, uint32_t addend, uint32_t size) const;
 
@@ -331,7 +434,40 @@ class Memory : public Extern {
         }
     }
 
+    inline void checkAccessM64(ExecutionState& state, uint64_t offset, uint64_t size, uint64_t addend = 0, Memory* dstMem = nullptr) const
+    {
+        if (!this->checkAccessM64(offset, size, addend, dstMem)) {
+            throwRangeException(state, offset, addend, size);
+        }
+    }
+
+    template <typename T>
+    void doAtomicRmw(std::atomic<T>* shared, const T& val, T* out, AtomicRmwOp operation) const
+    {
+        switch (operation) {
+        case Add:
+            *out = shared->fetch_add(val);
+            break;
+        case Sub:
+            *out = shared->fetch_sub(val);
+            break;
+        case And:
+            *out = shared->fetch_and(val);
+            break;
+        case Or:
+            *out = shared->fetch_or(val);
+            break;
+        case Xor:
+            *out = shared->fetch_xor(val);
+            break;
+        case Xchg:
+            *out = shared->exchange(val);
+            break;
+        }
+    }
+
     void checkAtomicAccess(ExecutionState& state, uint32_t offset, uint32_t size, uint32_t addend = 0) const;
+    void checkAtomicAccessM64(ExecutionState& state, uint64_t offset, uint64_t size, uint64_t addend = 0) const;
     void throwUnsharedMemoryException(ExecutionState& state) const;
 
     uint64_t m_sizeInByte;
@@ -340,6 +476,7 @@ class Memory : public Extern {
     uint8_t* m_buffer;
     TargetBuffer* m_targetBuffers;
    bool m_isShared;
+    bool m_is64;
 };
 
 } // namespace Walrus
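
For context, here is a minimal sketch of how an embedder might drive the new memory64 entry points. It is illustrative only and not part of the change: the helper name `sketchMemory64Use`, the include paths, and the host-side setup that yields a `Store*` and an `ExecutionState&` are all assumptions; only the `createMemory`/`loadM64`/`storeM64`/`atomicRmwM64` signatures come from the diff above. The 64 KiB figure is the standard WebAssembly page size.

// Hypothetical usage sketch; include paths and the Store/ExecutionState
// setup are assumed, not confirmed by this diff.
#include "runtime/Memory.h"
#include "runtime/Store.h"

using namespace Walrus;

void sketchMemory64Use(Store* store, ExecutionState& state)
{
    // One initial 64 KiB page, growable to four; unshared, 64-bit indexed.
    // The new fifth parameter (is64) selects the memory64 layout.
    Memory* mem = Memory::createMemory(store, 1 * 65536, 4 * 65536,
                                       /*isShared=*/false, /*is64=*/true);

    // The *M64 accessors take uint64_t offsets and bounds-check through
    // checkAccessM64 (which asserts is64()) instead of checkAccess.
    uint64_t value = 0x1122334455667788;
    mem->storeM64(state, /*offset=*/8, value);

    uint64_t readBack = 0;
    mem->loadM64(state, /*offset=*/8, &readBack);
    // readBack == value; an out-of-range offset throws a range exception.

    // Non-wait atomics work on unshared memory too; per the code above,
    // atomicWaitM64 traps on unshared memory and atomicNotifyM64 simply
    // reports zero waiters there.
    uint64_t previous = 0;
    mem->atomicRmwM64(state, /*offset=*/8, /*addend=*/0, uint64_t(1),
                      &previous, Memory::Add);
}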