/*
 * Copyright (C) 2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "BytecodeIndex.h"
#include "Instruction.h"
#include <wtf/Vector.h>

namespace JSC {

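// InstructionStream is an immutable sequence of variable-width bytecode
// instructions stored in a flat byte buffer. Positions are byte offsets
// (Offset / BytecodeIndex) and are accessed through Ref cursors.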
class InstructionStream {
    WTF_MAKE_FAST_ALLOCATED;

    using InstructionBuffer = Vector<uint8_t, 0, UnsafeVectorOverflow>;

    friend class InstructionStreamWriter;
    friend class CachedInstructionStream;
public:
    size_t sizeInBytes() const;

    using Offset = unsigned;

private:
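    // BaseRef is a lightweight cursor: a reference to the instruction buffer
    // plus a byte offset, reinterpreted as an Instruction* on access.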
    template<class InstructionBuffer>
    class BaseRef {
        WTF_MAKE_FAST_ALLOCATED;

        friend class InstructionStream;

    public:
        BaseRef(const BaseRef<InstructionBuffer>& other)
            : m_instructions(other.m_instructions)
            , m_index(other.m_index)
        { }

        void operator=(const BaseRef<InstructionBuffer>& other)
        {
            m_instructions = other.m_instructions;
            m_index = other.m_index;
        }

        inline const Instruction* operator->() const { return unwrap(); }
        inline const Instruction* ptr() const { return unwrap(); }

        bool operator!=(const BaseRef<InstructionBuffer>& other) const
        {
            return &m_instructions != &other.m_instructions || m_index != other.m_index;
        }

        BaseRef next() const
        {
            return BaseRef { m_instructions, m_index + ptr()->size() };
        }

        inline Offset offset() const { return m_index; }
        inline BytecodeIndex index() const { return BytecodeIndex(offset()); }

        bool isValid() const
        {
            return m_index < m_instructions.size();
        }

    private:
        inline const Instruction* unwrap() const { return reinterpret_cast<const Instruction*>(&m_instructions[m_index]); }

    protected:
        BaseRef(InstructionBuffer& instructions, size_t index)
            : m_instructions(instructions)
            , m_index(index)
        { }

        InstructionBuffer& m_instructions;
        Offset m_index;
    };

public:
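    // Read-only cursor over the immutable instruction buffer.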
    using Ref = BaseRef<const InstructionBuffer>;

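    // Writable cursor used while the stream is still being built; freeze()
    // (or the implicit conversion) produces the read-only Ref view.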
    class MutableRef : public BaseRef<InstructionBuffer> {
        friend class InstructionStreamWriter;

    protected:
        using BaseRef<InstructionBuffer>::BaseRef;

    public:
        Ref freeze() const { return Ref { m_instructions, m_index }; }
        inline Instruction* operator->() { return unwrap(); }
        inline Instruction* ptr() { return unwrap(); }
        inline operator Ref()
        {
            return Ref { m_instructions, m_index };
        }

    private:
        inline Instruction* unwrap() { return reinterpret_cast<Instruction*>(&m_instructions[m_index]); }
    };

private:
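    // Forward iterator over the variable-width instructions: operator++
    // advances by the byte size of the current instruction.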
    class iterator : public Ref {
        friend class InstructionStream;

    public:
        using Ref::Ref;

        Ref& operator*()
        {
            return *this;
        }

        iterator& operator+=(size_t size)
        {
            m_index += size;
            return *this;
        }

        iterator& operator++()
        {
            return *this += ptr()->size();
        }
    };

public:
    inline iterator begin() const
    {
        return iterator { m_instructions, 0 };
    }

    inline iterator end() const
    {
        return iterator { m_instructions, m_instructions.size() };
    }

    inline const Ref at(BytecodeIndex index) const { return at(index.offset()); }
    inline const Ref at(Offset offset) const
    {
        ASSERT(offset < m_instructions.size());
        return Ref { m_instructions, offset };
    }

    inline size_t size() const
    {
        return m_instructions.size();
    }

    const void* rawPointer() const
    {
        return m_instructions.data();
    }

    bool contains(Instruction*) const;

protected:
    explicit InstructionStream(InstructionBuffer&&);

    InstructionBuffer m_instructions;
};

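// InstructionStreamWriter is the mutable builder used while bytecode is being
// emitted: bytes are appended (or overwritten after seek()/rewind()) and the
// buffer is then moved into an immutable InstructionStream by finalize().
//
// A minimal usage sketch (illustrative only; the opcode and operand values
// below are made up, and real emission goes through the bytecode generator):
//
//     InstructionStreamWriter writer;
//     writer.write(static_cast<uint8_t>(opcodeByte));  // 1-byte opcode
//     writer.write(static_cast<uint32_t>(operand));    // 4-byte operand
//     std::unique_ptr<InstructionStream> stream = writer.finalize();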
class InstructionStreamWriter : public InstructionStream {
    friend class BytecodeRewriter;
public:
    InstructionStreamWriter()
        : InstructionStream({ })
    { }

    inline MutableRef ref(Offset offset)
    {
        ASSERT(offset < m_instructions.size());
        return MutableRef { m_instructions, offset };
    }

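    // Moves the write cursor; subsequent writes overwrite existing bytes until
    // the cursor reaches the end of the buffer, after which they append.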
    void seek(unsigned position)
    {
        ASSERT(position <= m_instructions.size());
        m_position = position;
    }

    unsigned position()
    {
        return m_position;
    }

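    // Writes one byte at the current position, growing the buffer if needed.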
    void write(uint8_t byte)
    {
        ASSERT(!m_finalized);
        if (m_position < m_instructions.size())
            m_instructions[m_position++] = byte;
        else {
            m_instructions.append(byte);
            m_position++;
        }
    }

    void write(uint16_t h)
    {
        ASSERT(!m_finalized);
        uint8_t bytes[2];
        std::memcpy(bytes, &h, sizeof(h));

        // Though not always obvious, we don't have to invert the order of the
        // bytes written here for CPU(BIG_ENDIAN). This is because the incoming
        // h value is already in big-endian byte order on CPU(BIG_ENDIAN) platforms.
        write(bytes[0]);
        write(bytes[1]);
    }

    void write(uint32_t i)
    {
        ASSERT(!m_finalized);
        uint8_t bytes[4];
        std::memcpy(bytes, &i, sizeof(i));

        // Though not always obvious, we don't have to invert the order of the
        // bytes written here for CPU(BIG_ENDIAN). This is because the incoming
        // i value is already in big-endian byte order on CPU(BIG_ENDIAN) platforms.
        write(bytes[0]);
        write(bytes[1]);
        write(bytes[2]);
        write(bytes[3]);
    }

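    // Truncates the stream back to the given ref, discarding everything
    // emitted after it.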
    void rewind(MutableRef& ref)
    {
        ASSERT(ref.offset() < m_instructions.size());
        m_instructions.shrink(ref.offset());
        m_position = ref.offset();
    }

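    // Moves the buffer into an immutable InstructionStream. Further writes are
    // forbidden (checked by the m_finalized assertions above).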
    std::unique_ptr<InstructionStream> finalize()
    {
        m_finalized = true;
        m_instructions.shrinkToFit();
        return std::unique_ptr<InstructionStream> { new InstructionStream(WTFMove(m_instructions)) };
    }

    MutableRef ref()
    {
        return MutableRef { m_instructions, m_position };
    }

    void swap(InstructionStreamWriter& other)
    {
        std::swap(m_finalized, other.m_finalized);
        std::swap(m_position, other.m_position);
        m_instructions.swap(other.m_instructions);
    }

private:
    class iterator : public MutableRef {
        friend class InstructionStreamWriter;

    protected:
        using MutableRef::MutableRef;

    public:
        MutableRef& operator*()
        {
            return *this;
        }

        iterator& operator+=(size_t size)
        {
            m_index += size;
            return *this;
        }

        iterator& operator++()
        {
            return *this += ptr()->size();
        }
    };

public:
    iterator begin()
    {
        return iterator { m_instructions, 0 };
    }

    iterator end()
    {
        return iterator { m_instructions, m_instructions.size() };
    }

private:
    unsigned m_position { 0 };
    bool m_finalized { false };
};

} // namespace JSC