1 | /* |
2 | * Copyright (C) 2018 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | |
27 | #pragma once |
28 | |
29 | #include "Instruction.h" |
#include <cstring>
#include <memory>
#include <wtf/Vector.h>
31 | |
32 | namespace JSC { |
33 | |
34 | struct Instruction; |
35 | |
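// An InstructionStream owns a contiguous buffer of bytecode bytes. Instructions are
// variable-width, so they are identified by their byte Offset rather than by an
// element index, and are walked with Refs and iterators that advance by each
// instruction's encoded size. Immutable streams are produced from an
// InstructionStreamWriter via finalize().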
36 | class InstructionStream { |
37 | WTF_MAKE_FAST_ALLOCATED; |
38 | |
39 | using InstructionBuffer = Vector<uint8_t, 0, UnsafeVectorOverflow>; |
40 | |
41 | friend class InstructionStreamWriter; |
42 | friend class CachedInstructionStream; |
43 | public: |
44 | size_t sizeInBytes() const; |
45 | |
46 | using Offset = unsigned; |
47 | |
48 | private: |
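    // BaseRef identifies an instruction by the byte offset of its first byte. Storing
    // an offset (plus a reference to the buffer) instead of a raw pointer keeps the
    // ref valid even if the underlying buffer reallocates while it is still growing.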
49 | template<class InstructionBuffer> |
50 | class BaseRef { |
51 | WTF_MAKE_FAST_ALLOCATED; |
52 | |
53 | friend class InstructionStream; |
54 | |
55 | public: |
56 | BaseRef(const BaseRef<InstructionBuffer>& other) |
57 | : m_instructions(other.m_instructions) |
58 | , m_index(other.m_index) |
59 | { } |
60 | |
61 | void operator=(const BaseRef<InstructionBuffer>& other) |
62 | { |
63 | m_instructions = other.m_instructions; |
64 | m_index = other.m_index; |
65 | } |
66 | |
67 | inline const Instruction* operator->() const { return unwrap(); } |
68 | inline const Instruction* ptr() const { return unwrap(); } |
69 | |
70 | bool operator!=(const BaseRef<InstructionBuffer>& other) const |
71 | { |
72 | return &m_instructions != &other.m_instructions || m_index != other.m_index; |
73 | } |
74 | |
75 | BaseRef next() const |
76 | { |
77 | return BaseRef { m_instructions, m_index + ptr()->size() }; |
78 | } |
79 | |
80 | inline Offset offset() const |
81 | { |
82 | return m_index; |
83 | } |
84 | |
85 | bool isValid() const |
86 | { |
87 | return m_index < m_instructions.size(); |
88 | } |
89 | |
90 | private: |
91 | inline const Instruction* unwrap() const { return reinterpret_cast<const Instruction*>(&m_instructions[m_index]); } |
92 | |
93 | protected: |
94 | BaseRef(InstructionBuffer& instructions, size_t index) |
95 | : m_instructions(instructions) |
96 | , m_index(index) |
97 | { } |
98 | |
99 | InstructionBuffer& m_instructions; |
100 | Offset m_index; |
101 | }; |
102 | |
103 | public: |
104 | using Ref = BaseRef<const InstructionBuffer>; |
105 | |
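    // MutableRef is the writable counterpart of Ref: it hands out a non-const
    // Instruction* and can be frozen into (or implicitly converted to) a read-only Ref.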
106 | class MutableRef : public BaseRef<InstructionBuffer> { |
107 | friend class InstructionStreamWriter; |
108 | |
109 | protected: |
110 | using BaseRef<InstructionBuffer>::BaseRef; |
111 | |
112 | public: |
113 | Ref freeze() const { return Ref { m_instructions, m_index }; } |
114 | inline Instruction* operator->() { return unwrap(); } |
115 | inline Instruction* ptr() { return unwrap(); } |
116 | inline operator Ref() |
117 | { |
118 | return Ref { m_instructions, m_index }; |
119 | } |
120 | |
121 | private: |
122 | inline Instruction* unwrap() { return reinterpret_cast<Instruction*>(&m_instructions[m_index]); } |
123 | }; |
124 | |
125 | private: |
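    // A forward iterator over the stream; operator++ advances by the size of the
    // current instruction, since instructions are variable-width.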
126 | class iterator : public Ref { |
127 | friend class InstructionStream; |
128 | |
129 | public: |
130 | using Ref::Ref; |
131 | |
132 | Ref& operator*() |
133 | { |
134 | return *this; |
135 | } |
136 | |
137 | iterator operator++() |
138 | { |
139 | m_index += ptr()->size(); |
140 | return *this; |
141 | } |
142 | }; |
143 | |
144 | public: |
145 | inline iterator begin() const |
146 | { |
147 | return iterator { m_instructions, 0 }; |
148 | } |
149 | |
150 | inline iterator end() const |
151 | { |
152 | return iterator { m_instructions, m_instructions.size() }; |
153 | } |
154 | |
155 | inline const Ref at(Offset offset) const |
156 | { |
157 | ASSERT(offset < m_instructions.size()); |
158 | return Ref { m_instructions, offset }; |
159 | } |
160 | |
161 | inline size_t size() const |
162 | { |
163 | return m_instructions.size(); |
164 | } |
165 | |
166 | const void* rawPointer() const |
167 | { |
168 | return m_instructions.data(); |
169 | } |
170 | |
    bool contains(Instruction*) const;
172 | |
173 | protected: |
174 | explicit InstructionStream(InstructionBuffer&&); |
175 | |
176 | InstructionBuffer m_instructions; |
177 | }; |
178 | |
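// InstructionStreamWriter is the mutable builder used while bytecode is being
// generated: bytes are appended with write(), already-written bytes can be patched
// after a seek() or through a MutableRef, and finalize() hands the buffer off to an
// immutable InstructionStream.
//
// A rough usage sketch (illustrative only; the opcode and operand values are
// hypothetical, not a real bytecode encoding):
//
//     InstructionStreamWriter writer;
//     writer.write(static_cast<uint8_t>(0x01)); // hypothetical one-byte opcode
//     writer.write(static_cast<uint32_t>(42));  // hypothetical four-byte operand
//     auto stream = writer.finalize();          // no further writes are allowed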
179 | class InstructionStreamWriter : public InstructionStream { |
180 | friend class BytecodeRewriter; |
181 | public: |
182 | InstructionStreamWriter() |
183 | : InstructionStream({ }) |
184 | { } |
185 | |
186 | inline MutableRef ref(Offset offset) |
187 | { |
188 | ASSERT(offset < m_instructions.size()); |
189 | return MutableRef { m_instructions, offset }; |
190 | } |
191 | |
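    // Moves the write position to an absolute byte offset; subsequent write() calls
    // overwrite the existing bytes there until the position reaches the end again.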
192 | void seek(unsigned position) |
193 | { |
194 | ASSERT(position <= m_instructions.size()); |
195 | m_position = position; |
196 | } |
197 | |
198 | unsigned position() |
199 | { |
200 | return m_position; |
201 | } |
202 | |
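    // Writes one byte at the current write position, overwriting an existing byte when
    // the position is inside the buffer and appending when it is at the end.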
203 | void write(uint8_t byte) |
204 | { |
205 | ASSERT(!m_finalized); |
206 | if (m_position < m_instructions.size()) |
207 | m_instructions[m_position++] = byte; |
208 | else { |
209 | m_instructions.append(byte); |
210 | m_position++; |
211 | } |
212 | } |
213 | |
214 | void write(uint16_t h) |
215 | { |
216 | ASSERT(!m_finalized); |
217 | uint8_t bytes[2]; |
218 | std::memcpy(bytes, &h, sizeof(h)); |
219 | |
220 | // Though not always obvious, we don't have to invert the order of the |
221 | // bytes written here for CPU(BIG_ENDIAN). This is because the incoming |
        // h value is already ordered in big endian on CPU(BIG_ENDIAN) platforms.
223 | write(bytes[0]); |
224 | write(bytes[1]); |
225 | } |
226 | |
227 | void write(uint32_t i) |
228 | { |
229 | ASSERT(!m_finalized); |
230 | uint8_t bytes[4]; |
231 | std::memcpy(bytes, &i, sizeof(i)); |
232 | |
233 | // Though not always obvious, we don't have to invert the order of the |
234 | // bytes written here for CPU(BIG_ENDIAN). This is because the incoming |
        // i value is already ordered in big endian on CPU(BIG_ENDIAN) platforms.
236 | write(bytes[0]); |
237 | write(bytes[1]); |
238 | write(bytes[2]); |
239 | write(bytes[3]); |
240 | } |
241 | |
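    // Truncates the buffer at ref's offset, discarding everything written at or after
    // it, and moves the write position back to that offset.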
242 | void rewind(MutableRef& ref) |
243 | { |
244 | ASSERT(ref.offset() < m_instructions.size()); |
245 | m_instructions.shrink(ref.offset()); |
246 | m_position = ref.offset(); |
247 | } |
248 | |
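    // Marks the writer as finalized (asserting on any further writes) and moves the
    // buffer into a new, immutable InstructionStream.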
249 | std::unique_ptr<InstructionStream> finalize() |
250 | { |
251 | m_finalized = true; |
252 | m_instructions.shrinkToFit(); |
253 | return std::unique_ptr<InstructionStream> { new InstructionStream(WTFMove(m_instructions)) }; |
254 | } |
255 | |
256 | MutableRef ref() |
257 | { |
258 | return MutableRef { m_instructions, m_position }; |
259 | } |
260 | |
261 | void swap(InstructionStreamWriter& other) |
262 | { |
263 | std::swap(m_finalized, other.m_finalized); |
264 | std::swap(m_position, other.m_position); |
265 | m_instructions.swap(other.m_instructions); |
266 | } |
267 | |
268 | private: |
269 | class iterator : public MutableRef { |
270 | friend class InstructionStreamWriter; |
271 | |
272 | protected: |
273 | using MutableRef::MutableRef; |
274 | |
275 | public: |
276 | MutableRef& operator*() |
277 | { |
278 | return *this; |
279 | } |
280 | |
281 | iterator operator++() |
282 | { |
283 | m_index += ptr()->size(); |
284 | return *this; |
285 | } |
286 | }; |
287 | |
288 | public: |
289 | iterator begin() |
290 | { |
291 | return iterator { m_instructions, 0 }; |
292 | } |
293 | |
294 | iterator end() |
295 | { |
296 | return iterator { m_instructions, m_instructions.size() }; |
297 | } |
298 | |
299 | private: |
300 | unsigned m_position { 0 }; |
301 | bool m_finalized { false }; |
302 | }; |
303 | |
304 | |
305 | } // namespace JSC |
306 | |