
Commit cd17b85

Import absl/base/internal/endian.h
1 parent 58ff30b commit cd17b85

File tree

3 files changed: +802 -0 lines changed

Lines changed: 267 additions & 0 deletions
@@ -0,0 +1,267 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
#define ABSL_BASE_INTERNAL_ENDIAN_H_

// The following guarantees declaration of the byte swap functions
#ifdef _MSC_VER
#include <stdlib.h>  // NOLINT(build/include)
#elif defined(__APPLE__)
// Mac OS X / Darwin features
#include <libkern/OSByteOrder.h>
#elif defined(__GLIBC__)
#include <byteswap.h>  // IWYU pragma: export
#endif

#include <cstdint>
#include "absl/base/config.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/port.h"

namespace absl {

// Use compiler byte-swapping intrinsics if they are available.  32-bit
// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
// For simplicity, we enable them all only for GCC 4.8.0 or later.
#if defined(__clang__) || \
    (defined(__GNUC__) && \
     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
inline uint64_t gbswap_64(uint64_t host_int) {
  return __builtin_bswap64(host_int);
}
inline uint32_t gbswap_32(uint32_t host_int) {
  return __builtin_bswap32(host_int);
}
inline uint16_t gbswap_16(uint16_t host_int) {
  return __builtin_bswap16(host_int);
}

#elif defined(_MSC_VER)
inline uint64_t gbswap_64(uint64_t host_int) {
  return _byteswap_uint64(host_int);
}
inline uint32_t gbswap_32(uint32_t host_int) {
  return _byteswap_ulong(host_int);
}
inline uint16_t gbswap_16(uint16_t host_int) {
  return _byteswap_ushort(host_int);
}

#elif defined(__APPLE__)
inline uint64_t gbswap_64(uint64_t host_int) { return OSSwapInt64(host_int); }
inline uint32_t gbswap_32(uint32_t host_int) { return OSSwapInt32(host_int); }
inline uint16_t gbswap_16(uint16_t host_int) { return OSSwapInt16(host_int); }

#else
inline uint64_t gbswap_64(uint64_t host_int) {
#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
  // Adapted from /usr/include/byteswap.h.  Not available on Mac.
  if (__builtin_constant_p(host_int)) {
    return __bswap_constant_64(host_int);
  } else {
    register uint64_t result;
    __asm__("bswap %0" : "=r"(result) : "0"(host_int));
    return result;
  }
#elif defined(__GLIBC__)
  return bswap_64(host_int);
#else
  return (((host_int & uint64_t{0xFF}) << 56) |
          ((host_int & uint64_t{0xFF00}) << 40) |
          ((host_int & uint64_t{0xFF0000}) << 24) |
          ((host_int & uint64_t{0xFF000000}) << 8) |
          ((host_int & uint64_t{0xFF00000000}) >> 8) |
          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
#endif  // bswap_64
}

inline uint32_t gbswap_32(uint32_t host_int) {
#if defined(__GLIBC__)
  return bswap_32(host_int);
#else
  return (((host_int & 0xFF) << 24) | ((host_int & 0xFF00) << 8) |
          ((host_int & 0xFF0000) >> 8) | ((host_int & 0xFF000000) >> 24));
#endif
}

inline uint16_t gbswap_16(uint16_t host_int) {
#if defined(__GLIBC__)
  return bswap_16(host_int);
#else
  return static_cast<uint16_t>(((host_int & 0xFF) << 8) |
                               ((host_int & 0xFF00) >> 8));
#endif
}

#endif  // intrinsics available

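// Usage sketch: whichever implementation above is selected, each gbswap_*
// helper reverses the byte order of its argument, e.g.
//
//   uint32_t v = gbswap_32(0x12345678);  // v == 0x78563412
//   uint16_t w = gbswap_16(0x1234);      // w == 0x3412
//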
#ifdef ABSL_IS_LITTLE_ENDIAN

// Definitions for ntohl etc. that don't require us to include
// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
// than just #defining them because in debug mode, gcc doesn't
// correctly handle the (rather involved) definitions of bswap_32.
// gcc guarantees that inline functions are as fast as macros, so
// this isn't a performance hit.
inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }

#elif defined ABSL_IS_BIG_ENDIAN

// These definitions are simpler on big-endian machines
// These are functions instead of macros to avoid self-assignment warnings
// on calls such as "i = ghtonl(i);".  This also provides type checking.
inline uint16_t ghtons(uint16_t x) { return x; }
inline uint32_t ghtonl(uint32_t x) { return x; }
inline uint64_t ghtonll(uint64_t x) { return x; }

#else
#error \
    "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
    "ABSL_IS_LITTLE_ENDIAN must be defined"
#endif  // byte order

inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }

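// Usage sketch: ghtonl/gntohl mirror htonl/ntohl without pulling in
// netinet/in.h; on a little-endian host, for example,
//
//   uint32_t wire = ghtonl(0x0A000001);  // wire == 0x0100000A
//   uint32_t host = gntohl(wire);        // host == 0x0A000001 again
//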
// Utilities to convert numbers between the current host's native byte
// order and little-endian byte order
//
// Load/Store methods are alignment safe
namespace little_endian {
// Conversion functions.
#ifdef ABSL_IS_LITTLE_ENDIAN

inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }

inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }

inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }

inline constexpr bool IsLittleEndian() { return true; }

#elif defined ABSL_IS_BIG_ENDIAN

inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }

inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }

inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }

inline constexpr bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

// Functions to do unaligned loads and stores in little-endian order.
inline uint16_t Load16(const void *p) {
  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(void *p, uint16_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(const void *p) {
  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(void *p, uint32_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(const void *p) {
  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(void *p, uint64_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

}  // namespace little_endian

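// Usage sketch: little_endian::Store32/Load32 write and read fixed-width
// integers in little-endian byte order from possibly unaligned memory, e.g.
//
//   char buf[4];
//   little_endian::Store32(buf, 0x12345678);  // buf = {0x78, 0x56, 0x34, 0x12}
//   uint32_t v = little_endian::Load32(buf);  // v == 0x12345678
//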
// Utilities to convert numbers between the current host's native byte
// order and big-endian byte order (same as network byte order)
//
// Load/Store methods are alignment safe
namespace big_endian {
#ifdef ABSL_IS_LITTLE_ENDIAN

inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }

inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }

inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }

inline constexpr bool IsLittleEndian() { return true; }

#elif defined ABSL_IS_BIG_ENDIAN

inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }

inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }

inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }

inline constexpr bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

// Functions to do unaligned loads and stores in big-endian order.
inline uint16_t Load16(const void *p) {
  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(void *p, uint16_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(const void *p) {
  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(void *p, uint32_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(const void *p) {
  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(void *p, uint64_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

}  // namespace big_endian

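// Usage sketch: big_endian::Store16/Load16 are the natural fit for wire
// formats that use network byte order, e.g.
//
//   char frame[2];
//   big_endian::Store16(frame, 443);             // frame = {0x01, 0xBB}
//   uint16_t port = big_endian::Load16(frame);   // port == 443
//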
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_ENDIAN_H_
