/* src/third-party/lzma/CpuArch.h */
1/* CpuArch.h -- CPU specific code
22015-12-01: Igor Pavlov : Public domain */
3
4#ifndef __CPU_ARCH_H
5#define __CPU_ARCH_H
6
7#include "7zTypes.h"
8
9EXTERN_C_BEGIN
10
/*
MY_CPU_LE means that CPU is LITTLE ENDIAN.
MY_CPU_BE means that CPU is BIG ENDIAN.
If MY_CPU_LE and MY_CPU_BE are not defined, we don't know about ENDIANNESS of platform.

MY_CPU_LE_UNALIGN means that CPU is LITTLE ENDIAN and CPU supports unaligned memory accesses.
*/

/* x86-64: MSVC spellings (_M_X64 / _M_AMD64) and GCC/Clang spellings. */
#if defined(_M_X64) \
   || defined(_M_AMD64) \
   || defined(__x86_64__) \
   || defined(__AMD64__) \
   || defined(__amd64__)
  #define MY_CPU_AMD64
#endif

/* 64-bit targets.  Fix: also recognize _M_ARM64 (MSVC targeting ARM64),
   which defines none of the other listed macros. */
#if defined(MY_CPU_AMD64) \
    || defined(_M_ARM64) \
    || defined(_M_IA64) \
    || defined(__AARCH64EL__) \
    || defined(__AARCH64EB__)
  #define MY_CPU_64BIT
#endif

#if defined(_M_IX86) || defined(__i386__)
#define MY_CPU_X86
#endif

#if defined(MY_CPU_X86) || defined(MY_CPU_AMD64)
#define MY_CPU_X86_OR_AMD64
#endif

/* 32-bit targets: x86 and 32-bit ARM/Thumb (both endiannesses). */
#if defined(MY_CPU_X86) \
    || defined(_M_ARM) \
    || defined(__ARMEL__) \
    || defined(__THUMBEL__) \
    || defined(__ARMEB__) \
    || defined(__THUMBEB__)
  #define MY_CPU_32BIT
#endif

/* Windows on ARM is always little-endian.
   Fix: also cover _M_ARM64 (MSVC targeting ARM64), not just 32-bit _M_ARM. */
#if defined(_WIN32) && (defined(_M_ARM) || defined(_M_ARM64))
#define MY_CPU_ARM_LE
#endif

/* Windows on IA-64 runs little-endian. */
#if defined(_WIN32) && defined(_M_IA64)
#define MY_CPU_IA64_LE
#endif

/* Little-endian detection: known-LE targets, toolchain endianness macros,
   and the GCC/Clang __BYTE_ORDER__ probe. */
#if defined(MY_CPU_X86_OR_AMD64) \
    || defined(MY_CPU_ARM_LE) \
    || defined(MY_CPU_IA64_LE) \
    || defined(__LITTLE_ENDIAN__) \
    || defined(__ARMEL__) \
    || defined(__THUMBEL__) \
    || defined(__AARCH64EL__) \
    || defined(__MIPSEL__) \
    || defined(__MIPSEL) \
    || defined(_MIPSEL) \
    || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
  #define MY_CPU_LE
#endif

/* Big-endian detection. */
#if defined(__BIG_ENDIAN__) \
    || defined(__ARMEB__) \
    || defined(__THUMBEB__) \
    || defined(__AARCH64EB__) \
    || defined(__MIPSEB__) \
    || defined(__MIPSEB) \
    || defined(_MIPSEB) \
    || defined(__m68k__) \
    || defined(__s390__) \
    || defined(__s390x__) \
    || defined(__zarch__) \
    || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
  #define MY_CPU_BE
#endif

/* Deliberate compile error (undeclared identifier) if the probes above
   ever claim both endiannesses at once. */
#if defined(MY_CPU_LE) && defined(MY_CPU_BE)
Stop_Compiling_Bad_Endian
#endif

/* Enable direct unaligned loads/stores only on x86/x64.
   The commented-out __AARCH64EL__ alternative is kept from upstream:
   a candidate extension, deliberately not enabled. */
#ifdef MY_CPU_LE
  #if defined(MY_CPU_X86_OR_AMD64) \
      /* || defined(__AARCH64EL__) */
    #define MY_CPU_LE_UNALIGN
  #endif
#endif

#ifdef MY_CPU_LE_UNALIGN

/* Fast path: little-endian CPU with hardware unaligned access (x86/x64 only,
   per MY_CPU_LE_UNALIGN above) -- read/write the integer directly.
   NOTE(review): these casts formally violate strict aliasing; this is the
   long-standing upstream idiom for x86 -- confirm build flags if changing. */

#define GetUi16(p) (*(const UInt16 *)(const void *)(p))
#define GetUi32(p) (*(const UInt32 *)(const void *)(p))
#define GetUi64(p) (*(const UInt64 *)(const void *)(p))

#define SetUi16(p, v) do { *(UInt16 *)(p) = (v); } while (0)
#define SetUi32(p, v) do { *(UInt32 *)(p) = (v); } while (0)
#define SetUi64(p, v) do { *(UInt64 *)(p) = (v); } while (0)

#else

/* Portable path: byte-by-byte little-endian accessors; safe for any
   alignment and any host endianness. */

#define GetUi16(p) ( (UInt16) ( \
             ((const Byte *)(p))[0] | \
    ((UInt16)((const Byte *)(p))[1] << 8) ))

#define GetUi32(p) ( \
             ((const Byte *)(p))[0]        | \
    ((UInt32)((const Byte *)(p))[1] <<  8) | \
    ((UInt32)((const Byte *)(p))[2] << 16) | \
    ((UInt32)((const Byte *)(p))[3] << 24))

#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))

/* Fix: wrap multi-statement setters in do { } while (0) so they behave as a
   single statement inside unbraced if/else (a bare { ... }; breaks there). */
#define SetUi16(p, v) do { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
    _ppp_[0] = (Byte)_vvv_; \
    _ppp_[1] = (Byte)(_vvv_ >> 8); } while (0)

#define SetUi32(p, v) do { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
    _ppp_[0] = (Byte)_vvv_; \
    _ppp_[1] = (Byte)(_vvv_ >> 8); \
    _ppp_[2] = (Byte)(_vvv_ >> 16); \
    _ppp_[3] = (Byte)(_vvv_ >> 24); } while (0)

#define SetUi64(p, v) do { Byte *_ppp2_ = (Byte *)(p); UInt64 _vvv2_ = (v); \
    SetUi32(_ppp2_    , (UInt32)_vvv2_); \
    SetUi32(_ppp2_ + 4, (UInt32)(_vvv2_ >> 32)); } while (0)

#endif

/* Big-endian accessors: fast paths use an unaligned native load plus the
   compiler's byte-swap intrinsic; the #else branch is the portable
   byte-by-byte fallback.
   Fix: test defined(_MSC_VER) before comparing its value -- on non-MSVC
   preprocessors the bare comparison relied on the undefined-identifier-is-0
   rule and triggers -Wundef. */
#if defined(MY_CPU_LE_UNALIGN) && defined(_MSC_VER) && (_MSC_VER >= 1300)

/* Note: we use bswap instruction, that is unsupported in 386 cpu */

#include <stdlib.h>

#pragma intrinsic(_byteswap_ulong)
#pragma intrinsic(_byteswap_uint64)
#define GetBe32(p) _byteswap_ulong(*(const UInt32 *)(const Byte *)(p))
#define GetBe64(p) _byteswap_uint64(*(const UInt64 *)(const Byte *)(p))

#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = _byteswap_ulong(v)

#elif defined(MY_CPU_LE_UNALIGN) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))

/* GCC >= 4.3 (and Clang, which defines __GNUC__ compatibly) provide
   __builtin_bswap32/64. */
#define GetBe32(p) __builtin_bswap32(*(const UInt32 *)(const Byte *)(p))
#define GetBe64(p) __builtin_bswap64(*(const UInt64 *)(const Byte *)(p))

#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = __builtin_bswap32(v)

#else

#define GetBe32(p) ( \
    ((UInt32)((const Byte *)(p))[0] << 24) | \
    ((UInt32)((const Byte *)(p))[1] << 16) | \
    ((UInt32)((const Byte *)(p))[2] <<  8) | \
             ((const Byte *)(p))[3] )

#define GetBe64(p) (((UInt64)GetBe32(p) << 32) | GetBe32(((const Byte *)(p)) + 4))

/* do { } while (0): single-statement semantics in unbraced if/else. */
#define SetBe32(p, v) do { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
    _ppp_[0] = (Byte)(_vvv_ >> 24); \
    _ppp_[1] = (Byte)(_vvv_ >> 16); \
    _ppp_[2] = (Byte)(_vvv_ >> 8); \
    _ppp_[3] = (Byte)_vvv_; } while (0)

#endif


/* 16-bit big-endian read; always byte-by-byte (no 16-bit bswap fast path). */
#define GetBe16(p) ( (UInt16) ( \
    ((UInt16)((const Byte *)(p))[0] << 8) | \
             ((const Byte *)(p))[1] ))


#ifdef MY_CPU_X86_OR_AMD64

/* Raw CPUID results captured by x86cpuid_CheckAndRead (implemented in
   CpuArch.c).  Field meanings presumed from names and the decoder macros
   below -- verify against CpuArch.c. */
typedef struct
{
  UInt32 maxFunc;   /* presumably the highest supported CPUID function */
  UInt32 vendor[3]; /* 12-byte vendor identification string */
  UInt32 ver;       /* version/signature word, decoded by the macros below */
  UInt32 b;
  UInt32 c;
  UInt32 d;
} Cx86cpuid;

/* CPU vendor identifiers returned by x86cpuid_GetFirm. */
enum
{
  CPU_FIRM_INTEL,
  CPU_FIRM_AMD,
  CPU_FIRM_VIA
};

void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d);

Bool x86cpuid_CheckAndRead(Cx86cpuid *p);
int x86cpuid_GetFirm(const Cx86cpuid *p);

/* Decode family/model/stepping from the version word.
   Fix: parenthesize the macro argument so expressions like
   x86cpuid_GetFamily(a + b) expand correctly. */
#define x86cpuid_GetFamily(ver) ((((ver) >> 16) & 0xFF0) | (((ver) >> 8) & 0xF))
#define x86cpuid_GetModel(ver) ((((ver) >> 12) & 0xF0) | (((ver) >> 4) & 0xF))
#define x86cpuid_GetStepping(ver) ((ver) & 0xF)

/* Fix: (void) makes these true prototypes; empty parens declare an
   old-style function with unspecified parameters in C. */
Bool CPU_Is_InOrder(void);
Bool CPU_Is_Aes_Supported(void);

#endif

220EXTERN_C_END
221
222#endif