Support s2 on aarch64 (#2568)
This commit is contained in:
2
thirdparty/download-thirdparty.sh
vendored
2
thirdparty/download-thirdparty.sh
vendored
@@ -301,6 +301,8 @@ echo "Finished patching $BRPC_SOURCE"
 cd $TP_SOURCE_DIR/$S2_SOURCE
 if [ ! -f $PATCHED_MARK ]; then
     patch -p1 < $TP_PATCH_DIR/s2geometry-0.9.0.patch
+    # replace uint64 with uint64_t to make compiler happy
+    patch -p0 < $TP_PATCH_DIR/s2geometry-0.9.0-uint64.patch
     touch $PATCHED_MARK
 fi
 cd -
94
thirdparty/patches/s2geometry-0.9.0-uint64.patch
vendored
Normal file
94
thirdparty/patches/s2geometry-0.9.0-uint64.patch
vendored
Normal file
@@ -0,0 +1,94 @@
--- src/s2/third_party/absl/base/internal/unaligned_access.h.orig	2019-12-25 11:13:05.290000000 +0800
+++ src/s2/third_party/absl/base/internal/unaligned_access.h	2019-12-25 11:15:06.650000000 +0800
@@ -80,7 +80,7 @@
   return __sanitizer_unaligned_load32(p);
 }
 
-inline uint64 UnalignedLoad64(const void *p) {
+inline uint64_t UnalignedLoad64(const void *p) {
   return __sanitizer_unaligned_load64(p);
 }
 
@@ -92,7 +92,7 @@
   __sanitizer_unaligned_store32(p, v);
 }
 
-inline void UnalignedStore64(void *p, uint64 v) {
+inline void UnalignedStore64(void *p, uint64_t v) {
   __sanitizer_unaligned_store64(p, v);
 }
 
@@ -130,8 +130,8 @@
   return t;
 }
 
-inline uint64 UnalignedLoad64(const void *p) {
-  uint64 t;
+inline uint64_t UnalignedLoad64(const void *p) {
+  uint64_t t;
   memcpy(&t, p, sizeof t);
   return t;
 }
@@ -140,7 +140,7 @@
 
 inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
 
-inline void UnalignedStore64(void *p, uint64 v) { memcpy(p, &v, sizeof v); }
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
 
 }  // namespace base_internal
 }  // namespace absl
@@ -172,14 +172,14 @@
 #define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
   (*reinterpret_cast<const uint32_t *>(_p))
 #define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
-  (*reinterpret_cast<const uint64 *>(_p))
+  (*reinterpret_cast<const uint64_t *>(_p))
 
 #define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
   (*reinterpret_cast<uint16_t *>(_p) = (_val))
 #define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
   (*reinterpret_cast<uint32_t *>(_p) = (_val))
 #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
-  (*reinterpret_cast<uint64 *>(_p) = (_val))
+  (*reinterpret_cast<uint64_t *>(_p) = (_val))
 
 #elif defined(__arm__) && \
       !defined(__ARM_ARCH_5__) && \
@@ -246,13 +246,13 @@
 namespace absl {
 namespace base_internal {
 
-inline uint64 UnalignedLoad64(const void *p) {
-  uint64 t;
+inline uint64_t UnalignedLoad64(const void *p) {
+  uint64_t t;
   memcpy(&t, p, sizeof t);
   return t;
 }
 
-inline void UnalignedStore64(void *p, uint64 v) { memcpy(p, &v, sizeof v); }
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
 
 }  // namespace base_internal
 }  // namespace absl
@@ -286,8 +286,8 @@
   return t;
 }
 
-inline uint64 UnalignedLoad64(const void *p) {
-  uint64 t;
+inline uint64_t UnalignedLoad64(const void *p) {
+  uint64_t t;
   memcpy(&t, p, sizeof t);
   return t;
 }
@@ -296,7 +296,7 @@
 
 inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
 
-inline void UnalignedStore64(void *p, uint64 v) { memcpy(p, &v, sizeof v); }
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
 
 }  // namespace base_internal
 }  // namespace absl
Reference in New Issue
Block a user