diff --git a/thirdparty/download-thirdparty.sh b/thirdparty/download-thirdparty.sh
index 7c1d99d0aa..9dad79e35b 100755
--- a/thirdparty/download-thirdparty.sh
+++ b/thirdparty/download-thirdparty.sh
@@ -301,6 +301,8 @@ echo "Finished patching $BRPC_SOURCE"
 cd $TP_SOURCE_DIR/$S2_SOURCE
 if [ ! -f $PATCHED_MARK ]; then
     patch -p1 < $TP_PATCH_DIR/s2geometry-0.9.0.patch
+    # replace uint64 with uint64_t to make compiler happy
+    patch -p0 < $TP_PATCH_DIR/s2geometry-0.9.0-uint64.patch
     touch $PATCHED_MARK
 fi
 cd -
diff --git a/thirdparty/patches/s2geometry-0.9.0-uint64.patch b/thirdparty/patches/s2geometry-0.9.0-uint64.patch
new file mode 100644
index 0000000000..940a3224ff
--- /dev/null
+++ b/thirdparty/patches/s2geometry-0.9.0-uint64.patch
@@ -0,0 +1,94 @@
+--- src/s2/third_party/absl/base/internal/unaligned_access.h.orig	2019-12-25 11:13:05.290000000 +0800
++++ src/s2/third_party/absl/base/internal/unaligned_access.h	2019-12-25 11:15:06.650000000 +0800
+@@ -80,7 +80,7 @@
+   return __sanitizer_unaligned_load32(p);
+ }
+ 
+-inline uint64 UnalignedLoad64(const void *p) {
++inline uint64_t UnalignedLoad64(const void *p) {
+   return __sanitizer_unaligned_load64(p);
+ }
+ 
+@@ -92,7 +92,7 @@
+   __sanitizer_unaligned_store32(p, v);
+ }
+ 
+-inline void UnalignedStore64(void *p, uint64 v) {
++inline void UnalignedStore64(void *p, uint64_t v) {
+   __sanitizer_unaligned_store64(p, v);
+ }
+ 
+@@ -130,8 +130,8 @@
+   return t;
+ }
+ 
+-inline uint64 UnalignedLoad64(const void *p) {
+-  uint64 t;
++inline uint64_t UnalignedLoad64(const void *p) {
++  uint64_t t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+@@ -140,7 +140,7 @@
+ 
+ inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
+ 
+-inline void UnalignedStore64(void *p, uint64 v) { memcpy(p, &v, sizeof v); }
++inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+ 
+ }  // namespace base_internal
+ }  // namespace absl
+@@ -172,14 +172,14 @@
+ #define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+   (*reinterpret_cast<const uint32_t *>(_p))
+ #define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+-  (*reinterpret_cast<const uint64 *>(_p))
++  (*reinterpret_cast<const uint64_t *>(_p))
+ 
+ #define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+   (*reinterpret_cast<uint16_t *>(_p) = (_val))
+ #define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+   (*reinterpret_cast<uint32_t *>(_p) = (_val))
+ #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+-  (*reinterpret_cast<uint64 *>(_p) = (_val))
++  (*reinterpret_cast<uint64_t *>(_p) = (_val))
+ 
+ #elif defined(__arm__) && \
+     !defined(__ARM_ARCH_5__) && \
+@@ -246,13 +246,13 @@
+ namespace absl {
+ namespace base_internal {
+ 
+-inline uint64 UnalignedLoad64(const void *p) {
+-  uint64 t;
++inline uint64_t UnalignedLoad64(const void *p) {
++  uint64_t t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+ 
+-inline void UnalignedStore64(void *p, uint64 v) { memcpy(p, &v, sizeof v); }
++inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+ 
+ }  // namespace base_internal
+ }  // namespace absl
+@@ -286,8 +286,8 @@
+   return t;
+ }
+ 
+-inline uint64 UnalignedLoad64(const void *p) {
+-  uint64 t;
++inline uint64_t UnalignedLoad64(const void *p) {
++  uint64_t t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+@@ -296,7 +296,7 @@
+ 
+ inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
+ 
+-inline void UnalignedStore64(void *p, uint64 v) { memcpy(p, &v, sizeof v); }
++inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+ 
+ }  // namespace base_internal
+ }  // namespace absl
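For reviewers, a minimal standalone sketch of the pattern the patch touches: the memcpy-based unaligned load/store helpers from the patched header, written against the standard uint64_t from <cstdint> instead of the vendored uint64 typedef that newer compilers reject. The helpers mirror the patched code; the main() harness and buffer offset are illustrative only and are not part of the patch.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Same shape as the patched absl helpers: memcpy makes the access
    // well-defined even when p is not 8-byte aligned, and optimizing
    // compilers lower it to a plain load/store where the target allows.
    inline uint64_t UnalignedLoad64(const void *p) {
      uint64_t t;
      std::memcpy(&t, p, sizeof t);
      return t;
    }

    inline void UnalignedStore64(void *p, uint64_t v) {
      std::memcpy(p, &v, sizeof v);
    }

    int main() {
      unsigned char buf[16] = {0};
      // Offset of 3 forces a misaligned 8-byte access (hypothetical test).
      UnalignedStore64(buf + 3, UINT64_C(0x0123456789abcdef));
      assert(UnalignedLoad64(buf + 3) == UINT64_C(0x0123456789abcdef));
      return 0;
    }

This is also why the memcpy form is preferred over the reinterpret_cast macros further down in the header: the cast form is only used on platforms where misaligned loads are known to be safe, while memcpy is portable.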