FEATURE: flexible crawler detection
You can use the crawler user agents site setting to amend which user agents are considered crawlers, based on a string match in the user agent. Also improves performance of crawler detection slightly.
@@ -1,9 +1,17 @@
 module CrawlerDetection
-  # added 'ia_archiver' based on https://meta.discourse.org/t/unable-to-archive-discourse-pages-with-the-internet-archive/21232
-  # added 'Wayback Save Page' based on https://meta.discourse.org/t/unable-to-archive-discourse-with-the-internet-archive-save-page-now-button/22875
-  # added 'Swiftbot' based on https://meta.discourse.org/t/how-to-add-html-markup-or-meta-tags-for-external-search-engine/28220
+  def self.to_matcher(string)
+    escaped = string.split('|').map { |agent| Regexp.escape(agent) }.join('|')
+    Regexp.new(escaped)
+  end
+
   def self.crawler?(user_agent)
-    !/Googlebot|Mediapartners|AdsBot|curl|HTTrack|Twitterbot|facebookexternalhit|bingbot|Baiduspider|ia_archiver|Wayback Save Page|360Spider|Swiftbot|YandexBot/.match(user_agent).nil?
+    # this is done to avoid regenerating regexes
+    @matchers ||= {}
+    matcher = (@matchers[SiteSetting.crawler_user_agents] ||= to_matcher(SiteSetting.crawler_user_agents))
+    matcher.match?(user_agent)
   end
 end
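For illustration, here is a minimal, runnable sketch of how the new code behaves outside of Discourse. The SiteSetting stub and the pipe-delimited setting value 'Googlebot|curl|my-bot/1.0' are hypothetical stand-ins for Discourse's real site settings object, not part of the commit itself:

# Standalone sketch; SiteSetting is stubbed with a hypothetical value here,
# whereas in Discourse it is the real site settings object.
module SiteSetting
  def self.crawler_user_agents
    'Googlebot|curl|my-bot/1.0' # hypothetical admin-supplied value
  end
end

module CrawlerDetection
  def self.to_matcher(string)
    # Regexp.escape makes each pipe-delimited entry match literally,
    # so 'my-bot/1.0' cannot be misread as regex syntax
    escaped = string.split('|').map { |agent| Regexp.escape(agent) }.join('|')
    Regexp.new(escaped)
  end

  def self.crawler?(user_agent)
    # One compiled regex is cached per setting value, keyed by the raw
    # string, so editing the setting transparently builds a new matcher
    @matchers ||= {}
    matcher = (@matchers[SiteSetting.crawler_user_agents] ||= to_matcher(SiteSetting.crawler_user_agents))
    matcher.match?(user_agent)
  end
end

puts CrawlerDetection.crawler?('Mozilla/5.0 (compatible; Googlebot/2.1)') # true
puts CrawlerDetection.crawler?('my-bot/1.0 (+https://example.com)')       # true
puts CrawlerDetection.crawler?('Mozilla/5.0 (Windows NT 10.0; Win64)')    # false

Keying the cache on the raw setting string means a changed setting never reuses a stale matcher; the previous entry simply lingers in the hash, which is harmless for a setting that changes rarely. Note that Regexp#match? requires Ruby 2.4 or newer; on older Rubies, !matcher.match(user_agent).nil? is the equivalent.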