# website_agent.rb

require 'nokogiri'
require 'date'

module Agents
  class WebsiteAgent < Agent
    include WebRequestConcern

    can_dry_run!
    can_order_created_events!

    default_schedule "every_12h"

    UNIQUENESS_LOOK_BACK = 200
    UNIQUENESS_FACTOR = 3

    description <<-MD
      The Website Agent scrapes a website, XML document, or JSON feed and creates Events based on the results.

      Specify a `url` and select a `mode` for when to create Events based on the scraped data, either `all`, `on_change`, or `merge` (if fetching based on an Event, see below).

      The `url` option can be a single url, or an array of urls (for example, for multiple pages with the exact same structure but different content to scrape).

      The WebsiteAgent can also scrape based on incoming events.

      * Set the `url_from_event` option to a Liquid template to generate the url to access based on the Event. (To fetch the url in the Event's `url` key, for example, set `url_from_event` to `{{ url }}`.)
      * Alternatively, set `data_from_event` to a Liquid template to use data directly without fetching any URL. (For example, set it to `{{ html }}` to use HTML contained in the `html` key of the incoming Event.)
      * If you specify `merge` for the `mode` option, Huginn will retain the old payload and update it with new values.

      # Supported Document Types

      The `type` value can be `xml`, `html`, `json`, or `text`.

      To tell the Agent how to parse the content, specify `extract` as a hash with keys naming the extractions and values of hashes.

      Note that for all of the formats, whatever you extract MUST have the same number of matches for each extractor. E.g., if you're extracting rows, all extractors must match all rows. For generating CSS selectors, something like [SelectorGadget](http://selectorgadget.com) may be helpful.

      # Scraping HTML and XML

      When parsing HTML or XML, these sub-hashes specify how each extraction should be done. The Agent first selects a node set from the document for each extraction key by evaluating either a CSS selector in `css` or an XPath expression in `xpath`. It then evaluates an XPath expression in `value` (default: `.`) on each node in the node set, converting the result into a string. Here's an example:

          "extract": {
            "url": { "css": "#comic img", "value": "@src" },
            "title": { "css": "#comic img", "value": "@title" },
            "body_text": { "css": "div.main", "value": ".//text()" }
          }

      "@_attr_" is the XPath expression to extract the value of an attribute named _attr_ from a node, and `.//text()` extracts all the enclosed text. To extract the innerHTML, use `./node()`; and to extract the outer HTML, use `.`.

      You can also use [XPath functions](http://www.w3.org/TR/xpath/#section-String-Functions) like `normalize-space` to strip and squeeze whitespace, `substring-after` to extract part of a text, and `translate` to remove commas from formatted numbers, etc. Note that these functions take a string, not a node set, so what you may think would be written as `normalize-space(.//text())` should actually be `normalize-space(.)`.

      Beware that when parsing an XML document (i.e. `type` is `xml`) using `xpath` expressions, all namespaces are stripped from the document unless the top-level option `use_namespaces` is set to `true`.

      # Scraping JSON

      When parsing JSON, these sub-hashes specify [JSONPaths](http://goessner.net/articles/JsonPath/) to the values that you care about. For example:

          "extract": {
            "title": { "path": "results.data[*].title" },
            "description": { "path": "results.data[*].description" }
          }

      The `extract` option can be skipped for the JSON type, causing the full JSON response to be returned.
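
      For instance, with a response like the following (purely illustrative), each of the two paths above would match once per element of `results.data`, so the Agent would create two Events, each with a `title` and a `description`:

          {
            "results": {
              "data": [
                { "title": "first", "description": "first description" },
                { "title": "second", "description": "second description" }
              ]
            }
          }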

      # Scraping Text

      When parsing text, each sub-hash should contain a `regexp` and `index`. Output text is matched against the regular expression repeatedly from the beginning through to the end, collecting a captured group specified by `index` in each match. Each index should be either an integer or a string name which corresponds to <code>(?&lt;<em>name</em>&gt;...)</code>. For example, to parse lines of <code><em>word</em>: <em>definition</em></code>, the following should work:

          "extract": {
            "word": { "regexp": "^(.+?): (.+)$", "index": 1 },
            "definition": { "regexp": "^(.+?): (.+)$", "index": 2 }
          }

      Or if you prefer names to numbers for index:

          "extract": {
            "word": { "regexp": "^(?<word>.+?): (?<definition>.+)$", "index": "word" },
            "definition": { "regexp": "^(?<word>.+?): (?<definition>.+)$", "index": "definition" }
          }

      To extract the whole content as one event:

          "extract": {
            "content": { "regexp": "\\A(?m:.)*\\z", "index": 0 }
          }

      Beware that `.` does not match the newline character (LF) unless the `m` flag is in effect, and `^`/`$` basically match every line beginning/end. See [this document](http://ruby-doc.org/core-#{RUBY_VERSION}/doc/regexp_rdoc.html) to learn the regular expression variant used in this service.

      # General Options

      The Agent can be configured to use HTTP basic auth by including the `basic_auth` parameter with `"username:password"`, or `["username", "password"]`.

      Set `expected_update_period_in_days` to the maximum amount of time that you'd expect to pass between Events being created by this Agent. This is only used to set the "working" status.

      Set `uniqueness_look_back` to limit the number of events checked for uniqueness (typically for performance). This defaults to the larger of #{UNIQUENESS_LOOK_BACK} or #{UNIQUENESS_FACTOR}x the number of detected received results.

      Set `force_encoding` to an encoding name if the website is known to respond with a missing, invalid, or wrong charset in the Content-Type header. Note that a text content without a charset is taken as encoded in UTF-8 (not ISO-8859-1).

      Set `user_agent` to a custom User-Agent name if the website does not like the default value (`#{default_user_agent}`).

      The `headers` field is optional. When present, it should be a hash of headers to send with the request.

      Set `disable_ssl_verification` to `true` to disable SSL verification.

      Set `unzip` to `gzip` to inflate the resource using gzip.

      Set `http_success_codes` to an array of status codes (e.g., `[404, 422]`) to treat HTTP response codes beyond 200 as successes.
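
      For example, a complete configuration combining several of these options might look like this (all values are illustrative):

          {
            "url": "https://example.com/api/items.json",
            "type": "json",
            "mode": "on_change",
            "expected_update_period_in_days": "2",
            "user_agent": "Huginn (example)",
            "headers": { "Accept-Language": "en-US" },
            "basic_auth": "username:password",
            "extract": {
              "title": { "path": "results.data[*].title" }
            }
          }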

      # Liquid Templating

      In Liquid templating, the following variable is available:

      * `_response_`: A response object with the following keys:

          * `status`: HTTP status as integer. (Almost always 200)
          * `headers`: Response headers; for example, `{{ _response_.headers.Content-Type }}` expands to the value of the Content-Type header. Keys are case-insensitive, and `-` and `_` are interchangeable.

      # Ordering Events

      #{description_events_order}
    MD

    event_description do
      "Events will have the following fields:\n\n %s" % [
        Utils.pretty_print(Hash[options['extract'].keys.map { |key|
          [key, "..."]
        }])
      ]
    end

    def working?
      event_created_within?(options['expected_update_period_in_days']) && !recent_error_logs?
    end

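    # The default options below scrape the most recent xkcd comic. Each created
    # event then carries one field per extraction key, e.g. (values illustrative):
    #
    #   { "url" => "http://imgs.xkcd.com/comics/...", "title" => "...", "hovertext" => "..." }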
    def default_options
      {
        'expected_update_period_in_days' => "2",
        'url' => "http://xkcd.com",
        'type' => "html",
        'mode' => "on_change",
        'extract' => {
          'url' => { 'css' => "#comic img", 'value' => "@src" },
          'title' => { 'css' => "#comic img", 'value' => "@alt" },
          'hovertext' => { 'css' => "#comic img", 'value' => "@title" }
        }
      }
    end

    def validate_options
      # Check for required fields
      errors.add(:base, "either url, url_from_event, or data_from_event is required") unless options['url'].present? || options['url_from_event'].present? || options['data_from_event'].present?
      errors.add(:base, "expected_update_period_in_days is required") unless options['expected_update_period_in_days'].present?
      validate_extract_options!
      validate_http_success_codes!

      # Check for optional fields
      if options['mode'].present?
        errors.add(:base, "mode must be set to on_change, all or merge") unless %w[on_change all merge].include?(options['mode'])
      end

      if options['expected_update_period_in_days'].present?
        errors.add(:base, "Invalid expected_update_period_in_days format") unless is_positive_integer?(options['expected_update_period_in_days'])
      end

      if options['uniqueness_look_back'].present?
        errors.add(:base, "Invalid uniqueness_look_back format") unless is_positive_integer?(options['uniqueness_look_back'])
      end

      validate_web_request_options!
    end

    def validate_http_success_codes!
      consider_success = options["http_success_codes"]
      return unless consider_success.present?

      if consider_success.class != Array
        errors.add(:http_success_codes, "must be an array and specify at least one status code")
      elsif consider_success.uniq.count != consider_success.count
        errors.add(:http_success_codes, "duplicate http code found")
      elsif consider_success.any? { |e| e.to_s !~ /^\d+$/ }
        errors.add(:http_success_codes, "please make sure to use only numeric values for code, e.g. 404 or \"404\"")
      end
    end

    def validate_extract_options!
      extraction_type = (extraction_type() rescue extraction_type(options))
      case extract = options['extract']
      when Hash
        if extract.each_value.any? { |value| !value.is_a?(Hash) }
          errors.add(:base, 'extract must be a hash of hashes.')
        else
          case extraction_type
          when 'html', 'xml'
            extract.each do |name, details|
              case details['css']
              when String
                # ok
              when nil
                case details['xpath']
                when String
                  # ok
                when nil
                  errors.add(:base, "When type is html or xml, all extractions must have a css or xpath attribute (bad extraction details for #{name.inspect})")
                else
                  errors.add(:base, "Wrong type of \"xpath\" value in extraction details for #{name.inspect}")
                end
              else
                errors.add(:base, "Wrong type of \"css\" value in extraction details for #{name.inspect}")
              end

              case details['value']
              when String, nil
                # ok
              else
                errors.add(:base, "Wrong type of \"value\" value in extraction details for #{name.inspect}")
              end
            end
          when 'json'
            extract.each do |name, details|
              case details['path']
              when String
                # ok
              when nil
                errors.add(:base, "When type is json, all extractions must have a path attribute (bad extraction details for #{name.inspect})")
              else
                errors.add(:base, "Wrong type of \"path\" value in extraction details for #{name.inspect}")
              end
            end
          when 'text'
            extract.each do |name, details|
              case regexp = details['regexp']
              when String
                begin
                  re = Regexp.new(regexp)
                rescue => e
                  errors.add(:base, "invalid regexp for #{name.inspect}: #{e.message}")
                end
              when nil
                errors.add(:base, "When type is text, all extractions must have a regexp attribute (bad extraction details for #{name.inspect})")
              else
                errors.add(:base, "Wrong type of \"regexp\" value in extraction details for #{name.inspect}")
              end

              case index = details['index']
              when Integer, /\A\d+\z/
                # ok
              when String
                if re && !re.names.include?(index)
                  errors.add(:base, "no named capture #{index.inspect} found in regexp for #{name.inspect}")
                end
              when nil
                errors.add(:base, "When type is text, all extractions must have an index attribute (bad extraction details for #{name.inspect})")
              else
                errors.add(:base, "Wrong type of \"index\" value in extraction details for #{name.inspect}")
              end
            end
          when /\{/
            # Liquid templating
          else
            errors.add(:base, "Unknown extraction type #{extraction_type.inspect}")
          end
        end
      when nil
        unless extraction_type == 'json'
          errors.add(:base, 'extract is required for all types except json')
        end
      else
        errors.add(:base, 'extract must be a hash')
      end
    end

    def check
      check_urls(interpolated['url'])
    end

    def check_urls(in_url, existing_payload = {})
      return unless in_url.present?

      Array(in_url).each do |url|
        check_url(url, existing_payload)
      end
    end

    def check_url(url, existing_payload = {})
      unless /\Ahttps?:\/\//i === url
        error "Ignoring a non-HTTP url: #{url.inspect}"
        return
      end
      uri = Utils.normalize_uri(url)
      log "Fetching #{uri}"
      response = faraday.get(uri)
      raise "Failed: #{response.inspect}" unless consider_response_successful?(response)

      interpolation_context.stack {
        interpolation_context['_response_'] = ResponseDrop.new(response)
        handle_data(response.body, response.env[:url], existing_payload)
      }
    rescue => e
      error "Error when fetching url: #{e.message}\n#{e.backtrace.join("\n")}"
    end

    def handle_data(body, url, existing_payload)
      doc = parse(body)

      if extract_full_json?
        if store_payload!(previous_payloads(1), doc)
          log "Storing new result for '#{name}': #{doc.inspect}"
          create_event payload: existing_payload.merge(doc)
        end
        return
      end

      output =
        case extraction_type
        when 'json'
          extract_json(doc)
        when 'text'
          extract_text(doc)
        else
          extract_xml(doc)
        end

      num_unique_lengths = interpolated['extract'].keys.map { |name| output[name].length }.uniq

      if num_unique_lengths.length != 1
        raise "Got an uneven number of matches for #{interpolated['name']}: #{interpolated['extract'].inspect}"
      end

      old_events = previous_payloads num_unique_lengths.first
      num_unique_lengths.first.times do |index|
        result = {}
        interpolated['extract'].keys.each do |name|
          result[name] = output[name][index]
          if name.to_s == 'url' && url.present?
            result[name] = (url + Utils.normalize_uri(result[name])).to_s
          end
        end

        if store_payload!(old_events, result)
          log "Storing new parsed result for '#{name}': #{result.inspect}"
          create_event payload: existing_payload.merge(result)
        end
      end
    end

    def receive(incoming_events)
      incoming_events.each do |event|
        interpolate_with(event) do
          existing_payload = interpolated['mode'].to_s == "merge" ? event.payload : {}

          if data_from_event = options['data_from_event'].presence
            data = interpolate_options(data_from_event)
            if data.present?
              handle_event_data(data, event, existing_payload)
            else
              error "No data was found in the Event payload using the template #{data_from_event}", inbound_event: event
            end
          else
            url_to_scrape =
              if url_template = options['url_from_event'].presence
                interpolate_options(url_template)
              else
                interpolated['url']
              end
            check_urls(url_to_scrape, existing_payload)
          end
        end
      end
    end

    private

    def consider_response_successful?(response)
      response.success? || begin
        consider_success = options["http_success_codes"]
        consider_success.present? && (consider_success.include?(response.status.to_s) || consider_success.include?(response.status))
      end
    end

    def handle_event_data(data, event, existing_payload)
      handle_data(data, event.payload['url'], existing_payload)
    rescue => e
      error "Error when handling event data: #{e.message}\n#{e.backtrace.join("\n")}", inbound_event: event
    end

    # This method returns true if the result should be stored as a new event.
    # If mode is set to 'on_change', this method may return false and update an existing
    # event to expire further in the future.
    def store_payload!(old_events, result)
      case interpolated['mode'].presence
      when 'on_change'
        result_json = result.to_json
        if found = old_events.find { |event| event.payload.to_json == result_json }
          found.update!(expires_at: new_event_expiration_date)
          false
        else
          true
        end
      when 'all', 'merge', ''
        true
      else
        raise "Illegal options[mode]: #{interpolated['mode']}"
      end
    end

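    # Returns the most recent events to compare new results against for
    # uniqueness, or nil unless mode is "on_change". The window size is
    # uniqueness_look_back when given, otherwise the larger of
    # UNIQUENESS_LOOK_BACK and UNIQUENESS_FACTOR * num_events.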
    def previous_payloads(num_events)
      if interpolated['uniqueness_look_back'].present?
        look_back = interpolated['uniqueness_look_back'].to_i
      else
        # Larger of UNIQUENESS_FACTOR * num_events and UNIQUENESS_LOOK_BACK
        look_back = UNIQUENESS_FACTOR * num_events
        if look_back < UNIQUENESS_LOOK_BACK
          look_back = UNIQUENESS_LOOK_BACK
        end
      end
      events.order("id desc").limit(look_back) if interpolated['mode'] == "on_change"
    end

    def extract_full_json?
      !interpolated['extract'].present? && extraction_type == "json"
    end

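    # Determines how the response body should be parsed: the `type` option
    # when present, otherwise a guess based on the url's file extension,
    # falling back to "html".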
    def extraction_type(interpolated = interpolated())
      (interpolated['type'] || begin
        case interpolated['url']
        when /\.(rss|xml)$/i
          "xml"
        when /\.json$/i
          "json"
        when /\.(txt|text)$/i
          "text"
        else
          "html"
        end
      end).to_s
    end

    def use_namespaces?
      if interpolated.key?('use_namespaces')
        boolify(interpolated['use_namespaces'])
      else
        interpolated['extract'].none? { |name, extraction_details|
          extraction_details.key?('xpath')
        }
      end
    end

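    # Runs the given block once per extraction definition and collects the
    # results into a hash of extraction name => array of extracted values.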
    def extract_each(&block)
      interpolated['extract'].each_with_object({}) { |(name, extraction_details), output|
        output[name] = block.call(extraction_details)
      }
    end

    def extract_json(doc)
      extract_each { |extraction_details|
        result = Utils.values_at(doc, extraction_details['path'])
        log "Extracting #{extraction_type} at #{extraction_details['path']}: #{result}"
        result
      }
    end

    def extract_text(doc)
      extract_each { |extraction_details|
        regexp = Regexp.new(extraction_details['regexp'])
        case index = extraction_details['index']
        when /\A\d+\z/
          index = index.to_i
        end
        result = []
        doc.scan(regexp) {
          result << Regexp.last_match[index]
        }
        log "Extracting #{extraction_type} at #{regexp}: #{result}"
        result
      }
    end

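    # Extracts values from an HTML or XML document: selects a node set with
    # the `css` or `xpath` expression, then evaluates the `value` XPath
    # expression (default ".") against each node and stringifies the result.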
    def extract_xml(doc)
      extract_each { |extraction_details|
        case
        when css = extraction_details['css']
          nodes = doc.css(css)
        when xpath = extraction_details['xpath']
          nodes = doc.xpath(xpath)
        else
          raise '"css" or "xpath" is required for HTML or XML extraction'
        end

        case nodes
        when Nokogiri::XML::NodeSet
          result = nodes.map { |node|
            value = node.xpath(extraction_details['value'] || '.')
            if value.is_a?(Nokogiri::XML::NodeSet)
              child = value.first
              if child && child.cdata?
                value = child.text
              end
            end
            case value
            when Float
              # Node#xpath() returns any numeric value as float;
              # convert it to integer as appropriate.
              value = value.to_i if value.to_i == value
            end
            value.to_s
          }
        else
          raise "The result of HTML/XML extraction was not a NodeSet"
        end
        log "Extracting #{extraction_type} at #{xpath || css}: #{result}"
        result
      }
    end

    def parse(data)
      case type = extraction_type
      when "xml"
        doc = Nokogiri::XML(data)
        # ignore xmlns, useful when parsing atom feeds
        doc.remove_namespaces! unless use_namespaces?
        doc
      when "json"
        JSON.parse(data)
      when "html"
        Nokogiri::HTML(data)
      when "text"
        data
      else
        raise "Unknown extraction type: #{type}"
      end
    end

    def is_positive_integer?(value)
      Integer(value) >= 0
    rescue
      false
    end

    # Wraps Faraday::Response
    class ResponseDrop < LiquidDroppable::Drop
      def headers
        HeaderDrop.new(@object.headers)
      end

      # Integer value of HTTP status
      def status
        @object.status
      end
    end

    # Wraps Faraday::Utils::Headers
    class HeaderDrop < LiquidDroppable::Drop
      def before_method(name)
        @object[name.tr('_', '-')]
      end
    end
  end
end