https://github.com/scrapy/scrapy/commit/a38a99e0e2f3d69bb7a7aa50403c7aef3854cf00

From 1075587dbd15e5ccb9a83c4ca14086c1e135fe12 Mon Sep 17 00:00:00 2001
From: Paul Tremberth <paul.tremberth@gmail.com>
Date: Wed, 30 Mar 2016 14:31:10 +0200
Subject: [PATCH] Add support for Sphinx 1.4

See http://www.sphinx-doc.org/en/stable/changes.html#release-1-4-released-mar-28-2016

sphinx_rtd_theme has become optional and needs to be added to the requirements

https://github.com/sphinx-doc/sphinx/pull/2320 changed index node entry
tuples from 4 values to 5

`sh` syntax highlighting added very locally in selectors.rst
because of this warning/error with Sphinx 1.4:

```
Warning, treated as error:
/home/paul/src/scrapy/docs/topics/selectors.rst:743:
WARNING: Could not lex literal_block as "python". Highlighting skipped.
```
---
 docs/_ext/scrapydocs.py   | 4 ++--
 docs/topics/selectors.rst | 4 ++++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/docs/_ext/scrapydocs.py b/docs/_ext/scrapydocs.py
index f0827f2..83b0d2c 100644
--- a/docs/_ext/scrapydocs.py
+++ b/docs/_ext/scrapydocs.py
@@ -18,7 +18,7 @@ def is_setting_index(node):
     if node.tagname == 'index':
         # index entries for setting directives look like:
         # [(u'pair', u'SETTING_NAME; setting', u'std:setting-SETTING_NAME', '')]
-        entry_type, info, refid, _ = node['entries'][0]
+        entry_type, info, refid = node['entries'][0][:3]
         return entry_type == 'pair' and info.endswith('; setting')
     return False
 
@@ -30,7 +30,7 @@ def get_setting_target(node):
 
 def get_setting_name_and_refid(node):
     """Extract setting name from directive index node"""
-    entry_type, info, refid, _ = node['entries'][0]
+    entry_type, info, refid = node['entries'][0][:3]
     return info.replace('; setting', ''), refid
 
 
diff --git a/docs/topics/selectors.rst b/docs/topics/selectors.rst
index 688c2b7..12415d0 100644
--- a/docs/topics/selectors.rst
+++ b/docs/topics/selectors.rst
@@ -738,10 +738,14 @@ simple/convenient XPaths. You can use the
 
 Let's show an example that illustrates this with Github blog atom feed.
 
+.. highlight:: sh
+
 First, we open the shell with the url we want to scrape::
 
     $ scrapy shell https://github.com/blog.atom
 
+.. highlight:: python
+
 Once in the shell we can try selecting all ``<link>`` objects and see that it
 doesn't work (because the Atom XML namespace is obfuscating those nodes)::
 
