'),f="",g="",m="",v="",w="",b="",y="",x="",k="",T="";if("sections"===o[h].type){if(g=(f=o[h])._source.title,m=l+"#"+f._source.id,v=[f._source.content.substr(0,C)+" ..."],f.highlight&&(f.highlight["sections.title"]&&(g=O(f.highlight["sections.title"][0])),f.highlight["sections.content"])){w=f.highlight["sections.content"],v=[];for(var E=0;E<%= section_subtitle %><% for (var i = 0; i < section_content.length; ++i) { %><%= section_content[i] %>
<% } %>',{section_subtitle_link:m,section_subtitle:g,section_content:v})}"domains"===o[h].type&&(y=(b=o[h])._source.role_name,x=l+"#"+b._source.anchor,k=b._source.name,(T="")!==b._source.docstrings&&(T=b._source.docstrings.substr(0,C)+" ..."),b.highlight&&(b.highlight["domains.docstrings"]&&(T="... "+O(b.highlight["domains.docstrings"][0])+" ..."),b.highlight["domains.name"]&&(k=O(b.highlight["domains.name"][0]))),I(p,'<%= domain_content %>
',{domain_subtitle_link:x,domain_subtitle:"["+y+"]: "+k,domain_content:T})),p.find("span").addClass("highlighted"),s.append(p),h!==o.length-1&&s.append($(""))}Search.output.append(s),s.slideDown(5)}t.length?Search.status.text(_("Search finished, found %s page(s) matching the search query.").replace("%s",t.length)):(Search.query_fallback(A),console.log("Read the Docs search failed. Falling back to Sphinx search."))}).fail(function(e){Search.query_fallback(A)}).always(function(){$("#search-progress").empty(),Search.stopPulse(),Search.title.text(_("Search Results")),Search.status.fadeIn(500)}),$.ajax({url:e.href,crossDomain:!0,xhrFields:{withCredentials:!0},complete:function(e,t){return"success"!==t||void 0===e.responseJSON||0===e.responseJSON.count?n.reject():n.resolve(e.responseJSON)}}).fail(function(e,t,i){return n.reject()})}}$(document).ready(function(){"undefined"!=typeof Search&&Search.init()})}(n.get())}}},{"./../../../../../../bower_components/xss/lib/index":3,"./rtd-data":16}],18:[function(r,e,t){var o=r("./rtd-data");e.exports={init:function(){var e=o.get();if($(document).on("click","[data-toggle='rst-current-version']",function(){var e=$("[data-toggle='rst-versions']").hasClass("shift-up")?"was_open":"was_closed";"undefined"!=typeof ga?ga("rtfd.send","event","Flyout","Click",e):"undefined"!=typeof _gaq&&_gaq.push(["rtfd._setAccount","UA-17997319-1"],["rtfd._trackEvent","Flyout","Click",e])}),void 0===window.SphinxRtdTheme){var t=r("./../../../../../../bower_components/sphinx-rtd-theme/js/theme.js").ThemeNav;if($(document).ready(function(){setTimeout(function(){t.navBar||t.enable()},1e3)}),e.is_rtd_like_theme()&&!$("div.wy-side-scroll:first").length){console.log("Applying theme sidebar fix...");var i=$("nav.wy-nav-side:first"),n=$("").addClass("wy-side-scroll");i.children().detach().appendTo(n),n.prependTo(i),t.navBar=n}}}}},{"./../../../../../../bower_components/sphinx-rtd-theme/js/theme.js":1,"./rtd-data":16}],19:[function(e,t,i){var d,c=e("./constants"),u=e("./rtd-data"),n=e("bowser"),h="#ethical-ad-placement";function p(){var e,t,i="rtd-"+(Math.random()+1).toString(36).substring(4),n=c.PROMO_TYPES.LEFTNAV,r=c.DEFAULT_PROMO_PRIORITY,o=null;return d.is_mkdocs_builder()&&d.is_rtd_like_theme()?(o="nav.wy-nav-side",e="ethical-rtd ethical-dark-theme"):d.is_rtd_like_theme()?(o="nav.wy-nav-side > div.wy-side-scroll",e="ethical-rtd ethical-dark-theme"):d.is_alabaster_like_theme()&&(o="div.sphinxsidebar > div.sphinxsidebarwrapper",e="ethical-alabaster"),o?($("").attr("id",i).addClass(e).appendTo(o),(!(t=$("#"+i).offset())||t.top>$(window).height())&&(r=c.LOW_PROMO_PRIORITY),{div_id:i,display_type:n,priority:r}):null}function f(){var e,t,i="rtd-"+(Math.random()+1).toString(36).substring(4),n=c.PROMO_TYPES.FOOTER,r=c.DEFAULT_PROMO_PRIORITY,o=null;return d.is_rtd_like_theme()?(o=$("").insertAfter("footer hr"),e="ethical-rtd"):d.is_alabaster_like_theme()&&(o="div.bodywrapper .body",e="ethical-alabaster"),o?($("").attr("id",i).addClass(e).appendTo(o),(!(t=$("#"+i).offset())||t.top<$(window).height())&&(r=c.LOW_PROMO_PRIORITY),{div_id:i,display_type:n,priority:r}):null}function g(){var e="rtd-"+(Math.random()+1).toString(36).substring(4),t=c.PROMO_TYPES.FIXED_FOOTER,i=c.DEFAULT_PROMO_PRIORITY;return n&&n.mobile&&(i=c.MAXIMUM_PROMO_PRIORITY),$("").attr("id",e).appendTo("body"),{div_id:e,display_type:t,priority:i}}function 
m(e){this.id=e.id,this.div_id=e.div_id||"",this.html=e.html||"",this.display_type=e.display_type||"",this.view_tracking_url=e.view_url,this.click_handler=function(){"undefined"!=typeof ga?ga("rtfd.send","event","Promo","Click",e.id):"undefined"!=typeof _gaq&&_gaq.push(["rtfd._setAccount","UA-17997319-1"],["rtfd._trackEvent","Promo","Click",e.id])}}m.prototype.display=function(){var e="#"+this.div_id,t=this.view_tracking_url;$(e).html(this.html),$(e).find('a[href*="/sustainability/click/"]').on("click",this.click_handler);function i(){$.inViewport($(e),-3)&&($("
").attr("src",t).css("display","none").appendTo(e),$(window).off(".rtdinview"),$(".wy-side-scroll").off(".rtdinview"))}$(window).on("DOMContentLoaded.rtdinview load.rtdinview scroll.rtdinview resize.rtdinview",i),$(".wy-side-scroll").on("scroll.rtdinview",i),$(".ethical-close").on("click",function(){return $(e).hide(),!1}),this.post_promo_display()},m.prototype.disable=function(){$("#"+this.div_id).hide()},m.prototype.post_promo_display=function(){this.display_type===c.PROMO_TYPES.FOOTER&&($("
").insertAfter("#"+this.div_id),$("
").insertBefore("#"+this.div_id+".ethical-alabaster .ethical-footer"))},t.exports={Promo:m,init:function(){var e,t,i={format:"jsonp"},n=[],r=[],o=[],s=[f,p,g],a=!1;if(d=u.get(),t=function(){var e,t="rtd-"+(Math.random()+1).toString(36).substring(4),i=c.PROMO_TYPES.LEFTNAV;return e=d.is_rtd_like_theme()?"ethical-rtd ethical-dark-theme":"ethical-alabaster",0<$(h).length?($("").attr("id",t).addClass(e).appendTo(h),{div_id:t,display_type:i}):null}())n.push(t.div_id),r.push(t.display_type),o.push(t.priority||c.DEFAULT_PROMO_PRIORITY),a=!0;else{if(!d.show_promo())return;for(var l=0;l").attr("id","rtd-detection").attr("class","ethical-rtd").html(" ").appendTo("body"),0===$("#rtd-detection").height()&&(e=!0),$("#rtd-detection").remove(),e}()&&(console.log("---------------------------------------------------------------------------------------"),console.log("Read the Docs hosts documentation for tens of thousands of open source projects."),console.log("We fund our development (we are open source) and operations through advertising."),console.log("We promise to:"),console.log(" - never let advertisers run 3rd party JavaScript"),console.log(" - never sell user data to advertisers or other 3rd parties"),console.log(" - only show advertisements of interest to developers"),console.log("Read more about our approach to advertising here: https://docs.readthedocs.io/en/latest/ethical-advertising.html"),console.log("%cPlease allow our Ethical Ads or go ad-free:","font-size: 2em"),console.log("https://docs.readthedocs.io/en/latest/advertising/ad-blocking.html"),console.log("--------------------------------------------------------------------------------------"),function(){var e=p(),t=null;e&&e.div_id&&(t=$("#"+e.div_id).attr("class","keep-us-sustainable"),$("").text("Support Read the Docs!").appendTo(t),$("").html('Please help keep us sustainable by allowing our Ethical Ads in your ad blocker or go ad-free by subscribing.').appendTo(t),$("").text("Thank you! ❤️").appendTo(t))}())}})}}},{"./constants":14,"./rtd-data":16,bowser:7}],20:[function(e,t,i){var o=e("./rtd-data");t.exports={init:function(e){var t=o.get();if(!e.is_highest){var i=window.location.pathname.replace(t.version,e.slug),n=$(' Note
You are not reading the most recent version of this documentation. is the latest version available.
');n.find("a").attr("href",i).text(e.slug);var r=$("div.body");r.length||(r=$("div.document")),r.prepend(n)}}}},{"./rtd-data":16}],21:[function(e,t,i){var n=e("./doc-embed/sponsorship"),r=e("./doc-embed/footer.js"),o=(e("./doc-embed/rtd-data"),e("./doc-embed/sphinx")),s=e("./doc-embed/search");$.extend(e("verge")),$(document).ready(function(){r.init(),o.init(),s.init(),n.init()})},{"./doc-embed/footer.js":15,"./doc-embed/rtd-data":16,"./doc-embed/search":17,"./doc-embed/sphinx":18,"./doc-embed/sponsorship":19,verge:13}]},{},[21]);
\ No newline at end of file
diff --git a/readthedocs/projects/models.py b/readthedocs/projects/models.py
index 11bca8555f4..e025730b9c8 100644
--- a/readthedocs/projects/models.py
+++ b/readthedocs/projects/models.py
@@ -1305,6 +1305,7 @@ def get_processed_json(self):
'path': file_path,
'title': '',
'sections': [],
+ 'domain_data': {},
}
@cached_property
diff --git a/readthedocs/projects/static/projects/js/tools.js b/readthedocs/projects/static/projects/js/tools.js
index ec4ff8a1f40..5e60509d765 100644
--- a/readthedocs/projects/static/projects/js/tools.js
+++ b/readthedocs/projects/static/projects/js/tools.js
@@ -1 +1 @@
-require=function o(i,a,l){function c(t,e){if(!a[t]){if(!i[t]){var n="function"==typeof require&&require;if(!e&&n)return n(t,!0);if(u)return u(t,!0);var r=new Error("Cannot find module '"+t+"'");throw r.code="MODULE_NOT_FOUND",r}var s=a[t]={exports:{}};i[t][0].call(s.exports,function(e){return c(i[t][1][e]||e)},s,s.exports,o,i,a,l)}return a[t].exports}for(var u="function"==typeof require&&require,e=0;e'),i("body").append(t));var n=e.insertContent(t);i(n).show(),t.show(),i(document).click(function(e){i(e.target).closest("#embed-container").length||(i(n).remove(),t.remove())})}function s(e){var s=this;s.config=e||{},void 0===s.config.api_host&&(s.config.api_host="https://readthedocs.org"),s.help=o.observable(null),s.error=o.observable(null),s.project=o.observable(s.config.project),s.file=o.observable(null),s.sections=o.observableArray(),o.computed(function(){var e=s.file();(s.sections.removeAll(),e)&&(s.help("Loading..."),s.error(null),s.section(null),new r.Embed(s.config).page(s.project(),"latest",s.file(),function(e){s.sections.removeAll(),s.help(null),s.error(null);var t,n=[];for(t in e.sections){var r=e.sections[t];i.each(r,function(e,t){n.push({title:e,id:e})})}s.sections(n)},function(e){s.help(null),s.error("There was a problem retrieving data from the API")}))}),s.has_sections=o.computed(function(){return 0'),i("body").append(t));var n=e.insertContent(t);i(n).show(),t.show(),i(document).click(function(e){i(e.target).closest("#embed-container").length||(i(n).remove(),t.remove())})}function s(e){var s=this;s.config=e||{},void 0===s.config.api_host&&(s.config.api_host="https://readthedocs.org"),s.help=o.observable(null),s.error=o.observable(null),s.project=o.observable(s.config.project),s.file=o.observable(null),s.sections=o.observableArray(),o.computed(function(){var e=s.file();(s.sections.removeAll(),e)&&(s.help("Loading..."),s.error(null),s.section(null),new r.Embed(s.config).page(s.project(),"latest",s.file(),function(e){s.sections.removeAll(),s.help(null),s.error(null);var t,n=[];for(t in e.sections){var r=e.sections[t];i.each(r,function(e,t){n.push({title:e,id:e})})}s.sections(n)},function(e){s.help(null),s.error("There was a problem retrieving data from the API")}))}),s.has_sections=o.computed(function(){return 0 tags to prevent duplicate indexing with Sphinx Domains.
+ try:
+ # remove all <dl> tags which contains <dt> tags having 'id' attribute
+ dt_tags = body('dt[id]')
+ dt_tags.parents('dl').remove()
+ except Exception:
+ log.exception('Error removing <dl> tags from file: %s', fjson_storage_path)
+
+ # remove toctree elements
+ try:
+ body('.toctree-wrapper').remove()
+ except Exception:
+ log.exception('Error removing toctree elements from file: %s', fjson_storage_path)
+
# Capture text inside h1 before the first h2
h1_section = body('.section > h1')
if h1_section:
@@ -27,7 +42,12 @@ def generate_sections_from_pyquery(body):
if 'section' in next_p[0].attrib['class']:
break
- h1_content += parse_content(next_p.text())
+ text = parse_content(next_p.text(), remove_first_line=True)
+ if h1_content:
+ h1_content = f'{h1_content.rstrip(".")}. {text}'
+ else:
+ h1_content = text
+
next_p = next_p.next()
if h1_content:
yield {
@@ -45,7 +65,7 @@ def generate_sections_from_pyquery(body):
section_id = div.attr('id')
content = div.text()
- content = parse_content(content)
+ content = parse_content(content, remove_first_line=True)
yield {
'id': section_id,
@@ -74,6 +94,7 @@ def process_file(fjson_storage_path):
sections = []
path = ''
title = ''
+ domain_data = {}
if 'current_page_name' in data:
path = data['current_page_name']
@@ -82,7 +103,8 @@ def process_file(fjson_storage_path):
if data.get('body'):
body = PyQuery(data['body'])
- sections.extend(generate_sections_from_pyquery(body))
+ sections.extend(generate_sections_from_pyquery(body.clone(), fjson_storage_path))
+ domain_data = generate_domains_data_from_pyquery(body.clone(), fjson_storage_path)
else:
log.info('Unable to index content for: %s', fjson_storage_path)
@@ -96,24 +118,70 @@ def process_file(fjson_storage_path):
'path': path,
'title': title,
'sections': sections,
+ 'domain_data': domain_data,
}
-def parse_content(content):
- """
- Removes the starting text and ¶.
-
- It removes the starting text from the content
- because it contains the title of that content,
- which is redundant here.
- """
+def parse_content(content, remove_first_line=False):
+ """Removes new line characters and ¶."""
content = content.replace('¶', '').strip()
# removing the starting text of each
content = content.split('\n')
- if len(content) > 1: # there were \n
+ if remove_first_line and len(content) > 1:
content = content[1:]
# converting newlines to ". "
content = '. '.join([text.strip().rstrip('.') for text in content])
return content
+
+
+def _get_text_for_domain_data(desc_contents):
+ """Returns the text from the PyQuery object ``desc_contents``."""
+ # remove the 'dl', 'dt' and 'dd' tags from it
+ # because all the 'dd' and 'dt' tags are inside 'dl'
+ # and all 'dl' tags are already captured.
+ desc_contents.remove('dl')
+ desc_contents.remove('dt')
+ desc_contents.remove('dd')
+
+ # remove multiple spaces, new line characters and '¶' symbol.
+ docstrings = parse_content(desc_contents.text())
+ return docstrings
+
+
+def generate_domains_data_from_pyquery(body, fjson_storage_path):
+ """
+ Given a pyquery object, generate sphinx domain objects' docstrings.
+
+ Returns a dict with the generated data.
+ The returned dict is in the following form::
+
+ {
+ "domain-id-1": "docstrings for the domain-id-1",
+ "domain-id-2": "docstrings for the domain-id-2",
+ }
+ """
+
+ domain_data = {}
+ dl_tags = body('dl')
+
+ for dl_tag in dl_tags:
+
+ dt = dl_tag.findall('dt')
+ dd = dl_tag.findall('dd')
+
+ # len(dt) should be equal to len(dd)
+ # because these tags go together.
+ for title, desc in zip(dt, dd):
+ try:
+ id_ = title.attrib.get('id')
+ if id_:
+ # clone the PyQuery objects so that
+ # the original one remains undisturbed
+ docstrings = _get_text_for_domain_data(PyQuery(desc).clone())
+ domain_data[id_] = docstrings
+ except Exception:
+ log.exception('Error parsing docstrings for domains in file %s', fjson_storage_path)
+
+ return domain_data
diff --git a/readthedocs/search/tests/data/docs/support.json b/readthedocs/search/tests/data/docs/support.json
index 265041504ad..5388b19e2c4 100644
--- a/readthedocs/search/tests/data/docs/support.json
+++ b/readthedocs/search/tests/data/docs/support.json
@@ -5,7 +5,7 @@
{
"id": "usage-questions",
"title": "Usage Questions",
- "content": "If you have questions about how to use Read the Docs, or have an issue that isn’t related to a bug, Stack Overflow is the best place to ask. Tag questions with read-the-docs so other folks can find them easily.. Good questions for Stack Overflow would be:. “What is the best way to structure the table of contents across a project?”. “How do I structure translations inside of my project for easiest contribution from users?”. “How do I use Sphinx to use SVG images in HTML output but PNG in PDF output?”"
+ "content": "For help, Stack Overflow is the palce. Tag questions with read-the-docs so other folks can find them easily.. Good questions for Stack Overflow would be:. “What is the best way to structure the table of contents across a project?”. “How do I structure translations inside of my project for easiest contribution from users?”. “How do I use Sphinx to use SVG images in HTML output but PNG in PDF output?”"
},
{
"id": "community-support",
@@ -20,22 +20,20 @@
],
"domains": [
{
- "role_name": "http:post",
- "doc_name": "api/v3.html",
- "anchor": "post--api-v3-projects-(string-project_slug)-versions-(string-version_slug)-builds-",
- "type_display": "post",
- "doc_display": "API v3",
- "name": "/api/v3/projects/(string:project_slug)/versions/(string:version_slug)/builds/",
- "display_name": ""
+ "role_name": "py:function",
+ "anchor": "celery.utils.deprecated.warn",
+ "type_display": "function",
+ "name": "celery.utils.deprecated.warn"
},
{
- "role_name": "http:patch",
- "doc_name": "api/v3.html",
- "anchor": "patch--api-v3-projects-(string-project_slug)-version-(string-version_slug)-",
- "type_display": "patch",
- "doc_display": "API v3",
- "name": "/api/v3/projects/(string:project_slug)/version/(string:version_slug)/",
- "display_name": ""
+ "role_name": "py:function",
+ "anchor": "celery.utils.deprecated.Property",
+ "type_display": "function",
+ "name": "celery.utils.deprecated.Property"
}
- ]
+ ],
+ "domain_data": {
+ "celery.utils.deprecated.warn": "Warn of (pending) deprecation",
+ "celery.utils.deprecated.Property": "Decorator for deprecated properties"
+ }
}
diff --git a/readthedocs/search/tests/data/docs/wiping.json b/readthedocs/search/tests/data/docs/wiping.json
index b5a269551d6..cdcba86e28b 100644
--- a/readthedocs/search/tests/data/docs/wiping.json
+++ b/readthedocs/search/tests/data/docs/wiping.json
@@ -5,45 +5,25 @@
{
"id": "wiping-a-build-environment",
"title": "Wiping a Build Environment",
- "content": "Sometimes it happen that your Builds start failing because the build environment where the documentation is created is stale or broken. This could happen for a couple of different reasons like pip not upgrading a package properly or a corrupted cached Python package.In any of these cases (and many others), the solution could be just wiping out the existing build environment files and allow Read the Docs to create a new fresh one.Follow these steps to wipe the build environment:Click on the Edit button of the version you want to wipe on the right side of the page. Go to the bottom of the page and click the wipe link, next to the “Save” buttonBy wiping the documentation build environment, all the rst, md, and code files associated with it will be removed but not the documentation already built (HTML and PDF files). Your documentation will still online after wiping the build environment.Now you can re-build the version with a fresh build environment! This is a test line which contains the word 'Elasticsearch Query'."
+ "content": "Sometimes it happen that your Builds start failing because the build environment where the documentation is created is stale or broken. This could happen for a couple of different reasons like pip not upgrading a package properly or a corrupted cached Python package.In any of these cases (and many others), the solution could be just wiping out the existing build environment files and allow Read the Docs to create a new fresh one.Follow these steps to wipe the build environment:Click on the Edit button of the version you want to wipe on the right side of the page. Go to the bottom of the page and click the wipe link, next to the “Save” buttonBy wiping the documentation build environment, all the rst, md, and code files associated with it will be removed but not the documentation already built (HTML and PDF files). Your documentation will still online after wiping the build environment.Now you can re-build the version with a fresh build environment! This line which contains the word 'Elasticsearch Query'."
}
],
"domains": [
{
- "role_name": "http:get",
- "doc_name": "api/v3.html",
- "anchor": "get--api-v3-users-(str-username)",
- "type_display": "get",
- "doc_display": "API v3",
- "name": "/api/v3/users/(str:username)",
- "display_name": ""
+ "role_name": "py:function",
+ "anchor": "celery.concurrency.get_implementation",
+ "type_display": "function",
+ "name": "celery.concurrency.get_implementation"
},
{
- "role_name": "http:get",
- "doc_name": "api/v3.html",
- "anchor": "get--api-v3-projects-(string-project_slug)-versions-(string-version_slug)-",
- "type_display": "get",
- "doc_display": "API v3",
- "name": "/api/v3/projects/(string:project_slug)/versions/(string:version_slug)/",
- "display_name": ""
- },
- {
- "role_name": "http:get",
- "doc_name": "api/v3.html",
- "anchor": "get--api-v3-projects-(string-project_slug)-versions-",
- "type_display": "get",
- "doc_display": "API v3",
- "name": "/api/v3/projects/(string:project_slug)/versions/",
- "display_name": ""
- },
- {
- "role_name": "http:get",
- "doc_name": "api/v3.html",
- "anchor": "get--api-v3-projects-(string-project_slug)-",
- "type_display": "get",
- "doc_display": "API v3",
- "name": "/api/v3/projects/(string:project_slug)/",
- "display_name": ""
+ "role_name": "py:function",
+ "anchor": "celery.utils.functional.head_from_fun",
+ "type_display": "function",
+ "name": "celery.utils.functional.head_from_fun"
}
- ]
+ ],
+ "domain_data": {
+ "celery.concurrency.get_implementation": "Return pool implementation by name",
+ "celery.utils.functional.head_from_fun": "Generate signature function from actual function"
+ }
}
diff --git a/readthedocs/search/tests/data/kuma/docker.json b/readthedocs/search/tests/data/kuma/docker.json
index b91a0c420a3..39a77cb9e08 100644
--- a/readthedocs/search/tests/data/kuma/docker.json
+++ b/readthedocs/search/tests/data/kuma/docker.json
@@ -15,22 +15,20 @@
],
"domains": [
{
- "role_name": "py:module",
- "doc_name": "autoapi/notfound/utils/index.html",
- "anchor": "module-notfound.utils",
- "type_display": "module",
- "doc_display": "notfound.utils",
- "name": "notfound.utils",
- "display_name": ""
+ "role_name": "py:function",
+ "anchor": "celery.utils.text.simple_format",
+ "type_display": "function",
+ "name": "celery.utils.text.simple_format"
},
{
"role_name": "py:function",
- "doc_name": "autoapi/notfound/utils/index.html",
- "anchor": "notfound.utils.replace_uris",
+ "anchor": "celery.utils.text.pluralize",
"type_display": "function",
- "doc_display": "notfound.utils",
- "name": "notfound.utils.replace_uris",
- "display_name": ""
+ "name": "celery.utils.text.pluralize"
}
- ]
+ ],
+ "domain_data": {
+ "celery.utils.text.simple_format": "Format string, expanding abbreviations in keys'",
+ "celery.utils.text.pluralize": "Pluralize term when n is greater than one"
+ }
}
diff --git a/readthedocs/search/tests/data/kuma/documentation.json b/readthedocs/search/tests/data/kuma/documentation.json
index 3f0969bcf1c..94b84f5e3d4 100644
--- a/readthedocs/search/tests/data/kuma/documentation.json
+++ b/readthedocs/search/tests/data/kuma/documentation.json
@@ -15,22 +15,20 @@
],
"domains": [
{
- "role_name": "py:module",
- "doc_name": "autoapi/notfound/index.html",
- "anchor": "module-notfound",
- "type_display": "module",
- "doc_display": "notfound",
- "name": "notfound",
- "display_name": ""
+ "role_name": "py:method",
+ "anchor": "celery.worker.components.Hub.include_if",
+ "type_display": "method",
+ "name": "celery.worker.components.Hub.include_if"
},
{
- "role_name": "py:data",
- "doc_name": "autoapi/notfound/index.html",
- "anchor": "notfound.version",
- "type_display": "data",
- "doc_display": "notfound",
- "name": "notfound.version",
- "display_name": ""
+ "role_name": "py:function",
+ "anchor": "celery.utils.text.fill_paragraphs",
+ "type_display": "function",
+ "name": "celery.utils.text.fill_paragraphs"
}
- ]
+ ],
+ "domain_data": {
+ "celery.worker.components.Hub.include_if": "Return true if bootstep should be included. You can define this as an optional predicate that decides whether this step should be created",
+ "celery.utils.text.fill_paragraphs": "Fill paragraphs with newlines (or custom separator)"
+ }
}
diff --git a/readthedocs/search/tests/data/pipeline/installation.json b/readthedocs/search/tests/data/pipeline/installation.json
index c6516015f34..316dfa1c201 100644
--- a/readthedocs/search/tests/data/pipeline/installation.json
+++ b/readthedocs/search/tests/data/pipeline/installation.json
@@ -25,13 +25,13 @@
],
"domains": [
{
- "role_name": "std:confval",
- "doc_name": "configuration.html",
- "anchor": "confval-notfound_default_language",
- "type_display": "confval",
- "doc_display": "Configuration",
- "name": "notfound_default_language",
- "display_name": ""
+ "role_name": "py:method",
+ "anchor": "sphinx.domains.Domain.process_field_xref",
+ "type_display": "method",
+ "name": "sphinx.domains.Domain.process_field_xref"
}
- ]
+ ],
+ "domain_data": {
+ "sphinx.domains.Domain.process_field_xref": "Process a pending xref."
+ }
}
diff --git a/readthedocs/search/tests/data/pipeline/signals.json b/readthedocs/search/tests/data/pipeline/signals.json
index 97b1095d29f..efc82eebb26 100644
--- a/readthedocs/search/tests/data/pipeline/signals.json
+++ b/readthedocs/search/tests/data/pipeline/signals.json
@@ -20,22 +20,20 @@
],
"domains": [
{
- "role_name": "py:method",
- "doc_name": "autoapi/notfound/extension/index.html",
- "anchor": "notfound.extension.OrphanMetadataCollector.process_doc",
- "type_display": "method",
- "doc_display": "notfound.extension",
- "name": "notfound.extension.OrphanMetadataCollector.process_doc",
- "display_name": ""
+ "role_name": "py:class",
+ "anchor": "celery.worker.control.Panel",
+ "type_display": "class",
+ "name": "celery.worker.control.Panel"
},
{
"role_name": "py:method",
- "doc_name": "autoapi/notfound/extension/index.html",
- "anchor": "notfound.extension.OrphanMetadataCollector.clear_doc",
+ "anchor": "celery.platforms.Pidfile.remove_if_stale",
"type_display": "method",
- "doc_display": "notfound.extension",
- "name": "notfound.extension.OrphanMetadataCollector.clear_doc",
- "display_name": ""
+ "name": "celery.platforms.Pidfile.remove_if_stale"
}
- ]
+ ],
+ "domain_data": {
+ "celery.worker.control.Panel": "Global registry of remote control commands",
+ "celery.platforms.Pidfile.remove_if_stale": "Remove the lock if the process isn’t running. I.e. process does not respons to signal"
+ }
}
diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py
index 2f8bb947cc2..603024ca3b5 100644
--- a/readthedocs/search/tests/test_views.py
+++ b/readthedocs/search/tests/test_views.py
@@ -154,17 +154,17 @@ def test_file_search(self, client, project, data_type, page_num):
def test_file_search_have_correct_role_name_facets(self, client):
"""Test that searching files should result all role_names."""
- # searching for '/api/v3/' to test that
+ # searching for 'celery' to test that
# correct role_names are displayed
results, facets = self._get_search_result(
url=self.url,
client=client,
- search_params={ 'q': '/api/v3/', 'type': 'file' }
+ search_params={ 'q': 'celery', 'type': 'file' }
)
assert len(results) >= 1
role_name_facets = facets['role_name']
role_name_facets_str = [facet[0] for facet in role_name_facets]
- expected_role_names = ['http:get', 'http:patch', 'http:post']
+ expected_role_names = ['py:class', 'py:function', 'py:method']
assert sorted(expected_role_names) == sorted(role_name_facets_str)
for facet in role_name_facets:
assert facet[2] == False # because none of the facets are applied
@@ -172,7 +172,7 @@ def test_file_search_have_correct_role_name_facets(self, client):
def test_file_search_filter_role_name(self, client):
"""Test that searching files filtered according to role_names."""
- search_params = { 'q': 'notfound', 'type': 'file' }
+ search_params = { 'q': 'celery', 'type': 'file' }
# searching without the filter
results, facets = self._get_search_result(
url=self.url,
@@ -184,11 +184,11 @@ def test_file_search_filter_role_name(self, client):
for facet in role_name_facets:
assert facet[2] == False # because none of the facets are applied
- confval_facet = 'std:confval'
- # checking if 'std:confval' facet is present in results
+ confval_facet = 'py:class'
+ # checking if 'py:class' facet is present in results
assert confval_facet in [facet[0] for facet in role_name_facets]
- # filtering with role_name=std:confval
+ # filtering with role_name=py:class
search_params['role_name'] = confval_facet
new_results, new_facets = self._get_search_result(
url=self.url,
@@ -196,8 +196,8 @@ def test_file_search_filter_role_name(self, client):
search_params=search_params
)
new_role_names_facets = new_facets['role_name']
- # there is only one result with role_name='std:confval'
- # in `installation` page
+ # there is only one result with role_name='py:class'
+ # in `signals` page
assert len(new_results) == 1
first_result = new_results[0] # first result
inner_hits = first_result.meta.inner_hits # inner_hits of first results
@@ -279,9 +279,9 @@ def test_file_search_exact_match(self, client, project):
def test_file_search_have_correct_project_facets(self, client, all_projects):
"""Test that file search have correct project facets in results"""
- # `Sphinx` word is present both in `kuma` and `docs` files
+ # `environment` word is present both in `kuma` and `docs` files
# so search with this phrase
- query = 'Sphinx'
+ query = 'environment'
results, facets = self._get_search_result(
url=self.url,
client=client,
@@ -299,10 +299,10 @@ def test_file_search_have_correct_project_facets(self, client, all_projects):
def test_file_search_filter_by_project(self, client):
"""Test that search result are filtered according to project."""
- # `Sphinx` word is present both in `kuma` and `docs` files
+ # `environment` word is present both in `kuma` and `docs` files
# so search with this phrase but filter through `kuma` project
search_params = {
- 'q': 'Sphinx',
+ 'q': 'environment',
'type': 'file',
'project': 'kuma'
}
diff --git a/readthedocs/search/tests/utils.py b/readthedocs/search/tests/utils.py
index 1a2a433091f..d3cf1530996 100644
--- a/readthedocs/search/tests/utils.py
+++ b/readthedocs/search/tests/utils.py
@@ -6,7 +6,7 @@
SECTION_FIELDS = [ 'sections.title', 'sections.content' ]
-DOMAIN_FIELDS = [ 'domains.type_display', 'domains.name' ]
+DOMAIN_FIELDS = [ 'domains.name', 'domains.docstrings' ]
DATA_TYPES_VALUES = ['title'] + SECTION_FIELDS + DOMAIN_FIELDS
@@ -42,19 +42,14 @@ def get_search_query_from_project_file(project_slug, page_num=0, data_type='titl
query_data = query_data[0]['content'].split()
start = random.randint(0, 6)
- # 3 words to generate query to make sure that
+ # 5 words to generate query to make sure that
# query does not only contains 'is', 'and', 'the'
# and other stop words
- end = start + 3
+ end = start + 5
query = query_data[start:end]
query = ' '.join(query)
- elif data_type == 'domains.type_display':
-
- # uses first word of domains.type_display as query
- query = query_data[0]['type_display'].split()[0]
-
elif data_type == 'domains.name':
# test data contains domains.name
# some of which contains '.' and some '/'
@@ -80,4 +75,20 @@ def get_search_query_from_project_file(project_slug, page_num=0, data_type='titl
else:
query = query_data[0]['name'].split()[0]
+ elif data_type == 'domains.docstrings':
+
+ # generates query from domain docstrings
+ anchor = query_data[0]['anchor']
+ docstrings = file_data['domain_data'][anchor]
+ query_data = docstrings.split()
+ start = random.randint(0, 1)
+
+ # 5 words to generate query to make sure that
+ # query does not only contains 'is', 'and', 'the'
+ # and other stop words
+ end = start + 5
+
+ query = query_data[start:end]
+ query = ' '.join(query)
+
return query
diff --git a/readthedocs/templates/search/elastic_search.html b/readthedocs/templates/search/elastic_search.html
index 18528a3eda4..ddea7c112bd 100644
--- a/readthedocs/templates/search/elastic_search.html
+++ b/readthedocs/templates/search/elastic_search.html
@@ -179,61 +179,64 @@
-
-
- {% if inner_hit.source.display_name|length >= 1 %}
- ({{ inner_hit.source.role_name }}) {{ inner_hit.source.display_name}}
+ {% with "100" as MAX_SUBSTRING_LIMIT %}
+
+ {{ result.project }} - {% if result.meta.highlight.title %} {{ result.meta.highlight.title.0|safe }} {% else %} {{ result.title }} {% endif %}
+
+
+ {% for inner_hit in result.meta.inner_hits %}
+ {% if inner_hit.type == 'domains' %}
+
+
+ {% if inner_hit.highlight|get_key_or_none:"domains.name" %}
+ {% with domain_name=inner_hit.highlight|get_key_or_none:"domains.name" %}
+ [{{ inner_hit.source.role_name }}]: {{ domain_name.0|safe }}
+ {% endwith %}
+ {% else %}
+ [{{ inner_hit.source.role_name }}]: {{ inner_hit.source.name }}
+ {% endif %}
+
+
+
+ {% if inner_hit.highlight|get_key_or_none:"domains.docstrings" %}
+ {% with domain_docstrings=inner_hit.highlight|get_key_or_none:"domains.docstrings" %}
+ {{ domain_docstrings.0|safe }}
+ {% endwith %}
{% else %}
- {{ inner_hit.source.role_name }}
+ {% if inner_hit.source.docstrings %}
+ {{ inner_hit.source.docstrings|slice:MAX_SUBSTRING_LIMIT }} ...
+ {% endif %}
{% endif %}
-