Search is not available for this dataset
text stringlengths 75 104k |
|---|
def changelog(self, api_version, doc):
    """Add a changelog entry for this api.

    The docstring-style text is dedented and stripped before storage, and
    the callsite is recorded so the entry can be traced back to its source.
    """
    entry_text = textwrap.dedent(doc).strip()
    self._changelog[api_version] = entry_text
    self._changelog_locations[api_version] = get_callsite_location()
def title_prefix(soup):
"titlePrefix for article JSON is only articles with certain display_channel values"
prefix = None
display_channel_match_list = ['feature article', 'insight', 'editorial']
for d_channel in display_channel(soup):
if d_channel.lower() in display_channel_match_list:
... |
def title_prefix_json(soup):
    "titlePrefix with capitalisation changed"
    # take the plain prefix then pass it through the json rewrite rules
    return elifetools.json_rewrite.rewrite_json(
        "title_prefix_json", soup, title_prefix(soup))
def research_organism(soup):
    """Find the research-organism keywords from the set of kwd-group tags.

    :param soup: parsed article XML
    :returns: list of keyword text values, [] when none are present
    """
    # hoist the parser call: the original invoked it twice (once for the
    # emptiness check, once for the map), re-walking the tree
    keyword_tags = raw_parser.research_organism_keywords(soup)
    if not keyword_tags:
        return []
    return [node_text(tag) for tag in keyword_tags]
def full_research_organism(soup):
    """research-organism list including inline tags, such as italic.

    :param soup: parsed article XML
    :returns: list of keyword markup strings, [] when none are present
    """
    # hoist the parser call: the original invoked it twice
    keyword_tags = raw_parser.research_organism_keywords(soup)
    if not keyword_tags:
        return []
    return [node_contents_str(tag) for tag in keyword_tags]
def keywords(soup):
    """
    Find the keywords from the set of kwd-group tags
    which are typically labelled as the author keywords

    :param soup: parsed article XML
    :returns: list of keyword text values, [] when none are present
    """
    # hoist the parser call: the original invoked it twice
    keyword_tags = raw_parser.author_keywords(soup)
    if not keyword_tags:
        return []
    return [node_text(tag) for tag in keyword_tags]
def full_keywords(soup):
    """author keywords list including inline tags, such as italic.

    :param soup: parsed article XML
    :returns: list of keyword markup strings, [] when none are present
    """
    # hoist the parser call: the original invoked it twice
    keyword_tags = raw_parser.author_keywords(soup)
    if not keyword_tags:
        return []
    return [node_contents_str(tag) for tag in keyword_tags]
def version_history(soup, html_flag=True):
"extract the article version history details"
convert = lambda xml_string: xml_to_html(html_flag, xml_string)
version_history = []
related_object_tags = raw_parser.related_object(raw_parser.article_meta(soup))
for tag in related_object_tags:
article... |
def article_id_list(soup):
"""return a list of article-id data"""
id_list = []
for article_id_tag in raw_parser.article_id(soup):
id_details = OrderedDict()
set_if_value(id_details, "type", article_id_tag.get("pub-id-type"))
set_if_value(id_details, "value", article_id_tag.text)
... |
def copyright_holder_json(soup):
"for json output add a full stop if ends in et al"
holder = None
permissions_tag = raw_parser.article_permissions(soup)
if permissions_tag:
holder = node_text(raw_parser.copyright_holder(permissions_tag))
if holder is not None and holder.endswith('et al'):
... |
def subject_area(soup):
    """
    Find the subject areas from article-categories subject tags

    :param soup: parsed article XML
    :returns: list of subject area text values
    """
    # comprehension instead of a manual append loop (PERF401); also avoids
    # shadowing the function name with the accumulator variable
    return [node_text(tag) for tag in raw_parser.subject_area(soup)]
def display_channel(soup):
    """
    Find the subject areas of type display-channel

    :param soup: parsed article XML
    :returns: list of display channel text values
    """
    # comprehension instead of a manual append loop (PERF401)
    return [node_text(tag) for tag in raw_parser.display_channel(soup)]
def category(soup):
    """
    Find the category from subject areas

    :param soup: parsed article XML
    :returns: list of category text values
    """
    # comprehension instead of a manual append loop (PERF401); also avoids
    # shadowing the function name with the accumulator variable
    return [node_text(tag) for tag in raw_parser.category(soup)]
def ymd(soup):
    """
    Get the year, month and day from child tags

    :returns: (day, month, year) tuple of text values
    """
    return (
        node_text(raw_parser.day(soup)),
        node_text(raw_parser.month(soup)),
        node_text(raw_parser.year(soup)),
    )
def pub_date(soup):
"""
Return the publishing date in struct format
pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp
Default date_type is pub
"""
pub_date = first(raw_parser.pub_date(soup, date_type="pub"))
if pub_date is None:
pub_date = first(raw_parse... |
def pub_dates(soup):
"""
return a list of all the pub dates
"""
pub_dates = []
tags = raw_parser.pub_date(soup)
for tag in tags:
pub_date = OrderedDict()
copy_attribute(tag.attrs, 'publication-format', pub_date)
copy_attribute(tag.attrs, 'date-type', pub_date)
cop... |
def history_date(soup, date_type = None):
"""
Find a date in the history tag for the specific date_type
typical date_type values: received, accepted
"""
if(date_type == None):
return None
history_date = raw_parser.history_date(soup, date_type)
if history_date is None:
return... |
def collection_year(soup):
"""
Pub date of type collection will hold a year element for VOR articles
"""
pub_date = first(raw_parser.pub_date(soup, pub_type="collection"))
if not pub_date:
pub_date = first(raw_parser.pub_date(soup, date_type="collection"))
if not pub_date:
return... |
def abstracts(soup):
"""
Find the article abstract and format it
"""
abstracts = []
abstract_tags = raw_parser.abstract(soup)
for tag in abstract_tags:
abstract = {}
abstract["abstract_type"] = tag.get("abstract-type")
title_tag = raw_parser.title(tag)
if titl... |
def component_doi(soup):
"""
Look for all object-id of pub-type-id = doi, these are the component DOI tags
"""
component_doi = []
object_id_tags = raw_parser.object_id(soup, pub_id_type = "doi")
# Get components too for later
component_list = components(soup)
position = 1
for tag... |
def tag_details(tag, nodenames):
"""
Used in media and graphics to extract data from their parent tags
"""
details = {}
details['type'] = tag.name
details['ordinal'] = tag_ordinal(tag)
# Ordinal value
if tag_details_sibling_ordinal(tag):
details['sibling_ordinal'] = tag_details... |
def media(soup):
"""
All media tags and some associated data about the related component doi
and the parent of that doi (not always present)
"""
media = []
media_tags = raw_parser.media(soup)
position = 1
for tag in media_tags:
media_item = {}
copy_attribute(tag.attrs... |
def graphics(soup):
"""
All graphic tags and some associated data about the related component doi
and the parent of that doi (not always present), and whether it is
part of a figure supplement
"""
graphics = []
graphic_tags = raw_parser.graphic(soup)
position = 1
for tag in graphi... |
def inline_graphics(soup):
"""
inline-graphic tags
"""
inline_graphics = []
inline_graphic_tags = raw_parser.inline_graphic(soup)
position = 1
for tag in inline_graphic_tags:
item = {}
copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href')
# Get the tag type... |
def self_uri(soup):
"""
self-uri tags
"""
self_uri = []
self_uri_tags = raw_parser.self_uri(soup)
position = 1
for tag in self_uri_tags:
item = {}
copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href')
copy_attribute(tag.attrs, 'content-type', item)
# ... |
def supplementary_material(soup):
"""
supplementary-material tags
"""
supplementary_material = []
supplementary_material_tags = raw_parser.supplementary_material(soup)
position = 1
for tag in supplementary_material_tags:
item = {}
copy_attribute(tag.attrs, 'id', item)
... |
def contrib_email(contrib_tag):
"""
Given a contrib tag, look for an email tag, and
only return the value if it is not inside an aff tag
"""
email = []
for email_tag in extract_nodes(contrib_tag, "email"):
if email_tag.parent.name != "aff":
email.append(email_tag.text)
re... |
def contrib_phone(contrib_tag):
    """
    Given a contrib tag, look for a phone tag

    :param contrib_tag: a contrib tag element
    :returns: text of the first phone tag, or None when absent
    """
    # hoist the parser call: the original invoked raw_parser.phone() twice
    phone_tags = raw_parser.phone(contrib_tag)
    if not phone_tags:
        return None
    return first(phone_tags).text
def contrib_inline_aff(contrib_tag):
    """
    Given a contrib tag, look for an aff tag directly inside it

    :param contrib_tag: a contrib tag element (iterable of child nodes)
    :returns: list of direct child aff tags, in document order
    """
    # a truthy .name of "aff" implies the child itself is truthy and named
    return [child for child in contrib_tag
            if child and child.name == "aff"]
def contrib_xref(contrib_tag, ref_type):
"""
Given a contrib tag, look for an xref tag of type ref_type directly inside the contrib tag
"""
aff_tags = []
for child_tag in contrib_tag:
if (child_tag and child_tag.name and child_tag.name == "xref"
and child_tag.get('ref-type') and ... |
def all_contributors(soup, detail="brief"):
    "find all contributors, not constrained to only the ones in article meta"
    return format_authors(soup, raw_parser.contributors(soup), detail)
def authors_non_byline(soup, detail="full"):
"""Non-byline authors for group author members"""
# Get a filtered list of contributors, in order to get their group-author-id
contrib_type = "author non-byline"
contributors_ = contributors(soup, detail)
non_byline_authors = [author for author in contrib... |
def refs(soup):
"""Find and return all the references"""
tags = raw_parser.ref_list(soup)
refs = []
position = 1
article_doi = doi(soup)
for tag in tags:
ref = {}
ref['ref'] = ref_text(tag)
# ref_id
copy_attribute(tag.attrs, "id", ref)
# article_title... |
def extract_component_doi(tag, nodenames):
"""
Used to get component DOI from a tag and confirm it is actually for that tag
and it is not for one of its children in the list of nodenames
"""
component_doi = None
if(tag.name == "sub-article"):
component_doi = doi_uri_to_doi(node_text(fir... |
def components(soup):
"""
Find the components, i.e. those parts that would be assigned
a unique component DOI, such as figures, tables, etc.
- position is in what order the tag appears in the entire set of nodes
- ordinal is in what order it is for all the tags of its own type
"""
components... |
def correspondence(soup):
"""
Find the corresp tags included in author-notes
for primary correspondence
"""
correspondence = []
author_notes_nodes = raw_parser.author_notes(soup)
if author_notes_nodes:
corresp_nodes = raw_parser.corresp(author_notes_nodes)
for tag in corres... |
def author_notes(soup):
"""
Find the fn tags included in author-notes
"""
author_notes = []
author_notes_section = raw_parser.author_notes(soup)
if author_notes_section:
fn_nodes = raw_parser.fn(author_notes_section)
for tag in fn_nodes:
if 'fn-type' in tag.attrs:
... |
def full_author_notes(soup, fntype_filter=None):
"""
Find the fn tags included in author-notes
"""
notes = []
author_notes_section = raw_parser.author_notes(soup)
if author_notes_section:
fn_nodes = raw_parser.fn(author_notes_section)
notes = footnotes(fn_nodes, fntype_filter)
... |
def competing_interests(soup, fntype_filter):
"""
Find the fn tags included in the competing interest
"""
competing_interests_section = extract_nodes(soup, "fn-group", attr="content-type", value="competing-interest")
if not competing_interests_section:
return None
fn = extract_nodes(fir... |
def author_contributions(soup, fntype_filter):
"""
Find the fn tags included in the competing interest
"""
author_contributions_section = extract_nodes(soup, "fn-group", attr="content-type", value="author-contribution")
if not author_contributions_section:
return None
fn = extract_nodes... |
def full_award_groups(soup):
"""
Find the award-group items and return a list of details
"""
award_groups = []
funding_group_section = extract_nodes(soup, "funding-group")
# counter for auto generated id values, if required
generated_id_counter = 1
for fg in funding_group_section:
... |
def award_groups(soup):
"""
Find the award-group items and return a list of details
"""
award_groups = []
funding_group_section = extract_nodes(soup, "funding-group")
for fg in funding_group_section:
award_group_tags = extract_nodes(fg, "award-group")
for ag in award_group_tag... |
def award_group_funding_source(tag):
"""
Given a funding group element
Find the award group funding sources, one for each
item found in the get_funding_group section
"""
award_group_funding_source = []
funding_source_tags = extract_nodes(tag, "funding-source")
for t in funding_source_tag... |
def full_award_group_funding_source(tag):
"""
Given a funding group element
Find the award group funding sources, one for each
item found in the get_funding_group section
"""
award_group_funding_sources = []
funding_source_nodes = extract_nodes(tag, "funding-source")
for funding_source_n... |
def award_group_award_id(tag):
"""
Find the award group award id, one for each
item found in the get_funding_group section
"""
award_group_award_id = []
award_id_tags = extract_nodes(tag, "award-id")
for t in award_id_tags:
award_group_award_id.append(t.text)
return award_group_a... |
def award_group_principal_award_recipient(tag):
"""
Find the award group principal award recipient, one for each
item found in the get_funding_group section
"""
award_group_principal_award_recipient = []
principal_award_recipients = extract_nodes(tag, "principal-award-recipient")
for t in p... |
def object_id_doi(tag, parent_tag_name=None):
"""DOI in an object-id tag found inside the tag"""
doi = None
object_id = None
object_ids = raw_parser.object_id(tag, "doi")
if object_ids:
object_id = first([id_ for id_ in object_ids])
if parent_tag_name and object_id and object_id.parent.n... |
def title_tag_inspected(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False):
"""Extract the title tag and sometimes inspect its parents"""
title_tag = None
if direct_sibling_only is True:
for sibling_tag in tag:
if sibling_tag.name and sibling_tag.name == "titl... |
def title_text(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False):
"""Extract the text of a title tag and sometimes inspect its parents"""
title = None
title_tag = title_tag_inspected(tag, parent_tag_name, p_parent_tag_name, direct_sibling_only)
if title_tag:
title =... |
def boxed_text_to_image_block(tag):
"covert boxed-text to an image block containing an inline-graphic"
tag_block = OrderedDict()
image_content = body_block_image_content(first(raw_parser.inline_graphic(tag)))
tag_block["type"] = "image"
set_if_value(tag_block, "doi", doi_uri_to_doi(object_id_doi(tag... |
def body_json(soup, base_url=None):
""" Get body json and then alter it with section wrapping and removing boxed-text """
body_content = body(soup, remove_key_info_box=True, base_url=base_url)
# Wrap in a section if the first block is not a section
if (body_content and len(body_content) > 0 and "type" i... |
def body_block_content_render(tag, recursive=False, base_url=None):
"""
Render the tag as body content and call recursively if
the tag has child tags
"""
block_content_list = []
tag_content = OrderedDict()
if tag.name == "p":
for block_content in body_block_paragraph_render(tag, bas... |
def body_block_paragraph_render(p_tag, html_flag=True, base_url=None):
"""
paragraphs may wrap some other body block content
this is separated out so it can be called from more than one place
"""
# Configure the XML to HTML conversion preference for shorthand use below
convert = lambda xml_strin... |
def body_block_caption_render(caption_tags, base_url=None):
"""fig and media tag captions are similar so use this common function"""
caption_content = []
supplementary_material_tags = []
for block_tag in remove_doi_paragraph(caption_tags):
# Note then skip p tags with supplementary-material ins... |
def body_block_supplementary_material_render(supp_tags, base_url=None):
"""fig and media tag caption may have supplementary material"""
source_data = []
for supp_tag in supp_tags:
for block_content in body_block_content_render(supp_tag, base_url=base_url):
if block_content != {}:
... |
def body_block_paragraph_content(text):
    "for formatting of simple paragraphs of text only, and check if it is all whitespace"
    paragraph = OrderedDict()
    # truthy text already implies text != '', so a single truthiness
    # check is exactly equivalent to the two-part condition
    if text:
        paragraph["type"] = "paragraph"
        paragraph["text"] = clean_whitespace(text)
    return paragraph
def body_block_image_content(tag):
"format a graphic or inline-graphic into a body block json format"
image_content = OrderedDict()
if tag:
copy_attribute(tag.attrs, 'xlink:href', image_content, 'uri')
if "uri" in image_content:
# todo!! alt
set_if_value(image_content... |
def body_block_title_label_caption(tag_content, title_value, label_value,
caption_content, set_caption=True, prefer_title=False, prefer_label=False):
"""set the title, label and caption values in a consistent way
set_caption: insert a "caption" field
prefer_title: when on... |
def body_block_attribution(tag):
"extract the attribution content for figures, tables, videos"
attributions = []
if raw_parser.attrib(tag):
for attrib_tag in raw_parser.attrib(tag):
attributions.append(node_contents_str(attrib_tag))
if raw_parser.permissions(tag):
# concatena... |
def body_blocks(soup):
"""
Note: for some reason this works and few other attempted methods work
Search for certain node types, find the first nodes siblings of the same type
Add the first sibling and the other siblings to a list and return them
"""
nodenames = body_block_nodenames()
body_b... |
def abstract_json(soup):
    """abstract in article json format.

    Renders every abstract tag lacking an abstract-type attribute;
    the last such rendering wins, None when there is none.
    """
    rendered = None
    for abstract_tag in raw_parser.abstract(soup):
        if abstract_tag.get("abstract-type") is not None:
            continue
        rendered = render_abstract_json(abstract_tag)
    return rendered
def digest_json(soup):
    """digest in article json format.

    Renders every executive-summary abstract; the last rendering wins,
    None when there is none.
    """
    digest_content = None
    summary_tags = raw_parser.abstract(soup, abstract_type="executive-summary")
    for summary_tag in summary_tags:
        digest_content = render_abstract_json(summary_tag)
    return digest_content
def author_affiliations(author, html_flag=True):
"""compile author affiliations for json output"""
# Configure the XML to HTML conversion preference for shorthand use below
convert = lambda xml_string: xml_to_html(html_flag, xml_string)
affilations = []
if author.get("affiliations"):
for ... |
def author_json_details(author, author_json, contributions, correspondence,
competing_interests, equal_contributions_map, present_address_data,
foot_notes_data, html_flag=True):
# Configure the XML to HTML conversion preference for shorthand use below
convert = la... |
def collab_to_group_author_key_map(authors):
    """compile a map of author collab to group-author-key

    :param authors: iterable of author dicts
    :returns: dict mapping each truthy "collab" value to its
        "group-author-key" (None when the key is absent)
    """
    return {
        author.get("collab"): author.get("group-author-key")
        for author in authors
        if author.get("collab")
    }
def map_equal_contributions(contributors):
"""assign numeric values to each unique equal-contrib id"""
equal_contribution_map = {}
equal_contribution_keys = []
for contributor in contributors:
if contributor.get("references") and "equal-contrib" in contributor.get("references"):
for ... |
def authors_json(soup):
"""authors list in article json format"""
authors_json_data = []
contributors_data = contributors(soup, "full")
author_contributions_data = author_contributions(soup, None)
author_competing_interests_data = competing_interests(soup, None)
author_correspondence_data = full... |
def author_line(soup):
"""take preferred names from authors json and format them into an author line"""
author_line = None
authors_json_data = authors_json(soup)
author_names = extract_author_line_names(authors_json_data)
if len(author_names) > 0:
author_line = format_author_line(author_name... |
def format_author_line(author_names):
"""authorLine format depends on if there is 1, 2 or more than 2 authors"""
author_line = None
if not author_names:
return author_line
if len(author_names) <= 2:
author_line = ", ".join(author_names)
elif len(author_names) > 2:
author_line... |
def references_date(year=None):
"Handle year value parsing for some edge cases"
date = None
discriminator = None
in_press = None
if year and "in press" in year.lower().strip():
in_press = True
elif year and re.match("^[0-9]+$", year):
date = year
elif year:
discrimina... |
def references_json_authors(ref_authors, ref_content):
"build the authors for references json here for testability"
all_authors = references_authors(ref_authors)
if all_authors != {}:
if ref_content.get("type") in ["conference-proceeding", "journal", "other",
... |
def convert_references_json(ref_content, soup=None):
"Check for references that will not pass schema validation, fix or convert them to unknown"
# Convert reference to unkonwn if still missing important values
if (
(ref_content.get("type") == "other")
or
(ref_content.get("type") == ... |
def references_json_unknown_details(ref_content, soup=None):
"Extract detail value for references of type unknown"
details = ""
# Try adding pages values first
if "pages" in ref_content:
if "range" in ref_content["pages"]:
details += ref_content["pages"]["range"]
else:
... |
def unwrap_appendix_box(json_content):
"""for use in removing unwanted boxed-content from appendices json"""
if json_content.get("content") and len(json_content["content"]) > 0:
first_block = json_content["content"][0]
if (first_block.get("type")
and first_block.get("type") == "box"
... |
def extract_schemas_from_file(source_path):
"""Extract schemas from 'source_path'.
:returns: a list of ViewSchema objects on success, None if no schemas
could be extracted.
"""
logging.info("Extracting schemas from %s", source_path)
try:
with open(source_path, 'r') as source_file:
... |
def _get_simple_assignments(tree):
"""Get simple assignments from node tree."""
result = {}
for node in ast.walk(tree):
if isinstance(node, ast.Assign):
for target in node.targets:
if isinstance(target, ast.Name):
result[target.id] = node.value
ret... |
def extract_schemas_from_source(source, filename='<unknown>'):
"""Extract schemas from 'source'.
The 'source' parameter must be a string, and should be valid python
source.
If 'source' is not valid python source, a SyntaxError will be raised.
:returns: a list of ViewSchema objects.
"""
# ... |
def render_value(value):
"""Render a value, ensuring that any nested dicts are sorted by key."""
if isinstance(value, list):
return '[' + ', '.join(render_value(v) for v in value) + ']'
elif isinstance(value, dict):
return (
'{' +
', '.join('{k!r}: {v}'.format(
... |
def write_service_double_file(target_root, service_name, rendered):
"""Render syntactically valid python service double code."""
target_path = os.path.join(
target_root,
'snapstore_schemas', 'service_doubles', '%s.py' % service_name
)
with open(target_path, 'w') as target_file:
t... |
def clean_docstring(docstring):
"""Dedent docstring, special casing the first line."""
docstring = docstring.strip()
if '\n' in docstring:
# multiline docstring
if docstring[0].isspace():
# whole docstring is indented
return textwrap.dedent(docstring)
else:
... |
def _sort_schema(schema):
"""Recursively sorts a JSON schema by dict key."""
if isinstance(schema, dict):
for k, v in sorted(schema.items()):
if isinstance(v, dict):
yield k, OrderedDict(_sort_schema(v))
elif isinstance(v, list):
yield k, list(_so... |
def urlmap(patterns):
"""Recursively build a map of (group, name) => url patterns.
Group is either the resolver namespace or app name for the url config.
The urls are joined with any prefixes, and cleaned up of extraneous regex
specific syntax."""
for pattern in patterns:
group = getattr(p... |
def get_field_schema(name, field):
"""Returns a JSON Schema representation of a form field."""
field_schema = {
'type': 'string',
}
if field.label:
field_schema['title'] = str(field.label) # force translation
if field.help_text:
field_schema['description'] = str(field.help... |
def get_form_schema(form):
"""Return a JSON Schema object for a Django Form."""
schema = {
'type': 'object',
'properties': {},
}
for name, field in form.base_fields.items():
schema['properties'][name] = get_field_schema(name, field)
if field.required:
schema.... |
def handler(self, handler_class):
"""Link to an API handler class (e.g. piston or DRF)."""
self.handler_class = handler_class
# we take the docstring from the handler class, not the methods
if self.docs is None and handler_class.__doc__:
self.docs = clean_docstring(handler_cl... |
def xml_to_html(html_flag, xml_string, base_url=None):
"For formatting json output into HTML friendly format"
if not xml_string or not html_flag is True:
return xml_string
html_string = xml_string
html_string = remove_comment_tags(html_string)
# Escape unmatched angle brackets
if '<' in... |
def replace_simple_tags(s, from_tag='italic', to_tag='i', to_open_tag=None):
"""
Replace tags such as <italic> to <i>
This does not validate markup
"""
if to_open_tag:
s = s.replace('<' + from_tag + '>', to_open_tag)
elif to_tag:
s = s.replace('<' + from_tag + '>', '<' + to_tag +... |
def validate_body(schema):
"""Validate the body of incoming requests for a flask view.
An example usage might look like this::
from snapstore_schemas import validate_body
@validate_body({
'type': 'array',
'items': {
'type': 'object',
'p... |
def record_schemas(
fn, wrapper, location, request_schema=None, response_schema=None):
"""Support extracting the schema from the decorated function."""
# have we already been decorated by an acceptable api call?
has_acceptable = hasattr(fn, '_acceptable_metadata')
if request_schema is not None:... |
def validate_output(schema):
"""Validate the body of a response from a flask view.
Like `validate_body`, this function compares a json document to a
jsonschema specification. However, this function applies the schema to the
view response.
Instead of the view returning a flask response object, it s... |
def validate(payload, schema):
"""Validate `payload` against `schema`, returning an error list.
jsonschema provides lots of information in it's errors, but it can be a bit
of work to extract all the information.
"""
v = jsonschema.Draft4Validator(
schema, format_checker=jsonschema.FormatChe... |
def connect(url, max_retries=None, **kwargs):
"""Connects to a Phoenix query server.
:param url:
URL to the Phoenix query server, e.g. ``http://localhost:8765/``
:param autocommit:
Switch the connection to autocommit mode.
:param readonly:
Switch the connection to readonly mod... |
def connect(self):
"""Opens a HTTP connection to the RPC server."""
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (http... |
def close(self):
"""Closes the HTTP connection to the RPC server."""
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
... |
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A... |
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Inf... |
def close_connection(self, connection_id):
    """Closes a connection.

    :param connection_id:
        ID of the connection to close.
    """
    close_request = requests_pb2.CloseConnectionRequest()
    close_request.connection_id = connection_id
    self._apply(close_request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
... |
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.conne... |
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.