# engine/result.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define result set constructs including :class:`.ResultProxy`
and :class:`.RowProxy`."""
from .. import exc, util
from ..sql import expression, sqltypes, util as sql_util
import collections
import operator
# This reconstructor is necessary so that pickles with the C extension or
# without use the same binary format.
try:
# We need a different reconstructor on the C extension so that we can
# add extra checks that fields have correctly been initialized by
# __setstate__.
from sqlalchemy.cresultproxy import safe_rowproxy_reconstructor
# The extra function embedding is needed so that the
# reconstructor function has the same signature whether or not
# the extension is present.
def rowproxy_reconstructor(cls, state):
return safe_rowproxy_reconstructor(cls, state)
except ImportError:
def rowproxy_reconstructor(cls, state):
obj = cls.__new__(cls)
obj.__setstate__(state)
return obj
try:
from sqlalchemy.cresultproxy import BaseRowProxy
_baserowproxy_usecext = True
except ImportError:
_baserowproxy_usecext = False
class BaseRowProxy(object):
__slots__ = ('_parent', '_row', '_processors', '_keymap')
def __init__(self, parent, row, processors, keymap):
"""RowProxy objects are constructed by ResultProxy objects."""
self._parent = parent
self._row = row
self._processors = processors
self._keymap = keymap
def __reduce__(self):
return (rowproxy_reconstructor,
(self.__class__, self.__getstate__()))
def values(self):
"""Return the values represented by this RowProxy as a list."""
return list(self)
def __iter__(self):
for processor, value in zip(self._processors, self._row):
if processor is None:
yield value
else:
yield processor(value)
def __len__(self):
return len(self._row)
def __getitem__(self, key):
try:
processor, obj, index = self._keymap[key]
except KeyError:
processor, obj, index = self._parent._key_fallback(key)
except TypeError:
if isinstance(key, slice):
l = []
for processor, value in zip(self._processors[key],
self._row[key]):
if processor is None:
l.append(value)
else:
l.append(processor(value))
return tuple(l)
else:
raise
if index is None:
raise exc.InvalidRequestError(
"Ambiguous column name '%s' in "
"result set column descriptions" % obj)
if processor is not None:
return processor(self._row[index])
else:
return self._row[index]
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError(e.args[0])
class RowProxy(BaseRowProxy):
"""Proxy values from a single cursor row.
    Mostly follows "ordered dictionary" behavior: result values are
    keyed by the string-based column name, by the integer position of
    the result in the row, as well as by Column instances which can be
    mapped to the original Columns that produced this result set (for
    results that correspond to constructed SQL expressions).
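
    E.g., assuming a hypothetical ``users`` table whose second column
    is ``user_name``, the same value can be retrieved in any of the
    following ways (illustrative sketch only)::

        row = connection.execute(users.select()).fetchone()

        row['user_name']          # by string column name
        row[1]                    # by integer position
        row[users.c.user_name]    # by Column object
        row.user_name             # by attribute name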
"""
__slots__ = ()
def __contains__(self, key):
return self._parent._has_key(key)
def __getstate__(self):
return {
'_parent': self._parent,
'_row': tuple(self)
}
def __setstate__(self, state):
self._parent = parent = state['_parent']
self._row = state['_row']
self._processors = parent._processors
self._keymap = parent._keymap
__hash__ = None
def _op(self, other, op):
return op(tuple(self), tuple(other)) \
if isinstance(other, RowProxy) \
else op(tuple(self), other)
def __lt__(self, other):
return self._op(other, operator.lt)
def __le__(self, other):
return self._op(other, operator.le)
def __ge__(self, other):
return self._op(other, operator.ge)
def __gt__(self, other):
return self._op(other, operator.gt)
def __eq__(self, other):
return self._op(other, operator.eq)
def __ne__(self, other):
return self._op(other, operator.ne)
def __repr__(self):
return repr(sql_util._repr_row(self))
def has_key(self, key):
"""Return True if this RowProxy contains the given key."""
return self._parent._has_key(key)
def items(self):
"""Return a list of tuples, each tuple containing a key/value pair."""
# TODO: no coverage here
return [(key, self[key]) for key in self.keys()]
def keys(self):
"""Return the list of keys as strings represented by this RowProxy."""
return self._parent.keys
def iterkeys(self):
return iter(self._parent.keys)
def itervalues(self):
return iter(self)
try:
# Register RowProxy with Sequence,
# so sequence protocol is implemented
from collections import Sequence
Sequence.register(RowProxy)
except ImportError:
pass
class ResultMetaData(object):
"""Handle cursor.description, applying additional info from an execution
context."""
__slots__ = (
'_keymap', 'case_sensitive', 'matched_on_name',
'_processors', 'keys', '_orig_processors')
def __init__(self, parent, cursor_description):
context = parent.context
dialect = context.dialect
self.case_sensitive = dialect.case_sensitive
self.matched_on_name = False
self._orig_processors = None
if context.result_column_struct:
result_columns, cols_are_ordered, textual_ordered = \
context.result_column_struct
num_ctx_cols = len(result_columns)
else:
result_columns = cols_are_ordered = \
num_ctx_cols = textual_ordered = False
# merge cursor.description with the column info
# present in the compiled structure, if any
raw = self._merge_cursor_description(
context, cursor_description, result_columns,
num_ctx_cols, cols_are_ordered, textual_ordered)
self._keymap = {}
if not _baserowproxy_usecext:
# keymap indexes by integer index: this is only used
# in the pure Python BaseRowProxy.__getitem__
# implementation to avoid an expensive
# isinstance(key, util.int_types) in the most common
# case path
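            # note that entries are added below for both positive and
            # negative integer positions, so that e.g. row[-1] also
            # resolves directly through the keymap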
len_raw = len(raw)
self._keymap.update([
(elem[0], (elem[3], elem[4], elem[0]))
for elem in raw
] + [
(elem[0] - len_raw, (elem[3], elem[4], elem[0]))
for elem in raw
])
# processors in key order for certain per-row
# views like __iter__ and slices
self._processors = [elem[3] for elem in raw]
# keymap by primary string...
by_key = dict([
(elem[2], (elem[3], elem[4], elem[0]))
for elem in raw
])
# for compiled SQL constructs, copy additional lookup keys into
# the key lookup map, such as Column objects, labels,
# column keys and other names
if num_ctx_cols:
            # if the by-primary-string dictionary is smaller (or,
            # unexpectedly, bigger) than the number of columns, assume we
            # have dupes; rewrite dupe records with "None" for the index,
            # which results in an ambiguous column exception when accessed.
if len(by_key) != num_ctx_cols:
seen = set()
for rec in raw:
key = rec[1]
if key in seen:
# this is an "ambiguous" element, replacing
# the full record in the map
key = key.lower() if not self.case_sensitive else key
by_key[key] = (None, key, None)
seen.add(key)
# copy secondary elements from compiled columns
# into self._keymap, write in the potentially "ambiguous"
# element
self._keymap.update([
(obj_elem, by_key[elem[2]])
for elem in raw if elem[4]
for obj_elem in elem[4]
])
# if we did a pure positional match, then reset the
# original "expression element" back to the "unambiguous"
# entry. This is a new behavior in 1.1 which impacts
# TextAsFrom but also straight compiled SQL constructs.
if not self.matched_on_name:
self._keymap.update([
(elem[4][0], (elem[3], elem[4], elem[0]))
for elem in raw if elem[4]
])
else:
# no dupes - copy secondary elements from compiled
# columns into self._keymap
self._keymap.update([
(obj_elem, (elem[3], elem[4], elem[0]))
for elem in raw if elem[4]
for obj_elem in elem[4]
])
# update keymap with primary string names taking
# precedence
self._keymap.update(by_key)
# update keymap with "translated" names (sqlite-only thing)
if not num_ctx_cols and context._translate_colname:
self._keymap.update([
(elem[5], self._keymap[elem[2]])
for elem in raw if elem[5]
])
def _merge_cursor_description(
self, context, cursor_description, result_columns,
num_ctx_cols, cols_are_ordered, textual_ordered):
"""Merge a cursor.description with compiled result column information.
There are at least four separate strategies used here, selected
depending on the type of SQL construct used to start with.
The most common case is that of the compiled SQL expression construct,
which generated the column names present in the raw SQL string and
which has the identical number of columns as were reported by
cursor.description. In this case, we assume a 1-1 positional mapping
between the entries in cursor.description and the compiled object.
This is also the most performant case as we disregard extracting /
decoding the column names present in cursor.description since we
already have the desired name we generated in the compiled SQL
construct.
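
        E.g., for a compiled expression construct such as the following,
        where ``users`` is a hypothetical table used purely for
        illustration::

            select([users.c.user_id, users.c.user_name])

        the compiled construct already carries the two generated column
        names, so the two cursor.description entries are matched to
        those columns purely by position.
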
The next common case is that of the completely raw string SQL,
such as passed to connection.execute(). In this case we have no
compiled construct to work with, so we extract and decode the
names from cursor.description and index those as the primary
result row target keys.
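
        E.g. (illustrative only)::

            connection.execute("SELECT user_id, user_name FROM users")

        here, only cursor.description is available, so the ``user_id``
        and ``user_name`` strings reported by the DBAPI become the
        primary lookup keys for the result rows.
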
The remaining fairly common case is that of the textual SQL
that includes at least partial column information; this is when
        we use a :class:`.TextAsFrom` construct. This construct may have
unordered or ordered column information. In the ordered case, we
merge the cursor.description and the compiled construct's information
positionally, and warn if there are additional description names
        present; however, we still decode the names in cursor.description
as we don't have a guarantee that the names in the columns match
on these. In the unordered case, we match names in cursor.description
to that of the compiled construct based on name matching.
In both of these cases, the cursor.description names and the column
expression objects and names are indexed as result row target keys.
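
        E.g., a textual construct with partial column information, where
        ``users`` is again a hypothetical table used for illustration::

            text("SELECT user_id, user_name FROM users").columns(
                users.c.user_id, users.c.user_name)

        here, both the DBAPI-reported string names and the given Column
        objects become result row target keys.
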
The final case is much less common, where we have a compiled
non-textual SQL expression construct, but the number of columns
in cursor.description doesn't match what's in the compiled
construct. We make the guess here that there might be textual
column expressions in the compiled construct that themselves include
        a comma in them causing them to split. We do the same name-matching
        as in the unordered textual case described above.