Project import generated by Copybara.

GitOrigin-RevId: d9e9e3fb4e31372ec1fb43b178994ca78fa8fe70
diff --git a/search/__init__.py b/search/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/search/__init__.py
@@ -0,0 +1 @@
+
diff --git a/search/ast2ast.py b/search/ast2ast.py
new file mode 100644
index 0000000..bf4de4f
--- /dev/null
+++ b/search/ast2ast.py
@@ -0,0 +1,558 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Convert a user's issue search AST into a simplified AST.
+
+This phase of query processing simplifies the user's query by looking up
+the int IDs of any labels, statuses, or components that are mentioned by
+name in the original query.  The data needed for lookups is typically cached
+in RAM in each backend job, so this will not put much load on the DB.  The
+simplified ASTs are later converted into SQL which is simpler and has
+fewer joins.
+
+The simplified main query is better because:
+  + It is clearly faster, especially in the most common case where config
+    data is in RAM.
+  + Since less RAM is used to process the main query on each shard, query
+    execution time is more consistent with less variability under load.  Less
+    variability is good because the user must wait for the slowest shard.
+  + The config tables (LabelDef, StatusDef, etc.) exist only on the primary DB,
+    so they cannot be mentioned in a query that runs on a shard.
+  + The query string itself is shorter when numeric IDs are substituted, which
+    means that we can handle user queries with long lists of labels in a
+    reasonable-sized query.
+  + It bisects the complexity of the operation: it's easier to test and debug
+    the lookup and simplification logic plus the main query logic this way
+    than it would be to deal with an even more complex SQL main query.
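+
+For example (illustrative IDs; real values come from each project's config), a
+user query term like [label:Priority-High] arrives as a Condition on the
+'label' field with str_values=['Priority-High'] and leaves this phase as a
+Condition on the built-in 'label_id' field with int_values such as [15].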
+"""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import collections
+import logging
+import re
+
+from framework import exceptions
+from proto import ast_pb2
+from proto import tracker_pb2
+# TODO(jrobbins): if BUILTIN_ISSUE_FIELDS was passed through, I could
+# remove this dep.
+from search import query2ast
+from tracker import tracker_bizobj
+from features import federated
+
+
+def PreprocessAST(
+    cnxn, query_ast, project_ids, services, harmonized_config, is_member=True):
+  """Preprocess the query by doing lookups so that the SQL query is simpler.
+
+  Args:
+    cnxn: connection to SQL database.
+    query_ast: user query abstract syntax tree parsed by query2ast.py.
+    project_ids: collection of int project IDs to use to look up status values
+        and labels.
+    services: Connections to persistence layer for users and configs.
+    harmonized_config: harmonized config for all projects being searched.
+    is_member: True if user is a member of all the projects being searched,
+        so they can do user substring searches.
+
+  Returns:
+    A new QueryAST PB with simplified conditions.  Specifically, string values
+    for labels, statuses, and components are replaced with the int IDs of
+    those items.  Also, is:open is distilled down to
+    status_id != closed_status_ids.
+  """
+  new_conjs = []
+  for conj in query_ast.conjunctions:
+    new_conds = [
+        _PreprocessCond(
+            cnxn, cond, project_ids, services, harmonized_config, is_member)
+        for cond in conj.conds]
+    new_conjs.append(ast_pb2.Conjunction(conds=new_conds))
+
+  return ast_pb2.QueryAST(conjunctions=new_conjs)
+
+
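+# For example, [is:open] arrives below with op EQ and is rewritten to
+# status_id != closed_status_ids, while a negated [-is:open] arrives with
+# op NE and becomes status_id == closed_status_ids.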
+def _PreprocessIsOpenCond(
+    cnxn, cond, project_ids, services, _harmonized_config, _is_member):
+  """Preprocess an is:open cond into status_id != closed_status_ids."""
+  if project_ids:
+    closed_status_ids = []
+    for project_id in project_ids:
+      closed_status_ids.extend(services.config.LookupClosedStatusIDs(
+          cnxn, project_id))
+  else:
+    closed_status_ids = services.config.LookupClosedStatusIDsAnyProject(cnxn)
+
+  # Invert the operator, because we're comparing against *closed* statuses.
+  if cond.op == ast_pb2.QueryOp.EQ:
+    op = ast_pb2.QueryOp.NE
+  elif cond.op == ast_pb2.QueryOp.NE:
+    op = ast_pb2.QueryOp.EQ
+  else:
+    raise MalformedQuery('Open condition got nonsensical op %r' % cond.op)
+
+  return ast_pb2.Condition(
+      op=op, field_defs=[query2ast.BUILTIN_ISSUE_FIELDS['status_id']],
+      int_values=closed_status_ids)
+
+
+def _PreprocessIsBlockedCond(
+    _cnxn, cond, _project_ids, _services, _harmonized_config, _is_member):
+  """Preprocess an is:blocked cond into issues that are blocked."""
+  if cond.op == ast_pb2.QueryOp.EQ:
+    op = ast_pb2.QueryOp.IS_DEFINED
+  elif cond.op == ast_pb2.QueryOp.NE:
+    op = ast_pb2.QueryOp.IS_NOT_DEFINED
+  else:
+    raise MalformedQuery('Blocked condition got nonsensical op %r' % cond.op)
+
+  return ast_pb2.Condition(
+      op=op, field_defs=[query2ast.BUILTIN_ISSUE_FIELDS['blockedon_id']])
+
+
+def _PreprocessIsSpamCond(
+    _cnxn, cond, _project_ids, _services, _harmonized_config, _is_member):
+  """Preprocess an is:spam cond into is_spam == 1."""
+  if cond.op == ast_pb2.QueryOp.EQ:
+    int_values = [1]
+  elif cond.op == ast_pb2.QueryOp.NE:
+    int_values = [0]
+  else:
+    raise MalformedQuery('Spam condition got nonsensical op %r' % cond.op)
+
+  return ast_pb2.Condition(
+      op=ast_pb2.QueryOp.EQ,
+      field_defs=[query2ast.BUILTIN_ISSUE_FIELDS['is_spam']],
+      int_values=int_values)
+
+
+def _PreprocessBlockedOnCond(
+    cnxn, cond, project_ids, services, _harmonized_config, _is_member):
+  """Preprocess blockedon=xyz and has:blockedon conds.
+
+  Preprocesses blockedon=xyz cond into blockedon_id:issue_ids.
+  Preprocesses has:blockedon cond into issues that are blocked on other issues.
+  """
+  issue_ids, ext_issue_ids = _GetIssueIDsFromLocalIdsCond(cnxn,
+    cond, project_ids, services)
+  return ast_pb2.Condition(
+      op=_TextOpToIntOp(cond.op),
+      field_defs=[query2ast.BUILTIN_ISSUE_FIELDS['blockedon_id']],
+      int_values=issue_ids,
+      str_values=ext_issue_ids)
+
+
+def _PreprocessBlockingCond(
+    cnxn, cond, project_ids, services, _harmonized_config, _is_member):
+  """Preprocess blocking=xyz and has:blocking conds.
+
+  Preprocesses blocking=xyz cond into blocking_id:issue_ids.
+  Preprocesses has:blocking cond into issues that are blocking other issues.
+  """
+  issue_ids, ext_issue_ids = _GetIssueIDsFromLocalIdsCond(cnxn,
+    cond, project_ids, services)
+  return ast_pb2.Condition(
+      op=_TextOpToIntOp(cond.op),
+      field_defs=[query2ast.BUILTIN_ISSUE_FIELDS['blocking_id']],
+      int_values=issue_ids,
+      str_values=ext_issue_ids)
+
+
+def _PreprocessMergedIntoCond(
+    cnxn, cond, project_ids, services, _harmonized_config, _is_member):
+  """Preprocess mergedinto=xyz and has:mergedinto conds.
+
+  Preprocesses mergedinto=xyz cond into mergedinto_id:issue_ids.
+  Preprocesses has:mergedinto cond into has:mergedinto_id.
+  """
+  issue_ids, ext_issue_ids = _GetIssueIDsFromLocalIdsCond(cnxn,
+    cond, project_ids, services)
+  return ast_pb2.Condition(
+      op=_TextOpToIntOp(cond.op),
+      field_defs=[query2ast.BUILTIN_ISSUE_FIELDS['mergedinto_id']],
+      int_values=issue_ids,
+      str_values=ext_issue_ids)
+
+
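+# For example (illustrative values): when searching only the 'chromium'
+# project, cond.str_values ['123', 'webrtc:456', 'b/789'] yield refs
+# [('chromium', 123), ('webrtc', 456)] and ext_issue_ids ['b/789'], assuming
+# 'b/789' is recognized by federated.FromShortlink.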
+def _GetIssueIDsFromLocalIdsCond(cnxn, cond, project_ids, services):
+  """Returns global IDs from the local IDs provided in the cond."""
+  # Get {project_name: project} for all projects in project_ids.
+  ids_to_projects = services.project.GetProjects(cnxn, project_ids)
+  ref_projects = {pb.project_name: pb for pb in ids_to_projects.values()}
+  # Populate default_project_name if there is only one project id provided.
+  default_project_name = None
+  if len(ref_projects) == 1:
+    default_project_name = list(ref_projects.values())[0].project_name
+
+  # Populate refs with (project_name, local_id) pairs.
+  refs = []
+  # Populate ext_issue_ids with strings like 'b/1234'.
+  ext_issue_ids = []
+  for val in cond.str_values:
+    try:
+      project_name, local_id = tracker_bizobj.ParseIssueRef(val)
+      if not project_name:
+        if not default_project_name:
+          # TODO(rmistry): Support the below.
+          raise MalformedQuery(
+              'Searching for issues across multiple/all projects without '
+              'project prefixes is ambiguous and is currently not supported.')
+        project_name = default_project_name
+      refs.append((project_name, int(local_id)))
+    except MalformedQuery:
+      raise
+    # Can't parse the issue ID; try the external issue pattern instead.
+    except ValueError:
+      if federated.FromShortlink(val):
+        ext_issue_ids.append(val)
+      else:
+        raise MalformedQuery('Could not parse issue reference: %s' % val)
+
+  issue_ids, _misses = services.issue.ResolveIssueRefs(
+      cnxn, ref_projects, default_project_name, refs)
+  return issue_ids, ext_issue_ids
+
+
+def _PreprocessStatusCond(
+    cnxn, cond, project_ids, services, _harmonized_config, _is_member):
+  """Preprocess a status=names cond into status_id=IDs."""
+  if project_ids:
+    status_ids = []
+    for project_id in project_ids:
+      status_ids.extend(services.config.LookupStatusIDs(
+          cnxn, project_id, cond.str_values))
+  else:
+    status_ids = services.config.LookupStatusIDsAnyProject(
+        cnxn, cond.str_values)
+
+  return ast_pb2.Condition(
+      op=_TextOpToIntOp(cond.op),
+      field_defs=[query2ast.BUILTIN_ISSUE_FIELDS['status_id']],
+      int_values=status_ids)
+
+
+def _IsEqualityOp(op):
+  """Return True for EQ and NE."""
+  return op in (ast_pb2.QueryOp.EQ, ast_pb2.QueryOp.NE)
+
+
+def _IsDefinedOp(op):
+  """Return True for IS_DEFINED and IS_NOT_DEFINED."""
+  return op in (ast_pb2.QueryOp.IS_DEFINED, ast_pb2.QueryOp.IS_NOT_DEFINED)
+
+
+def _TextOpToIntOp(op):
+  """If a query is optimized from string to ID matching, use an equality op."""
+  if op == ast_pb2.QueryOp.TEXT_HAS or op == ast_pb2.QueryOp.KEY_HAS:
+    return ast_pb2.QueryOp.EQ
+  elif op == ast_pb2.QueryOp.NOT_TEXT_HAS:
+    return ast_pb2.QueryOp.NE
+  return op
+
+
+def _MakePrefixRegex(cond):
+  """Return a regex to match strings that start with cond values."""
+  all_prefixes = '|'.join(map(re.escape, cond.str_values))
+  return re.compile(r'(%s)-.+' % all_prefixes, re.I)
+
+
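+# For example, a cond with str_values ['Priority-High', 'Priority-Low'] makes
+# _MakeKeyValueRegex return the case-insensitive pattern
+# r'Priority-.*\b(High|Low)\b.*'.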
+def _MakeKeyValueRegex(cond):
+  """Return a regex to match the first token and remaining text separately."""
+  keys, values = list(zip(*[x.split('-', 1) for x in cond.str_values]))
+  if len(set(keys)) != 1:
+    raise MalformedQuery(
+        "KeyValue query with multiple different keys: %r" % cond.str_values)
+  all_values = '|'.join(map(re.escape, values))
+  return re.compile(r'%s-.*\b(%s)\b.*' % (keys[0], all_values), re.I)
+
+
+def _MakeWordBoundaryRegex(cond):
+  """Return a regex to match the cond values as whole words."""
+  all_words = '|'.join(map(re.escape, cond.str_values))
+  return re.compile(r'.*\b(%s)\b.*' % all_words, re.I)
+
+
+def _PreprocessLabelCond(
+    cnxn, cond, project_ids, services, _harmonized_config, _is_member):
+  """Preprocess a label=names cond into label_id=IDs."""
+  if project_ids:
+    label_ids = []
+    for project_id in project_ids:
+      if _IsEqualityOp(cond.op):
+        label_ids.extend(services.config.LookupLabelIDs(
+            cnxn, project_id, cond.str_values))
+      elif _IsDefinedOp(cond.op):
+        label_ids.extend(services.config.LookupIDsOfLabelsMatching(
+            cnxn, project_id, _MakePrefixRegex(cond)))
+      elif cond.op == ast_pb2.QueryOp.KEY_HAS:
+        label_ids.extend(services.config.LookupIDsOfLabelsMatching(
+            cnxn, project_id, _MakeKeyValueRegex(cond)))
+      else:
+        label_ids.extend(services.config.LookupIDsOfLabelsMatching(
+            cnxn, project_id, _MakeWordBoundaryRegex(cond)))
+  else:
+    if _IsEqualityOp(cond.op):
+      label_ids = services.config.LookupLabelIDsAnyProject(
+          cnxn, cond.str_values)
+    elif _IsDefinedOp(cond.op):
+      label_ids = services.config.LookupIDsOfLabelsMatchingAnyProject(
+          cnxn, _MakePrefixRegex(cond))
+    elif cond.op == ast_pb2.QueryOp.KEY_HAS:
+      label_ids = services.config.LookupIDsOfLabelsMatchingAnyProject(
+          cnxn, _MakeKeyValueRegex(cond))
+    else:
+      label_ids = services.config.LookupIDsOfLabelsMatchingAnyProject(
+          cnxn, _MakeWordBoundaryRegex(cond))
+
+  return ast_pb2.Condition(
+      op=_TextOpToIntOp(cond.op),
+      field_defs=[query2ast.BUILTIN_ISSUE_FIELDS['label_id']],
+      int_values=label_ids)
+
+
+def _PreprocessComponentCond(
+    cnxn, cond, project_ids, services, harmonized_config, _is_member):
+  """Preprocess a component= or component:name cond into component_id=IDs."""
+  exact = _IsEqualityOp(cond.op)
+  component_ids = []
+  if project_ids:
+    # We are searching within specific projects, so harmonized_config
+    # holds the config data for all those projects.
+    for comp_path in cond.str_values:
+      component_ids.extend(tracker_bizobj.FindMatchingComponentIDs(
+          comp_path, harmonized_config, exact=exact))
+  else:
+    # We are searching across the whole site, so we have no harmonized_config
+    # to use.
+    component_ids = services.config.FindMatchingComponentIDsAnyProject(
+        cnxn, cond.str_values, exact=exact)
+
+  return ast_pb2.Condition(
+      op=_TextOpToIntOp(cond.op),
+      field_defs=[query2ast.BUILTIN_ISSUE_FIELDS['component_id']],
+      int_values=component_ids)
+
+
+def _PreprocessExactUsers(
+    cnxn, cond, user_service, id_fields, is_member):
+  """Preprocess a foo=emails cond into foo_id=IDs, if exact user match.
+
+  This preprocessing step converts string conditions to int ID conditions,
+  e.g., [owner=email] to [owner_id=ID].  It does so only in cases
+  where (a) the email was "me", so it was already converted to a string of
+  digits in the search pipeline, or (b) it is "user@domain", which resolves to
+  a known Monorail user.  It is also possible to search for, e.g.,
+  [owner:substring], but such searches remain 'owner' field searches rather
+  than 'owner_id', and they cannot be combined with the "me" keyword.
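+
+  For example (illustrative ID): [owner=alice@example.com] with op EQ becomes
+  a cond on 'owner_id' with int_value 111 when that email resolves to Monorail
+  user 111, while [owner:alice] stays a string cond on the 'owner' field.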
+
+  Args:
+    cnxn: connection to the DB.
+    cond: original parsed query Condition PB.
+    user_service: connection to user persistence layer.
+    id_fields: list of the search fields to use if the conversion to IDs
+        succeeds.
+    is_member: True if the user is a member of all the projects being
+        searched, so they can do user substring searches.
+
+  Returns:
+    A new Condition PB that checks the id_field.  Or, the original cond.
+
+  Raises:
+    MalformedQuery: A non-member used a query term that could be used to
+        guess full user email addresses.
+  """
+  op = _TextOpToIntOp(cond.op)
+  if _IsDefinedOp(op):
+    # No need to look up any IDs if we are just testing for any defined value.
+    return ast_pb2.Condition(op=op, field_defs=id_fields,
+                             key_suffix=cond.key_suffix,
+                             phase_name=cond.phase_name)
+
+  # This preprocessing step is only for ops that compare whole values, not
+  # substrings.
+  if not _IsEqualityOp(op):
+    logging.info('could not convert to IDs because op is %r', op)
+    if not is_member:
+      raise MalformedQuery('Only project members may compare user strings')
+    return cond
+
+  user_ids = []
+  for val in cond.str_values:
+    try:
+      user_ids.append(int(val))
+    except ValueError:
+      try:
+        user_ids.append(user_service.LookupUserID(cnxn, val))
+      except exceptions.NoSuchUserException:
+        if not is_member and val != 'me' and not val.startswith('@'):
+          logging.info('could not convert user %r to int ID', val)
+          if '@' in val:
+            raise MalformedQuery('User email address not found')
+          else:
+            raise MalformedQuery(
+                'Only project members may search for user substrings')
+        return cond  # preprocessing failed, stick with the original cond.
+
+  return ast_pb2.MakeCond(
+      op, id_fields, [], user_ids, key_suffix=cond.key_suffix,
+      phase_name=cond.phase_name)
+
+
+def _PreprocessOwnerCond(
+    cnxn, cond, _project_ids, services, _harmonized_config, is_member):
+  """Preprocess a owner=emails cond into owner_id=IDs, if exact user match."""
+  return _PreprocessExactUsers(
+      cnxn, cond, services.user, [query2ast.BUILTIN_ISSUE_FIELDS['owner_id']],
+      is_member)
+
+
+def _PreprocessCcCond(
+    cnxn, cond, _project_ids, services, _harmonized_config, is_member):
+  """Preprocess a cc=emails cond into cc_id=IDs, if exact user match."""
+  return _PreprocessExactUsers(
+      cnxn, cond, services.user, [query2ast.BUILTIN_ISSUE_FIELDS['cc_id']],
+      is_member)
+
+
+def _PreprocessReporterCond(
+    cnxn, cond, _project_ids, services, _harmonized_config, is_member):
+  """Preprocess a reporter=emails cond into reporter_id=IDs, if exact."""
+  return _PreprocessExactUsers(
+      cnxn, cond, services.user,
+      [query2ast.BUILTIN_ISSUE_FIELDS['reporter_id']], is_member)
+
+
+def _PreprocessStarredByCond(
+    cnxn, cond, _project_ids, services, _harmonized_config, is_member):
+  """Preprocess a starredby=emails cond into starredby_id=IDs, if exact."""
+  return _PreprocessExactUsers(
+      cnxn, cond, services.user,
+      [query2ast.BUILTIN_ISSUE_FIELDS['starredby_id']], is_member)
+
+
+def _PreprocessCommentByCond(
+    cnxn, cond, _project_ids, services, _harmonized_config, is_member):
+  """Preprocess a commentby=emails cond into commentby_id=IDs, if exact."""
+  return _PreprocessExactUsers(
+      cnxn, cond, services.user,
+      [query2ast.BUILTIN_ISSUE_FIELDS['commentby_id']], is_member)
+
+
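+# For example (illustrative ID): cond.str_values
+# ['alice@example.com:Hot1', 'Hot2'] group into users_to_hotlists
+# {111: ['Hot1', 'Hot2']}, assuming alice@example.com resolves to user 111;
+# cur_user carries over to values that lack a ':' prefix.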
+def _PreprocessHotlistCond(
+    cnxn, cond, _project_ids, services, _harmonized_config, _is_member):
+  """Preprocess hotlist query
+
+  Preprocesses a hotlist query in the form:
+  'hotlist=<user_email>:<hotlist-name>,<hotlist-name>,<user2_email>:...
+  into hotlist_id=IDs, if exact.
+  """
+  # TODO(jojwang): add support for searches that don't contain domain names.
+  # eg jojwang:hotlist-name
+  users_to_hotlists = collections.defaultdict(list)
+  cur_user = ''
+  for val in cond.str_values:
+    if ':' in val:
+      cur_user, hotlists_str = val.split(':', 1)
+    else:
+      hotlists_str = val
+    try:
+      users_to_hotlists[int(cur_user)].append(hotlists_str)
+    except ValueError:
+      try:
+        user_id = services.user.LookupUserID(cnxn, cur_user)
+        users_to_hotlists[user_id].append(hotlists_str)
+      except exceptions.NoSuchUserException:
+        logging.info('could not convert user %r to int ID', val)
+        return cond
+  hotlist_ids = set()
+  for user_id, hotlists in users_to_hotlists.items():
+    if not hotlists[0]:
+      user_hotlists = services.features.GetHotlistsByUserID(cnxn, user_id)
+      user_hotlist_ids = [hotlist.hotlist_id for hotlist in user_hotlists if
+                          user_id in hotlist.owner_ids]
+    else:
+      user_hotlist_ids = list(services.features.LookupHotlistIDs(
+          cnxn, hotlists, [user_id]).values())
+    for hotlist_id in user_hotlist_ids:
+      hotlist_ids.add(hotlist_id)
+  return ast_pb2.Condition(
+      op=_TextOpToIntOp(cond.op),
+      field_defs=[query2ast.BUILTIN_ISSUE_FIELDS['hotlist_id']],
+      int_values=list(hotlist_ids))
+
+
+def _PreprocessCustomCond(cnxn, cond, services, is_member):
+  """Preprocess a custom_user_field=emails cond into IDs, if exact matches."""
+  # TODO(jrobbins): better support for ambiguous fields.
+  # For now, if any field is USER_TYPE and the value being searched
+  # for is the email address of an existing account, it will convert
+  # to a user ID and we go with exact ID matching.  Otherwise, we
+  # leave the cond as-is for ast2select to do string matching on.
+  user_field_defs = [fd for fd in cond.field_defs
+                     if fd.field_type == tracker_pb2.FieldTypes.USER_TYPE]
+  if user_field_defs:
+    return _PreprocessExactUsers(
+        cnxn, cond, services.user, user_field_defs, is_member)
+
+  approval_field_defs = [fd for fd in cond.field_defs
+                         if (fd.field_type ==
+                             tracker_pb2.FieldTypes.APPROVAL_TYPE)]
+  if approval_field_defs:
+    if cond.key_suffix in [query2ast.APPROVER_SUFFIX, query2ast.SET_BY_SUFFIX]:
+      return _PreprocessExactUsers(
+          cnxn, cond, services.user, approval_field_defs, is_member)
+
+  return cond
+
+
+_PREPROCESSORS = {
+    'open': _PreprocessIsOpenCond,
+    'blocked': _PreprocessIsBlockedCond,
+    'spam': _PreprocessIsSpamCond,
+    'blockedon': _PreprocessBlockedOnCond,
+    'blocking': _PreprocessBlockingCond,
+    'mergedinto': _PreprocessMergedIntoCond,
+    'status': _PreprocessStatusCond,
+    'label': _PreprocessLabelCond,
+    'component': _PreprocessComponentCond,
+    'owner': _PreprocessOwnerCond,
+    'cc': _PreprocessCcCond,
+    'reporter': _PreprocessReporterCond,
+    'starredby': _PreprocessStarredByCond,
+    'commentby': _PreprocessCommentByCond,
+    'hotlist': _PreprocessHotlistCond,
+    }
+
+
+def _PreprocessCond(
+    cnxn, cond, project_ids, services, harmonized_config, is_member):
+  """Preprocess query by looking up status, label and component IDs."""
+  # All the fields in a cond share the same name because they are parsed
+  # from a user query term, and the term syntax allows just one field name.
+  field_name = cond.field_defs[0].field_name
+  assert all(fd.field_name == field_name for fd in cond.field_defs)
+
+  # Case 1: The user is searching custom fields.
+  if any(fd.field_id for fd in cond.field_defs):
+    # There can't be a mix of custom and built-in fields because built-in
+    # field names are reserved and take priority over any conflicting ones.
+    assert all(fd.field_id for fd in cond.field_defs)
+    return _PreprocessCustomCond(cnxn, cond, services, is_member)
+
+  # Case 2: The user is searching a built-in field.
+  preproc = _PREPROCESSORS.get(field_name)
+  if preproc:
+    # We have a preprocessor for that built-in field.
+    return preproc(
+        cnxn, cond, project_ids, services, harmonized_config, is_member)
+  else:
+    # We don't have a preprocessor for it.
+    return cond
+
+
+class MalformedQuery(ValueError):
+  pass
diff --git a/search/ast2select.py b/search/ast2select.py
new file mode 100644
index 0000000..a6e5f17
--- /dev/null
+++ b/search/ast2select.py
@@ -0,0 +1,957 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Convert a user's issue search AST into SQL clauses.
+
+The main query is done on the Issues table.
+ + Some simple conditions are implemented as WHERE conditions on the Issue
+   table rows.  These are generated by the _Compare() function.
+ + More complex conditions are implemented via a "LEFT JOIN ... ON ..." clause
+   plus a check in the WHERE clause to select only rows where the join's ON
+   condition was satisfied.  These are generated by appending a clause to
+   the left_joins list plus calling _CompareAlreadyJoined().  Each such left
+   join defines a unique alias to keep it separate from other conditions.
+
+The functions that generate SQL snippets need to insert table names, column
+names, alias names, and value placeholders into the generated string.  These
+functions use the string format() method and the "{varname}" syntax to avoid
+confusion with the "%s" syntax used for SQL value placeholders.
+"""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import logging
+
+from framework import sql
+from proto import ast_pb2
+from proto import tracker_pb2
+from search import query2ast
+from services import tracker_fulltext
+
+
+NATIVE_SEARCHABLE_FIELDS = {
+    'id': 'local_id',
+    'is_spam': 'is_spam',
+    'stars': 'star_count',
+    'attachments': 'attachment_count',
+    'opened': 'opened',
+    'closed': 'closed',
+    'modified': 'modified',
+    'ownermodified': 'owner_modified',
+    'statusmodified': 'status_modified',
+    'componentmodified': 'component_modified',
+    }
+
+
+def BuildSQLQuery(query_ast, snapshot_mode=False):
+  """Translate the user's query into an SQL query.
+
+  Args:
+    query_ast: user query abstract syntax tree parsed by query2ast.py.
+    snapshot_mode: True if the query is being run against IssueSnapshot
+        tables rather than Issue tables.
+
+  Returns:
+    A triple of lists (left_joins, where, unsupported_conds) to use when
+    building the SQL SELECT statement.  left_joins and where are lists of
+    (str, [val, ...]) pairs, and unsupported_conds lists any conds that
+    could not be translated in snapshot mode.
+  """
+  left_joins = []
+  where = []
+  unsupported_conds = []
+  # OR-queries are broken down into multiple simpler queries before they
+  # are sent to the backends, so we should never see an "OR".
+  assert len(query_ast.conjunctions) == 1, 'OR-query should have been split'
+  conj = query_ast.conjunctions[0]
+
+  for cond_num, cond in enumerate(conj.conds):
+    cond_left_joins, cond_where, unsupported = _ProcessCond(cond_num, cond,
+        snapshot_mode)
+    left_joins.extend(cond_left_joins)
+    where.extend(cond_where)
+    unsupported_conds.extend(unsupported)
+
+  return left_joins, where, unsupported_conds
+
+
+def _ProcessBlockedOnIDCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert a blockedon_id=issue_id cond to SQL."""
+  return _ProcessRelatedIDCond(cond, alias, 'blockedon',
+      snapshot_mode=snapshot_mode)
+
+
+def _ProcessBlockingIDCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert a blocking_id:1,2 cond to SQL."""
+  return _ProcessRelatedIDCond(cond, alias, 'blockedon', reverse_relation=True,
+      snapshot_mode=snapshot_mode)
+
+
+def _ProcessMergedIntoIDCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert a mergedinto:1,2 cond to SQL."""
+  return _ProcessRelatedIDCond(cond, alias, 'mergedinto',
+      snapshot_mode=snapshot_mode)
+
+
+def _ProcessRelatedIDCond(cond, alias, kind, reverse_relation=False,
+                          snapshot_mode=False):
+  """Convert either blocking_id, blockedon_id, or mergedinto_id cond to SQL.
+
+  Normally, we query for issue_id values where the dst_issue_id matches the
+  IDs specified in the cond.  However, when reverse_relation is True, we
+  query for dst_issue_id values where issue_id matches.  This is done for
+  blocking_id.
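+
+  For example, [blockedon_id=5] joins IssueRelation rows whose dst_issue_id
+  is 5 and keeps their issue_id, while [blocking_id=5] (reverse_relation)
+  joins rows whose issue_id is 5 and keeps their dst_issue_id.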
+  """
+  if snapshot_mode:
+    return [], [], [cond]
+
+  matching_issue_col = 'issue_id' if reverse_relation else 'dst_issue_id'
+  ret_issue_col = 'dst_issue_id' if reverse_relation else 'issue_id'
+  ext_kind = 'blocking' if reverse_relation else kind
+  left_join = []
+  where = []
+
+  issue_ids = cond.int_values
+  ext_issue_ids = cond.str_values
+  # Filter has:blockedon and has:blocking.
+  if (not issue_ids) and (not ext_issue_ids):
+    kind_cond_str, kind_cond_args = _Compare(
+      alias, ast_pb2.QueryOp.EQ, tracker_pb2.FieldTypes.STR_TYPE, 'kind',
+      [kind])
+    left_join_str = (
+        'IssueRelation AS {alias} ON Issue.id = {alias}.{ret_issue_col} AND '
+         '{kind_cond}').format(
+             alias=alias, ret_issue_col=ret_issue_col, kind_cond=kind_cond_str)
+    left_join_args = kind_cond_args
+    left_join.append((left_join_str, left_join_args))
+    kind_cond_str, kind_cond_args = _Compare(
+      'DIR', ast_pb2.QueryOp.EQ, tracker_pb2.FieldTypes.STR_TYPE, 'kind',
+      [ext_kind])
+    ext_left_join_str = ('DanglingIssueRelation AS DIR ON '
+        'Issue.id = DIR.issue_id AND {kind_cond}').format(
+            kind_cond=kind_cond_str)
+    left_join.append((ext_left_join_str, kind_cond_args))
+    where_str, where_args = _CompareAlreadyJoined(alias,
+      cond.op, ret_issue_col)
+    ext_where_str, ext_where_args = _CompareAlreadyJoined('DIR',
+      cond.op, 'issue_id')
+    where.append(('({where} OR {ext_where})'.format(
+      where=where_str, ext_where=ext_where_str),
+      where_args + ext_where_args))
+  # Filter kind using provided issue ids.
+  if issue_ids:
+    kind_cond_str, kind_cond_args = _Compare(
+      alias, ast_pb2.QueryOp.EQ, tracker_pb2.FieldTypes.STR_TYPE, 'kind',
+      [kind])
+    left_join_str = (
+        'IssueRelation AS {alias} ON Issue.id = {alias}.{ret_issue_col} AND '
+         '{kind_cond}').format(
+             alias=alias, ret_issue_col=ret_issue_col, kind_cond=kind_cond_str)
+    left_join_args = kind_cond_args
+    related_cond_str, related_cond_args = _Compare(
+        alias, ast_pb2.QueryOp.EQ, tracker_pb2.FieldTypes.INT_TYPE,
+        matching_issue_col, issue_ids)
+    left_join_str += ' AND {related_cond}'.format(related_cond=related_cond_str)
+    left_join_args += related_cond_args
+
+    left_join.append((left_join_str, left_join_args))
+    where.append(_CompareAlreadyJoined(alias, cond.op, ret_issue_col))
+  # Filter kind using provided external issue ids.
+  if ext_issue_ids:
+    kind_cond_str, kind_cond_args = _Compare(
+      'DIR', ast_pb2.QueryOp.EQ, tracker_pb2.FieldTypes.STR_TYPE, 'kind',
+      [ext_kind])
+    ext_left_join_str = ('DanglingIssueRelation AS DIR ON '
+        'Issue.id = DIR.issue_id AND {kind_cond}').format(
+            kind_cond=kind_cond_str)
+    related_cond_str, related_cond_args = _Compare(
+        'DIR', ast_pb2.QueryOp.EQ, tracker_pb2.FieldTypes.INT_TYPE,
+        'ext_issue_identifier', ext_issue_ids)
+    ext_left_join_str += ' AND {related_cond}'.format(
+        related_cond=related_cond_str)
+    kind_cond_args += related_cond_args
+
+    left_join.append((ext_left_join_str, kind_cond_args))
+    where.append(_CompareAlreadyJoined('DIR', cond.op, 'issue_id'))
+  return left_join, where, []
+
+
+def _GetFieldTypeAndValues(cond):
+  """Returns the field type and values to use from the condition.
+
+  This function should be used when we do not know which values are present on
+  the condition.  E.g., cond.int_values could be set if ast2ast.py
+  preprocessing was done first; if it was not, str_values could be set instead.
+  If both int values and str values exist on the condition, the int values
+  are returned.
+  """
+  if cond.int_values:
+    return tracker_pb2.FieldTypes.INT_TYPE, cond.int_values
+  else:
+    return tracker_pb2.FieldTypes.STR_TYPE, cond.str_values
+
+
+def _ProcessOwnerCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert an owner:substring cond to SQL."""
+  if snapshot_mode:
+    left_joins = [(
+        'User AS {alias} ON '
+        'IssueSnapshot.owner_id = {alias}.user_id'.format(alias=alias),
+        [])]
+  else:
+    left_joins = [(
+        'User AS {alias} ON (Issue.owner_id = {alias}.user_id '
+        'OR Issue.derived_owner_id = {alias}.user_id)'.format(alias=alias),
+        [])]
+  where = [_Compare(alias, cond.op, tracker_pb2.FieldTypes.STR_TYPE, 'email',
+                    cond.str_values)]
+
+  return left_joins, where, []
+
+
+def _ProcessOwnerIDCond(cond, _alias, _spare_alias, snapshot_mode):
+  """Convert an owner_id=user_id cond to SQL."""
+  if snapshot_mode:
+    field_type, field_values = _GetFieldTypeAndValues(cond)
+    explicit_str, explicit_args = _Compare(
+        'IssueSnapshot', cond.op, field_type, 'owner_id', field_values)
+    where = [(explicit_str, explicit_args)]
+  else:
+    field_type, field_values = _GetFieldTypeAndValues(cond)
+    explicit_str, explicit_args = _Compare(
+        'Issue', cond.op, field_type, 'owner_id', field_values)
+    derived_str, derived_args = _Compare(
+        'Issue', cond.op, field_type, 'derived_owner_id', field_values)
+    if cond.op in (ast_pb2.QueryOp.NE, ast_pb2.QueryOp.NOT_TEXT_HAS):
+      where = [(explicit_str, explicit_args), (derived_str, derived_args)]
+    else:
+      if cond.op == ast_pb2.QueryOp.IS_NOT_DEFINED:
+        op = ' AND '
+      else:
+        op = ' OR '
+      where = [
+          ('(' + explicit_str + op + derived_str + ')',
+           explicit_args + derived_args)]
+
+  return [], where, []
+
+
+def _ProcessOwnerLastVisitCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert an ownerlastvisit<timestamp cond to SQL."""
+  # TODO(jeffcarp): It is possible to support this on snapshots.
+  if snapshot_mode:
+    return [], [], [cond]
+
+  left_joins = [(
+      'User AS {alias} '
+      'ON (Issue.owner_id = {alias}.user_id OR '
+      'Issue.derived_owner_id = {alias}.user_id)'.format(alias=alias),
+      [])]
+  where = [_Compare(alias, cond.op, tracker_pb2.FieldTypes.INT_TYPE,
+                    'last_visit_timestamp', cond.int_values)]
+  return left_joins, where, []
+
+
+def _ProcessIsOwnerBouncing(cond, alias, _spare_alias, snapshot_mode):
+  """Convert an is:ownerbouncing cond to SQL."""
+  if snapshot_mode:
+    return [], [], [cond]
+
+  left_joins = [(
+      'User AS {alias} '
+      'ON (Issue.owner_id = {alias}.user_id OR '
+      'Issue.derived_owner_id = {alias}.user_id)'.format(alias=alias),
+      [])]
+  if cond.op == ast_pb2.QueryOp.EQ:
+    op = ast_pb2.QueryOp.IS_DEFINED
+  else:
+    op = ast_pb2.QueryOp.IS_NOT_DEFINED
+
+  where = [_Compare(alias, op, tracker_pb2.FieldTypes.INT_TYPE,
+                    'email_bounce_timestamp', [])]
+  return left_joins, where, []
+
+
+def _ProcessReporterCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert a reporter:substring cond to SQL."""
+  if snapshot_mode:
+    left_joins = [(
+        'User AS {alias} ON IssueSnapshot.reporter_id = {alias}.user_id'.format(
+            alias=alias), [])]
+  else:
+    left_joins = [(
+        'User AS {alias} ON Issue.reporter_id = {alias}.user_id'.format(
+            alias=alias), [])]
+  where = [_Compare(alias, cond.op, tracker_pb2.FieldTypes.STR_TYPE, 'email',
+                    cond.str_values)]
+
+  return left_joins, where, []
+
+
+def _ProcessReporterIDCond(cond, _alias, _spare_alias, snapshot_mode):
+  """Convert a reporter_ID=user_id cond to SQL."""
+  field_type, field_values = _GetFieldTypeAndValues(cond)
+
+  if snapshot_mode:
+    where = [_Compare(
+        'IssueSnapshot', cond.op, field_type, 'reporter_id', field_values)]
+  else:
+    where = [_Compare(
+        'Issue', cond.op, field_type, 'reporter_id', field_values)]
+  return [], where, []
+
+
+def _ProcessCcCond(cond, alias, user_alias, snapshot_mode):
+  """Convert a cc:substring cond to SQL."""
+  email_cond_str, email_cond_args = _Compare(
+      user_alias, ast_pb2.QueryOp.TEXT_HAS, tracker_pb2.FieldTypes.STR_TYPE,
+      'email', cond.str_values)
+
+  if snapshot_mode:
+    left_joins = [(
+        '(IssueSnapshot2Cc AS {alias} JOIN User AS {user_alias} '
+        'ON {alias}.cc_id = {user_alias}.user_id AND {email_cond}) '
+        'ON IssueSnapshot.id = {alias}.issuesnapshot_id'.format(
+            alias=alias, user_alias=user_alias, email_cond=email_cond_str),
+        email_cond_args)]
+  else:
+    # Note: email_cond_str will have parens, if needed.
+    left_joins = [(
+        '(Issue2Cc AS {alias} JOIN User AS {user_alias} '
+        'ON {alias}.cc_id = {user_alias}.user_id AND {email_cond}) '
+        'ON Issue.id = {alias}.issue_id AND '
+        'Issue.shard = {alias}.issue_shard'.format(
+            alias=alias, user_alias=user_alias, email_cond=email_cond_str),
+        email_cond_args)]
+  where = [_CompareAlreadyJoined(user_alias, cond.op, 'email')]
+
+  return left_joins, where, []
+
+
+def _ProcessCcIDCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert a cc_id=user_id cond to SQL."""
+  if snapshot_mode:
+    join_str = (
+        'IssueSnapshot2Cc AS {alias} '
+        'ON IssueSnapshot.id = {alias}.issuesnapshot_id'.format(alias=alias))
+  else:
+    join_str = (
+        'Issue2Cc AS {alias} ON Issue.id = {alias}.issue_id AND '
+        'Issue.shard = {alias}.issue_shard'.format(
+            alias=alias))
+  if cond.op in (ast_pb2.QueryOp.IS_DEFINED, ast_pb2.QueryOp.IS_NOT_DEFINED):
+    left_joins = [(join_str, [])]
+  else:
+    field_type, field_values = _GetFieldTypeAndValues(cond)
+    cond_str, cond_args = _Compare(
+        alias, ast_pb2.QueryOp.EQ, field_type, 'cc_id', field_values)
+    left_joins = [(join_str + ' AND ' + cond_str, cond_args)]
+
+  where = [_CompareAlreadyJoined(alias, cond.op, 'cc_id')]
+  return left_joins, where, []
+
+
+def _ProcessStarredByCond(cond, alias, user_alias, snapshot_mode):
+  """Convert a starredby:substring cond to SQL."""
+  if snapshot_mode:
+    return [], [], [cond]
+
+  email_cond_str, email_cond_args = _Compare(
+      user_alias, cond.op, tracker_pb2.FieldTypes.STR_TYPE, 'email',
+      cond.str_values)
+  # Note: email_cond_str will have parens, if needed.
+  left_joins = [(
+      '(IssueStar AS {alias} JOIN User AS {user_alias} '
+      'ON {alias}.user_id = {user_alias}.user_id AND {email_cond}) '
+      'ON Issue.id = {alias}.issue_id'.format(
+          alias=alias, user_alias=user_alias, email_cond=email_cond_str),
+      email_cond_args)]
+  where = [_CompareAlreadyJoined(user_alias, cond.op, 'email')]
+
+  return left_joins, where, []
+
+
+def _ProcessStarredByIDCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert a starredby_id=user_id cond to SQL."""
+  if snapshot_mode:
+    return [], [], [cond]
+
+  join_str = 'IssueStar AS {alias} ON Issue.id = {alias}.issue_id'.format(
+      alias=alias)
+  if cond.op in (ast_pb2.QueryOp.IS_DEFINED, ast_pb2.QueryOp.IS_NOT_DEFINED):
+    left_joins = [(join_str, [])]
+  else:
+    field_type, field_values = _GetFieldTypeAndValues(cond)
+    cond_str, cond_args = _Compare(
+        alias, ast_pb2.QueryOp.EQ, field_type, 'user_id', field_values)
+    left_joins = [(join_str + ' AND ' + cond_str, cond_args)]
+
+  where = [_CompareAlreadyJoined(alias, cond.op, 'user_id')]
+  return left_joins, where, []
+
+
+def _ProcessCommentByCond(cond, alias, user_alias, snapshot_mode):
+  """Convert a commentby:substring cond to SQL."""
+  if snapshot_mode:
+    return [], [], [cond]
+
+  email_cond_str, email_cond_args = _Compare(
+      user_alias, ast_pb2.QueryOp.TEXT_HAS, tracker_pb2.FieldTypes.STR_TYPE,
+      'email', cond.str_values)
+  # Note: email_cond_str will have parens, if needed.
+  left_joins = [(
+      '(Comment AS {alias} JOIN User AS {user_alias} '
+      'ON {alias}.commenter_id = {user_alias}.user_id AND {email_cond}) '
+      'ON Issue.id = {alias}.issue_id AND '
+      '{alias}.deleted_by IS NULL'.format(
+          alias=alias, user_alias=user_alias, email_cond=email_cond_str),
+      email_cond_args)]
+  where = [_CompareAlreadyJoined(user_alias, cond.op, 'email')]
+
+  return left_joins, where, []
+
+
+def _ProcessCommentByIDCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert a commentby_id=user_id cond to SQL."""
+  if snapshot_mode:
+    return [], [], [cond]
+
+  field_type, field_values = _GetFieldTypeAndValues(cond)
+  commenter_cond_str, commenter_cond_args = _Compare(
+      alias, ast_pb2.QueryOp.EQ, field_type, 'commenter_id', field_values)
+  left_joins = [(
+      'Comment AS {alias} ON Issue.id = {alias}.issue_id AND '
+      '{commenter_cond} AND '
+      '{alias}.deleted_by IS NULL'.format(
+          alias=alias, commenter_cond=commenter_cond_str),
+      commenter_cond_args)]
+  where = [_CompareAlreadyJoined(alias, cond.op, 'commenter_id')]
+
+  return left_joins, where, []
+
+
+def _ProcessStatusIDCond(cond, _alias, _spare_alias, snapshot_mode):
+  """Convert a status_id=ID cond to SQL."""
+  field_type, field_values = _GetFieldTypeAndValues(cond)
+  if snapshot_mode:
+    explicit_str, explicit_args = _Compare(
+        'IssueSnapshot', cond.op, field_type, 'status_id', field_values)
+    where = [(explicit_str, explicit_args)]
+  else:
+    explicit_str, explicit_args = _Compare(
+        'Issue', cond.op, field_type, 'status_id', field_values)
+    derived_str, derived_args = _Compare(
+        'Issue', cond.op, field_type, 'derived_status_id', field_values)
+    if cond.op in (ast_pb2.QueryOp.IS_NOT_DEFINED, ast_pb2.QueryOp.NE):
+      where = [(explicit_str, explicit_args), (derived_str, derived_args)]
+    else:
+      where = [
+          ('(' + explicit_str + ' OR ' + derived_str + ')',
+           explicit_args + derived_args)]
+
+  return [], where, []
+
+
+def _ProcessSummaryCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert a summary="exact string" cond to SQL."""
+  left_joins = []
+  where = []
+  field_type, field_values = _GetFieldTypeAndValues(cond)
+  if snapshot_mode:
+    return [], [], [cond]
+  elif cond.op in (ast_pb2.QueryOp.EQ, ast_pb2.QueryOp.NE,
+                   ast_pb2.QueryOp.GT, ast_pb2.QueryOp.LT,
+                   ast_pb2.QueryOp.GE, ast_pb2.QueryOp.LE,
+                   ast_pb2.QueryOp.IS_DEFINED, ast_pb2.QueryOp.IS_NOT_DEFINED):
+    summary_cond_str, summary_cond_args = _Compare(
+        alias, cond.op, field_type, 'summary', field_values)
+    left_joins = [(
+        'IssueSummary AS {alias} ON Issue.id = {alias}.issue_id AND '
+        '{summary_cond}'.format(
+          alias=alias, summary_cond=summary_cond_str),
+        summary_cond_args)]
+    where = [_CompareAlreadyJoined(alias, ast_pb2.QueryOp.EQ, 'issue_id')]
+
+  return left_joins, where, []
+
+
+def _ProcessLabelIDCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert a label_id=ID cond to SQL."""
+  if snapshot_mode:
+    join_str = (
+        'IssueSnapshot2Label AS {alias} '
+        'ON IssueSnapshot.id = {alias}.issuesnapshot_id'.format(alias=alias))
+  else:
+    join_str = (
+        'Issue2Label AS {alias} ON Issue.id = {alias}.issue_id AND '
+        'Issue.shard = {alias}.issue_shard'.format(alias=alias))
+
+  field_type, field_values = _GetFieldTypeAndValues(cond)
+  if not field_values and cond.op == ast_pb2.QueryOp.NE:
+    return [], [], []
+  cond_str, cond_args = _Compare(
+      alias, ast_pb2.QueryOp.EQ, field_type, 'label_id', field_values)
+  left_joins = [(join_str + ' AND ' + cond_str, cond_args)]
+  where = [_CompareAlreadyJoined(alias, cond.op, 'label_id')]
+  return left_joins, where, []
+
+
+def _ProcessComponentIDCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert a component_id=ID cond to SQL."""
+  # This is a built-in field, so it shadows any other fields w/ the same name.
+  if snapshot_mode:
+    join_str = (
+        'IssueSnapshot2Component AS {alias} '
+        'ON IssueSnapshot.id = {alias}.issuesnapshot_id'.format(alias=alias))
+  else:
+    join_str = (
+        'Issue2Component AS {alias} ON Issue.id = {alias}.issue_id AND '
+        'Issue.shard = {alias}.issue_shard'.format(alias=alias))
+  if cond.op in (ast_pb2.QueryOp.IS_DEFINED, ast_pb2.QueryOp.IS_NOT_DEFINED):
+    left_joins = [(join_str, [])]
+  else:
+    field_type, field_values = _GetFieldTypeAndValues(cond)
+    cond_str, cond_args = _Compare(
+        alias, ast_pb2.QueryOp.EQ, field_type, 'component_id', field_values)
+    left_joins = [(join_str + ' AND ' + cond_str, cond_args)]
+
+  where = [_CompareAlreadyJoined(alias, cond.op, 'component_id')]
+  return left_joins, where, []
+
+
+# TODO(jojwang): monorail:3819, check for cond.phase_name and process
+# appropriately so users can search 'Canary.UXReview-status:Approved'.
+def _ProcessApprovalFieldCond(cond, alias, user_alias, snapshot_mode):
+  """Convert a custom approval field cond to SQL."""
+  if snapshot_mode:
+    return [], [], [cond]
+
+  approval_fd = cond.field_defs[0]
+  left_joins = []
+
+  join_str_tmpl = (
+    '{tbl_name} AS {alias} ON Issue.id = {alias}.issue_id AND '
+    '{alias}.approval_id = %s')
+
+  join_args = [approval_fd.field_id]
+
+  val_type, values = _GetFieldTypeAndValues(cond)
+  if val_type == tracker_pb2.FieldTypes.STR_TYPE:
+    values = [val.lower() for val in values]
+  # TODO(jojwang): monorail:3809, check if there is a cond.key_suffix.
+  # status and approver should always have a value, so 'has:UXReview-approver'
+  # should return the same issues as 'has:UXReview'.
+  # There will not always be values for approval.setter_id and approval.set_on,
+  # so the current code would not process 'has:UXReview-by' correctly.
+  if cond.op in (
+      ast_pb2.QueryOp.IS_DEFINED, ast_pb2.QueryOp.IS_NOT_DEFINED):
+    join_str = join_str_tmpl.format(
+        tbl_name='Issue2ApprovalValue', alias=alias)
+    left_joins = [(join_str, join_args)]
+  else:
+    op = cond.op
+    if op == ast_pb2.QueryOp.NE:
+      op = ast_pb2.QueryOp.EQ  # Negation is done in WHERE clause.
+    elif op == ast_pb2.QueryOp.NOT_TEXT_HAS:
+      op = ast_pb2.QueryOp.TEXT_HAS
+
+    if (not cond.key_suffix) or cond.key_suffix == query2ast.STATUS_SUFFIX:
+      tbl_str = 'Issue2ApprovalValue'
+      cond_str, cond_args = _Compare(
+          alias, op, val_type, 'status', values)
+    elif cond.key_suffix == query2ast.SET_ON_SUFFIX:
+      tbl_str = 'Issue2ApprovalValue'
+      cond_str, cond_args = _Compare(
+          alias, op, val_type, 'set_on', values)
+    elif cond.key_suffix in [
+        query2ast.APPROVER_SUFFIX, query2ast.SET_BY_SUFFIX]:
+      if cond.key_suffix == query2ast.SET_BY_SUFFIX:
+        tbl_str = 'Issue2ApprovalValue'
+        col_name = 'setter_id'
+      else:
+        tbl_str = 'IssueApproval2Approver'
+        col_name = 'approver_id'
+
+      if val_type == tracker_pb2.FieldTypes.INT_TYPE:
+        cond_str, cond_args = _Compare(
+            alias, op, val_type, col_name, values)
+      else:
+        email_cond_str, email_cond_args = _Compare(
+            user_alias, op, val_type, 'email', values)
+        left_joins.append((
+          'User AS {user_alias} ON {email_cond}'.format(
+              user_alias=user_alias, email_cond=email_cond_str),
+          email_cond_args))
+
+        cond_str = '{alias}.{col_name} = {user_alias}.user_id'.format(
+            alias=alias, col_name=col_name, user_alias=user_alias)
+        cond_args = []
+    if cond_str or cond_args:
+      join_str = join_str_tmpl.format(tbl_name=tbl_str, alias=alias)
+      join_str += ' AND ' + cond_str
+      join_args.extend(cond_args)
+    left_joins.append((join_str, join_args))
+
+  where = [_CompareAlreadyJoined(alias, cond.op, 'approval_id')]
+  return left_joins, where, []
+
+
+def _ProcessCustomFieldCond(
+    cond, alias, user_alias, phase_alias, snapshot_mode):
+  """Convert a custom field cond to SQL."""
+  if snapshot_mode:
+    return [], [], [cond]
+
+  # TODO(jrobbins): handle ambiguous field names that map to multiple
+  # field definitions, especially for cross-project search.
+  field_def = cond.field_defs[0]
+  field_type = field_def.field_type
+  left_joins = []
+
+  join_str = (
+      'Issue2FieldValue AS {alias} ON Issue.id = {alias}.issue_id AND '
+      'Issue.shard = {alias}.issue_shard AND '
+      '{alias}.field_id = %s'.format(alias=alias))
+  join_args = [field_def.field_id]
+
+  if cond.op not in (
+      ast_pb2.QueryOp.IS_DEFINED, ast_pb2.QueryOp.IS_NOT_DEFINED):
+    op = cond.op
+    if op == ast_pb2.QueryOp.NE:
+      op = ast_pb2.QueryOp.EQ  # Negation is done in WHERE clause.
+    if field_type == tracker_pb2.FieldTypes.INT_TYPE:
+      cond_str, cond_args = _Compare(
+          alias, op, field_type, 'int_value', cond.int_values)
+    elif field_type == tracker_pb2.FieldTypes.STR_TYPE:
+      cond_str, cond_args = _Compare(
+          alias, op, field_type, 'str_value', cond.str_values)
+    elif field_type == tracker_pb2.FieldTypes.USER_TYPE:
+      if cond.int_values:
+        cond_str, cond_args = _Compare(
+            alias, op, field_type, 'user_id', cond.int_values)
+      else:
+        email_cond_str, email_cond_args = _Compare(
+            user_alias, op, field_type, 'email', cond.str_values)
+        left_joins.append((
+            'User AS {user_alias} ON {email_cond}'.format(
+                user_alias=user_alias, email_cond=email_cond_str),
+            email_cond_args))
+        cond_str = '{alias}.user_id = {user_alias}.user_id'.format(
+            alias=alias, user_alias=user_alias)
+        cond_args = []
+    elif field_type == tracker_pb2.FieldTypes.URL_TYPE:
+      cond_str, cond_args = _Compare(
+          alias, op, field_type, 'url_value', cond.str_values)
+    if field_type == tracker_pb2.FieldTypes.DATE_TYPE:
+      cond_str, cond_args = _Compare(
+          alias, op, field_type, 'date_value', cond.int_values)
+    if cond_str or cond_args:
+      join_str += ' AND ' + cond_str
+      join_args.extend(cond_args)
+
+  if cond.phase_name:
+    phase_cond_str, phase_cond_args = _Compare(
+        phase_alias, ast_pb2.QueryOp.EQ, tracker_pb2.FieldTypes.STR_TYPE,
+        'name', [cond.phase_name])
+    left_joins.append((
+        'IssuePhaseDef AS {phase_alias} ON {phase_cond}'.format(
+            phase_alias=phase_alias, phase_cond=phase_cond_str),
+        phase_cond_args))
+    cond_str = '{alias}.phase_id = {phase_alias}.id'.format(
+        alias=alias, phase_alias=phase_alias)
+    cond_args = []
+    join_str += ' AND ' + cond_str
+    join_args.extend(cond_args)
+
+  left_joins.append((join_str, join_args))
+  where = [_CompareAlreadyJoined(alias, cond.op, 'field_id')]
+  return left_joins, where, []
+
+
+def _ProcessAttachmentCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert has:attachment and -has:attachment cond to SQL."""
+  if snapshot_mode:
+    return [], [], [cond]
+
+  if cond.op in (ast_pb2.QueryOp.IS_DEFINED, ast_pb2.QueryOp.IS_NOT_DEFINED):
+    left_joins = []
+    where = [_Compare('Issue', cond.op, tracker_pb2.FieldTypes.INT_TYPE,
+                      'attachment_count', cond.int_values)]
+  else:
+    field_def = cond.field_defs[0]
+    field_type = field_def.field_type
+    left_joins = [
+      ('Attachment AS {alias} ON Issue.id = {alias}.issue_id AND '
+       '{alias}.deleted = %s'.format(alias=alias),
+       [False])]
+    where = [_Compare(alias, cond.op, field_type, 'filename', cond.str_values)]
+
+  return left_joins, where, []
+
+
+def _ProcessHotlistIDCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert hotlist_id=IDS cond to SQL."""
+  if snapshot_mode:
+    join_str = (
+      'IssueSnapshot2Hotlist AS {alias} '
+      'ON IssueSnapshot.id = {alias}.issuesnapshot_id'.format(alias=alias))
+  else:
+    join_str = (
+      'Hotlist2Issue AS {alias} ON Issue.id = {alias}.issue_id'.format(
+          alias=alias))
+
+  field_type, field_values = _GetFieldTypeAndValues(cond)
+  if not field_values and cond.op == ast_pb2.QueryOp.NE:
+    return [], [], []
+  cond_str, cond_args = _Compare(
+      alias, ast_pb2.QueryOp.EQ, field_type, 'hotlist_id', field_values)
+  left_joins = [(join_str + ' AND ' + cond_str, cond_args)]
+  where = [_CompareAlreadyJoined(alias, cond.op, 'hotlist_id')]
+
+  return left_joins, where, []
+
+
+def _ProcessHotlistCond(cond, alias, _spare_alias, snapshot_mode):
+  """Convert hotlist=user:hotlist-name to SQL"""
+  # Hotlist conditions that reach this function are known to have an invalid
+  # user_name/id/email, as determined in ast2ast._PreprocessHotlistCond, so
+  # any possible user identification is ignored.
+  hotlist_substrings = []
+  for val in cond.str_values:
+    substring = val.split(':')[-1]
+    if substring:
+      hotlist_substrings.append(substring)
+  hotlist_cond_str, hotlist_cond_args = _Compare(
+      alias, ast_pb2.QueryOp.TEXT_HAS, tracker_pb2.FieldTypes.STR_TYPE,
+      'name', hotlist_substrings)
+  if snapshot_mode:
+    left_joins = [(
+        '(IssueSnapshot2Hotlist JOIN Hotlist AS {alias} '
+        'ON IssueSnapshot2Hotlist.hotlist_id = {alias}.id AND {hotlist_cond}) '
+        'ON IssueSnapshot.id = IssueSnapshot2Hotlist.issuesnapshot_id'.format(
+            alias=alias, hotlist_cond=hotlist_cond_str), hotlist_cond_args)]
+  else:
+    left_joins = [(
+        '(Hotlist2Issue JOIN Hotlist AS {alias} '
+        'ON Hotlist2Issue.hotlist_id = {alias}.id AND {hotlist_cond}) '
+        'ON Issue.id = Hotlist2Issue.issue_id'.format(
+            alias=alias, hotlist_cond=hotlist_cond_str), hotlist_cond_args)]
+  where = [_CompareAlreadyJoined(alias, cond.op, 'name')]
+
+  return left_joins, where, []
+
+
+def _ProcessPhaseCond(cond, alias, phase_alias, _snapshot_mode):
+  """Convert gate:<phase_name> to SQL."""
+
+  op = cond.op
+  if cond.op == ast_pb2.QueryOp.NE:
+    op = ast_pb2.QueryOp.EQ
+  elif cond.op == ast_pb2.QueryOp.NOT_TEXT_HAS:
+    op = ast_pb2.QueryOp.TEXT_HAS
+
+  cond_str, cond_args = _Compare(
+      phase_alias, op, tracker_pb2.FieldTypes.STR_TYPE,
+      'name', cond.str_values)
+  left_joins = [(
+      '(Issue2ApprovalValue AS {alias} JOIN IssuePhaseDef AS {phase_alias} '
+      'ON {alias}.phase_id = {phase_alias}.id AND {name_cond}) '
+      'ON Issue.id = {alias}.issue_id'.format(
+          alias=alias, phase_alias=phase_alias, name_cond=cond_str),
+      cond_args)]
+  where = [_CompareAlreadyJoined(phase_alias, cond.op, 'name')]
+
+  return left_joins, where, []
+
+
+_PROCESSORS = {
+    'owner': _ProcessOwnerCond,
+    'owner_id': _ProcessOwnerIDCond,
+    'ownerlastvisit': _ProcessOwnerLastVisitCond,
+    'ownerbouncing': _ProcessIsOwnerBouncing,
+    'reporter': _ProcessReporterCond,
+    'reporter_id': _ProcessReporterIDCond,
+    'cc': _ProcessCcCond,
+    'cc_id': _ProcessCcIDCond,
+    'starredby': _ProcessStarredByCond,
+    'starredby_id': _ProcessStarredByIDCond,
+    'commentby': _ProcessCommentByCond,
+    'commentby_id': _ProcessCommentByIDCond,
+    'status_id': _ProcessStatusIDCond,
+    'summary': _ProcessSummaryCond,
+    'label_id': _ProcessLabelIDCond,
+    'component_id': _ProcessComponentIDCond,
+    'blockedon_id': _ProcessBlockedOnIDCond,
+    'blocking_id': _ProcessBlockingIDCond,
+    'mergedinto_id': _ProcessMergedIntoIDCond,
+    'attachment': _ProcessAttachmentCond,
+    'hotlist_id': _ProcessHotlistIDCond,
+    'hotlist': _ProcessHotlistCond,
+    }
+
+
+def _ProcessCond(cond_num, cond, snapshot_mode):
+  """Translate one term of the user's search into an SQL query.
+
+  Args:
+    cond_num: integer cond number used to make distinct local variable names.
+    cond: user query cond parsed by query2ast.py.
+    snapshot_mode: True if the query is being run against IssueSnapshot
+        tables rather than Issue tables.
+
+  Returns:
+    A triple of lists (left_joins, where, unsupported_conds) to use when
+    building the SQL SELECT statement.  left_joins and where are lists of
+    (str, [val, ...]) pairs, and unsupported_conds lists any conds that
+    could not be translated in snapshot mode.
+  """
+  alias = 'Cond%d' % cond_num
+  spare_alias = 'Spare%d' % cond_num
+  # Note: a condition like [x=y] has field_name "x", there may be multiple
+  # field definitions that match "x", but they will all have field_name "x".
+  field_def = cond.field_defs[0]
+  assert all(field_def.field_name == fd.field_name for fd in cond.field_defs)
+
+  if field_def.field_name in NATIVE_SEARCHABLE_FIELDS:
+    # TODO(jeffcarp): Support local_id search here.
+    if snapshot_mode:
+      return [], [], [cond]
+    else:
+      col = NATIVE_SEARCHABLE_FIELDS[field_def.field_name]
+      where = [_Compare(
+          'Issue', cond.op, field_def.field_type, col,
+          cond.str_values or cond.int_values)]
+      return [], where, []
+
+  elif field_def.field_name in _PROCESSORS:
+    proc = _PROCESSORS[field_def.field_name]
+    return proc(cond, alias, spare_alias, snapshot_mode)
+
+  # Any phase conditions use the sql.SHORTHAND['phase_cond'], which expects a
+  # 'Phase' alias.  'phase_cond' cannot expect a 'Spare' alias, because
+  # _ProcessCustomFieldCond also creates a phase_cond string using the 'Phase'
+  # alias; it needs the 'Spare' alias for other conditions.
+  elif field_def.field_name == 'gate':
+    phase_alias = 'Phase%d' % cond_num
+    return _ProcessPhaseCond(cond, alias, phase_alias, snapshot_mode)
+
+  elif field_def.field_id:  # it is a search on a custom field
+    phase_alias = 'Phase%d' % cond_num
+    if field_def.field_type == tracker_pb2.FieldTypes.APPROVAL_TYPE:
+      return _ProcessApprovalFieldCond(cond, alias, spare_alias, snapshot_mode)
+    return _ProcessCustomFieldCond(
+        cond, alias, spare_alias, phase_alias, snapshot_mode)
+
+  elif (cond.op in (ast_pb2.QueryOp.TEXT_HAS, ast_pb2.QueryOp.NOT_TEXT_HAS) and
+        (field_def.field_name in tracker_fulltext.ISSUE_FULLTEXT_FIELDS or
+         field_def.field_name == 'any_field')):
+    if snapshot_mode:
+      return [], [], [cond]
+    # This case is handled by full-text search.
+
+  else:
+    logging.error('untranslated search cond %r', cond)
+
+  return [], [], []
+
+
+def _Compare(alias, op, val_type, col, vals):
+  """Return an SQL comparison for the given values. For use in WHERE or ON.
+
+  Args:
+    alias: String name of the table or alias defined in a JOIN clause.
+    op: One of the operators defined in ast_pb2.py.
+    val_type: One of the value types defined in ast_pb2.py.
+    col: string column name to compare to vals.
+    vals: list of values that the user is searching for.
+
+  Returns:
+    (cond_str, cond_args) where cond_str is a SQL condition that may contain
+    some %s placeholders, and cond_args is the list of values that fill those
+    placeholders.  If the condition string contains any AND or OR operators,
+    the whole expression is put inside parens.
+
+  Raises:
+    NoPossibleResults: The user's query is impossible to ever satisfy, e.g.,
+        it requires matching an empty set of labels.
+  """
+  vals_ph = sql.PlaceHolders(vals)
+  if col in ['label', 'status', 'email', 'name']:
+    alias_col = 'LOWER(%s.%s)' % (alias, col)
+  else:
+    alias_col = '%s.%s' % (alias, col)
+
+  def Fmt(cond_str):
+    return cond_str.format(alias_col=alias_col, vals_ph=vals_ph)
+
+  no_value = (0 if val_type in [tracker_pb2.FieldTypes.DATE_TYPE,
+                                tracker_pb2.FieldTypes.INT_TYPE] else '')
+  if op == ast_pb2.QueryOp.IS_DEFINED:
+    return Fmt('({alias_col} IS NOT NULL AND {alias_col} != %s)'), [no_value]
+  if op == ast_pb2.QueryOp.IS_NOT_DEFINED:
+    return Fmt('({alias_col} IS NULL OR {alias_col} = %s)'), [no_value]
+
+  if val_type in [tracker_pb2.FieldTypes.DATE_TYPE,
+                  tracker_pb2.FieldTypes.INT_TYPE]:
+    if op == ast_pb2.QueryOp.TEXT_HAS:
+      op = ast_pb2.QueryOp.EQ
+    if op == ast_pb2.QueryOp.NOT_TEXT_HAS:
+      op = ast_pb2.QueryOp.NE
+
+  if op == ast_pb2.QueryOp.EQ:
+    if not vals:
+      raise NoPossibleResults('Column %s has no possible value' % alias_col)
+    elif len(vals) == 1:
+      cond_str = Fmt('{alias_col} = %s')
+    else:
+      cond_str = Fmt('{alias_col} IN ({vals_ph})')
+    return cond_str, vals
+
+  if op == ast_pb2.QueryOp.NE:
+    if not vals:
+      return 'TRUE', []  # a no-op that matches every row.
+    elif len(vals) == 1:
+      comp = Fmt('{alias_col} != %s')
+    else:
+      comp = Fmt('{alias_col} NOT IN ({vals_ph})')
+    return '(%s IS NULL OR %s)' % (alias_col, comp), vals
+
+  wild_vals = ['%%%s%%' % val for val in vals]
+  if op == ast_pb2.QueryOp.TEXT_HAS:
+    cond_str = ' OR '.join(Fmt('{alias_col} LIKE %s') for _ in vals)
+    return ('(%s)' % cond_str), wild_vals
+  if op == ast_pb2.QueryOp.NOT_TEXT_HAS:
+    cond_str = (Fmt('{alias_col} IS NULL OR ') +
+                ' AND '.join(Fmt('{alias_col} NOT LIKE %s') for _ in vals))
+    return ('(%s)' % cond_str), wild_vals
+
+  # Note: These operators do not support quick-OR
+  val = vals[0]
+
+  if op == ast_pb2.QueryOp.GT:
+    return Fmt('{alias_col} > %s'), [val]
+  if op == ast_pb2.QueryOp.LT:
+    return Fmt('{alias_col} < %s'), [val]
+  if op == ast_pb2.QueryOp.GE:
+    return Fmt('{alias_col} >= %s'), [val]
+  if op == ast_pb2.QueryOp.LE:
+    return Fmt('{alias_col} <= %s'), [val]
+
+  logging.error('unknown op: %r', op)
+
+
+def _CompareAlreadyJoined(alias, op, col):
+  """Return a WHERE clause comparison that checks that a join succeeded."""
+  def Fmt(cond_str):
+    return cond_str.format(alias_col='%s.%s' % (alias, col))
+
+  if op in (ast_pb2.QueryOp.NE, ast_pb2.QueryOp.NOT_TEXT_HAS,
+            ast_pb2.QueryOp.IS_NOT_DEFINED):
+    return Fmt('{alias_col} IS NULL'), []
+  else:
+    return Fmt('{alias_col} IS NOT NULL'), []
+
+
+class Error(Exception):
+  """Base class for errors from this module."""
+
+
+class NoPossibleResults(Error):
+  """The query could never match any rows from the database, so don't try.."""
diff --git a/search/ast2sort.py b/search/ast2sort.py
new file mode 100644
index 0000000..08ed346
--- /dev/null
+++ b/search/ast2sort.py
@@ -0,0 +1,451 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Convert a user's issue sorting directives into SQL clauses.
+
+Some sort directives translate into simple ORDER BY column specifications.
+Other sort directives require that a LEFT JOIN be done to bring in
+relevant information that is then used in the ORDER BY.
+
+Sorting based on strings can slow down the DB because long sort-keys
+must be loaded into RAM, which means that fewer sort-keys fit into the
+DB's sorting buffers at a time.  Also, Monorail defines the sorting
+order of well-known labels and statuses based on the order in which
+they are defined in the project's config.  So, we determine the sort order of
+labels and status values before executing the query and then use the MySQL
+FIELD() function to sort their IDs in the desired order, without sorting
+strings.
+
+For more info, see the "Sorting in Monorail" and "What makes Monorail Fast?"
+design docs.
+"""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import logging
+
+from framework import sql
+from proto import tracker_pb2
+from tracker import tracker_constants
+
+
+NATIVE_SORTABLE_FIELDS = [
+    'id', 'stars', 'attachments', 'opened', 'closed', 'modified',
+    'ownermodified', 'statusmodified', 'componentmodified',
+    ]
+
+FIELDS_TO_COLUMNS = {
+    'id': 'local_id',
+    'stars': 'star_count',
+    'attachments': 'attachment_count',
+    'ownermodified': 'owner_modified',
+    'statusmodified': 'status_modified',
+    'componentmodified': 'component_modified',
+    }
+
+APPROVAL_STATUS_SORT_ORDER = [
+    '\'not_set\'', '\'needs_review\'', '\'na\'', '\'review_requested\'',
+    '\'review_started\'', '\'need_info\'', '\'approved\'', '\'not_approved\'']
+
+
+def BuildSortClauses(
+    sort_directives, harmonized_labels, harmonized_statuses,
+    harmonized_fields):
+  """Return LEFT JOIN and ORDER BY clauses needed to sort the results."""
+  if not sort_directives:
+    return [], []
+
+  all_left_joins = []
+  all_order_by = []
+  for i, sd in enumerate(sort_directives):
+    left_join_parts, order_by_parts = _OneSortDirective(
+        i, sd, harmonized_labels, harmonized_statuses, harmonized_fields)
+    all_left_joins.extend(left_join_parts)
+    all_order_by.extend(order_by_parts)
+
+  return all_left_joins, all_order_by
+
+
+def _ProcessProjectSD(fmt):
+  """Convert a 'project' sort directive into SQL."""
+  left_joins = []
+  order_by = [(fmt('Issue.project_id {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessReporterSD(fmt):
+  """Convert a 'reporter' sort directive into SQL."""
+  left_joins = [
+      (fmt('User AS {alias} ON Issue.reporter_id = {alias}.user_id'), [])]
+  order_by = [
+      (fmt('ISNULL({alias}.email) {sort_dir}'), []),
+      (fmt('{alias}.email {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessOwnerSD(fmt):
+  """Convert a 'owner' sort directive into SQL."""
+  left_joins = [
+      (fmt('User AS {alias}_exp ON Issue.owner_id = {alias}_exp.user_id'), []),
+      (fmt('User AS {alias}_der ON '
+           'Issue.derived_owner_id = {alias}_der.user_id'), [])]
+  order_by = [
+      (fmt('(ISNULL({alias}_exp.email) AND ISNULL({alias}_der.email)) '
+           '{sort_dir}'), []),
+      (fmt('CONCAT({alias}_exp.email, {alias}_der.email) {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessCcSD(fmt):
+  """Convert a 'cc' sort directive into SQL."""
+  # Note: derived cc's are included automatically.
+  # Note: This sorts on the best Cc, not all Cc addresses.
+  # Being more exact might require GROUP BY and GROUP_CONCAT().
+  left_joins = [
+      (fmt('Issue2Cc AS {alias} ON Issue.id = {alias}.issue_id '
+           'LEFT JOIN User AS {alias}_user '
+           'ON {alias}.cc_id = {alias}_user.user_id'), [])]
+  order_by = [
+      (fmt('ISNULL({alias}_user.email) {sort_dir}'), []),
+      (fmt('{alias}_user.email {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessComponentSD(fmt):
+  """Convert a 'component' sort directive into SQL."""
+  # Note: derived components are included automatically.
+  # Note: This sorts on the best component, not all of them.
+  # Being more exact might require GROUP BY and GROUP_CONCAT().
+  left_joins = [
+      (fmt('Issue2Component AS {alias} ON Issue.id = {alias}.issue_id '
+           'LEFT JOIN ComponentDef AS {alias}_component '
+           'ON {alias}.component_id = {alias}_component.id'), [])]
+  order_by = [
+      (fmt('ISNULL({alias}_component.path) {sort_dir}'), []),
+      (fmt('{alias}_component.path {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessSummarySD(fmt):
+  """Convert a 'summary' sort directive into SQL."""
+  left_joins = [
+      (fmt('IssueSummary AS {alias} ON Issue.id = {alias}.issue_id'), [])]
+  order_by = [(fmt('{alias}.summary {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessStatusSD(fmt, harmonized_statuses):
+  """Convert a 'status' sort directive into SQL."""
+  left_joins = []
+  # Note: status_def_rows are already ordered by REVERSED rank.
+  wk_status_ids = [
+      stat_id for stat_id, rank, _ in harmonized_statuses
+      if rank is not None]
+  odd_status_ids = [
+      stat_id for stat_id, rank, _ in harmonized_statuses
+      if rank is None]
+  wk_status_ph = sql.PlaceHolders(wk_status_ids)
+  # Even though oddball statuses sort lexicographically, use FIELD to
+  # determine the order so that the database sorts ints rather than strings
+  # for speed.
+  odd_status_ph = sql.PlaceHolders(odd_status_ids)
+
+  order_by = []  # appended to below: both well-known and oddball can apply
+  sort_col = ('IF(ISNULL(Issue.status_id), Issue.derived_status_id, '
+              'Issue.status_id)')
+  # Reverse sort by using rev_sort_dir because we want NULLs at the end.
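+  # Illustrative (ascending sort): if the well-known statuses in rank order
+  # are A, B, C, the reversed-rank ID list is [C, B, A], and
+  # FIELD(status_id, C, B, A) maps C->1, B->2, A->3, and anything else
+  # (including NULL) -> 0.  Ordering by that FIELD() DESC therefore yields
+  # A, B, C in rank order, with unknown statuses and NULLs at the end.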
+  if wk_status_ids:
+    order_by.append(
+        (fmt('FIELD({sort_col}, {wk_status_ph}) {rev_sort_dir}',
+             sort_col=sort_col, wk_status_ph=wk_status_ph),
+         wk_status_ids))
+  if odd_status_ids:
+    order_by.append(
+        (fmt('FIELD({sort_col}, {odd_status_ph}) {rev_sort_dir}',
+             sort_col=sort_col, odd_status_ph=odd_status_ph),
+         odd_status_ids))
+
+  return left_joins, order_by
+
+
+def _ProcessBlockedSD(fmt):
+  """Convert a 'blocked' sort directive into SQL."""
+  left_joins = [
+      (fmt('IssueRelation AS {alias} ON Issue.id = {alias}.issue_id '
+           'AND {alias}.kind = %s'),
+       ['blockedon'])]
+  order_by = [(fmt('ISNULL({alias}.dst_issue_id) {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessBlockedOnSD(fmt):
+  """Convert a 'blockedon' sort directive into SQL."""
+  left_joins = [
+      (fmt('IssueRelation AS {alias} ON Issue.id = {alias}.issue_id '
+           'AND {alias}.kind = %s'),
+       ['blockedon'])]
+  order_by = [(fmt('ISNULL({alias}.dst_issue_id) {sort_dir}'), []),
+              (fmt('{alias}.dst_issue_id {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessBlockingSD(fmt):
+  """Convert a 'blocking' sort directive into SQL."""
+  left_joins = [
+      (fmt('IssueRelation AS {alias} ON Issue.id = {alias}.dst_issue_id '
+           'AND {alias}.kind = %s'),
+       ['blockedon'])]
+  order_by = [(fmt('ISNULL({alias}.issue_id) {sort_dir}'), []),
+              (fmt('{alias}.issue_id {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessMergedIntoSD(fmt):
+  """Convert a 'mergedinto' sort directive into SQL."""
+  left_joins = [
+      (fmt('IssueRelation AS {alias} ON Issue.id = {alias}.issue_id '
+           'AND {alias}.kind = %s'),
+       ['mergedinto'])]
+  order_by = [(fmt('ISNULL({alias}.dst_issue_id) {sort_dir}'), []),
+              (fmt('{alias}.dst_issue_id {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessOwnerLastVisitSD(fmt):
+  """Convert a 'ownerlastvisit' sort directive into SQL."""
+  left_joins = [
+      (fmt('User AS {alias} ON (Issue.owner_id = {alias}.user_id OR '
+           'Issue.derived_owner_id = {alias}.user_id)'), [])]
+  order_by = [
+      (fmt('ISNULL({alias}.last_visit_timestamp) {sort_dir}'), []),
+      (fmt('{alias}.last_visit_timestamp {sort_dir}'), [])]
+  return left_joins, order_by
+
+
+def _ProcessCustomAndLabelSD(
+    sd, harmonized_labels, harmonized_fields, alias, sort_dir, fmt):
+  """Convert a label or custom field sort directive into SQL."""
+  left_joins = []
+  order_by = []
+  phase_name = None
+  # If a custom field is an approval_type with no suffix, the
+  # approvals should be sorted by status.
+  approval_suffix = '-status'
+  approval_fd_list = []
+
+  # Check for reserved suffixes in the sort directive (sd is a col_name).
+  # TODO(jojwang): check for other suffixes in
+  # tracker_constants.RESERVED_COL_NAME_SUFFIXES
+  if sd.endswith(tracker_constants.APPROVER_COL_SUFFIX):
+    field_name = sd[:-len(tracker_constants.APPROVER_COL_SUFFIX)]
+    fd_list = []
+    approval_fd_list = [fd for fd in harmonized_fields
+                        if fd.field_name.lower() == field_name]
+    approval_suffix = tracker_constants.APPROVER_COL_SUFFIX
+  else:
+    field_name = sd
+    if '.' in sd:
+      phase_name, field_name = sd.split('.', 1)
+
+    fd_list = [fd for fd in harmonized_fields
+               if fd.field_name.lower() == field_name]
+    if not phase_name:
+      approval_fd_list = [fd for fd in fd_list if
+                          fd.field_type == tracker_pb2.FieldTypes.APPROVAL_TYPE]
+
+  # 'alias' is used for all the CustomField, Approval, and Label sort clauses.
+  # Custom field aliases are always suffixed with the value_col name.
+  # Approval aliases are always suffixed with 'approval'.
+  # Label clauses use 'alias' as-is.
+  if fd_list:
+    int_left_joins, int_order_by = _CustomFieldSortClauses(
+        fd_list, tracker_pb2.FieldTypes.INT_TYPE, 'int_value',
+        alias, sort_dir, phase_name=phase_name)
+    str_left_joins, str_order_by = _CustomFieldSortClauses(
+        fd_list, tracker_pb2.FieldTypes.STR_TYPE, 'str_value',
+        alias, sort_dir, phase_name=phase_name)
+    user_left_joins, user_order_by = _CustomFieldSortClauses(
+        fd_list, tracker_pb2.FieldTypes.USER_TYPE, 'user_id',
+        alias, sort_dir, phase_name=phase_name)
+    left_joins.extend(int_left_joins + str_left_joins + user_left_joins)
+    order_by.extend(int_order_by + str_order_by + user_order_by)
+
+  if approval_fd_list:
+    approval_left_joins, approval_order_by = _ApprovalFieldSortClauses(
+        approval_fd_list, approval_suffix, fmt)
+    left_joins.extend(approval_left_joins)
+    order_by.extend(approval_order_by)
+
+  label_left_joins, label_order_by = _LabelSortClauses(
+      sd, harmonized_labels, fmt)
+  left_joins.extend(label_left_joins)
+  order_by.extend(label_order_by)
+
+  return left_joins, order_by
+
+
+def _ApprovalFieldSortClauses(
+    approval_fd_list, approval_suffix, fmt):
+  """Give LEFT JOIN and ORDER BY terms for approval sort directives."""
+  approver_left_joins = None
+  if approval_suffix == tracker_constants.APPROVER_COL_SUFFIX:
+    tbl_name = 'IssueApproval2Approver'
+    approver_left_joins = (
+        fmt('User AS {alias}_approval_user '
+            'ON {alias}_approval.approver_id = {alias}_approval_user.user_id'),
+        [])
+    order_by = [
+        (fmt('ISNULL({alias}_approval_user.email) {sort_dir}'), []),
+        (fmt('{alias}_approval_user.email {sort_dir}'), [])]
+  else:
+    tbl_name = 'Issue2ApprovalValue'
+    order_by = [
+        (fmt('FIELD({alias}_approval.status, {approval_status_ph}) '
+             '{rev_sort_dir}',
+             approval_status_ph=sql.PlaceHolders(APPROVAL_STATUS_SORT_ORDER)),
+         APPROVAL_STATUS_SORT_ORDER
+        )]
+
+  left_joins = [(
+      fmt('{tbl_name} AS {alias}_approval '
+          'ON Issue.id = {alias}_approval.issue_id '
+          'AND {alias}_approval.approval_id IN ({approval_ids_ph})',
+          approval_ids_ph=sql.PlaceHolders(approval_fd_list),
+          tbl_name=tbl_name),
+      [fd.field_id for fd in approval_fd_list]
+  )]
+
+  if approver_left_joins:
+    left_joins.append(approver_left_joins)
+
+  return left_joins, order_by
+
+
+def _LabelSortClauses(sd, harmonized_labels, fmt):
+  """Give LEFT JOIN and ORDER BY terms for label sort directives."""
+  # Note: derived labels should work automatically.
+
+  # label_def_rows are already ordered by REVERSED rank.
+  wk_label_ids = [
+      label_id for label_id, rank, label in harmonized_labels
+      if label.lower().startswith('%s-' % sd) and rank is not None]
+  odd_label_ids = [
+      label_id for label_id, rank, label in harmonized_labels
+      if label.lower().startswith('%s-' % sd) and rank is None]
+  all_label_ids = wk_label_ids + odd_label_ids
+
+  if all_label_ids:
+    left_joins = [
+        (fmt('Issue2Label AS {alias} ON Issue.id = {alias}.issue_id '
+             'AND {alias}.label_id IN ({all_label_ph})',
+             all_label_ph=sql.PlaceHolders(all_label_ids)),
+         all_label_ids)]
+  else:
+    left_joins = []
+
+  order_by = []
+  # Reverse sort by using rev_sort_dir because we want NULLs at the end.
+  if wk_label_ids:
+    order_by.append(
+        (fmt('FIELD({alias}.label_id, {wk_label_ph}) {rev_sort_dir}',
+             wk_label_ph=sql.PlaceHolders(wk_label_ids)),
+         wk_label_ids))
+  if odd_label_ids:
+    # Even though oddball labels sort lexicographically, use FIELD to
+    # determine the order so that the database sorts ints rather than
+    # strings for speed.
+    order_by.append(
+        (fmt('FIELD({alias}.label_id, {odd_label_ph}) {rev_sort_dir}',
+             odd_label_ph=sql.PlaceHolders(odd_label_ids)),
+         odd_label_ids))
+
+  return left_joins, order_by
+
+
+def _CustomFieldSortClauses(
+    fd_list, value_type, value_column, alias, sort_dir, phase_name=None):
+  """Give LEFT JOIN and ORDER BY terms for custom fields of the given type."""
+  relevant_fd_list = [fd for fd in fd_list if fd.field_type == value_type]
+  if not relevant_fd_list:
+    return [], []
+
+  field_ids_ph = sql.PlaceHolders(relevant_fd_list)
+  def Fmt(sql_str):
+    return sql_str.format(
+        value_column=value_column, sort_dir=sort_dir,
+        field_ids_ph=field_ids_ph, alias=alias + '_' + value_column,
+        phase_name=phase_name)
+
+  left_joins = [
+      (Fmt('Issue2FieldValue AS {alias} ON Issue.id = {alias}.issue_id '
+           'AND {alias}.field_id IN ({field_ids_ph})'),
+       [fd.field_id for fd in relevant_fd_list])]
+
+  if phase_name:
+    left_joins.append(
+        (Fmt('IssuePhaseDef AS {alias}_phase '
+             'ON {alias}.phase_id = {alias}_phase.id '
+             'AND LOWER({alias}_phase.name) = LOWER(%s)'),
+         [phase_name]))
+
+  if value_type == tracker_pb2.FieldTypes.USER_TYPE:
+    left_joins.append(
+        (Fmt('User AS {alias}_user ON {alias}.user_id = {alias}_user.user_id'),
+         []))
+    order_by = [
+        (Fmt('ISNULL({alias}_user.email) {sort_dir}'), []),
+        (Fmt('{alias}_user.email {sort_dir}'), [])]
+  else:
+    # Unfortunately, this sorts on the best field value, not all of them.
+    order_by = [
+        (Fmt('ISNULL({alias}.{value_column}) {sort_dir}'), []),
+        (Fmt('{alias}.{value_column} {sort_dir}'), [])]
+
+  return left_joins, order_by
+
+
+_PROCESSORS = {
+    'component': _ProcessComponentSD,
+    'project': _ProcessProjectSD,
+    'reporter': _ProcessReporterSD,
+    'owner': _ProcessOwnerSD,
+    'cc': _ProcessCcSD,
+    'summary': _ProcessSummarySD,
+    'blocked': _ProcessBlockedSD,
+    'blockedon': _ProcessBlockedOnSD,
+    'blocking': _ProcessBlockingSD,
+    'mergedinto': _ProcessMergedIntoSD,
+    'ownerlastvisit': _ProcessOwnerLastVisitSD,
+    }
+
+
+def _OneSortDirective(
+    i, sd, harmonized_labels, harmonized_statuses, harmonized_fields):
+  """Return SQL clauses to do the sorting for one sort directive."""
+  alias = 'Sort%d' % i
+  if sd.startswith('-'):
+    sort_dir, rev_sort_dir = 'DESC', 'ASC'
+    sd = sd[1:]
+  else:
+    sort_dir, rev_sort_dir = 'ASC', 'DESC'
+
+  def Fmt(sql_str, **kwargs):
+    return sql_str.format(
+        sort_dir=sort_dir, rev_sort_dir=rev_sort_dir, alias=alias,
+        sd=sd, col=FIELDS_TO_COLUMNS.get(sd, sd), **kwargs)
+
+  if sd in NATIVE_SORTABLE_FIELDS:
+    left_joins = []
+    order_by = [(Fmt('Issue.{col} {sort_dir}'), [])]
+    return left_joins, order_by
+
+  elif sd in _PROCESSORS:
+    proc = _PROCESSORS[sd]
+    return proc(Fmt)
+
+  elif sd == 'status':
+    return _ProcessStatusSD(Fmt, harmonized_statuses)
+  else:  # otherwise, it must be a field or label, or both
+    return _ProcessCustomAndLabelSD(
+        sd, harmonized_labels, harmonized_fields, alias, sort_dir, Fmt)
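+
+
+# Illustrative (hypothetical call): _OneSortDirective(0, '-stars', [], [], [])
+# would use alias 'Sort0', map 'stars' to the Issue column 'star_count', and
+# return ([], [('Issue.star_count DESC', [])]).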
diff --git a/search/backendnonviewable.py b/search/backendnonviewable.py
new file mode 100644
index 0000000..d76eeef
--- /dev/null
+++ b/search/backendnonviewable.py
@@ -0,0 +1,137 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Servlet that searches for issues that the specified user cannot view.
+
+The GET request to a backend has query string parameters for the
+shard_id, a user_id, and a list of project IDs.  It returns a
+JSON-formatted dict with issue_ids that the given user is not allowed to
+view.  As a side-effect, this servlet updates multiple entries
+in memcache, including each "nonviewable:PROJECT_ID;USER_ID;SHARD_ID".
+"""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import logging
+
+from google.appengine.api import memcache
+
+import settings
+from framework import authdata
+from framework import framework_constants
+from framework import framework_helpers
+from framework import jsonfeed
+from framework import permissions
+from framework import sql
+from search import search_helpers
+
+
+# We cache the set of IIDs that a given user cannot view, and we invalidate
+# that set when the issues are changed via Monorail.  Also, we limit the
+# lifetime of those cache entries so that changes in a user's (direct or
+# indirect) roles in a project will take effect.
+NONVIEWABLE_MEMCACHE_EXPIRATION = 15 * framework_constants.SECS_PER_MINUTE
+
+
+class BackendNonviewable(jsonfeed.InternalTask):
+  """JSON servlet for getting issue IDs that the specified user cannot view."""
+
+  CHECK_SAME_APP = True
+
+  def HandleRequest(self, mr):
+    """Get all the user IDs that the specified user cannot view.
+
+    Args:
+      mr: common information parsed from the HTTP request.
+
+    Returns:
+      Results dictionary in JSON format.  The 'nonviewable' key maps to
+      the list of issue IDs that the user cannot view in this shard.
+    """
+    if mr.shard_id is None:
+      return {'message': 'Cannot proceed without a valid shard_id.'}
+    user_id = mr.specified_logged_in_user_id
+    auth = authdata.AuthData.FromUserID(mr.cnxn, user_id, self.services)
+    project_id = mr.specified_project_id
+    project = self.services.project.GetProject(mr.cnxn, project_id)
+
+    perms = permissions.GetPermissions(
+        auth.user_pb, auth.effective_ids, project)
+
+    nonviewable_iids = self.GetNonviewableIIDs(
+      mr.cnxn, auth.user_pb, auth.effective_ids, project, perms, mr.shard_id)
+
+    cached_ts = mr.invalidation_timestep
+    if mr.specified_project_id:
+      memcache.set(
+        'nonviewable:%d;%d;%d' % (project_id, user_id, mr.shard_id),
+        (nonviewable_iids, cached_ts),
+        time=NONVIEWABLE_MEMCACHE_EXPIRATION,
+        namespace=settings.memcache_namespace)
+    else:
+      memcache.set(
+        'nonviewable:all;%d;%d' % (user_id, mr.shard_id),
+        (nonviewable_iids, cached_ts),
+        time=NONVIEWABLE_MEMCACHE_EXPIRATION,
+        namespace=settings.memcache_namespace)
+
+    logging.info('set nonviewable:%s;%d;%d to %r', project_id, user_id,
+                 mr.shard_id, nonviewable_iids)
+
+    return {
+      'nonviewable': nonviewable_iids,
+
+      # These are not used in the frontend, but useful for debugging.
+      'project_id': project_id,
+      'user_id': user_id,
+      'shard_id': mr.shard_id,
+      }
+
+  def GetNonviewableIIDs(
+    self, cnxn, user, effective_ids, project, perms, shard_id):
+    """Return a list of IIDs that the user cannot view in the project shard."""
+    # Project owners and site admins can see all issues.
+    if not perms.consider_restrictions:
+      return []
+
+    # There are two main parts to the computation that we do in parallel:
+    # getting at-risk IIDs and getting OK IIDs.
+    cnxn_2 = sql.MonorailConnection()
+    at_risk_iids_promise = framework_helpers.Promise(
+      self.GetAtRiskIIDs, cnxn_2, user, effective_ids, project, perms, shard_id)
+    ok_iids = self.GetViewableIIDs(
+      cnxn, effective_ids, project.project_id, shard_id)
+    at_risk_iids = at_risk_iids_promise.WaitAndGetValue()
+
+    # The set of non-viewable issues is the at-risk ones minus the ones where
+    # the user is the reporter, owner, CC'd, or granted "View" permission.
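+    # Illustrative: at-risk IIDs {101, 102, 103} minus OK IIDs {102, 104}
+    # leaves nonviewable IIDs {101, 103}.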
+    nonviewable_iids = set(at_risk_iids).difference(ok_iids)
+
+    return list(nonviewable_iids)
+
+  def GetAtRiskIIDs(
+    self, cnxn, user, effective_ids, project, perms, shard_id):
+    # type: (MonorailConnection, proto.user_pb2.User, Sequence[int], Project,
+    #     permission_objects_pb2.PermissionSet, int) -> Sequence[int]
+    """Return IIDs of restricted issues that user might not be able to view."""
+    at_risk_label_ids = search_helpers.GetPersonalAtRiskLabelIDs(
+      cnxn, user, self.services.config, effective_ids, project, perms)
+    at_risk_iids = self.services.issue.GetIIDsByLabelIDs(
+      cnxn, at_risk_label_ids, project.project_id, shard_id)
+
+    return at_risk_iids
+
+  def GetViewableIIDs(self, cnxn, effective_ids, project_id, shard_id):
+    """Return IIDs of issues that user can view because they participate."""
+    # Anon user is never reporter, owner, CC'd or granted perms.
+    if not effective_ids:
+      return []
+
+    ok_iids = self.services.issue.GetIIDsByParticipant(
+      cnxn, effective_ids, [project_id], shard_id)
+
+    return ok_iids
diff --git a/search/backendsearch.py b/search/backendsearch.py
new file mode 100644
index 0000000..53e87ec
--- /dev/null
+++ b/search/backendsearch.py
@@ -0,0 +1,76 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""A servlet that implements the backend of issues search.
+
+The GET request to a backend search has the same query string
+parameters as the issue list servlet.  But, instead of rendering a
+HTML page, the backend search handler returns a JSON response with a
+list of matching, sorted issue IID numbers from this shard that are
+viewable by the requesting user.
+
+Each backend search request works within a single shard, but any
+besearch backend job can handle a request for any shard.
+
+The current user ID must be passed in from the frontend for permission
+checking.  The user ID for the special "me" term can also be passed in
+(so that you can view another user's dashboard and "me" will refer to
+them).
+"""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import logging
+import time
+
+from framework import jsonfeed
+from search import backendsearchpipeline
+from tracker import tracker_constants
+
+
+class BackendSearch(jsonfeed.InternalTask):
+  """JSON servlet for issue search in a GAE backend."""
+
+  CHECK_SAME_APP = True
+  _DEFAULT_RESULTS_PER_PAGE = tracker_constants.DEFAULT_RESULTS_PER_PAGE
+
+  def HandleRequest(self, mr):
+    """Search for issues and respond with the IIDs of matching issues.
+
+    Args:
+      mr: common information parsed from the HTTP request.
+
+    Returns:
+      Results dictionary in JSON format.
+    """
+    # Users are never logged into backends, so the frontends tell us.
+    logging.info('query_project_names is %r', mr.query_project_names)
+    pipeline = backendsearchpipeline.BackendSearchPipeline(
+        mr, self.services, self._DEFAULT_RESULTS_PER_PAGE,
+        mr.query_project_names, mr.specified_logged_in_user_id,
+        mr.specified_me_user_ids)
+    pipeline.SearchForIIDs()
+
+    start = time.time()
+    # Backends work in parallel to precache issues that the
+    # frontend is very likely to need.
+    _prefetched_issues = self.services.issue.GetIssues(
+        mr.cnxn, pipeline.result_iids[:mr.start + mr.num],
+        shard_id=mr.shard_id)
+    logging.info('prefetched and memcached %d issues in %d ms',
+                 len(pipeline.result_iids[:mr.start + mr.num]),
+                 int(1000 * (time.time() - start)))
+
+    if pipeline.error:
+      error_message = pipeline.error.message
+    else:
+      error_message = None
+
+    return {
+        'unfiltered_iids': pipeline.result_iids,
+        'search_limit_reached': pipeline.search_limit_reached,
+        'error': error_message,
+    }
diff --git a/search/backendsearchpipeline.py b/search/backendsearchpipeline.py
new file mode 100644
index 0000000..69fdc6b
--- /dev/null
+++ b/search/backendsearchpipeline.py
@@ -0,0 +1,325 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Backend issue issue search and sorting.
+
+Each of several "besearch" backend jobs manages one shard of the overall set
+of issues in the system. The backend search pipeline retrieves the issues
+that match the user query, puts them into memcache, and returns them to
+the frontend search pipeline.
+"""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import logging
+import re
+import time
+
+from google.appengine.api import memcache
+
+import settings
+from features import savedqueries_helpers
+from framework import authdata
+from framework import framework_constants
+from framework import framework_helpers
+from framework import sorting
+from framework import sql
+from proto import ast_pb2
+from proto import tracker_pb2
+from search import ast2ast
+from search import ast2select
+from search import ast2sort
+from search import query2ast
+from search import searchpipeline
+from services import tracker_fulltext
+from services import fulltext_helpers
+from tracker import tracker_bizobj
+
+
+# Used in constructing the at-risk query.
+AT_RISK_LABEL_RE = re.compile(r'^(restrict-view-.+)$', re.IGNORECASE)
+
+# Limit on the number of list items to show in debug log statements
+MAX_LOG = 200
+
+
+class BackendSearchPipeline(object):
+  """Manage the process of issue search, including Promises and caching.
+
+  Even though the code is divided into several methods, the public
+  methods should be called in sequence, so the execution of the code
+  is pretty much in the order of the source code lines here.
+  """
+
+  def __init__(
+      self, mr, services, default_results_per_page,
+      query_project_names, logged_in_user_id, me_user_ids):
+
+    self.mr = mr
+    self.services = services
+    self.default_results_per_page = default_results_per_page
+
+    self.query_project_list = list(services.project.GetProjectsByName(
+        mr.cnxn, query_project_names).values())
+    self.query_project_ids = [
+        p.project_id for p in self.query_project_list]
+
+    self.me_user_ids = me_user_ids
+    self.mr.auth = authdata.AuthData.FromUserID(
+        mr.cnxn, logged_in_user_id, services)
+
+    # The following fields are filled in as the pipeline progresses.
+    # The value None means that we still need to compute that value.
+    self.result_iids = None  # Sorted issue IDs that match the query
+    self.search_limit_reached = False  # True if search results limit is hit.
+    self.error = None
+
+    self._MakePromises()
+
+  def _MakePromises(self):
+    config_dict = self.services.config.GetProjectConfigs(
+        self.mr.cnxn, self.query_project_ids)
+    self.harmonized_config = tracker_bizobj.HarmonizeConfigs(
+        list(config_dict.values()))
+
+    self.canned_query = savedqueries_helpers.SavedQueryIDToCond(
+        self.mr.cnxn, self.services.features, self.mr.can)
+
+    self.canned_query, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        self.me_user_ids, self.canned_query)
+    self.mr.warnings.extend(warnings)
+    self.user_query, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        self.me_user_ids, self.mr.query)
+    self.mr.warnings.extend(warnings)
+    logging.debug('Searching query: %s %s', self.canned_query, self.user_query)
+
+    slice_term = ('Issue.shard = %s', [self.mr.shard_id])
+
+    sd = sorting.ComputeSortDirectives(
+        self.harmonized_config, self.mr.group_by_spec, self.mr.sort_spec)
+
+    self.result_iids_promise = framework_helpers.Promise(
+        _GetQueryResultIIDs, self.mr.cnxn,
+        self.services, self.canned_query, self.user_query,
+        self.query_project_ids, self.harmonized_config, sd,
+        slice_term, self.mr.shard_id, self.mr.invalidation_timestep)
+
+  def SearchForIIDs(self):
+    """Wait for the search Promises and store their results."""
+    with self.mr.profiler.Phase('WaitOnPromises'):
+      self.result_iids, self.search_limit_reached, self.error = (
+          self.result_iids_promise.WaitAndGetValue())
+
+
+def SearchProjectCan(
+    cnxn, services, project_ids, query_ast, shard_id, harmonized_config,
+    left_joins=None, where=None, sort_directives=None, query_desc=''):
+  """Return a list of issue global IDs in the projects that satisfy the query.
+
+  Args:
+    cnxn: Regular database connection to the primary DB.
+    services: interface to issue storage backends.
+    project_ids: list of int IDs of the project to search
+    query_ast: A QueryAST PB with conjunctions and conditions.
+    shard_id: limit search to the specified shard ID int.
+    harmonized_config: harmonized config for all projects being searched.
+    left_joins: SQL LEFT JOIN clauses that are needed in addition to
+        anything generated from the query_ast.
+    where: SQL WHERE clauses that are needed in addition to
+        anything generated from the query_ast.
+    sort_directives: list of strings specifying the columns to sort on.
+    query_desc: descriptive string for debugging.
+
+  Returns:
+    (issue_ids, capped, error) where issue_ids is a list of issue IDs
+    that satisfy the query, capped is True if the number of results was
+    capped due to an implementation limit, and error is any well-known error
+    (probably a query parsing error) encountered during search.
+  """
+  logging.info('searching projects %r for AST %r', project_ids, query_ast)
+  start_time = time.time()
+  left_joins = left_joins or []
+  where = where or []
+  if project_ids:
+    cond_str = 'Issue.project_id IN (%s)' % sql.PlaceHolders(project_ids)
+    where.append((cond_str, project_ids))
+
+  try:
+    query_ast = ast2ast.PreprocessAST(
+        cnxn, query_ast, project_ids, services, harmonized_config)
+    logging.info('simplified AST is %r', query_ast)
+    query_left_joins, query_where, _ = ast2select.BuildSQLQuery(query_ast)
+    left_joins.extend(query_left_joins)
+    where.extend(query_where)
+  except ast2ast.MalformedQuery as e:
+    # TODO(jrobbins): inform the user that their query had invalid tokens.
+    logging.info('Invalid query tokens %s.\n %r\n\n', e.message, query_ast)
+    return [], False, e
+  except ast2select.NoPossibleResults as e:
+    # TODO(jrobbins): inform the user that their query was impossible.
+    logging.info('Impossible query %s.\n %r\n\n', e.message, query_ast)
+    return [], False, e
+  logging.info('translated to left_joins %r', left_joins)
+  logging.info('translated to where %r', where)
+
+  fts_capped = False
+  if query_ast.conjunctions:
+    # TODO(jrobbins): Handle "OR" in queries.  For now, we just process the
+    # first conjunction.
+    assert len(query_ast.conjunctions) == 1
+    conj = query_ast.conjunctions[0]
+    full_text_iids, fts_capped = tracker_fulltext.SearchIssueFullText(
+        project_ids, conj, shard_id)
+    if full_text_iids is not None:
+      if not full_text_iids:
+        return [], False, None  # No match on fulltext, so don't bother DB.
+      cond_str = 'Issue.id IN (%s)' % sql.PlaceHolders(full_text_iids)
+      where.append((cond_str, full_text_iids))
+
+  label_def_rows = []
+  status_def_rows = []
+  if sort_directives:
+    if project_ids:
+      for pid in project_ids:
+        label_def_rows.extend(services.config.GetLabelDefRows(cnxn, pid))
+        status_def_rows.extend(services.config.GetStatusDefRows(cnxn, pid))
+    else:
+      label_def_rows = services.config.GetLabelDefRowsAnyProject(cnxn)
+      status_def_rows = services.config.GetStatusDefRowsAnyProject(cnxn)
+
+  harmonized_labels = tracker_bizobj.HarmonizeLabelOrStatusRows(
+      label_def_rows)
+  harmonized_statuses = tracker_bizobj.HarmonizeLabelOrStatusRows(
+      status_def_rows)
+  harmonized_fields = harmonized_config.field_defs
+  sort_left_joins, order_by = ast2sort.BuildSortClauses(
+      sort_directives, harmonized_labels, harmonized_statuses,
+      harmonized_fields)
+  logging.info('translated to sort left_joins %r', sort_left_joins)
+  logging.info('translated to order_by %r', order_by)
+
+  issue_ids, db_capped = services.issue.RunIssueQuery(
+      cnxn, left_joins + sort_left_joins, where, order_by, shard_id=shard_id)
+  logging.warn('executed "%s" query %r for %d issues in %dms',
+               query_desc, query_ast, len(issue_ids),
+               int((time.time() - start_time) * 1000))
+  capped = fts_capped or db_capped
+  return issue_ids, capped, None
+
+
+def _FilterSpam(query_ast):
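+  """Ensure spam issues are excluded unless the query mentions 'spam'.
+
+  If the user's query has no condition on the 'spam' field, a
+  [spam != ...] cond is appended so that spam issues are filtered out by
+  default; queries that already mention 'spam' are left untouched.
+  """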
+  uses_spam = False
+  # TODO(jrobbins): Handle "OR" in queries.  For now, we just modify the
+  # first conjunction.
+  conjunction = query_ast.conjunctions[0]
+  for condition in conjunction.conds:
+    for field in condition.field_defs:
+      if field.field_name == 'spam':
+        uses_spam = True
+
+  if not uses_spam:
+    query_ast.conjunctions[0].conds.append(
+        ast_pb2.MakeCond(
+            ast_pb2.QueryOp.NE,
+            [tracker_pb2.FieldDef(
+                field_name='spam',
+                field_type=tracker_pb2.FieldTypes.BOOL_TYPE)
+             ],
+        [], []))
+
+  return query_ast
+
+
+def _GetQueryResultIIDs(
+    cnxn, services, canned_query, user_query,
+    query_project_ids, harmonized_config, sd, slice_term,
+    shard_id, invalidation_timestep):
+  """Do a search and return a list of matching issue IDs.
+
+  Args:
+    cnxn: connection to the database.
+    services: interface to issue storage backends.
+    canned_query: string part of the query from the drop-down menu.
+    user_query: string part of the query that the user typed in.
+    query_project_ids: list of project IDs to search.
+    harmonized_config: combined configs for all the queried projects.
+    sd: list of sort directives.
+    slice_term: additional query term to narrow results to a logical shard
+        within a physical shard.
+    shard_id: int number of the database shard to search.
+    invalidation_timestep: int timestep used to keep memcached items fresh.
+
+  Returns:
+    Tuple consisting of:
+      A list of issue IDs that match the user's query.  An empty list, [],
+      is returned if no issues match the query.
+      A boolean that is set to True if the search results limit of this
+      shard is hit.
+      An error (subclass of Exception) encountered during query processing,
+      or None if no error was encountered.
+  """
+  query_ast = _FilterSpam(query2ast.ParseUserQuery(
+      user_query, canned_query, query2ast.BUILTIN_ISSUE_FIELDS,
+      harmonized_config))
+
+  logging.info('query_project_ids is %r', query_project_ids)
+
+  is_fulltext_query = bool(
+    query_ast.conjunctions and
+    fulltext_helpers.BuildFTSQuery(
+      query_ast.conjunctions[0], tracker_fulltext.ISSUE_FULLTEXT_FIELDS))
+  expiration = framework_constants.CACHE_EXPIRATION
+  if is_fulltext_query:
+    expiration = framework_constants.FULLTEXT_MEMCACHE_EXPIRATION
+
+  # Might raise ast2ast.MalformedQuery or ast2select.NoPossibleResults.
+  result_iids, search_limit_reached, error = SearchProjectCan(
+      cnxn, services, query_project_ids, query_ast, shard_id,
+      harmonized_config, sort_directives=sd, where=[slice_term],
+      query_desc='getting query issue IDs')
+  logging.info('Found %d result_iids', len(result_iids))
+  if error:
+    logging.warn('Got error %r', error)
+
+  projects_str = ','.join(str(pid) for pid in sorted(query_project_ids))
+  projects_str = projects_str or 'all'
+  memcache_key = ';'.join([
+      projects_str, canned_query, user_query, ' '.join(sd), str(shard_id)])
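+  # Illustrative key (hypothetical values): projects '12,34', canned query
+  # 'is:open', user query 'owner:42', sort ['-stars'], and shard 3 would
+  # produce '12,34;is:open;owner:42;-stars;3'.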
+  memcache.set(memcache_key, (result_iids, invalidation_timestep),
+               time=expiration, namespace=settings.memcache_namespace)
+  logging.info('set memcache key %r', memcache_key)
+
+  search_limit_memcache_key = ';'.join([
+      projects_str, canned_query, user_query, ' '.join(sd),
+      'search_limit_reached', str(shard_id)])
+  memcache.set(search_limit_memcache_key,
+               (search_limit_reached, invalidation_timestep),
+               time=expiration, namespace=settings.memcache_namespace)
+  logging.info('set search limit memcache key %r',
+               search_limit_memcache_key)
+
+  timestamps_for_projects = memcache.get_multi(
+      keys=(['%d;%d' % (pid, shard_id) for pid in query_project_ids] +
+            ['all;%d' % shard_id]),
+      namespace=settings.memcache_namespace)
+
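+  # Backfill an invalidation timestep for any project;shard (or all;shard)
+  # key that lacks one, presumably so that later cache-freshness checks
+  # have a baseline to compare against.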
+  if query_project_ids:
+    for pid in query_project_ids:
+      key = '%d;%d' % (pid, shard_id)
+      if key not in timestamps_for_projects:
+        memcache.set(
+            key,
+            invalidation_timestep,
+            time=framework_constants.CACHE_EXPIRATION,
+            namespace=settings.memcache_namespace)
+  else:
+    key = 'all;%d' % shard_id
+    if key not in timestamps_for_projects:
+      memcache.set(
+          key,
+          invalidation_timestep,
+          time=framework_constants.CACHE_EXPIRATION,
+          namespace=settings.memcache_namespace)
+
+  return result_iids, search_limit_reached, error
diff --git a/search/frontendsearchpipeline.py b/search/frontendsearchpipeline.py
new file mode 100644
index 0000000..367c52f
--- /dev/null
+++ b/search/frontendsearchpipeline.py
@@ -0,0 +1,1237 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""The FrontendSearchPipeline class manages issue search and sorting.
+
+The frontend pipeline checks memcache for cached results in each shard.  It
+then calls backend jobs to do any shards that had a cache miss.  On cache hit,
+the cached results must be filtered by permissions, so the at-risk cache and
+backends are consulted.  Next, the sharded results are combined into an overall
+list of IIDs.  Then, that list is paginated and the issues on the current
+pagination page can be shown.  Alternatively, this class can determine just the
+position the currently shown issue would occupy in the overall sorted list.
+"""
+
+from __future__ import division
+from __future__ import print_function
+from __future__ import absolute_import
+
+import json
+
+import collections
+import logging
+import math
+import random
+import time
+
+from google.appengine.api import apiproxy_stub_map
+from google.appengine.api import memcache
+from google.appengine.api import modules
+from google.appengine.api import urlfetch
+
+import settings
+from features import savedqueries_helpers
+from framework import framework_bizobj
+from framework import framework_constants
+from framework import framework_helpers
+from framework import paginate
+from framework import permissions
+from framework import sorting
+from framework import urls
+from search import ast2ast
+from search import query2ast
+from search import searchpipeline
+from services import fulltext_helpers
+from tracker import tracker_bizobj
+from tracker import tracker_constants
+from tracker import tracker_helpers
+
+
+# Fail-fast responses usually finish in less than 50ms.  If we see a failure
+# in under that amount of time, we don't bother logging it.
+FAIL_FAST_LIMIT_SEC = 0.1
+
+DELAY_BETWEEN_RPC_COMPLETION_POLLS = 0.04  # 40 milliseconds
+
+# The choices help balance the cost of choosing samples vs. the cost of
+# selecting issues that are in a range bounded by neighboring samples.
+# Preferred chunk size parameters were determined by experimentation.
+MIN_SAMPLE_CHUNK_SIZE = int(
+    math.sqrt(tracker_constants.DEFAULT_RESULTS_PER_PAGE))
+MAX_SAMPLE_CHUNK_SIZE = int(math.sqrt(settings.search_limit_per_shard))
+PREFERRED_NUM_CHUNKS = 50
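+# Illustrative (hypothetical settings): with DEFAULT_RESULTS_PER_PAGE = 100
+# and search_limit_per_shard = 10000, sample chunk sizes would range from
+# sqrt(100) = 10 to sqrt(10000) = 100 IIDs.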
+
+
+# TODO(jojwang): monorail:4127: combine some url parameters info or
+# query info into dicts or tuples to make argument manager easier.
+class FrontendSearchPipeline(object):
+  """Manage the process of issue search, including backends and caching.
+
+  Even though the code is divided into several methods, the public
+  methods should be called in sequence, so the execution of the code
+  is pretty much in the order of the source code lines here.
+  """
+
+  def __init__(
+      self,
+      cnxn,
+      services,
+      auth,
+      me_user_ids,
+      query,
+      query_project_names,
+      items_per_page,
+      paginate_start,
+      can,
+      group_by_spec,
+      sort_spec,
+      warnings,
+      errors,
+      use_cached_searches,
+      profiler,
+      project=None):
+    self.cnxn = cnxn
+    self.me_user_ids = me_user_ids
+    self.auth = auth
+    self.logged_in_user_id = auth.user_id or 0
+    self.can = can
+    self.items_per_page = items_per_page
+    self.paginate_start = paginate_start
+    self.group_by_spec = group_by_spec
+    self.sort_spec = sort_spec
+    self.warnings = warnings
+    self.use_cached_searches = use_cached_searches
+    self.profiler = profiler
+
+    self.services = services
+    self.pagination = None
+    self.num_skipped_at_start = 0
+    self.total_count = 0
+    self.errors = errors
+
+    self.project_name = ''
+    if project:
+      self.project_name = project.project_name
+    self.query_projects = []
+    if query_project_names:
+      consider_projects = list(services.project.GetProjectsByName(
+        self.cnxn, query_project_names).values())
+      self.query_projects = [
+          p for p in consider_projects
+          if permissions.UserCanViewProject(
+              self.auth.user_pb, self.auth.effective_ids, p)]
+    if project:
+      self.query_projects.append(project)
+    member_of_all_projects = self.auth.user_pb.is_site_admin or all(
+        framework_bizobj.UserIsInProject(p, self.auth.effective_ids)
+        for p in self.query_projects)
+    self.query_project_ids = sorted([
+        p.project_id for p in self.query_projects])
+    self.query_project_names = sorted([
+        p.project_name for p in self.query_projects])
+
+    config_dict = self.services.config.GetProjectConfigs(
+        self.cnxn, self.query_project_ids)
+    self.harmonized_config = tracker_bizobj.HarmonizeConfigs(
+        list(config_dict.values()))
+
+    # The following fields are filled in as the pipeline progresses.
+    # The value None means that we still need to compute that value.
+    # A shard_key is a tuple (shard_id, subquery).
+    self.users_by_id = {}
+    self.nonviewable_iids = {}  # {shard_id: set(iid)}
+    self.unfiltered_iids = {}  # {shard_key: [iid, ...]} needing perm checks.
+    self.filtered_iids = {}  # {shard_key: [iid, ...]} already perm checked.
+    self.search_limit_reached = {}  # {shard_key: [bool, ...]}.
+    self.allowed_iids = []  # Matching iids that user is permitted to view.
+    self.allowed_results = None  # results that the user is permitted to view.
+    self.visible_results = None  # allowed_results on current pagination page.
+    self.error_responses = set()
+
+    error_msg = _CheckQuery(
+        self.cnxn, self.services, query, self.harmonized_config,
+        self.query_project_ids, member_of_all_projects,
+        warnings=self.warnings)
+    if error_msg:
+      self.errors.query = error_msg
+
+    # Split up query into smaller subqueries that would get the same results
+    # to improve performance. Smaller queries are more likely to get cache
+    # hits and subqueries can be parallelized by querying for them across
+    # multiple shards.
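+    # Illustrative (hypothetical query): 'owner:me OR cc:me' would be
+    # split into the subqueries ['owner:me', 'cc:me'], each of which is
+    # cached and dispatched to backends independently.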
+    self.subqueries = []
+    try:
+      self.subqueries = query2ast.QueryToSubqueries(query)
+    except query2ast.InvalidQueryError:
+      # Ignore errors because they've already been recorded in
+      # self.errors.query.
+      pass
+
+  def SearchForIIDs(self):
+    """Use backends to search each shard and store their results."""
+    with self.profiler.Phase('Checking cache and calling Backends'):
+      rpc_tuples = _StartBackendSearch(
+          self.cnxn, self.query_project_names, self.query_project_ids,
+          self.harmonized_config, self.unfiltered_iids,
+          self.search_limit_reached, self.nonviewable_iids,
+          self.error_responses, self.services, self.me_user_ids,
+          self.logged_in_user_id, self.items_per_page + self.paginate_start,
+          self.subqueries, self.can, self.group_by_spec, self.sort_spec,
+          self.warnings, self.use_cached_searches)
+
+    with self.profiler.Phase('Waiting for Backends'):
+      try:
+        _FinishBackendSearch(rpc_tuples)
+      except Exception as e:
+        logging.exception(e)
+        raise
+
+    if self.error_responses:
+      logging.error('%r error responses. Incomplete search results.',
+                    self.error_responses)
+
+    with self.profiler.Phase('Filtering cached results'):
+      for shard_key in self.unfiltered_iids:
+        shard_id, _subquery = shard_key
+        if shard_id not in self.nonviewable_iids:
+          logging.error(
+            'Not displaying shard %r because of no nonviewable_iids', shard_id)
+          self.error_responses.add(shard_id)
+          filtered_shard_iids = []
+        else:
+          unfiltered_shard_iids = self.unfiltered_iids[shard_key]
+          nonviewable_shard_iids = self.nonviewable_iids[shard_id]
+          # TODO(jrobbins): avoid creating large temporary lists.
+          filtered_shard_iids = [iid for iid in unfiltered_shard_iids
+                                 if iid not in nonviewable_shard_iids]
+        self.filtered_iids[shard_key] = filtered_shard_iids
+
+    seen_iids_by_shard_id = collections.defaultdict(set)
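+    # Illustrative: if subquery A on shard 0 returned [3, 1] and subquery B
+    # on shard 0 returned [1, 2], B's list is deduped to [2] here.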
+    with self.profiler.Phase('Deduping result IIDs across shards'):
+      for shard_key in self.filtered_iids:
+        shard_id, _subquery = shard_key
+        deduped = [iid for iid in self.filtered_iids[shard_key]
+                   if iid not in seen_iids_by_shard_id[shard_id]]
+        self.filtered_iids[shard_key] = deduped
+        seen_iids_by_shard_id[shard_id].update(deduped)
+
+    with self.profiler.Phase('Counting all filtered results'):
+      for shard_key in self.filtered_iids:
+        self.total_count += len(self.filtered_iids[shard_key])
+
+    with self.profiler.Phase('Trimming results beyond pagination page'):
+      for shard_key in self.filtered_iids:
+        self.filtered_iids[shard_key] = self.filtered_iids[
+            shard_key][:self.paginate_start + self.items_per_page]
+
+  def MergeAndSortIssues(self):
+    """Merge and sort results from all shards into one combined list."""
+    with self.profiler.Phase('selecting issues to merge and sort'):
+      self._NarrowFilteredIIDs()
+      self.allowed_iids = []
+      for filtered_shard_iids in self.filtered_iids.values():
+        self.allowed_iids.extend(filtered_shard_iids)
+
+    with self.profiler.Phase('getting allowed results'):
+      self.allowed_results = self.services.issue.GetIssues(
+          self.cnxn, self.allowed_iids)
+
+    # Note: At this point, we have results that are only sorted within
+    # each backend's shard.  We still need to sort the merged result.
+    self._LookupNeededUsers(self.allowed_results)
+    with self.profiler.Phase('merging and sorting issues'):
+      self.allowed_results = _SortIssues(
+          self.allowed_results, self.harmonized_config, self.users_by_id,
+          self.group_by_spec, self.sort_spec)
+
+  def _NarrowFilteredIIDs(self):
+    """Combine filtered shards into a range of IIDs for issues to sort.
+
+    The naive way is to concatenate shard_iids[:start + num] for all
+    shards, then select [start:start + num].  We do better by sampling
+    issues and then determining which of those samples are known to
+    come before start or after start+num.  We then trim off all those IIDs
+    and sort a smaller range of IIDs that might actually be displayed.
+    See the design doc at go/monorail-sorting.
+
+    This method modifies self.filtered_iids and self.num_skipped_at_start.
+    """
+    # Sample issues and skip those that are known to come before start.
+    # See the "Sorting in Monorail" design doc.
+
+    # If the result set is small, don't bother optimizing it.
+    orig_length = _TotalLength(self.filtered_iids)
+    if orig_length < self.items_per_page * 4:
+      return
+
+    # 1. Get sample issues in each shard and sort them all together.
+    last = self.paginate_start + self.items_per_page
+
+    samples_by_shard, sample_iids_to_shard = self._FetchAllSamples(
+        self.filtered_iids)
+    sample_issues = []
+    for issue_dict in samples_by_shard.values():
+      sample_issues.extend(list(issue_dict.values()))
+
+    self._LookupNeededUsers(sample_issues)
+    sample_issues = _SortIssues(
+        sample_issues, self.harmonized_config, self.users_by_id,
+        self.group_by_spec, self.sort_spec)
+    sample_iid_tuples = [
+        (issue.issue_id, sample_iids_to_shard[issue.issue_id])
+        for issue in sample_issues]
+
+    # 2. Trim off some IIDs that are sure to be positioned after last.
+    num_trimmed_end = _TrimEndShardedIIDs(
+        self.filtered_iids, sample_iid_tuples, last)
+    logging.info('Trimmed %r issues from the end of shards', num_trimmed_end)
+
+    # 3. Trim off some IIDs that are sure to be positioned before start.
+    keep = _TotalLength(self.filtered_iids) - self.paginate_start
+    # Reverse the sharded lists.
+    _ReverseShards(self.filtered_iids)
+    sample_iid_tuples.reverse()
+    self.num_skipped_at_start = _TrimEndShardedIIDs(
+        self.filtered_iids, sample_iid_tuples, keep)
+    logging.info('Trimmed %r issues from the start of shards',
+                 self.num_skipped_at_start)
+    # Reverse sharded lists again to get back into forward order.
+    _ReverseShards(self.filtered_iids)
+
+  def DetermineIssuePosition(self, issue):
+    """Calculate info needed to show the issue flipper.
+
+    Args:
+      issue: The issue currently being viewed.
+
+    Returns:
+      A 3-tuple (prev_iid, index, next_iid) where prev_iid is the
+      IID of the previous issue in the total ordering (or None),
+      index is the index that the current issue has in the total
+      ordering, and next_iid is the next issue (or None).  If the current
+      issue is not in the list of results at all, returns None, None, None.
+    """
+    # 1. If the current issue is not in the results at all, then exit.
+    if not any(issue.issue_id in filtered_shard_iids
+               for filtered_shard_iids in self.filtered_iids.values()):
+      return None, None, None
+
+    # 2. Choose and retrieve sample issues in each shard.
+    samples_by_shard, _ = self._FetchAllSamples(self.filtered_iids)
+
+    # 3. Build up partial results for each shard.
+    preceding_counts = {}  # dict {shard_key: num_issues_preceding_current}
+    prev_candidates, next_candidates = [], []
+    for shard_key in self.filtered_iids:
+      prev_candidate, index_in_shard, next_candidate = (
+          self._DetermineIssuePositionInShard(
+              shard_key, issue, samples_by_shard[shard_key]))
+      preceding_counts[shard_key] = index_in_shard
+      if prev_candidate:
+        prev_candidates.append(prev_candidate)
+      if next_candidate:
+        next_candidates.append(next_candidate)
+
+    # 4. Combine the results.
+    index = sum(preceding_counts.values())
+    prev_candidates = _SortIssues(
+        prev_candidates, self.harmonized_config, self.users_by_id,
+        self.group_by_spec, self.sort_spec)
+    prev_iid = prev_candidates[-1].issue_id if prev_candidates else None
+    next_candidates = _SortIssues(
+        next_candidates, self.harmonized_config, self.users_by_id,
+        self.group_by_spec, self.sort_spec)
+    next_iid = next_candidates[0].issue_id if next_candidates else None
+
+    return prev_iid, index, next_iid
+
+  def _DetermineIssuePositionInShard(self, shard_key, issue, sample_dict):
+    """Determine where the given issue would fit into results from a shard."""
+    # See the design doc for details.  Basically, it first surveys the results
+    # to bound a range where the given issue would belong, then it fetches the
+    # issues in that range and sorts them.
+
+    filtered_shard_iids = self.filtered_iids[shard_key]
+
+    # 1. Select a sample of issues, leveraging ones we have in RAM already.
+    issues_on_hand = list(sample_dict.values())
+    if issue.issue_id not in sample_dict:
+      issues_on_hand.append(issue)
+
+    self._LookupNeededUsers(issues_on_hand)
+    sorted_on_hand = _SortIssues(
+        issues_on_hand, self.harmonized_config, self.users_by_id,
+        self.group_by_spec, self.sort_spec)
+    sorted_on_hand_iids = [soh.issue_id for soh in sorted_on_hand]
+    index_in_on_hand = sorted_on_hand_iids.index(issue.issue_id)
+
+    # 2. Bound the gap around where issue belongs.
+    if index_in_on_hand == 0:
+      fetch_start = 0
+    else:
+      prev_on_hand_iid = sorted_on_hand_iids[index_in_on_hand - 1]
+      fetch_start = filtered_shard_iids.index(prev_on_hand_iid) + 1
+
+    if index_in_on_hand == len(sorted_on_hand) - 1:
+      fetch_end = len(filtered_shard_iids)
+    else:
+      next_on_hand_iid = sorted_on_hand_iids[index_in_on_hand + 1]
+      fetch_end = filtered_shard_iids.index(next_on_hand_iid)
+
+    # 3. Retrieve all the issues in that gap to get an exact answer.
+    fetched_issues = self.services.issue.GetIssues(
+        self.cnxn, filtered_shard_iids[fetch_start:fetch_end])
+    if issue.issue_id not in filtered_shard_iids[fetch_start:fetch_end]:
+      fetched_issues.append(issue)
+    self._LookupNeededUsers(fetched_issues)
+    sorted_fetched = _SortIssues(
+        fetched_issues, self.harmonized_config, self.users_by_id,
+        self.group_by_spec, self.sort_spec)
+    sorted_fetched_iids = [sf.issue_id for sf in sorted_fetched]
+    index_in_fetched = sorted_fetched_iids.index(issue.issue_id)
+
+    # 4. Find the issues that come immediately before and after the place where
+    # the given issue would belong in this shard.
+    if index_in_fetched > 0:
+      prev_candidate = sorted_fetched[index_in_fetched - 1]
+    elif index_in_on_hand > 0:
+      prev_candidate = sorted_on_hand[index_in_on_hand - 1]
+    else:
+      prev_candidate = None
+
+    if index_in_fetched < len(sorted_fetched) - 1:
+      next_candidate = sorted_fetched[index_in_fetched + 1]
+    elif index_in_on_hand < len(sorted_on_hand) - 1:
+      next_candidate = sorted_on_hand[index_in_on_hand + 1]
+    else:
+      next_candidate = None
+
+    return prev_candidate, fetch_start + index_in_fetched, next_candidate
+
+  def _FetchAllSamples(self, filtered_iids):
+    """Return a dict {shard_key: {iid: sample_issue}}."""
+    samples_by_shard = {}  # {shard_key: {iid: sample_issue}}
+    sample_iids_to_shard = {}  # {iid: shard_key}
+    all_needed_iids = []  # List of iids to retrieve.
+
+    for shard_key in filtered_iids:
+      on_hand_issues, shard_needed_iids = self._ChooseSampleIssues(
+          filtered_iids[shard_key])
+      samples_by_shard[shard_key] = on_hand_issues
+      for iid in on_hand_issues:
+        sample_iids_to_shard[iid] = shard_key
+      for iid in shard_needed_iids:
+        sample_iids_to_shard[iid] = shard_key
+      all_needed_iids.extend(shard_needed_iids)
+
+    retrieved_samples, _misses = self.services.issue.GetIssuesDict(
+        self.cnxn, all_needed_iids)
+    for retrieved_iid, retrieved_issue in retrieved_samples.items():
+      retr_shard_key = sample_iids_to_shard[retrieved_iid]
+      samples_by_shard[retr_shard_key][retrieved_iid] = retrieved_issue
+
+    return samples_by_shard, sample_iids_to_shard
+
+  def _ChooseSampleIssues(self, issue_ids):
+    """Select a scattering of issues from the list, leveraging RAM cache.
+
+    Args:
+      issue_ids: A list of issue IDs that comprise the results in a shard.
+
+    Returns:
+      A pair (on_hand_issues, needed_iids) where on_hand_issues is
+      an issue dict {iid: issue} of issues already in RAM, and
+      needed_iids is a list of iids of issues that need to be retrieved.
+    """
+    on_hand_issues = {}  # {iid: issue} of sample issues already in RAM.
+    needed_iids = []  # [iid, ...] of sample issues not in RAM yet.
+    chunk_size = max(MIN_SAMPLE_CHUNK_SIZE, min(MAX_SAMPLE_CHUNK_SIZE,
+        int(len(issue_ids) // PREFERRED_NUM_CHUNKS)))
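+    # Illustration with hypothetical constants (MIN_SAMPLE_CHUNK_SIZE=10,
+    # MAX_SAMPLE_CHUNK_SIZE=100, PREFERRED_NUM_CHUNKS=50): for 1,000 issue
+    # IDs, chunk_size = max(10, min(100, 20)) = 20, so one sample is drawn
+    # from each range [20, 40), [40, 60), ..., [980, 1000).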
+    for i in range(chunk_size, len(issue_ids), chunk_size):
+      issue = self.services.issue.GetAnyOnHandIssue(
+          issue_ids, start=i, end=min(i + chunk_size, len(issue_ids)))
+      if issue:
+        on_hand_issues[issue.issue_id] = issue
+      else:
+        needed_iids.append(issue_ids[i])
+
+    return on_hand_issues, needed_iids
+
+  def _LookupNeededUsers(self, issues):
+    """Look up user info needed to sort issues, if any."""
+    with self.profiler.Phase('lookup of owner, reporter, and cc'):
+      additional_user_views_by_id = (
+          tracker_helpers.MakeViewsForUsersInIssues(
+              self.cnxn, issues, self.services.user,
+              omit_ids=list(self.users_by_id.keys())))
+      self.users_by_id.update(additional_user_views_by_id)
+
+  def Paginate(self):
+    """Fetch matching issues and paginate the search results.
+
+    These two actions are intertwined because we try to only
+    retrieve the Issues on the current pagination page.
+    """
+    # We already got the issues, just display a slice of the visible ones.
+    limit_reached = False
+    for shard_limit_reached in self.search_limit_reached.values():
+      limit_reached |= shard_limit_reached
+    self.pagination = paginate.ArtifactPagination(
+        self.allowed_results,
+        self.items_per_page,
+        self.paginate_start,
+        self.project_name,
+        urls.ISSUE_LIST,
+        total_count=self.total_count,
+        limit_reached=limit_reached,
+        skipped=self.num_skipped_at_start)
+    self.visible_results = self.pagination.visible_results
+
+    # If we were not forced to look up visible users already, do it now.
+    self._LookupNeededUsers(self.visible_results)
+
+  def __repr__(self):
+    """Return a string that shows the internal state of this pipeline."""
+    if self.allowed_iids:
+      shown_allowed_iids = self.allowed_iids[:200]
+    else:
+      shown_allowed_iids = self.allowed_iids
+
+    if self.allowed_results:
+      shown_allowed_results = self.allowed_results[:200]
+    else:
+      shown_allowed_results = self.allowed_results
+
+    parts = [
+        'allowed_iids: %r' % shown_allowed_iids,
+        'allowed_results: %r' % shown_allowed_results,
+        'len(visible_results): %r' % (
+            self.visible_results and len(self.visible_results))]
+    return '%s(%s)' % (self.__class__.__name__, '\n'.join(parts))
+
+
+def _CheckQuery(
+    cnxn, services, query, harmonized_config, project_ids,
+    member_of_all_projects, warnings=None):
+  """Parse the given query and report the first error or None."""
+  try:
+    query_ast = query2ast.ParseUserQuery(
+        query, '', query2ast.BUILTIN_ISSUE_FIELDS, harmonized_config,
+        warnings=warnings)
+    query_ast = ast2ast.PreprocessAST(
+        cnxn, query_ast, project_ids, services, harmonized_config,
+        is_member=member_of_all_projects)
+  except query2ast.InvalidQueryError as e:
+    return e.message
+  except ast2ast.MalformedQuery as e:
+    return e.message
+
+  return None
+
+
+def _MakeBackendCallback(func, *args):
+  # type: (Callable[[*Any], Any], *Any) -> Callable[[*Any], Any]
+  """Helper to store a particular function and argument set into a callback.
+
+  Args:
+    func: Function to callback.
+    *args: The arguments to pass into the function.
+
+  Returns:
+    Callback function based on specified arguments.
+  """
+  return lambda: func(*args)
+
+
+def _StartBackendSearch(
+    cnxn, query_project_names, query_project_ids, harmonized_config,
+    unfiltered_iids_dict, search_limit_reached_dict, nonviewable_iids,
+    error_responses, services, me_user_ids, logged_in_user_id, new_url_num,
+    subqueries, can, group_by_spec, sort_spec, warnings, use_cached_searches):
+  # type: (MonorailConnection, Sequence[str], Sequence[int],
+  #     proto.tracker_pb2.ProjectIssueConfig,
+  #     Mapping[Tuple(int, str), Sequence[int]],
+  #     Mapping[Tuple(int, str), Sequence[bool]],
+  #     Mapping[Tuple(int, str), Collection[int]], Sequence[Tuple(int, str)],
+  #     Services, Sequence[int], int, int, Sequence[str], int, str, str,
+  #     Sequence[Tuple(str, Sequence[str])], bool) ->
+  #     Sequence[Tuple(int, Tuple(int, str),
+  #         google.appengine.api.apiproxy_stub_map.UserRPC)]
+  """Request that our backends search and return a list of matching issue IDs.
+
+  Args:
+    cnxn: monorail connection to the database.
+    query_project_names: set of project names to search.
+    query_project_ids: list of project IDs to search.
+    harmonized_config: combined ProjectIssueConfig for all projects being
+        searched.
+    unfiltered_iids_dict: dict {shard_key: [iid, ...]} of unfiltered search
+        results to accumulate into.  They need to be later filtered by
+        permissions and merged into filtered_iids_dict.
+    search_limit_reached_dict: dict {shard_key: [bool, ...]} to determine if
+        the search limit of any shard was reached.
+    nonviewable_iids: dict {shard_id: set(iid)} of restricted issues in the
+        projects being searched that the signed in user cannot view.
+    error_responses: set of shard keys for shards that encountered errors.
+    services: connections to backends.
+    me_user_ids: Empty list when no user is logged in, or user ID of the logged
+        in user when doing an interactive search, or the viewed user ID when
+        viewing someone else's dashboard, or the subscribing user's ID when
+        evaluating subscriptions.  And, any linked accounts.
+    logged_in_user_id: user_id of the logged in user, 0 otherwise
+    new_url_num: the number of issues for BackendSearchPipeline to query.
+        Computed based on pagination offset + number of items per page.
+    subqueries: split up list of query string segments.
+    can: "canned query" number to scope the user's search.
+    group_by_spec: string that lists the grouping order.
+    sort_spec: string that lists the sort order.
+    warnings: list to accumulate warning messages.
+    use_cached_searches: Bool for whether to use cached searches.
+
+  Returns:
+    A list of rpc_tuples that can be passed to _FinishBackendSearch to wait
+    on any remaining backend calls.
+
+  SIDE-EFFECTS:
+    Any data found in memcache is immediately put into unfiltered_iids_dict.
+    As the backends finish their work, _HandleBackendSearchResponse will update
+    unfiltered_iids_dict for those shards.
+
+    Any warnings produced throughout this process will be added to the list
+    warnings.
+  """
+  rpc_tuples = []
+  needed_shard_keys = set()
+  for subquery in subqueries:
+    subquery, subquery_warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        me_user_ids, subquery)
+    warnings.extend(subquery_warnings)
+    for shard_id in range(settings.num_logical_shards):
+      needed_shard_keys.add((shard_id, subquery))
+
+  # 1. Get whatever we can from memcache.  Cache hits are only kept if they are
+  # not already expired.
+  project_shard_timestamps = _GetProjectTimestamps(
+      query_project_ids, needed_shard_keys)
+
+  if use_cached_searches:
+    cached_unfiltered_iids_dict, cached_search_limit_reached_dict = (
+        _GetCachedSearchResults(
+            cnxn, query_project_ids, needed_shard_keys,
+            harmonized_config, project_shard_timestamps, services, me_user_ids,
+            can, group_by_spec, sort_spec, warnings))
+    unfiltered_iids_dict.update(cached_unfiltered_iids_dict)
+    search_limit_reached_dict.update(cached_search_limit_reached_dict)
+  for cache_hit_shard_key in unfiltered_iids_dict:
+    needed_shard_keys.remove(cache_hit_shard_key)
+
+  # 2. Each kept cache hit will have unfiltered IIDs, so we filter them by
+  # removing non-viewable IDs.
+  _GetNonviewableIIDs(
+    query_project_ids, logged_in_user_id,
+    set(range(settings.num_logical_shards)),
+    rpc_tuples, nonviewable_iids, project_shard_timestamps,
+    services.cache_manager.processed_invalidations_up_to,
+    use_cached_searches)
+
+  # 3. Hit backends for any shards that are still needed.  When these results
+  # come back, they are also put into unfiltered_iids_dict.
+  for shard_key in needed_shard_keys:
+    rpc = _StartBackendSearchCall(
+        query_project_names,
+        shard_key,
+        services.cache_manager.processed_invalidations_up_to,
+        me_user_ids,
+        logged_in_user_id,
+        new_url_num,
+        can=can,
+        sort_spec=sort_spec,
+        group_by_spec=group_by_spec)
+    rpc_tuple = (time.time(), shard_key, rpc)
+    rpc.callback = _MakeBackendCallback(
+        _HandleBackendSearchResponse, query_project_names, rpc_tuple,
+        rpc_tuples, settings.backend_retries, unfiltered_iids_dict,
+        search_limit_reached_dict,
+        services.cache_manager.processed_invalidations_up_to, error_responses,
+        me_user_ids, logged_in_user_id, new_url_num, can, sort_spec,
+        group_by_spec)
+    rpc_tuples.append(rpc_tuple)
+
+  return rpc_tuples
+
+
+def _FinishBackendSearch(rpc_tuples):
+  """Wait for all backend calls to complete, including any retries."""
+  while rpc_tuples:
+    active_rpcs = [rpc for (_time, _shard_key, rpc) in rpc_tuples]
+    # Wait for any active RPC to complete.  Its callback function will
+    # be called automatically.
+    finished_rpc = real_wait_any(active_rpcs)
+    # Figure out which rpc_tuple finished and remove it from our list.
+    for rpc_tuple in rpc_tuples:
+      _time, _shard_key, rpc = rpc_tuple
+      if rpc == finished_rpc:
+        rpc_tuples.remove(rpc_tuple)
+        break
+    else:
+      raise ValueError('We somehow finished an RPC that is not in rpc_tuples')
+
+
+def real_wait_any(active_rpcs):
+  """Work around the blocking nature of wait_any().
+
+  wait_any() checks for any finished RPCs, and returns one if found.
+  If no RPC is finished, it simply blocks on the last RPC in the list.
+  This is not the desired behavior because we are not able to detect
+  FAST-FAIL RPC results and retry them if wait_any() is blocked on a
+  request that is taking a long time to do actual work.
+
+  Instead, we do the same check, without blocking on any individual RPC.
+  """
+  if settings.local_mode:
+    # The development server has very different code for RPCs than the
+    # code used in the hosted environment.
+    return apiproxy_stub_map.UserRPC.wait_any(active_rpcs)
+  while True:
+    finished, _ = apiproxy_stub_map.UserRPC._UserRPC__check_one(active_rpcs)
+    if finished:
+      return finished
+    time.sleep(DELAY_BETWEEN_RPC_COMPLETION_POLLS)
+
+
+def _GetProjectTimestamps(query_project_ids, needed_shard_keys):
+  """Get a dict of modified_ts values for all specified project-shards."""
+  project_shard_timestamps = {}
+  if query_project_ids:
+    keys = []
+    for pid in query_project_ids:
+      for sid, _subquery in needed_shard_keys:
+        keys.append('%d;%d' % (pid, sid))
+  else:
+    keys = [('all;%d' % sid)
+            for sid, _subquery in needed_shard_keys]
+
+  timestamps_for_project = memcache.get_multi(
+      keys=keys, namespace=settings.memcache_namespace)
+  for key, timestamp in timestamps_for_project.items():
+    pid_str, sid_str = key.split(';')
+    if pid_str == 'all':
+      project_shard_timestamps['all', int(sid_str)] = timestamp
+    else:
+      project_shard_timestamps[int(pid_str), int(sid_str)] = timestamp
+
+  return project_shard_timestamps
+
+
+def _GetNonviewableIIDs(
+    query_project_ids, logged_in_user_id, needed_shard_ids, rpc_tuples,
+    nonviewable_iids, project_shard_timestamps, invalidation_timestep,
+    use_cached_searches):
+  """Build a set of at-risk IIDs, and accumulate RPCs to get uncached ones."""
+  if query_project_ids:
+    keys = []
+    for pid in query_project_ids:
+      for sid in needed_shard_ids:
+        keys.append('%d;%d;%d' % (pid, logged_in_user_id, sid))
+  else:
+    keys = [
+        ('all;%d;%d' % (logged_in_user_id, sid)) for sid in needed_shard_ids
+    ]
+
+  if use_cached_searches:
+    cached_dict = memcache.get_multi(
+        keys, key_prefix='nonviewable:', namespace=settings.memcache_namespace)
+  else:
+    cached_dict = {}
+
+  for sid in needed_shard_ids:
+    if query_project_ids:
+      for pid in query_project_ids:
+        _AccumulateNonviewableIIDs(
+            pid, logged_in_user_id, sid, cached_dict, nonviewable_iids,
+            project_shard_timestamps, rpc_tuples, invalidation_timestep)
+    else:
+      _AccumulateNonviewableIIDs(
+          None, logged_in_user_id, sid, cached_dict, nonviewable_iids,
+          project_shard_timestamps, rpc_tuples, invalidation_timestep)
+
+
+def _AccumulateNonviewableIIDs(
+    pid, logged_in_user_id, sid, cached_dict, nonviewable_iids,
+    project_shard_timestamps, rpc_tuples, invalidation_timestep):
+  """Use one of the retrieved cache entries or call a backend if needed."""
+  if pid is None:
+    key = 'all;%d;%d' % (logged_in_user_id, sid)
+  else:
+    key = '%d;%d;%d' % (pid, logged_in_user_id, sid)
+
+  if key in cached_dict:
+    issue_ids, cached_ts = cached_dict.get(key)
+    modified_ts = project_shard_timestamps.get((pid, sid))
+    if modified_ts is None or modified_ts > cached_ts:
+      logging.info('nonviewable too stale on (project %r, shard %r)',
+                   pid, sid)
+    else:
+      logging.info('adding %d nonviewable issue_ids', len(issue_ids))
+      nonviewable_iids[sid] = set(issue_ids)
+
+  if sid not in nonviewable_iids:
+    logging.info('nonviewable for %r not found', key)
+    logging.info('starting backend call for nonviewable iids %r', key)
+    rpc = _StartBackendNonviewableCall(
+      pid, logged_in_user_id, sid, invalidation_timestep)
+    rpc_tuple = (time.time(), sid, rpc)
+    rpc.callback = _MakeBackendCallback(
+        _HandleBackendNonviewableResponse, pid, logged_in_user_id, sid,
+        rpc_tuple, rpc_tuples, settings.backend_retries, nonviewable_iids,
+        invalidation_timestep)
+    rpc_tuples.append(rpc_tuple)
+
+
+def _GetCachedSearchResults(
+    cnxn, query_project_ids, needed_shard_keys, harmonized_config,
+    project_shard_timestamps, services, me_user_ids, can, group_by_spec,
+    sort_spec, warnings):
+  """Return a dict of cached search results that are not already stale.
+
+  If it were not for cross-project search, we would simply cache when we do a
+  search and then invalidate when an issue is modified.  But, with
+  cross-project search we don't know all the memcache entries that would
+  need to be invalidated.  So, instead, we write the search result cache
+  entries and then an initial modified_ts value for each project if it was
+  not already there. And, when we update an issue we write a new
+  modified_ts entry, which implicitly invalidates all search result
+  cache entries that were written earlier because they are now stale.  When
+  reading from the cache, we ignore any query project with modified_ts
+  after its search result cache timestamp, because it is stale.
+
+  Args:
+    cnxn: monorail connection to the database.
+    query_project_ids: list of project ID numbers for all projects being
+        searched.
+    needed_shard_keys: set of shard keys that need to be checked.
+    harmonized_config: ProjectIssueConfig with combined information for all
+        projects involved in this search.
+    project_shard_timestamps: a dict {(project_id, shard_id): timestamp, ...}
+        that tells when each shard was last invalidated.
+    services: connections to backends.
+    me_user_ids: Empty list when no user is logged in, or user ID of the logged
+        in user when doing an interactive search, or the viewed user ID when
+        viewing someone else's dashboard, or the subscribing user's ID when
+        evaluating subscriptions.  And, any linked accounts.
+    can: "canned query" number to scope the user's search.
+    group_by_spec: string that lists the grouping order.
+    sort_spec: string that lists the sort order.
+    warnings: list to accumulate warning messages.
+
+  Returns:
+    Tuple consisting of:
+      A dictionary {shard_id: [issue_id, ...], ...} of unfiltered search result
+      issue IDs. Only shard_ids found in memcache will be in that dictionary.
+      The result issue IDs must be permission checked before they can be
+      considered to be part of the user's result set.
+      A dictionary {shard_id: bool, ...}. The boolean is set to True if
+      the search results limit of the shard is hit.
+  """
+  projects_str = ','.join(str(pid) for pid in sorted(query_project_ids))
+  projects_str = projects_str or 'all'
+  canned_query = savedqueries_helpers.SavedQueryIDToCond(
+      cnxn, services.features, can)
+  canned_query, canned_query_warnings = (
+      searchpipeline.ReplaceKeywordsWithUserIDs(me_user_ids, canned_query))
+  warnings.extend(canned_query_warnings)
+
+  sd = sorting.ComputeSortDirectives(
+      harmonized_config, group_by_spec, sort_spec)
+  sd_str = ' '.join(sd)
+  memcache_key_prefix = '%s;%s' % (projects_str, canned_query)
+  limit_reached_key_prefix = '%s;%s' % (projects_str, canned_query)
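+  # For example (hypothetical values): searching projects 12 and 34 with
+  # canned query 'status:open', subquery 'owner:me', sort directives
+  # 'priority id', and shard 3 reads the memcache key
+  # '12,34;status:open;owner:me;priority id;3'.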
+
+  cached_dict = memcache.get_multi(
+      ['%s;%s;%s;%d' % (memcache_key_prefix, subquery, sd_str, sid)
+       for sid, subquery in needed_shard_keys],
+      namespace=settings.memcache_namespace)
+  cached_search_limit_reached_dict = memcache.get_multi(
+      ['%s;%s;%s;search_limit_reached;%d' % (
+          limit_reached_key_prefix, subquery, sd_str, sid)
+       for sid, subquery in needed_shard_keys],
+      namespace=settings.memcache_namespace)
+
+  unfiltered_dict = {}
+  search_limit_reached_dict = {}
+  for shard_key in needed_shard_keys:
+    shard_id, subquery = shard_key
+    memcache_key = '%s;%s;%s;%d' % (
+        memcache_key_prefix, subquery, sd_str, shard_id)
+    limit_reached_key = '%s;%s;%s;search_limit_reached;%d' % (
+        limit_reached_key_prefix, subquery, sd_str, shard_id)
+    if memcache_key not in cached_dict:
+      logging.info('memcache miss on shard %r', shard_key)
+      continue
+
+    cached_iids, cached_ts = cached_dict[memcache_key]
+    if cached_search_limit_reached_dict.get(limit_reached_key):
+      search_limit_reached, _ = cached_search_limit_reached_dict[
+          limit_reached_key]
+    else:
+      search_limit_reached = False
+
+    stale = False
+    if query_project_ids:
+      for project_id in query_project_ids:
+        modified_ts = project_shard_timestamps.get((project_id, shard_id))
+        if modified_ts is None or modified_ts > cached_ts:
+          stale = True
+          logging.info('memcache too stale on shard %r because of %r',
+                       shard_id, project_id)
+          break
+    else:
+      modified_ts = project_shard_timestamps.get(('all', shard_id))
+      if modified_ts is None or modified_ts > cached_ts:
+        stale = True
+        logging.info('memcache too stale on shard %r because of all',
+                     shard_id)
+
+    if not stale:
+      unfiltered_dict[shard_key] = cached_iids
+      search_limit_reached_dict[shard_key] = search_limit_reached
+
+  return unfiltered_dict, search_limit_reached_dict
+
+
+def _MakeBackendRequestHeaders(failfast):
+  headers = {
+    # This is needed to allow frontends to talk to backends without going
+    # through a login screen on googleplex.com.
+    # http://wiki/Main/PrometheusInternal#Internal_Applications_and_APIs
+    'X-URLFetch-Service-Id': 'GOOGLEPLEX',
+    }
+  if failfast:
+    headers['X-AppEngine-FailFast'] = 'Yes'
+  return headers
+
+
+def _StartBackendSearchCall(
+    query_project_names,
+    shard_key,
+    invalidation_timestep,
+    me_user_ids,
+    logged_in_user_id,
+    new_url_num,
+    can=None,
+    sort_spec=None,
+    group_by_spec=None,
+    deadline=None,
+    failfast=True):
+  # type: (Sequence[str], Tuple(int, str), int, Sequence[int], int,
+  #     int, int, str, str, int, bool) ->
+  #     google.appengine.api.apiproxy_stub_map.UserRPC
+  """Ask a backend to query one shard of the database.
+
+  Args:
+    query_project_names: List of project names queried.
+    shard_key: Tuple specifying which DB shard to query.
+    invalidation_timestep: int timestep used to keep cached items fresh.
+    me_user_ids: Empty list when no user is logged in, or user ID of the logged
+        in user when doing an interactive search, or the viewed user ID when
+        viewing someone else's dashboard, or the subscribing user's ID when
+        evaluating subscriptions.  And, any linked accounts.
+    logged_in_user_id: Id of the logged in user.
+    new_url_num: the number of issues for BackendSearchPipeline to query.
+        Computed based on pagination offset + number of items per page.
+    can: ID of the canned query to use.
+    sort_spec: Str specifying how issues should be sorted.
+    group_by_spec: Str specifying how issues should be grouped.
+    deadline: Max time for the RPC to take before failing.
+    failfast: Whether to set the X-AppEngine-FailFast request header.
+
+  Returns:
+    UserRPC for the created RPC call.
+  """
+  shard_id, subquery = shard_key
+  backend_host = modules.get_hostname(module='besearch')
+  url = 'http://%s%s' % (
+      backend_host,
+      framework_helpers.FormatURL(
+          [],
+          urls.BACKEND_SEARCH,
+          projects=','.join(query_project_names),
+          q=subquery,
+          start=0,
+          num=new_url_num,
+          can=can,
+          sort=sort_spec,
+          groupby=group_by_spec,
+          logged_in_user_id=logged_in_user_id,
+          me_user_ids=','.join(str(uid) for uid in me_user_ids),
+          shard_id=shard_id,
+          invalidation_timestep=invalidation_timestep))
+  logging.info('\n\nCalling backend: %s', url)
+  rpc = urlfetch.create_rpc(
+      deadline=deadline or settings.backend_deadline)
+  headers = _MakeBackendRequestHeaders(failfast)
+  # follow_redirects=False is needed to avoid a login screen on googleplex.
+  urlfetch.make_fetch_call(rpc, url, follow_redirects=False, headers=headers)
+  return rpc
+
+
+def _StartBackendNonviewableCall(
+    project_id, logged_in_user_id, shard_id, invalidation_timestep,
+    deadline=None, failfast=True):
+  """Ask a backend to query one shard of the database."""
+  backend_host = modules.get_hostname(module='besearch')
+  url = 'http://%s%s' % (backend_host, framework_helpers.FormatURL(
+      None, urls.BACKEND_NONVIEWABLE,
+      project_id=project_id or '',
+      logged_in_user_id=logged_in_user_id or '',
+      shard_id=shard_id,
+      invalidation_timestep=invalidation_timestep))
+  logging.info('Calling backend nonviewable: %s', url)
+  rpc = urlfetch.create_rpc(deadline=deadline or settings.backend_deadline)
+  headers = _MakeBackendRequestHeaders(failfast)
+  # follow_redirects=False is needed to avoid a login screen on googleplex.
+  urlfetch.make_fetch_call(rpc, url, follow_redirects=False, headers=headers)
+  return rpc
+
+
+def _HandleBackendSearchResponse(
+    query_project_names, rpc_tuple, rpc_tuples, remaining_retries,
+    unfiltered_iids, search_limit_reached, invalidation_timestep,
+    error_responses, me_user_ids, logged_in_user_id, new_url_num, can,
+    sort_spec, group_by_spec):
+  # type: (Sequence[str], Tuple(int, Tuple(int, str),
+  #         google.appengine.api.apiproxy_stub_map.UserRPC),
+  #     Sequence[Tuple(int, Tuple(int, str),
+  #         google.appengine.api.apiproxy_stub_map.UserRPC)],
+  #     int, Mapping[Tuple(int, str), Sequence[int]],
+  #     Mapping[Tuple(int, str), bool], int, Collection[Tuple(int, str)],
+  #     Sequence[int], int, int, int, str, str) -> None
+  #
+  """Process one backend response and retry if there was an error.
+
+  SIDE EFFECTS: This function edits many of the passed in parameters in place.
+    For example, search_limit_reached and unfiltered_iids are updated with
+    response data from the RPC, keyed by shard_key.
+
+  Args:
+    query_project_names: List of projects to query.
+    rpc_tuple: Tuple containing an RPC response object, the time it happened,
+      and what shard the RPC was queried against.
+    rpc_tuples: List of RPC responses to mutate with any retry responses that
+      happened.
+    remaining_retries: Number of times left to retry.
+    unfiltered_iids: Dict of Issue ids, before they've been filtered by
+      permissions.
+    search_limit_reached: Dict of whether the search limit for a particular
+      shard has been hit.
+    invalidation_timestep: int timestep used to keep cached items fresh.
+    error_responses: Set of shard keys that encountered errors; mutated in
+      place as failures occur.
+    me_user_ids: List of relevant user IDs, i.e., the currently logged in
+      user and linked account IDs if applicable.
+    logged_in_user_id: Logged in user's ID.
+    new_url_num: the number of issues for BackendSearchPipeline to query.
+        Computed based on pagination offset + number of items per page.
+    can: Canned query ID to use.
+    sort_spec: str specifying how issues should be sorted.
+    group_by_spec: str specifying how issues should be grouped.
+  """
+  start_time, shard_key, rpc = rpc_tuple
+  duration_sec = time.time() - start_time
+
+  try:
+    response = rpc.get_result()
+    logging.info('call to backend took %d sec', duration_sec)
+    # Note that response.content has "})]'\n" prepended to it.
+    json_content = response.content[5:]
+    logging.info('got json text: %r length %r',
+                 json_content[:framework_constants.LOGGING_MAX_LENGTH],
+                 len(json_content))
+    if json_content == '':
+      raise Exception('Fast fail')
+    json_data = json.loads(json_content)
+    unfiltered_iids[shard_key] = json_data['unfiltered_iids']
+    search_limit_reached[shard_key] = json_data['search_limit_reached']
+    if json_data.get('error'):
+      # Don't raise an exception, just log, because these errors are more like
+      # 400s than 500s, and shouldn't be retried.
+      logging.error('Backend shard %r returned error "%r"',
+                    shard_key, json_data.get('error'))
+      error_responses.add(shard_key)
+
+  except Exception as e:
+    if duration_sec > FAIL_FAST_LIMIT_SEC:  # Don't log fail-fast exceptions.
+      logging.exception(e)
+    if not remaining_retries:
+      logging.error('backend search retries exceeded')
+      error_responses.add(shard_key)
+      return  # Used all retries, so give up.
+
+    if duration_sec >= settings.backend_deadline:
+      logging.error('backend search on %r took too long', shard_key)
+      error_responses.add(shard_key)
+      return  # That backend shard is overloaded, so give up.
+
+    logging.error('backend call for shard %r failed, retrying', shard_key)
+    retry_rpc = _StartBackendSearchCall(
+        query_project_names,
+        shard_key,
+        invalidation_timestep,
+        me_user_ids,
+        logged_in_user_id,
+        new_url_num,
+        can=can,
+        sort_spec=sort_spec,
+        group_by_spec=group_by_spec,
+        failfast=remaining_retries > 2)
+    retry_rpc_tuple = (time.time(), shard_key, retry_rpc)
+    retry_rpc.callback = _MakeBackendCallback(
+        _HandleBackendSearchResponse, query_project_names, retry_rpc_tuple,
+        rpc_tuples, remaining_retries - 1, unfiltered_iids,
+        search_limit_reached, invalidation_timestep, error_responses,
+        me_user_ids, logged_in_user_id, new_url_num, can, sort_spec,
+        group_by_spec)
+    rpc_tuples.append(retry_rpc_tuple)
+
+
+def _HandleBackendNonviewableResponse(
+    project_id, logged_in_user_id, shard_id, rpc_tuple, rpc_tuples,
+    remaining_retries, nonviewable_iids, invalidation_timestep):
+  """Process one backend response and retry if there was an error."""
+  start_time, shard_id, rpc = rpc_tuple
+  duration_sec = time.time() - start_time
+
+  try:
+    response = rpc.get_result()
+    logging.info('call to backend nonviewable took %d sec', duration_sec)
+    # Note that response.content has "})]'\n" prepended to it.
+    json_content = response.content[5:]
+    logging.info('got json text: %r length %r',
+                 json_content[:framework_constants.LOGGING_MAX_LENGTH],
+                 len(json_content))
+    if json_content == '':
+      raise Exception('Fast fail')
+    json_data = json.loads(json_content)
+    nonviewable_iids[shard_id] = set(json_data['nonviewable'])
+
+  except Exception as e:
+    if duration_sec > FAIL_FAST_LIMIT_SEC:  # Don't log fail-fast exceptions.
+      logging.exception(e)
+
+    if not remaining_retries:
+      logging.warn('Used all retries, so give up on shard %r', shard_id)
+      return
+
+    if duration_sec >= settings.backend_deadline:
+      logging.error('nonviewable call on %r took too long', shard_id)
+      return  # That backend shard is overloaded, so give up.
+
+    logging.error(
+      'backend nonviewable call for shard %r;%r;%r failed, retrying',
+      project_id, logged_in_user_id, shard_id)
+    retry_rpc = _StartBackendNonviewableCall(
+        project_id, logged_in_user_id, shard_id, invalidation_timestep,
+        failfast=remaining_retries > 2)
+    retry_rpc_tuple = (time.time(), shard_id, retry_rpc)
+    retry_rpc.callback = _MakeBackendCallback(
+        _HandleBackendNonviewableResponse, project_id, logged_in_user_id,
+        shard_id, retry_rpc_tuple, rpc_tuples, remaining_retries - 1,
+        nonviewable_iids, invalidation_timestep)
+    rpc_tuples.append(retry_rpc_tuple)
+
+
+def _TotalLength(sharded_iids):
+  """Return the total length of all issue_iids lists."""
+  return sum(len(issue_iids) for issue_iids in sharded_iids.values())
+
+
+def _ReverseShards(sharded_iids):
+  """Reverse each issue_iids list in place."""
+  for shard_key in sharded_iids:
+    sharded_iids[shard_key].reverse()
+
+
+def _TrimEndShardedIIDs(sharded_iids, sample_iid_tuples, num_needed):
+  """Trim the IIDs to keep at least num_needed items.
+
+  Args:
+    sharded_iids: dict {shard_key: issue_id_list} for search results.  This is
+        modified in place to remove some trailing issue IDs.
+    sample_iid_tuples: list of (iid, shard_key) from a sorted list of sample
+        issues.
+    num_needed: int minimum total number of items to keep.  Some IIDs that are
+        known to belong in positions > num_needed will be trimmed off.
+
+  Returns:
+    The total number of IIDs removed from the IID lists.
+  """
+  # 1. Get (sample_iid, position_in_shard) for each sample.
+  sample_positions = _CalcSamplePositions(sharded_iids, sample_iid_tuples)
+
+  # 2. Walk through the samples, computing a combined lower bound at each
+  # step until we know that we have passed at least num_needed IIDs.
+  lower_bound_per_shard = {}
+  excess_samples = []
+  for i in range(len(sample_positions)):
+    _sample_iid, sample_shard_key, pos = sample_positions[i]
+    lower_bound_per_shard[sample_shard_key] = pos
+    overall_lower_bound = sum(lower_bound_per_shard.values())
+    if overall_lower_bound >= num_needed:
+      excess_samples = sample_positions[i + 1:]
+      break
+  else:
+    return 0  # We went through all samples and never reached num_needed.
+
+  # 3. Truncate each shard at the first excess sample in that shard.
+  already_trimmed = set()
+  num_trimmed = 0
+  for _sample_iid, sample_shard_key, pos in excess_samples:
+    if sample_shard_key not in already_trimmed:
+      num_trimmed += len(sharded_iids[sample_shard_key]) - pos
+      sharded_iids[sample_shard_key] = sharded_iids[sample_shard_key][:pos]
+      already_trimmed.add(sample_shard_key)
+
+  return num_trimmed
+
+
+# TODO(jrobbins): Convert this to a python generator.
+def _CalcSamplePositions(sharded_iids, sample_iids):
+  """Return [(iid, shard_key, position_in_shard), ...] for each sample."""
+  # We keep track of how far index() has scanned in each shard to avoid
+  # starting over at position 0 when looking for the next sample in
+  # the same shard.
+  scan_positions = collections.defaultdict(lambda: 0)
+  sample_positions = []
+  for sample_iid, sample_shard_key in sample_iids:
+    try:
+      pos = sharded_iids.get(sample_shard_key, []).index(
+          sample_iid, scan_positions[sample_shard_key])
+      scan_positions[sample_shard_key] = pos
+      sample_positions.append((sample_iid, sample_shard_key, pos))
+    except ValueError:
+      pass
+
+  return sample_positions
+
+
+def _SortIssues(issues, config, users_by_id, group_by_spec, sort_spec):
+  """Sort the found issues based on the request and config values.
+
+  Args:
+    issues: A list of issues to be sorted.
+    config: A ProjectIssueConfig that could impact sort order.
+    users_by_id: dictionary {user_id: user_view,...} for all users who
+      participate in any issue in the entire list.
+    group_by_spec: string that lists the grouping order
+    sort_spec: string that lists the sort order
+
+
+  Returns:
+    A sorted list of issues, based on parameters from mr and config.
+  """
+  issues = sorting.SortArtifacts(
+      issues, config, tracker_helpers.SORTABLE_FIELDS,
+      tracker_helpers.SORTABLE_FIELDS_POSTPROCESSORS, group_by_spec,
+      sort_spec, users_by_id=users_by_id)
+  return issues
diff --git a/search/query2ast.py b/search/query2ast.py
new file mode 100644
index 0000000..235f9b3
--- /dev/null
+++ b/search/query2ast.py
@@ -0,0 +1,899 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""A set of functions that integrate the GAE search index with Monorail."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import collections
+import datetime
+import logging
+import re
+import time
+
+from google.appengine.api import search
+
+from proto import ast_pb2
+from proto import tracker_pb2
+
+
+# TODO(jrobbins): Consider re-implementing this whole file by using a
+# BNF syntax specification and a parser generator or library.
+
+# encodings
+UTF8 = 'utf-8'
+
+# Operator used for OR statements.
+OR_SYMBOL = ' OR '
+
+# Token types used for parentheses parsing.
+SUBQUERY = ast_pb2.TokenType.SUBQUERY
+LEFT_PAREN = ast_pb2.TokenType.LEFT_PAREN
+RIGHT_PAREN = ast_pb2.TokenType.RIGHT_PAREN
+OR = ast_pb2.TokenType.OR
+
+# Field types and operators
+BOOL = tracker_pb2.FieldTypes.BOOL_TYPE
+DATE = tracker_pb2.FieldTypes.DATE_TYPE
+NUM = tracker_pb2.FieldTypes.INT_TYPE
+TXT = tracker_pb2.FieldTypes.STR_TYPE
+APPROVAL = tracker_pb2.FieldTypes.APPROVAL_TYPE
+
+EQ = ast_pb2.QueryOp.EQ
+NE = ast_pb2.QueryOp.NE
+LT = ast_pb2.QueryOp.LT
+GT = ast_pb2.QueryOp.GT
+LE = ast_pb2.QueryOp.LE
+GE = ast_pb2.QueryOp.GE
+TEXT_HAS = ast_pb2.QueryOp.TEXT_HAS
+NOT_TEXT_HAS = ast_pb2.QueryOp.NOT_TEXT_HAS
+IS_DEFINED = ast_pb2.QueryOp.IS_DEFINED
+IS_NOT_DEFINED = ast_pb2.QueryOp.IS_NOT_DEFINED
+KEY_HAS = ast_pb2.QueryOp.KEY_HAS
+
+# Mapping from user query comparison operators to our internal representation.
+OPS = {
+    ':': TEXT_HAS,
+    '=': EQ,
+    '!=': NE,
+    '<': LT,
+    '>': GT,
+    '<=': LE,
+    '>=': GE,
+}
+
+# When the query has a leading minus, switch the operator for its opposite.
+NEGATED_OPS = {
+    EQ: NE,
+    NE: EQ,
+    LT: GE,
+    GT: LE,
+    LE: GT,
+    GE: LT,
+    TEXT_HAS: NOT_TEXT_HAS,
+    # IS_DEFINED is handled separately.
+    }
+
+# This is a partial regular expression that matches all of our comparison
+# operators, such as =, !=, >, and <.  Longer ones are listed first so that
+# the shorter ones don't cause premature matches.
+OPS_PATTERN = '|'.join(
+    map(re.escape, sorted(list(OPS.keys()), key=lambda op: -len(op))))
+
+# This RE extracts search terms from a subquery string.
+TERM_RE = re.compile(
+    r'(-?"[^"]+")|'  # E.g., ["division by zero"]
+    r'(\S+(%s)[^ "]+)|'  # E.g., [stars>10]
+    r'(\w+(%s)"[^"]+")|'  # E.g., [summary:"memory leak"]
+    r'(-?[._\*\w][-._\*\w]+)'  # E.g., [-workaround]
+    % (OPS_PATTERN, OPS_PATTERN), flags=re.UNICODE)
+
+# This RE is used to further decompose a comparison term into prefix, op, and
+# value.  E.g., [stars>10] or [is:open] or [summary:"memory leak"].  The prefix
+# can include a leading "-" to negate the comparison.
+OP_RE = re.compile(
+    r'^(?P<prefix>[-_.\w]*?)'
+    r'(?P<op>%s)'
+    r'(?P<value>([-@\w][-\*,./:<=>@\w]*|"[^"]*"))$' %
+    OPS_PATTERN,
+    flags=re.UNICODE)
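+
+# For example (illustrative):
+#   m = OP_RE.match('-stars>10')
+#   m.group('prefix'), m.group('op'), m.group('value')  # ('-stars', '>', '10')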
+
+
+# Predefined issue fields passed to the query parser.
+_ISSUE_FIELDS_LIST = [
+    (ast_pb2.ANY_FIELD, TXT),
+    ('attachment', TXT),  # attachment file names
+    ('attachments', NUM),  # number of attachment files
+    ('blocked', BOOL),
+    ('blockedon', TXT),
+    ('blockedon_id', NUM),
+    ('blocking', TXT),
+    ('blocking_id', NUM),
+    ('cc', TXT),
+    ('cc_id', NUM),
+    ('comment', TXT),
+    ('commentby', TXT),
+    ('commentby_id', NUM),
+    ('component', TXT),
+    ('component_id', NUM),
+    ('description', TXT),
+    ('gate', TXT),
+    ('hotlist', TXT),
+    ('hotlist_id', NUM),
+    ('id', NUM),
+    ('is_spam', BOOL),
+    ('label', TXT),
+    ('label_id', NUM),
+    ('mergedinto', NUM),
+    ('mergedinto_id', NUM),
+    ('open', BOOL),
+    ('owner', TXT),
+    ('ownerbouncing', BOOL),
+    ('owner_id', NUM),
+    ('project', TXT),
+    ('reporter', TXT),
+    ('reporter_id', NUM),
+    ('spam', BOOL),
+    ('stars', NUM),
+    ('starredby', TXT),
+    ('starredby_id', NUM),
+    ('status', TXT),
+    ('status_id', NUM),
+    ('summary', TXT),
+    ]
+
+_DATE_FIELDS = (
+    'closed',
+    'modified',
+    'opened',
+    'ownermodified',
+    'ownerlastvisit',
+    'statusmodified',
+    'componentmodified',
+    )
+
+# Add all _DATE_FIELDS to _ISSUE_FIELDS_LIST.
+_ISSUE_FIELDS_LIST.extend((date_field, DATE) for date_field in _DATE_FIELDS)
+
+_DATE_FIELD_SUFFIX_TO_OP = {
+    '-after': '>',
+    '-before': '<',
+}
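+
+# For example, the legacy term [modified-after:2021/3/4] is handled in
+# _ParseCond as if it were written [modified>2021/3/4].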
+
+SET_BY_SUFFIX = '-by'
+SET_ON_SUFFIX = '-on'
+APPROVER_SUFFIX = '-approver'
+STATUS_SUFFIX = '-status'
+
+_APPROVAL_SUFFIXES = (
+    SET_BY_SUFFIX,
+    SET_ON_SUFFIX,
+    APPROVER_SUFFIX,
+    STATUS_SUFFIX,
+)
+
+BUILTIN_ISSUE_FIELDS = {
+    f_name: tracker_pb2.FieldDef(field_name=f_name, field_type=f_type)
+    for f_name, f_type in _ISSUE_FIELDS_LIST}
+
+
+# Do not treat strings that start with the below as key:value search terms.
+# See bugs.chromium.org/p/monorail/issues/detail?id=419 for more detail.
+NON_OP_PREFIXES = (
+    'http:',
+    'https:',
+)
+
+
+def ParseUserQuery(
+    query, scope, builtin_fields, harmonized_config, warnings=None,
+    now=None):
+  # type: (str, str, Mapping[str, proto.tracker_pb2.FieldDef],
+  #   proto.tracker_pb2.ProjectIssueConfig, Sequence[str], int) ->
+  #     proto.ast_pb2.QueryAST
+  """Parse a user query and return a set of structure terms.
+
+  Args:
+    query: string with user's query.  E.g., 'Priority=High'.
+    scope: string search terms that define the scope in which the
+        query should be executed.  They are expressed in the same
+        user query language.  E.g., adding the canned query.
+    builtin_fields: dict {field_name: FieldDef(field_name, type)}
+        mapping field names to FieldDef objects for built-in fields.
+    harmonized_config: config for all the projects being searched.
+        @@@ custom field name is not unique in cross project search.
+         - custom_fields = {field_name: [fd, ...]}
+         - query build needs to OR each possible interpretation
+         - could be label in one project and field in another project.
+        @@@ what about searching across all projects?
+    warnings: optional list to accumulate warning messages.
+    now: optional timestamp for tests, otherwise time.time() is used.
+
+  Returns:
+    A QueryAST with conjunctions (usually just one), where each has a list of
+    Condition PBs with op, fields, str_values and int_values.  E.g., the query
+    [priority=high leak OR stars>100] over open issues would return
+    QueryAST(
+      Conjunction(Condition(EQ, [open_fd], [], [1]),
+                  Condition(EQ, [label_fd], ['priority-high'], []),
+                  Condition(TEXT_HAS, any_field_fd, ['leak'], [])),
+      Conjunction(Condition(EQ, [open_fd], [], [1]),
+                  Condition(GT, [stars_fd], [], [100])))
+
+  Raises:
+    InvalidQueryError: If a problem was detected in the user's query.
+  """
+  if warnings is None:
+    warnings = []
+
+  # Convert the overall query into one or more OR'd subqueries.
+  subqueries = QueryToSubqueries(query)
+
+  # Make a dictionary of all fields: built-in + custom in each project.
+  combined_fields = collections.defaultdict(
+      list, {field_name: [field_def]
+             for field_name, field_def in builtin_fields.items()})
+  for fd in harmonized_config.field_defs:
+    if fd.field_type != tracker_pb2.FieldTypes.ENUM_TYPE:
+      # Only do non-enum fields because enums are stored as labels
+      combined_fields[fd.field_name.lower()].append(fd)
+      if fd.field_type == APPROVAL:
+        for approval_suffix in _APPROVAL_SUFFIXES:
+          combined_fields[fd.field_name.lower() + approval_suffix].append(fd)
+
+  conjunctions = [
+      _ParseConjunction(sq, scope, combined_fields, warnings, now=now)
+      for sq in subqueries]
+  return ast_pb2.QueryAST(conjunctions=conjunctions)
+
+
+def _ParseConjunction(subquery, scope, fields, warnings, now=None):
+  # type: (str, str, Mapping[str, proto.tracker_pb2.FieldDef], Sequence[str],
+  #     int) -> proto.ast_pb2.Condition
+  """Parse part of a user query into a Conjunction PB."""
+  scoped_query = ('%s %s' % (scope, subquery)).lower()
+  cond_strs = _ExtractConds(scoped_query, warnings)
+  conds = [_ParseCond(cond_str, fields, warnings, now=now)
+           for cond_str in cond_strs]
+  conds = [cond for cond in conds if cond]
+  return ast_pb2.Conjunction(conds=conds)
+
+
+def _ParseCond(cond_str, fields, warnings, now=None):
+  # type: (str, Mapping[str, proto.tracker_pb2.FieldDef], Sequence[str],
+  #     int) -> proto.ast_pb2.Condition
+  """Parse one user query condition string into a Condition PB."""
+  op_match = OP_RE.match(cond_str)
+  # Do not treat as key:value search terms if any of the special prefixes match.
+  special_prefixes_match = any(
+      cond_str.startswith(p) for p in NON_OP_PREFIXES)
+  if op_match and not special_prefixes_match:
+    prefix = op_match.group('prefix')
+    op = op_match.group('op')
+    val = op_match.group('value')
+    # Special case handling to continue to support old date query terms from
+    # code.google.com. See monorail:151 for more details.
+    if prefix.startswith(_DATE_FIELDS):
+      for date_suffix in _DATE_FIELD_SUFFIX_TO_OP:
+        if prefix.endswith(date_suffix):
+          # Slice off the suffix; str.rstrip() strips a character set, not
+          # a literal suffix, and would mangle e.g. 'ownerlastvisit-after'.
+          prefix = prefix[:-len(date_suffix)]
+          op = _DATE_FIELD_SUFFIX_TO_OP[date_suffix]
+    return _ParseStructuredTerm(prefix, op, val, fields, now=now)
+
+  # Treat the cond as a full-text search term, which might be negated.
+  if cond_str.startswith('-'):
+    op = NOT_TEXT_HAS
+    cond_str = cond_str[1:]
+  else:
+    op = TEXT_HAS
+
+  # Construct a full-text Query object as a dry-run to validate that
+  # the syntax is acceptable.
+  try:
+    _fts_query = search.Query(cond_str)
+  except search.QueryError:
+    warnings.append('Ignoring full-text term: %s' % cond_str)
+    return None
+
+  # Flag a potential user misunderstanding.
+  if cond_str.lower() in ('and', 'or', 'not'):
+    warnings.append(
+        'The only supported boolean operator is OR (all capitals).')
+
+  return ast_pb2.MakeCond(
+      op, [BUILTIN_ISSUE_FIELDS[ast_pb2.ANY_FIELD]], [cond_str], [])
+
+
+def _ParseStructuredTerm(prefix, op_str, value, fields, now=None):
+  # type: (str, str, str, Mapping[str, proto.tracker_pb2.FieldDef], int) ->
+  #     proto.ast_pb2.Condition
+  """Parse one user structured query term into an internal representation.
+
+  Args:
+    prefix: The query operator, usually a field name.  E.g., summary. It can
+      also be special operators like "is" to test boolean fields.
+    op_str: the comparison operator.  Usually ":" or "=", but can be any OPS.
+    value: the value to compare against, e.g., term to find in that field.
+    fields: dict {name_lower: [FieldDef, ...]} for built-in and custom fields.
+    now: optional timestamp for tests, otherwise time.time() is used.
+
+  Returns:
+    A Condition PB.
+  """
+  unquoted_value = value.strip('"')
+  # Quick-OR is a convenient way to write one condition that matches any one of
+  # multiple values, like set membership.  E.g., [Priority=High,Critical].
+  # Ignore empty values caused by duplicated or trailing commas. E.g.,
+  # [Priority=High,,Critical,] is equivalent to [Priority=High,Critical].
+  quick_or_vals = [v.strip() for v in unquoted_value.split(',') if v.strip()]
+
+  op = OPS[op_str]
+  negate = False
+  if prefix.startswith('-'):
+    negate = True
+    op = NEGATED_OPS.get(op, op)
+    prefix = prefix[1:]
+
+  if prefix == 'is' and unquoted_value in [
+      'open', 'blocked', 'spam', 'ownerbouncing']:
+    return ast_pb2.MakeCond(
+        NE if negate else EQ, fields[unquoted_value], [], [])
+
+  # Search entries with or without any value in the specified field.
+  if prefix == 'has':
+    op = IS_NOT_DEFINED if negate else IS_DEFINED
+    if '.' in unquoted_value:  # Possible search for phase field with any value.
+      phase_name, possible_field = unquoted_value.split('.', 1)
+      if possible_field in fields:
+        return ast_pb2.MakeCond(
+            op, fields[possible_field], [], [], phase_name=phase_name)
+    elif unquoted_value in fields:  # Look for that field with any value.
+      return ast_pb2.MakeCond(op, fields[unquoted_value], [], [])
+    else:  # Look for any label with that prefix.
+      return ast_pb2.MakeCond(op, fields['label'], [unquoted_value], [])
+
+  # Search entries with certain gates.
+  if prefix == 'gate':
+    return ast_pb2.MakeCond(op, fields['gate'], quick_or_vals, [])
+
+  # Determine hotlist query type.
+  # is_fields remains True unless prefix is 'hotlist' and the first
+  # quick-OR value does not contain ':' (or there are no values at all).
+  is_fields = True
+  if prefix == 'hotlist':
+    try:
+      if ':' not in quick_or_vals[0]:
+        is_fields = False
+    except IndexError:
+      is_fields = False
+
+  phase_name = None
+  if '.' in prefix and is_fields:
+    split_prefix = prefix.split('.', 1)
+    if split_prefix[1] in fields:
+      phase_name, prefix = split_prefix
+
+  # search built-in and custom fields. E.g., summary.
+  if prefix in fields and is_fields:
+    # Note: if the first matching field is date-type, we assume they all are.
+    # TODO(jrobbins): better handling for rare case where multiple projects
+    # define the same custom field name, and one is a date and another is not.
+    first_field = fields[prefix][0]
+    if first_field.field_type == DATE:
+      date_values = [_ParseDateValue(val, now=now) for val in quick_or_vals]
+      return ast_pb2.MakeCond(op, fields[prefix], [], date_values)
+    elif first_field.field_type == APPROVAL and prefix.endswith(SET_ON_SUFFIX):
+      date_values = [_ParseDateValue(val, now=now) for val in quick_or_vals]
+      return ast_pb2.MakeCond(
+          op,
+          fields[prefix], [],
+          date_values,
+          key_suffix=SET_ON_SUFFIX,
+          phase_name=phase_name)
+    else:
+      quick_or_ints = []
+      for qov in quick_or_vals:
+        try:
+          quick_or_ints.append(int(qov))
+        except ValueError:
+          pass
+      if first_field.field_type == APPROVAL:
+        for approval_suffix in _APPROVAL_SUFFIXES:
+          if prefix.endswith(approval_suffix):
+            return ast_pb2.MakeCond(op, fields[prefix], quick_or_vals,
+                                    quick_or_ints, key_suffix=approval_suffix,
+                                    phase_name=phase_name)
+      return ast_pb2.MakeCond(op, fields[prefix], quick_or_vals,
+                              quick_or_ints, phase_name=phase_name)
+
+  # Since it is not a field, treat it as a label prefix.  E.g., Priority.
+  quick_or_labels = ['%s-%s' % (prefix, v) for v in quick_or_vals]
+  # Convert substring match to key-value match if user typed 'foo:bar'.
+  if op == TEXT_HAS:
+    op = KEY_HAS
+  return ast_pb2.MakeCond(op, fields['label'], quick_or_labels, [])
+
+
+def _ExtractConds(query, warnings):
+  # type: (str, Sequence[str]) -> Sequence[str]
+  """Parse a query string into a list of individual condition strings.
+
+  Args:
+    query: UTF-8 encoded search query string.
+    warnings: list to accumulate warning messages.
+
+  Returns:
+    A list of query condition strings.
+  """
+  # Convert to unicode then search for distinct terms.
+  term_matches = TERM_RE.findall(query)
+
+  terms = []
+  for (phrase, word_label, _op1, phrase_label, _op2,
+       word) in term_matches:
+    # Case 1: Quoted phrases, e.g., ["hot dog"].
+    if phrase_label or phrase:
+      terms.append(phrase_label or phrase)
+
+    # Case 2: Comparisons
+    elif word_label:
+      special_prefixes_match = any(
+          word_label.startswith(p) for p in NON_OP_PREFIXES)
+      match = OP_RE.match(word_label)
+      if match and not special_prefixes_match:
+        label = match.group('prefix')
+        op = match.group('op')
+        word = match.group('value')
+        terms.append('%s%s"%s"' % (label, op, word))
+      else:
+        # It looked like a key:value cond, but not exactly, so treat it
+        # as fulltext search.  It is probably a tiny bit of source code.
+        terms.append('"%s"' % word_label)
+
+    # Case 3: Simple words.
+    elif word:
+      terms.append(word)
+
+    else:  # pragma: no coverage
+      warnings.append('Unparsable search term')
+
+  return terms
+
+
+def _ParseDateValue(val, now=None):
+  # type: (str, int) -> int
+  """Convert the user-entered date into timestamp."""
+  # Support timestamp value such as opened>1437671476
+  try:
+    return int(val)
+  except ValueError:
+    pass
+
+  # TODO(jrobbins): future: take timezones into account.
+  # TODO(jrobbins): for now, explain to users that "today" is
+  # actually now: the current time, not 12:01am in their timezone.
+  # In fact, it is not very useful because everything in the system
+  # happened before the current time.
+  if val == 'today':
+    return _CalculatePastDate(0, now=now)
+  elif val.startswith('today-'):
+    try:
+      days_ago = int(val.split('-')[1])
+    except ValueError:
+      raise InvalidQueryError('Could not parse date: ' + val)
+    return _CalculatePastDate(days_ago, now=now)
+
+  try:
+    if '/' in val:
+      year, month, day = [int(x) for x in val.split('/')]
+    elif '-' in val:
+      year, month, day = [int(x) for x in val.split('-')]
+    else:
+      raise InvalidQueryError('Could not parse date: ' + val)
+  except ValueError:
+    raise InvalidQueryError('Could not parse date: ' + val)
+
+  try:
+    return int(time.mktime(datetime.datetime(year, month, day).timetuple()))
+  except ValueError:
+    raise InvalidQueryError('Could not parse date: ' + val)
+
+
+def _CalculatePastDate(days_ago, now=None):
+  # type: (int, int) -> int
+  """Calculates the timestamp N days ago from now."""
+  if now is None:
+    now = int(time.time())
+  ts = now - days_ago * 24 * 60 * 60
+  return ts
+
+
+def QueryToSubqueries(query):
+  # type: (str) -> Sequence[str]
+  """Splits a query into smaller queries based on Monorail's search syntax.
+
+  This function handles parsing parentheses and OR statements in Monorail's
+  search syntax. By doing this parsing for OR statements and parentheses up
+  front in FrontendSearchPipeline, we are able to convert complex queries
+  with lots of ORs into smaller, more easily cacheable query terms.
+
+  These outputted subqueries should collectively return the same query results
+  as the initial input query without containing any ORs or parentheses,
+  allowing later search layers to parse queries without worrying about ORs
+  or parentheses.
+
+  Some examples of possible queries and their expected output:
+
+  - '(A OR B) (C OR D) OR (E OR F)' -> ['A C', 'A D', 'B C', 'B D', 'E', 'F']
+  - '(A) OR (B)' -> ['A', 'B']
+  - '(A ((C) OR (D OR (E OR F))))' -> ['A C', 'A D', 'A E', 'A F']
+
+  Where A, B, C, D, etc. could be any list of conjunctions. ie: "owner:me",
+  "Pri=1", "hello world Hotlist=test", "label!=a11y", etc.
+
+  Note: Monorail implicitly ANDs any query terms separated by a space. For
+  the most part, AND functionality is handled at a later layer in search
+  processing. However, this case becomes important here when considering the
+  fact that a parentheses group can either be ANDed or ORed with terms that
+  surround it.
+
+  The _MultiplySubqueries helper is used to AND the results of different
+  groups together whereas concatenating lists is used to OR subqueries
+  together.
+
+  Args:
+    query: The initial query that was sent to the search.
+
+  Returns:
+    List of query fragments to be independently processed as search terms.
+
+  Raises:
+    InvalidQueryError if parentheses are unmatched.
+  """
+  tokens = _ValidateAndTokenizeQuery(query)
+
+  # Using an iterator allows us to keep our current loop position across
+  # helpers. This makes recursion a lot easier.
+  token_iterator = PeekIterator(tokens)
+
+  subqueries = _ParseQuery(token_iterator)
+
+  if not len(subqueries):
+    # Several cases, such as an empty query or a query with only parentheses
+    # will result in an empty set of subqueries. In these cases, we still want
+    # to give the search pipeline a single empty query to process.
+    return ['']
+
+  return subqueries
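+
+# Example (illustrative):
+#   QueryToSubqueries('owner:me (Pri=1 OR Pri=2)')
+#   -> ['owner:me Pri=1', 'owner:me Pri=2']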
+
+
+def _ParseQuery(token_iterator):
+  # type: (Sequence[proto.ast_pb2.QueryToken]) -> Sequence[str]
+  """Recursive helper to convert query tokens into a list of subqueries.
+
+  Parses a Query based on the following grammar (EBNF):
+
+    Query             := OrGroup { [OrOperator] OrGroup }
+    OrGroup           := AndGroup { AndGroup }
+    AndGroup          := Subquery | ParenthesesGroup
+    ParenthesesGroup  := "(" Query ")"
+    Subquery          := /.+/
+    OrOperator        := " OR "
+
+  An important nuance is that two groups can be next to each other, separated
+  only by a word boundary (ie: space or parentheses). In this case, they are
+  implicitly ANDed. In practice, because unparenthesized fragments ANDed by
+  spaces are stored as single tokens, we only need to handle the AND case when
+  a parentheses group is implicitly ANDed with an adjacent group.
+
+  Order of precedence is implemented by recursing through OR groups before
+  recursing through AND groups.
+
+  Args:
+    token_iterator: Iterator over a list of query tokens.
+
+  Returns:
+    List of query fragments to be processed as search terms.
+
+  Raises:
+    InvalidQueryError if tokens were inputted in a format that does not follow
+    our search grammar.
+  """
+  subqueries = []
+  try:
+    if token_iterator.peek().token_type == OR:
+      # Edge case: Ignore empty OR groups at the start of a ParenthesesGroup.
+      # ie: "(OR A)" will be processed as "A"
+      next(token_iterator)
+
+    subqueries = _ParseOrGroup(token_iterator)
+
+    while token_iterator.peek().token_type == OR:
+      # Consume the OR token without doing anything with it.
+      next(token_iterator)
+
+      next_token = token_iterator.peek()
+      if next_token.token_type == RIGHT_PAREN:
+        # Edge case: Ignore empty OR groups at the end of a ParenthesesGroup.
+        # ie: "(A OR)" will be processed as "A"
+        return subqueries
+
+      next_subqueries = _ParseOrGroup(token_iterator)
+
+      # Concatenate results of OR groups together.
+      subqueries = subqueries + next_subqueries
+
+  except StopIteration:
+    pass
+  # Return when we've reached the end of the string.
+  return subqueries
+
+
+def _ParseOrGroup(token_iterator):
+  # type: (Sequence[proto.ast_pb2.QueryToken]) -> Sequence[str]
+  """Recursive helper to convert a single "OrGroup" into subqueries.
+
+  An OrGroup here is based on the following grammar:
+
+    Query             := OrGroup { [OrOperator] OrGroup }
+    OrGroup           := AndGroup { AndGroup }
+    AndGroup          := Subquery | ParenthesesGroup
+    ParenthesesGroup  := "(" Query ")"
+    Subquery          := /.+/
+    OrOperator        := " OR "
+
+  Args:
+    token_iterator: Iterator over a list of query tokens.
+
+  Returns:
+    List of query fragments to be processed as search terms.
+
+  Raises:
+    InvalidQueryError if tokens were inputted in a format that does not follow
+    our search grammar.
+  """
+  subqueries = _ParseAndGroup(token_iterator)
+
+  try:
+    # Iterate until there are no more AND groups left to see.
+    # Subquery or left parentheses are the possible starts of an AndGroup.
+    while (token_iterator.peek().token_type == SUBQUERY or
+           token_iterator.peek().token_type == LEFT_PAREN):
+
+      # Find subqueries from the next AND group.
+      next_subqueries = _ParseAndGroup(token_iterator)
+
+      # Multiply all results across AND groups together.
+      subqueries = _MultiplySubqueries(subqueries, next_subqueries)
+  except StopIteration:
+    pass
+
+  return subqueries
+
+
+def _ParseAndGroup(token_iterator):
+  # type: (Sequence[proto.ast_pb2.QueryToken]) -> Sequence[str]
+  """Recursive helper to convert a single "AndGroup" into subqueries.
+
+  An AndGroup here is based on the following grammar:
+
+    Query             := OrGroup { [OrOperator] OrGroup }
+    OrGroup           := AndGroup { AndGroup }
+    AndGroup          := Subquery | ParenthesesGroup
+    ParenthesesGroup  := "(" Query ")"
+    Subquery          := /.+/
+    OrOperator        := " OR "
+
+  Args:
+    token_iterator: Iterator over a list of query tokens.
+
+  Returns:
+    List of query fragments to be processed as search terms.
+
+  Raises:
+    InvalidQueryError if tokens were inputted in a format that does not follow
+    our search grammar.
+  """
+  try:
+    token = next(token_iterator)
+    if token.token_type == LEFT_PAREN:
+      if token_iterator.peek().token_type == RIGHT_PAREN:
+        # Don't recurse into the ParenthesesGroup if there's nothing inside.
+        next(token_iterator)
+        return []
+
+      # Recurse into the ParenthesesGroup.
+      subqueries = _ParseQuery(token_iterator)
+
+      # Next token should be a right parenthesis.
+      next(token_iterator)
+
+      return subqueries
+    elif token.token_type == SUBQUERY:
+      return [token.value]
+    else:
+      # This should not happen if other QueryToSubqueries helpers are working
+      # properly.
+      raise InvalidQueryError('Inputted tokens do not follow grammar.')
+  except StopIteration:
+    pass
+  return []
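+
+# Illustrative walkthrough: for the tokens from '(A OR B)', _ParseAndGroup
+# consumes the LEFT_PAREN, recurses via _ParseQuery to get ['A', 'B'], then
+# consumes the closing RIGHT_PAREN.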
+
+
+def _ValidateAndTokenizeQuery(query):
+  # type: (str) -> Sequence[proto.ast_pb2.QueryToken]
+  """Converts the input query into a set of tokens for easier parsing.
+
+  Tokenizing the query string before parsing means we don't need as many
+  string manipulations while parsing, which simplifies our later code.
+
+  Args:
+    query: Query to tokenize.
+
+  Returns:
+    List of Token objects for use in query processing.
+
+  Raises:
+    InvalidQueryError if parentheses are unmatched.
+  """
+  tokens = []  # Function result
+  count = 0  # Used for checking if parentheses are balanced
+  s = ''  # Records current string fragment. Cleared when a token is added.
+
+  for ch in query:
+    if ch == '(':
+      count += 1
+
+      # Add subquery from before we hit this parenthesis.
+      tokens.extend(_TokenizeSubqueryOnOr(s))
+      s = ''
+
+      tokens.append(ast_pb2.QueryToken(token_type=LEFT_PAREN))
+    elif ch == ')':
+      count -= 1
+
+      if count < 0:
+        # More closing parentheses than open parentheses.
+        raise InvalidQueryError('Search query has unbalanced parentheses.')
+
+      # Add subquery from before we hit this parenthesis.
+      tokens.extend(_TokenizeSubqueryOnOr(s))
+      s = ''
+
+      tokens.append(ast_pb2.QueryToken(token_type=RIGHT_PAREN))
+    else:
+      s += ch
+
+  if count != 0:
+    raise InvalidQueryError('Search query has unbalanced parentheses.')
+
+  # Add any trailing tokens.
+  tokens.extend(_TokenizeSubqueryOnOr(s))
+
+  return tokens
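+
+# Example (illustrative): '(A OR B) C' tokenizes to
+#   [LEFT_PAREN, SUBQUERY('A'), OR, SUBQUERY('B'), RIGHT_PAREN, SUBQUERY('C')]
+# where SUBQUERY(x) denotes ast_pb2.QueryToken(token_type=SUBQUERY, value=x).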
+
+
+def _TokenizeSubqueryOnOr(subquery):
+  # type: (str) -> Sequence[proto.ast_pb2.QueryToken]
+  """Helper to split a subquery by OR and convert the result into tokens.
+
+  Args:
+    subquery: A string without parentheses to tokenize.
+
+  Returns:
+    Tokens for the subquery with OR tokens separating query strings if
+    applicable.
+  """
+  if len(subquery) == 0:
+    return []
+
+  result = []
+  fragments = subquery.split(OR_SYMBOL)
+  for f in fragments:
+    # Interleave the string fragments with OR tokens.
+    result.append(ast_pb2.QueryToken(token_type=SUBQUERY, value=f.strip()))
+    result.append(ast_pb2.QueryToken(token_type=OR))
+
+  # Remove trailing OR.
+  result.pop()
+
+  # Trim empty strings at the beginning or end. ie: if subquery is ' OR ',
+  # we want the list to be ['OR'], not ['', 'OR', ''].
+  if len(result) > 1 and result[0].value == '':
+    result.pop(0)
+  if len(result) > 1 and result[-1].value == '':
+    result.pop()
+  return result
+
+
+def _MultiplySubqueries(a, b):
+  # type: (Sequence[str], Sequence[str]) -> Sequence[str]
+  """Helper to AND subqueries from two separate lists.
+
+  Args:
+    a: First list of subqueries.
+    b: Second list of subqueries.
+
+  Returns:
+    List with n x m subqueries.
+  """
+  if not len(a):
+    return b
+  if not len(b):
+    return a
+  res = []
+  for q1 in a:
+    for q2 in b:
+      # AND two subqueries together by concatenating them.
+      query = (q1.strip() + ' ' + q2.strip()).strip()
+      res.append(query)
+  return res
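+
+# Example (illustrative):
+#   _MultiplySubqueries(['A', 'B'], ['C', 'D']) -> ['A C', 'A D', 'B C', 'B D']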
+
+
+class PeekIterator:
+  """Simple iterator with peek() functionality.
+
+  Used by QueryToSubqueries to maintain state easily across recursive calls.
+  """
+
+  def __init__(self, source):
+    # type: (Sequence[Any]) -> None
+    self.__source = source
+    self.__i = 0
+
+  def peek(self):
+    # type: () -> Any
+    """Gets the next value in the iterator without side effects.
+
+    Returns:
+      Next value in iterator.
+
+    Raises:
+      StopIteration if you're at the end of the iterator.
+    """
+    if self.__i >= len(self.__source):
+      raise StopIteration
+    return self.__source[self.__i]
+
+  def __iter__(self):
+    # type: () -> PeekIterator
+    """Return self to make iterator iterable."""
+    return self
+
+  def __repr__(self):
+    # type: () -> str
+    """Allow logging current iterator value for debugging."""
+    try:
+      return str(self.peek())
+    except StopIteration:
+      pass
+    return 'End of PeekIterator'
+
+  def next(self):
+    # type: () -> Any
+    """Gets the next value in the iterator and increments pointer.
+
+    Returns:
+      Next value in iterator.
+
+    Raises:
+      StopIteration if you're at the end of the iterator.
+    """
+    if self.__i >= len(self.__source):
+      raise StopIteration
+    value = self.__source[self.__i]
+    self.__i += 1
+    return value
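+
+# Example usage (illustrative):
+#   it = PeekIterator(['a', 'b'])
+#   it.peek()   # -> 'a', without advancing
+#   it.next()   # -> 'a'
+#   it.next()   # -> 'b'; a further call raises StopIteration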
+
+
+class Error(Exception):
+  """Base exception class for this package."""
+  pass
+
+
+class InvalidQueryError(Error):
+  """Error raised when an invalid query is requested."""
+  pass
diff --git a/search/search_helpers.py b/search/search_helpers.py
new file mode 100644
index 0000000..0b3beb8
--- /dev/null
+++ b/search/search_helpers.py
@@ -0,0 +1,41 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+RESTRICT_VIEW_PATTERN = 'restrict-view-%'
+
+
+def GetPersonalAtRiskLabelIDs(
+  cnxn, user, config_svc, effective_ids, project, perms):
+  """Return list of label_ids for restriction labels that user can't view.
+
+  Args:
+    cnxn: An instance of MonorailConnection.
+    user: User PB for the signed in user making the request, or None for anon.
+    config_svc: An instance of ConfigService.
+    effective_ids: The effective IDs of the current user.
+    project: A project object for the current project.
+    perms: A PermissionSet for the current user.
+  Returns:
+    A list of LabelDef IDs the current user is forbidden to access.
+  """
+  if user and user.is_site_admin:
+    return []
+
+  at_risk_label_ids = []
+  label_def_rows = config_svc.GetLabelDefRowsAnyProject(
+    cnxn, where=[('LOWER(label) LIKE %s', [RESTRICT_VIEW_PATTERN])])
+
+  for label_id, _pid, _rank, label, _docstring, _hidden in label_def_rows:
+    label_lower = label.lower()
+    needed_perm = label_lower.split('-', 2)[-1]
+
+    if not perms.CanUsePerm(needed_perm, effective_ids, project, []):
+      at_risk_label_ids.append(label_id)
+
+  return at_risk_label_ids
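+
+# Example (illustrative): for a label 'Restrict-View-EditIssue', the derived
+# needed_perm is 'editissue'; if the user's PermissionSet lacks that
+# permission, the label_id is returned as at-risk.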
diff --git a/search/searchpipeline.py b/search/searchpipeline.py
new file mode 100644
index 0000000..422a619
--- /dev/null
+++ b/search/searchpipeline.py
@@ -0,0 +1,90 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Helper functions and classes used in issue search and sorting."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import logging
+import re
+
+from features import savedqueries_helpers
+from search import query2ast
+from services import tracker_fulltext
+from services import fulltext_helpers
+from tracker import tracker_helpers
+
+
+# Users can use "is:starred" in queries to limit
+# search results to issues starred by that user.
+IS_STARRED_RE = re.compile(r'\b(?![-@.:])is:starred\b(?![-@.:])', re.I)
+
+# Users can use "me" in other fields to refer to the logged in user name.
+KEYWORD_ME_RE = re.compile(r'\b[-_a-z0-9]+[=:]me\b(?![-@.:=])', re.I)
+ME_RE = re.compile(r'(?<=[=:])me\b(?![-@.:=])', re.I)
+
+
+def _AccumulateIssueProjectsAndConfigs(
+    cnxn, project_dict, config_dict, services, issues):
+  """Fetch any projects and configs that we need but haven't already loaded.
+
+  Args:
+    cnxn: connection to SQL database.
+    project_dict: dict {project_id: project} of projects that we have
+        already retrieved.
+    config_dict: dict {project_id: project} of configs that we have
+        already retrieved.
+    services: connections to backends.
+    issues: list of issues, which may be parts of different projects.
+
+  Returns:
+    Nothing, but project_dict will be updated to include all the projects that
+    contain the given issues, and config_dict will be updated to include all
+    the corresponding configs.
+  """
+  new_ids = {issue.project_id for issue in issues}
+  new_ids.difference_update(iter(project_dict.keys()))
+  new_projects_dict = services.project.GetProjects(cnxn, new_ids)
+  project_dict.update(new_projects_dict)
+  new_configs_dict = services.config.GetProjectConfigs(cnxn, new_ids)
+  config_dict.update(new_configs_dict)
+
+
+def ReplaceKeywordsWithUserIDs(me_user_ids, query):
+  """Substitutes User ID in terms such as is:starred and me.
+
+  This is done on the query string before it is parsed because the query string
+  is used as a key for cached search results in memcache.  A search by one
+  user for owner:me should not retrieve results stored for some other user.
+
+  Args:
+    me_user_ids: [] when no user is logged in, or user ID of the logged in
+        user when doing an interactive search, or the viewed user ID when
+        viewing someone else's dashboard, or the subscribing user's ID when
+        evaluating subscriptions.  Also contains linked account IDs.
+    query: The query string.
+
+  Returns:
+    A pair (query, warnings) where query is a string with "me" and "is:starred"
+    removed or replaced by new terms that use the numeric user ID provided,
+    and warnings is a list of warning strings to display to the user.
+  """
+  warnings = []
+  if me_user_ids:
+    me_user_ids_str = ','.join(str(uid) for uid in me_user_ids)
+    star_term = 'starredby:%s' % me_user_ids_str
+    query = IS_STARRED_RE.sub(star_term, query)
+    if KEYWORD_ME_RE.search(query):
+      query = ME_RE.sub(me_user_ids_str, query)
+  else:
+    if IS_STARRED_RE.search(query):
+      warnings.append('"is:starred" ignored because you are not signed in.')
+      query = IS_STARRED_RE.sub('', query)
+    if KEYWORD_ME_RE.search(query):
+      warnings.append('"me" keyword ignored because you are not signed in.')
+      query = KEYWORD_ME_RE.sub('', query)
+
+  return query, warnings
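+
+# Example (illustrative):
+#   ReplaceKeywordsWithUserIDs([111], 'is:starred owner:me')
+#   -> ('starredby:111 owner:111', [])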
diff --git a/search/test/__init__.py b/search/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/search/test/__init__.py
diff --git a/search/test/ast2ast_test.py b/search/test/ast2ast_test.py
new file mode 100644
index 0000000..9edeaf1
--- /dev/null
+++ b/search/test/ast2ast_test.py
@@ -0,0 +1,785 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Tests for the ast2ast module."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import unittest
+
+from proto import ast_pb2
+from proto import tracker_pb2
+from search import ast2ast
+from search import query2ast
+from services import service_manager
+from testing import fake
+from tracker import tracker_bizobj
+
+
+BUILTIN_ISSUE_FIELDS = query2ast.BUILTIN_ISSUE_FIELDS
+ANY_FIELD = query2ast.BUILTIN_ISSUE_FIELDS['any_field']
+OWNER_FIELD = query2ast.BUILTIN_ISSUE_FIELDS['owner']
+OWNER_ID_FIELD = query2ast.BUILTIN_ISSUE_FIELDS['owner_id']
+
+
+class AST2ASTTest(unittest.TestCase):
+
+  def setUp(self):
+    self.cnxn = 'fake cnxn'
+    self.config = tracker_bizobj.MakeDefaultProjectIssueConfig(789)
+    self.config.component_defs.append(
+        tracker_bizobj.MakeComponentDef(
+            101, 789, 'UI', 'doc', False, [], [], 0, 0))
+    self.config.component_defs.append(
+        tracker_bizobj.MakeComponentDef(
+            102, 789, 'UI>Search', 'doc', False, [], [], 0, 0))
+    self.config.component_defs.append(
+        tracker_bizobj.MakeComponentDef(
+            201, 789, 'DB', 'doc', False, [], [], 0, 0))
+    self.config.component_defs.append(
+        tracker_bizobj.MakeComponentDef(
+            301, 789, 'Search', 'doc', False, [], [], 0, 0))
+    self.services = service_manager.Services(
+        user=fake.UserService(),
+        project=fake.ProjectService(),
+        issue=fake.IssueService(),
+        config=fake.ConfigService(),
+        features=fake.FeaturesService())
+    self.services.user.TestAddUser('a@example.com', 111)
+    self.project = self.services.project.TestAddProject(
+        'proj', project_id=100)
+
+  def testPreprocessAST_EmptyAST(self):
+    ast = ast_pb2.QueryAST()  # No conjunctions in it.
+    new_ast = ast2ast.PreprocessAST(
+        self.cnxn, ast, [789], self.services, self.config)
+    self.assertEqual(ast, new_ast)
+
+  def testPreprocessAST_Normal(self):
+    open_field = BUILTIN_ISSUE_FIELDS['open']
+    label_field = BUILTIN_ISSUE_FIELDS['label']
+    label_id_field = BUILTIN_ISSUE_FIELDS['label_id']
+    status_id_field = BUILTIN_ISSUE_FIELDS['status_id']
+    conds = [
+        ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [open_field], [], []),
+        ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [label_field], ['Hot'], [])]
+    self.services.config.TestAddLabelsDict({'Hot': 0})
+
+    ast = ast_pb2.QueryAST()
+    ast.conjunctions.append(ast_pb2.Conjunction(conds=conds))
+    new_ast = ast2ast.PreprocessAST(
+        self.cnxn, ast, [789], self.services, self.config)
+    self.assertEqual(2, len(new_ast.conjunctions[0].conds))
+    new_cond_1, new_cond_2 = new_ast.conjunctions[0].conds
+    self.assertEqual(ast_pb2.QueryOp.NE, new_cond_1.op)
+    self.assertEqual([status_id_field], new_cond_1.field_defs)
+    self.assertEqual([7, 8, 9], new_cond_1.int_values)
+    self.assertEqual([], new_cond_1.str_values)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond_2.op)
+    self.assertEqual([label_id_field], new_cond_2.field_defs)
+    self.assertEqual([0], new_cond_2.int_values)
+    self.assertEqual([], new_cond_2.str_values)
+
+  def testPreprocessIsOpenCond(self):
+    open_field = BUILTIN_ISSUE_FIELDS['open']
+    status_id_field = BUILTIN_ISSUE_FIELDS['status_id']
+
+    # is:open  -> status_id!=closed_status_ids
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [open_field], [], [])
+    new_cond = ast2ast._PreprocessIsOpenCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.NE, new_cond.op)
+    self.assertEqual([status_id_field], new_cond.field_defs)
+    self.assertEqual([7, 8, 9], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    # -is:open  -> status_id=closed_status_ids
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.NE, [open_field], [], [])
+    new_cond = ast2ast._PreprocessIsOpenCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual([status_id_field], new_cond.field_defs)
+    self.assertEqual([7, 8, 9], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessBlockedOnCond_WithSingleProjectID(self):
+    blockedon_field = BUILTIN_ISSUE_FIELDS['blockedon']
+    blockedon_id_field = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    self.services.project.TestAddProject('Project1', project_id=1)
+    issue1 = fake.MakeTestIssue(
+        project_id=1, local_id=1, summary='sum', status='new', owner_id=2,
+        issue_id=101)
+    issue2 = fake.MakeTestIssue(
+        project_id=1, local_id=2, summary='sum', status='new', owner_id=2,
+        issue_id=102)
+    self.services.issue.TestAddIssue(issue1)
+    self.services.issue.TestAddIssue(issue2)
+
+    for local_ids, expected in (
+        (['1'], [101]),  # One existing issue.
+        (['Project1:1'], [101]),  # One existing issue with project prefix.
+        (['1', '2'], [101, 102]),  # Two existing issues.
+        (['3'], [])):  # Non-existent issue.
+      cond = ast_pb2.MakeCond(
+          ast_pb2.QueryOp.TEXT_HAS, [blockedon_field], local_ids, [])
+      new_cond = ast2ast._PreprocessBlockedOnCond(
+          self.cnxn, cond, [1], self.services, None, True)
+      self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+      self.assertEqual([blockedon_id_field], new_cond.field_defs)
+      self.assertEqual(expected, new_cond.int_values)
+      self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessBlockedOnCond_WithMultipleProjectIDs(self):
+    blockedon_field = BUILTIN_ISSUE_FIELDS['blockedon']
+    blockedon_id_field = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    self.services.project.TestAddProject('Project1', project_id=1)
+    self.services.project.TestAddProject('Project2', project_id=2)
+    issue1 = fake.MakeTestIssue(
+        project_id=1, local_id=1, summary='sum', status='new', owner_id=2,
+        issue_id=101)
+    issue2 = fake.MakeTestIssue(
+        project_id=2, local_id=2, summary='sum', status='new', owner_id=2,
+        issue_id=102)
+    self.services.issue.TestAddIssue(issue1)
+    self.services.issue.TestAddIssue(issue2)
+
+    for local_ids, expected in (
+        (['Project1:1'], [101]),
+        (['Project1:1', 'Project2:2'], [101, 102])):
+      cond = ast_pb2.MakeCond(
+          ast_pb2.QueryOp.TEXT_HAS, [blockedon_field], local_ids, [])
+      new_cond = ast2ast._PreprocessBlockedOnCond(
+          self.cnxn, cond, [1, 2], self.services, None, True)
+      self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+      self.assertEqual([blockedon_id_field], new_cond.field_defs)
+      self.assertEqual(expected, new_cond.int_values)
+      self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessBlockedOnCond_WithMultipleProjectIDs_NoPrefix(self):
+    blockedon_field = BUILTIN_ISSUE_FIELDS['blockedon']
+    self.services.project.TestAddProject('Project1', project_id=1)
+    self.services.project.TestAddProject('Project2', project_id=2)
+    issue1 = fake.MakeTestIssue(
+        project_id=1, local_id=1, summary='sum', status='new', owner_id=2,
+        issue_id=101)
+    issue2 = fake.MakeTestIssue(
+        project_id=2, local_id=2, summary='sum', status='new', owner_id=2,
+        issue_id=102)
+    self.services.issue.TestAddIssue(issue1)
+    self.services.issue.TestAddIssue(issue2)
+
+    for local_ids in (['1'], ['1', '2'], ['3']):
+      cond = ast_pb2.MakeCond(
+          ast_pb2.QueryOp.TEXT_HAS, [blockedon_field], local_ids, [])
+      with self.assertRaises(ValueError) as cm:
+        ast2ast._PreprocessBlockedOnCond(
+            self.cnxn, cond, [1, 2], self.services, None, True)
+      self.assertEqual(
+          'Searching for issues accross multiple/all projects without '
+          'project prefixes is ambiguous and is currently not supported.',
+          cm.exception.message)
+
+  def testPreprocessBlockedOnCond_WithExternalIssues(self):
+    blockedon_field = BUILTIN_ISSUE_FIELDS['blockedon']
+    blockedon_id_field = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    self.services.project.TestAddProject('Project1', project_id=1)
+    issue1 = fake.MakeTestIssue(
+        project_id=1, local_id=1, summary='sum', status='new', owner_id=2,
+        issue_id=101)
+    issue2 = fake.MakeTestIssue(
+        project_id=1, local_id=2, summary='sum', status='new', owner_id=2,
+        issue_id=102)
+    self.services.issue.TestAddIssue(issue1)
+    self.services.issue.TestAddIssue(issue2)
+
+    for local_ids, expected_issues, expected_ext_issues in (
+        (['b/1234'], [], ['b/1234']),
+        (['Project1:1', 'b/1234'], [101], ['b/1234']),
+        (['1', 'b/1234', 'b/1551', 'Project1:2'],
+        [101, 102], ['b/1234', 'b/1551'])):
+      cond = ast_pb2.MakeCond(
+          ast_pb2.QueryOp.TEXT_HAS, [blockedon_field], local_ids, [])
+      new_cond = ast2ast._PreprocessBlockedOnCond(
+          self.cnxn, cond, [1], self.services, None, True)
+      self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+      self.assertEqual([blockedon_id_field], new_cond.field_defs)
+      self.assertEqual(expected_issues, new_cond.int_values)
+      self.assertEqual(expected_ext_issues, new_cond.str_values)
+
+  def testPreprocessIsBlockedCond(self):
+    blocked_field = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    for input_op, expected_op in (
+        (ast_pb2.QueryOp.EQ, ast_pb2.QueryOp.IS_DEFINED),
+        (ast_pb2.QueryOp.NE, ast_pb2.QueryOp.IS_NOT_DEFINED)):
+      cond = ast_pb2.MakeCond(
+          input_op, [blocked_field], [], [])
+      new_cond = ast2ast._PreprocessIsBlockedCond(
+          self.cnxn, cond, [100], self.services, None, True)
+      self.assertEqual(expected_op, new_cond.op)
+      self.assertEqual([blocked_field], new_cond.field_defs)
+      self.assertEqual([], new_cond.int_values)
+      self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessHasBlockedOnCond(self):
+    blocked_field = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    for op in (ast_pb2.QueryOp.IS_DEFINED, ast_pb2.QueryOp.IS_NOT_DEFINED):
+      cond = ast_pb2.MakeCond(op, [blocked_field], [], [])
+      new_cond = ast2ast._PreprocessBlockedOnCond(
+          self.cnxn, cond, [100], self.services, None, True)
+      self.assertEqual(op, new_cond.op)
+      self.assertEqual([blocked_field], new_cond.field_defs)
+      self.assertEqual([], new_cond.int_values)
+      self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessHasBlockingCond(self):
+    blocking_field = BUILTIN_ISSUE_FIELDS['blocking_id']
+    for op in (ast_pb2.QueryOp.IS_DEFINED, ast_pb2.QueryOp.IS_NOT_DEFINED):
+      cond = ast_pb2.MakeCond(op, [blocking_field], [], [])
+      new_cond = ast2ast._PreprocessBlockingCond(
+          self.cnxn, cond, [100], self.services, None, True)
+      self.assertEqual(op, new_cond.op)
+      self.assertEqual([blocking_field], new_cond.field_defs)
+      self.assertEqual([], new_cond.int_values)
+      self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessBlockingCond_WithSingleProjectID(self):
+    blocking_field = BUILTIN_ISSUE_FIELDS['blocking']
+    blocking_id_field = BUILTIN_ISSUE_FIELDS['blocking_id']
+    self.services.project.TestAddProject('Project1', project_id=1)
+    issue1 = fake.MakeTestIssue(
+        project_id=1, local_id=1, summary='sum', status='new', owner_id=2,
+        issue_id=101)
+    issue2 = fake.MakeTestIssue(
+        project_id=1, local_id=2, summary='sum', status='new', owner_id=2,
+        issue_id=102)
+    self.services.issue.TestAddIssue(issue1)
+    self.services.issue.TestAddIssue(issue2)
+
+    for local_ids, expected in (
+        (['1'], [101]),  # One existing issue.
+        (['Project1:1'], [101]),  # One existing issue with project prefix.
+        (['1', '2'], [101, 102]),  # Two existing issues.
+        (['3'], [])):  # Non-existent issue.
+      cond = ast_pb2.MakeCond(
+          ast_pb2.QueryOp.TEXT_HAS, [blocking_field], local_ids, [])
+      new_cond = ast2ast._PreprocessBlockingCond(
+          self.cnxn, cond, [1], self.services, None, True)
+      self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+      self.assertEqual([blocking_id_field], new_cond.field_defs)
+      self.assertEqual(expected, new_cond.int_values)
+      self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessBlockingCond_WithMultipleProjectIDs(self):
+    blocking_field = BUILTIN_ISSUE_FIELDS['blocking']
+    blocking_id_field = BUILTIN_ISSUE_FIELDS['blocking_id']
+    self.services.project.TestAddProject('Project1', project_id=1)
+    self.services.project.TestAddProject('Project2', project_id=2)
+    issue1 = fake.MakeTestIssue(
+        project_id=1, local_id=1, summary='sum', status='new', owner_id=2,
+        issue_id=101)
+    issue2 = fake.MakeTestIssue(
+        project_id=2, local_id=2, summary='sum', status='new', owner_id=2,
+        issue_id=102)
+    self.services.issue.TestAddIssue(issue1)
+    self.services.issue.TestAddIssue(issue2)
+
+    for local_ids, expected in (
+        (['Project1:1'], [101]),
+        (['Project1:1', 'Project2:2'], [101, 102])):
+      cond = ast_pb2.MakeCond(
+          ast_pb2.QueryOp.TEXT_HAS, [blocking_field], local_ids, [])
+      new_cond = ast2ast._PreprocessBlockingCond(
+          self.cnxn, cond, [1, 2], self.services, None, True)
+      self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+      self.assertEqual([blocking_id_field], new_cond.field_defs)
+      self.assertEqual(expected, new_cond.int_values)
+      self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessBlockingCond_WithMultipleProjectIDs_NoPrefix(self):
+    blocking_field = BUILTIN_ISSUE_FIELDS['blocking']
+    self.services.project.TestAddProject('Project1', project_id=1)
+    self.services.project.TestAddProject('Project2', project_id=2)
+    issue1 = fake.MakeTestIssue(
+        project_id=1, local_id=1, summary='sum', status='new', owner_id=2,
+        issue_id=101)
+    issue2 = fake.MakeTestIssue(
+        project_id=2, local_id=2, summary='sum', status='new', owner_id=2,
+        issue_id=102)
+    self.services.issue.TestAddIssue(issue1)
+    self.services.issue.TestAddIssue(issue2)
+
+    for local_ids in (['1'], ['1', '2'], ['3']):
+      cond = ast_pb2.MakeCond(
+          ast_pb2.QueryOp.TEXT_HAS, [blocking_field], local_ids, [])
+      with self.assertRaises(ValueError) as cm:
+        ast2ast._PreprocessBlockingCond(
+            self.cnxn, cond, [1, 2], self.services, None, True)
+      self.assertEqual(
+          'Searching for issues accross multiple/all projects without '
+          'project prefixes is ambiguous and is currently not supported.',
+          cm.exception.message)
+
+  def testPreprocessBlockingCond_WithExternalIssues(self):
+    blocking_field = BUILTIN_ISSUE_FIELDS['blocking']
+    blocking_id_field = BUILTIN_ISSUE_FIELDS['blocking_id']
+    self.services.project.TestAddProject('Project1', project_id=1)
+    issue1 = fake.MakeTestIssue(
+        project_id=1, local_id=1, summary='sum', status='new', owner_id=2,
+        issue_id=101)
+    issue2 = fake.MakeTestIssue(
+        project_id=1, local_id=2, summary='sum', status='new', owner_id=2,
+        issue_id=102)
+    self.services.issue.TestAddIssue(issue1)
+    self.services.issue.TestAddIssue(issue2)
+
+    for local_ids, expected_issues, expected_ext_issues in (
+        (['b/1234'], [], ['b/1234']),
+        (['Project1:1', 'b/1234'], [101], ['b/1234']),
+        (['1', 'b/1234', 'b/1551', 'Project1:2'],
+        [101, 102], ['b/1234', 'b/1551'])):
+      cond = ast_pb2.MakeCond(
+          ast_pb2.QueryOp.TEXT_HAS, [blocking_field], local_ids, [])
+      new_cond = ast2ast._PreprocessBlockingCond(
+          self.cnxn, cond, [1], self.services, None, True)
+      self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+      self.assertEqual([blocking_id_field], new_cond.field_defs)
+      self.assertEqual(expected_issues, new_cond.int_values)
+      self.assertEqual(expected_ext_issues, new_cond.str_values)
+
+  def testPreprocessMergedIntoCond_WithSingleProjectID(self):
+    field = BUILTIN_ISSUE_FIELDS['mergedinto']
+    id_field = BUILTIN_ISSUE_FIELDS['mergedinto_id']
+    self.services.project.TestAddProject('Project1', project_id=1)
+    issue1 = fake.MakeTestIssue(
+        project_id=1, local_id=1, summary='sum', status='new', owner_id=2,
+        issue_id=101)
+    issue2 = fake.MakeTestIssue(
+        project_id=1, local_id=2, summary='sum', status='new', owner_id=2,
+        issue_id=102)
+    self.services.issue.TestAddIssue(issue1)
+    self.services.issue.TestAddIssue(issue2)
+
+    for local_ids, expected in (
+        (['1'], [101]),  # One existing issue.
+        (['Project1:1'], [101]),  # One existing issue with project prefix.
+        (['1', '2'], [101, 102]),  # Two existing issues.
+        (['3'], [])):  # Non-existent issue.
+      cond = ast_pb2.MakeCond(
+          ast_pb2.QueryOp.TEXT_HAS, [field], local_ids, [])
+      new_cond = ast2ast._PreprocessMergedIntoCond(
+          self.cnxn, cond, [1], self.services, None, True)
+      self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+      self.assertEqual([id_field], new_cond.field_defs)
+      self.assertEqual(expected, new_cond.int_values)
+      self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessMergedIntoCond_WithExternalIssues(self):
+    blocking_field = BUILTIN_ISSUE_FIELDS['mergedinto']
+    blocking_id_field = BUILTIN_ISSUE_FIELDS['mergedinto_id']
+    self.services.project.TestAddProject('Project1', project_id=1)
+    issue1 = fake.MakeTestIssue(
+        project_id=1, local_id=1, summary='sum', status='new', owner_id=2,
+        issue_id=101)
+    issue2 = fake.MakeTestIssue(
+        project_id=1, local_id=2, summary='sum', status='new', owner_id=2,
+        issue_id=102)
+    self.services.issue.TestAddIssue(issue1)
+    self.services.issue.TestAddIssue(issue2)
+
+    for local_ids, expected_issues, expected_ext_issues in (
+        (['b/1234'], [], ['b/1234']),
+        (['Project1:1', 'b/1234'], [101], ['b/1234']),
+        (['1', 'b/1234', 'b/1551', 'Project1:2'],
+        [101, 102], ['b/1234', 'b/1551'])):
+      cond = ast_pb2.MakeCond(
+          ast_pb2.QueryOp.TEXT_HAS, [blocking_field], local_ids, [])
+      new_cond = ast2ast._PreprocessMergedIntoCond(
+          self.cnxn, cond, [1], self.services, None, True)
+      self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+      self.assertEqual([blocking_id_field], new_cond.field_defs)
+      self.assertEqual(expected_issues, new_cond.int_values)
+      self.assertEqual(expected_ext_issues, new_cond.str_values)
+
+  def testPreprocessIsSpamCond(self):
+    spam_field = BUILTIN_ISSUE_FIELDS['spam']
+    is_spam_field = BUILTIN_ISSUE_FIELDS['is_spam']
+    for input_op, int_values in (
+        (ast_pb2.QueryOp.EQ, [1]), (ast_pb2.QueryOp.NE, [0])):
+      cond = ast_pb2.MakeCond(
+          input_op, [spam_field], [], [])
+      new_cond = ast2ast._PreprocessIsSpamCond(
+          self.cnxn, cond, [789], self.services, None, True)
+      self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+      self.assertEqual([is_spam_field], new_cond.field_defs)
+      self.assertEqual(int_values, new_cond.int_values)
+      self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessStatusCond(self):
+    status_field = BUILTIN_ISSUE_FIELDS['status']
+    status_id_field = BUILTIN_ISSUE_FIELDS['status_id']
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.IS_DEFINED, [status_field], [], [])
+    new_cond = ast2ast._PreprocessStatusCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.IS_DEFINED, new_cond.op)
+    self.assertEqual([status_id_field], new_cond.field_defs)
+    self.assertEqual([], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [status_field], ['New', 'Assigned'], [])
+    new_cond = ast2ast._PreprocessStatusCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual([status_id_field], new_cond.field_defs)
+    self.assertEqual([0, 1], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [status_field], [], [])
+    new_cond = ast2ast._PreprocessStatusCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual([], new_cond.int_values)
+
+  def testPrefixRegex(self):
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.IS_DEFINED, [BUILTIN_ISSUE_FIELDS['label']],
+        ['Priority', 'Severity'], [])
+    regex = ast2ast._MakePrefixRegex(cond)
+    self.assertRegexpMatches('Priority-1', regex)
+    self.assertRegexpMatches('Severity-3', regex)
+    self.assertNotRegexpMatches('My-Priority', regex)
+
+  def testKeyValueRegex(self):
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.KEY_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+        ['Type-Feature', 'Type-Security'], [])
+    regex = ast2ast._MakeKeyValueRegex(cond)
+    self.assertRegexpMatches('Type-Feature', regex)
+    self.assertRegexpMatches('Type-Bug-Security', regex)
+    self.assertNotRegexpMatches('Type-Bug', regex)
+    self.assertNotRegexpMatches('Security-Feature', regex)
+
+  def testKeyValueRegex_multipleKeys(self):
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.KEY_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+        ['Type-Bug', 'Security-Bug'], [])
+    with self.assertRaises(ValueError):
+      ast2ast._MakeKeyValueRegex(cond)
+
+  def testWordBoundryRegex(self):
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+        ['Type-Bug'], [])
+    regex = ast2ast._MakeKeyValueRegex(cond)
+    self.assertRegexpMatches('Type-Bug-Security', regex)
+    self.assertNotRegexpMatches('Type-BugSecurity', regex)
+
+  def testPreprocessLabelCond(self):
+    label_field = BUILTIN_ISSUE_FIELDS['label']
+    label_id_field = BUILTIN_ISSUE_FIELDS['label_id']
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.IS_DEFINED, [label_field], ['Priority'], [])
+    new_cond = ast2ast._PreprocessLabelCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.IS_DEFINED, new_cond.op)
+    self.assertEqual([label_id_field], new_cond.field_defs)
+    self.assertEqual([1, 2, 3], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    self.services.config.TestAddLabelsDict(
+        {
+            'Priority-Low': 0,
+            'Priority-High': 1
+        })
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [label_field],
+        ['Priority-Low', 'Priority-High'], [])
+    new_cond = ast2ast._PreprocessLabelCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual([label_id_field], new_cond.field_defs)
+    self.assertEqual([0, 1], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.KEY_HAS, [label_field],
+        ['Priority-Low', 'Priority-High'], [])
+    new_cond = ast2ast._PreprocessLabelCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual([label_id_field], new_cond.field_defs)
+    self.assertEqual([1, 2, 3], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessComponentCond_QuickOR(self):
+    component_field = BUILTIN_ISSUE_FIELDS['component']
+    component_id_field = BUILTIN_ISSUE_FIELDS['component_id']
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.IS_DEFINED, [component_field], ['UI', 'DB'], [])
+    new_cond = ast2ast._PreprocessComponentCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.IS_DEFINED, new_cond.op)
+    self.assertEqual([component_id_field], new_cond.field_defs)
+    self.assertEqual([101, 102, 201], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [component_field], ['UI', 'DB'], [])
+    new_cond = ast2ast._PreprocessComponentCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual([component_id_field], new_cond.field_defs)
+    self.assertEqual([101, 102, 201], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [component_field], [], [])
+    new_cond = ast2ast._PreprocessComponentCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual([], new_cond.int_values)
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [component_field], ['unknown@example.com'],
+        [])
+    new_cond = ast2ast._PreprocessComponentCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual([], new_cond.int_values)
+
+  def testPreprocessComponentCond_RootedAndNonRooted(self):
+    component_field = BUILTIN_ISSUE_FIELDS['component']
+    component_id_field = BUILTIN_ISSUE_FIELDS['component_id']
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [component_field], ['UI'], [])
+    new_cond = ast2ast._PreprocessComponentCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual([component_id_field], new_cond.field_defs)
+    self.assertEqual([101, 102], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [component_field], ['UI'], [])
+    new_cond = ast2ast._PreprocessComponentCond(
+        self.cnxn, cond, [789], self.services, self.config, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual([component_id_field], new_cond.field_defs)
+    self.assertEqual([101], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+  def testPreprocessExactUsers_IsDefined(self):
+    """Anyone can search for [has:owner]."""
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.IS_DEFINED, [OWNER_FIELD], ['a@example.com'], [])
+    new_cond = ast2ast._PreprocessExactUsers(
+        self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], True)
+    self.assertEqual(ast_pb2.QueryOp.IS_DEFINED, new_cond.op)
+    self.assertEqual([OWNER_ID_FIELD], new_cond.field_defs)
+    self.assertEqual([], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    # Non-members do not raise an exception.
+    ast2ast._PreprocessExactUsers(
+        self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], False)
+
+  def testPreprocessExactUsers_UserFound(self):
+    """Anyone can search for a know user, [owner:user@example.com]."""
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [OWNER_FIELD], ['a@example.com'], [])
+    new_cond = ast2ast._PreprocessExactUsers(
+        self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual([OWNER_ID_FIELD], new_cond.field_defs)
+    self.assertEqual([111], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    # Non-members do not raise an exception.
+    ast2ast._PreprocessExactUsers(
+        self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], False)
+
+  def testPreprocessExactUsers_UserSpecifiedByID(self):
+    """Anyone may search for users by ID, [owner:1234]."""
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [OWNER_FIELD], ['123'], [])
+    new_cond = ast2ast._PreprocessExactUsers(
+        self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual([OWNER_ID_FIELD], new_cond.field_defs)
+    self.assertEqual([123], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    # Non-members do not raise an exception.
+    ast2ast._PreprocessExactUsers(
+        self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], False)
+
+  def testPreprocessExactUsers_NonEquality(self):
+    """Project members may search for [owner_id>111]."""
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.GE, [OWNER_ID_FIELD], ['111'], [])
+    new_cond = ast2ast._PreprocessExactUsers(
+        self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], True)
+    self.assertEqual(cond, new_cond)
+
+    with self.assertRaises(ast2ast.MalformedQuery):
+      ast2ast._PreprocessExactUsers(
+          self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], False)
+
+  def testPreprocessExactUsers_UserNotFound(self):
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [OWNER_FIELD], ['unknown@example.com'], [])
+    new_cond = ast2ast._PreprocessExactUsers(
+        self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], True)
+    self.assertEqual(cond, new_cond)
+
+    with self.assertRaises(ast2ast.MalformedQuery):
+      ast2ast._PreprocessExactUsers(
+          self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], False)
+
+  def testPreprocessExactUsers_KeywordMe(self):
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [OWNER_FIELD], ['me'], [])
+    new_cond = ast2ast._PreprocessExactUsers(
+        self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], True)
+    self.assertEqual(cond, new_cond)
+
+    new_cond = ast2ast._PreprocessExactUsers(
+        self.cnxn, cond, self.services.user, [OWNER_ID_FIELD], False)
+    self.assertEqual(cond, new_cond)
+
+  def testPreprocessHotlistCond(self):
+    hotlist_field = BUILTIN_ISSUE_FIELDS['hotlist']
+    hotlist_id_field = BUILTIN_ISSUE_FIELDS['hotlist_id']
+
+    self.services.user.TestAddUser('gatsby@example.org', 111)
+    self.services.user.TestAddUser('daisy@example.com', 222)
+    self.services.user.TestAddUser('nick@example.org', 333)
+
+    # Setup hotlists
+    self.services.features.TestAddHotlist(
+        'Hotlist1', owner_ids=[111], hotlist_id=10)
+    self.services.features.TestAddHotlist(
+        'Hotlist2', owner_ids=[111], hotlist_id=20)
+    self.services.features.TestAddHotlist(
+        'Hotlist3', owner_ids=[222], hotlist_id=30)
+    self.services.features.TestAddHotlist(
+        'Hotlist4', owner_ids=[222], hotlist_id=40)
+    self.services.features.TestAddHotlist(
+        'Hotlist5', owner_ids=[333], hotlist_id=50)
+    self.services.features.TestAddHotlist(
+        'Hotlist6', owner_ids=[333], hotlist_id=60)
+
+    hotlist_query_vals = [
+        'gatsby@example.org:Hotlist1',
+        'nick@example.org:',
+        'daisy@example.com:Hotlist3', 'Hotlist4']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [hotlist_field], hotlist_query_vals, [])
+    actual = ast2ast._PreprocessHotlistCond(
+        self.cnxn, cond, [1], self.services, None, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, actual.op)
+    self.assertEqual([hotlist_id_field], actual.field_defs)
+    self.assertItemsEqual([10, 30, 40, 50, 60], actual.int_values)
+
+  def testPreprocessHotlistCond_UserNotFound(self):
+    hotlist_field = BUILTIN_ISSUE_FIELDS['hotlist']
+    hotlist_query_vals = ['gatsby@chromium.org:Hotlist1', 'Hotlist3']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [hotlist_field], hotlist_query_vals, [])
+    actual = ast2ast._PreprocessHotlistCond(
+        self.cnxn, cond, [1], self.services, None, True)
+    self.assertEqual(cond, actual)
+
+  def testPreprocessCustomCond_User(self):
+    fd = tracker_pb2.FieldDef(
+        field_id=1, field_name='TPM',
+        field_type=tracker_pb2.FieldTypes.USER_TYPE)
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['a@example.com'], [])
+    new_cond = ast2ast._PreprocessCustomCond(
+        self.cnxn, cond, self.services, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual(cond.field_defs, new_cond.field_defs)
+    self.assertEqual([111], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['111'], [])
+    new_cond = ast2ast._PreprocessCustomCond(
+        self.cnxn, cond, self.services, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual(cond.field_defs, new_cond.field_defs)
+    self.assertEqual([111], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['unknown@example.com'], [])
+    new_cond = ast2ast._PreprocessCustomCond(
+        self.cnxn, cond, self.services, True)
+    self.assertEqual(cond, new_cond)
+
+  def testPreprocessCustomCond_NonUser(self):
+    fd = tracker_pb2.FieldDef(
+        field_id=1, field_name='TPM',
+        field_type=tracker_pb2.FieldTypes.INT_TYPE)
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['foo'], [123])
+    new_cond = ast2ast._PreprocessCustomCond(
+        self.cnxn, cond, self.services, True)
+    self.assertEqual(cond, new_cond)
+
+    fd.field_type = tracker_pb2.FieldTypes.STR_TYPE
+    new_cond = ast2ast._PreprocessCustomCond(
+        self.cnxn, cond, self.services, True)
+    self.assertEqual(cond, new_cond)
+
+  def testPreprocessCustomCond_ApprovalUser(self):
+    fd = tracker_pb2.FieldDef(
+        field_id=1, field_name='UXReview',
+        field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE)
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['a@example.com'], [],
+        key_suffix=query2ast.APPROVER_SUFFIX)
+    new_cond = ast2ast._PreprocessCustomCond(
+        self.cnxn, cond, self.services, True)
+    self.assertEqual(ast_pb2.QueryOp.EQ, new_cond.op)
+    self.assertEqual(cond.field_defs, new_cond.field_defs)
+    self.assertEqual([111], new_cond.int_values)
+    self.assertEqual([], new_cond.str_values)
+    self.assertEqual(query2ast.APPROVER_SUFFIX, new_cond.key_suffix)
+
+  def testPreprocessCond_NoChange(self):
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.TEXT_HAS, [ANY_FIELD], ['foo'], [])
+    self.assertEqual(
+        cond, ast2ast._PreprocessCond(self.cnxn, cond, [], None, None, True))
+
+  def testTextOpToIntOp(self):
+    self.assertEqual(ast_pb2.QueryOp.EQ,
+                     ast2ast._TextOpToIntOp(ast_pb2.QueryOp.TEXT_HAS))
+    self.assertEqual(ast_pb2.QueryOp.EQ,
+                     ast2ast._TextOpToIntOp(ast_pb2.QueryOp.KEY_HAS))
+    self.assertEqual(ast_pb2.QueryOp.NE,
+                     ast2ast._TextOpToIntOp(ast_pb2.QueryOp.NOT_TEXT_HAS))
+
+    for enum_name, _enum_id in ast_pb2.QueryOp.to_dict().items():
+      no_change_op = ast_pb2.QueryOp(enum_name)
+      if no_change_op not in (
+          ast_pb2.QueryOp.TEXT_HAS,
+          ast_pb2.QueryOp.NOT_TEXT_HAS,
+          ast_pb2.QueryOp.KEY_HAS):
+        self.assertEqual(no_change_op,
+                         ast2ast._TextOpToIntOp(no_change_op))
diff --git a/search/test/ast2select_test.py b/search/test/ast2select_test.py
new file mode 100644
index 0000000..f20d524
--- /dev/null
+++ b/search/test/ast2select_test.py
@@ -0,0 +1,1731 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Tests for the ast2select module."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import datetime
+import time
+import unittest
+
+from framework import sql
+from proto import ast_pb2
+from proto import tracker_pb2
+from search import ast2select
+from search import query2ast
+from tracker import tracker_bizobj
+
+
+BUILTIN_ISSUE_FIELDS = query2ast.BUILTIN_ISSUE_FIELDS
+ANY_FIELD = query2ast.BUILTIN_ISSUE_FIELDS['any_field']
+
+
+class AST2SelectTest(unittest.TestCase):
+
+  def setUp(self):
+    self.config = tracker_bizobj.MakeDefaultProjectIssueConfig(789)
+
+  def testBuildSQLQuery_EmptyAST(self):
+    ast = ast_pb2.QueryAST(conjunctions=[ast_pb2.Conjunction()])  # No conds
+    left_joins, where, unsupported = ast2select.BuildSQLQuery(ast)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([], unsupported)
+
+  def testBuildSQLQuery_Normal(self):
+    owner_field = BUILTIN_ISSUE_FIELDS['owner']
+    reporter_id_field = BUILTIN_ISSUE_FIELDS['reporter_id']
+    conds = [
+        ast_pb2.MakeCond(
+            ast_pb2.QueryOp.TEXT_HAS, [owner_field], ['example.com'], []),
+        ast_pb2.MakeCond(
+            ast_pb2.QueryOp.EQ, [reporter_id_field], [], [111])]
+    ast = ast_pb2.QueryAST(conjunctions=[ast_pb2.Conjunction(conds=conds)])
+    left_joins, where, unsupported = ast2select.BuildSQLQuery(ast)
+    self.assertEqual(
+        [('User AS Cond0 ON (Issue.owner_id = Cond0.user_id '
+          'OR Issue.derived_owner_id = Cond0.user_id)', [])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('(LOWER(Cond0.email) LIKE %s)', ['%example.com%']),
+         ('Issue.reporter_id = %s', [111])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testBlockingIDCond_SingleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['blocking_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockingIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
+          'Cond1.kind = %s AND Cond1.issue_id = %s',
+          ['blockedon', 1])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.dst_issue_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testBlockingIDCond_NegatedSingleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['blocking_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockingIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
+          'Cond1.kind = %s AND Cond1.issue_id = %s',
+          ['blockedon', 1])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.dst_issue_id IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testBlockingIDCond_MultiValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['blocking_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1, 2, 3])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockingIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
+          'Cond1.kind = %s AND Cond1.issue_id IN (%s,%s,%s)',
+          ['blockedon', 1, 2, 3])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.dst_issue_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testBlockingIDCond_NegatedMultiValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['blocking_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1, 2, 3])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockingIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
+          'Cond1.kind = %s AND Cond1.issue_id IN (%s,%s,%s)',
+          ['blockedon', 1, 2, 3])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.dst_issue_id IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testBlockingIDCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['blocking_id']
+    txt_cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [fd], ['b/1'], [])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockingIDCond(
+        txt_cond, 'Cond1', 'Issue1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([txt_cond], unsupported)
+
+  def testBlockingIDCond_ExtIssues(self):
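+    """External refs like b/1 are matched via DanglingIssueRelation."""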
+    fd = BUILTIN_ISSUE_FIELDS['blocking_id']
+    ne_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], ['b/1', 'b/2'], [])
+    eq_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], ['b/1', 'b/2'], [])
+
+    for cond, where_str in [(eq_cond, 'DIR.issue_id IS NOT NULL'),
+                            (ne_cond, 'DIR.issue_id IS NULL')]:
+      left_joins, where, unsupported = ast2select._ProcessBlockingIDCond(
+          cond, 'DIR', 'Issue1', snapshot_mode=False)
+      self.assertEqual(
+          [('DanglingIssueRelation AS DIR ON Issue.id = DIR.issue_id AND '
+            'DIR.kind = %s AND DIR.ext_issue_identifier IN (%s,%s)',
+            ['blocking', 'b/1', 'b/2'])],
+          left_joins)
+      self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+      self.assertEqual(
+          [(where_str, [])],
+          where)
+      self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+      self.assertEqual([], unsupported)
+
+  def testBlockingIDCond_CombinedIssues(self):
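+    """Mixed local and external IDs yield one join per relation table."""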
+    fd = BUILTIN_ISSUE_FIELDS['blocking_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], ['b/1', 'b/2'], [1, 2])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockingIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        ('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
+         'Cond1.kind = %s AND Cond1.issue_id IN (%s,%s)',
+         ['blockedon', 1, 2]), left_joins[0])
+    self.assertEqual(
+        ('DanglingIssueRelation AS DIR ON Issue.id = DIR.issue_id AND '
+         'DIR.kind = %s AND DIR.ext_issue_identifier IN (%s,%s)',
+         ['blocking', 'b/1', 'b/2']), left_joins[1])
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertTrue(sql._IsValidJoin(left_joins[1][0]))
+    self.assertEqual(
+        [('Cond1.dst_issue_id IS NOT NULL', []),
+         ('DIR.issue_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertTrue(sql._IsValidWhereCond(where[1][0]))
+    self.assertEqual([], unsupported)
+
+  def testBlockedOnIDCond_SingleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockedOnIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.kind = %s AND Cond1.dst_issue_id = %s',
+          ['blockedon', 1])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.issue_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testBlockedOnIDCond_NegatedSingleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockedOnIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.kind = %s AND Cond1.dst_issue_id = %s',
+          ['blockedon', 1])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.issue_id IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testBlockedOnIDCond_MultiValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1, 2, 3])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockedOnIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.kind = %s AND Cond1.dst_issue_id IN (%s,%s,%s)',
+          ['blockedon', 1, 2, 3])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.issue_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testBlockedOnIDCond_NegatedMultiValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1, 2, 3])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockedOnIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.kind = %s AND Cond1.dst_issue_id IN (%s,%s,%s)',
+          ['blockedon', 1, 2, 3])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.issue_id IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testBlockedOnIDCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    txt_cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [fd], ['b/1'], [])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockedOnIDCond(
+        txt_cond, 'Cond1', 'Issue1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([txt_cond], unsupported)
+
+  def testBlockedOnIDCond_ExtIssues(self):
+    fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    eq_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], ['b/1', 'b/2'], [])
+    ne_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], ['b/1', 'b/2'], [])
+
+    for cond, where_str in [(eq_cond, 'DIR.issue_id IS NOT NULL'),
+                            (ne_cond, 'DIR.issue_id IS NULL')]:
+      left_joins, where, unsupported = ast2select._ProcessBlockedOnIDCond(
+          cond, 'DIR', 'Issue1', snapshot_mode=False)
+      self.assertEqual(
+          [('DanglingIssueRelation AS DIR ON Issue.id = DIR.issue_id AND '
+            'DIR.kind = %s AND DIR.ext_issue_identifier IN (%s,%s)',
+            ['blockedon', 'b/1', 'b/2'])],
+          left_joins)
+      self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+      self.assertEqual(
+          [(where_str, [])],
+          where)
+      self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+      self.assertEqual([], unsupported)
+
+  def testBlockedOnIDCond_CombinedIssues(self):
+    fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], ['b/1', 'b/2'], [1, 2])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockedOnIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        ('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
+         'Cond1.kind = %s AND Cond1.dst_issue_id IN (%s,%s)',
+         ['blockedon', 1, 2]), left_joins[0])
+    self.assertEqual(
+        ('DanglingIssueRelation AS DIR ON Issue.id = DIR.issue_id AND '
+         'DIR.kind = %s AND DIR.ext_issue_identifier IN (%s,%s)',
+         ['blockedon', 'b/1', 'b/2']), left_joins[1])
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertTrue(sql._IsValidJoin(left_joins[1][0]))
+    self.assertEqual(
+        [('Cond1.issue_id IS NOT NULL', []),
+         ('DIR.issue_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertTrue(sql._IsValidWhereCond(where[1][0]))
+    self.assertEqual([], unsupported)
+
+  def testMergedIntoIDCond_MultiValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['mergedinto_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1, 2, 3])
+
+    left_joins, where, unsupported = ast2select._ProcessMergedIntoIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.kind = %s AND Cond1.dst_issue_id IN (%s,%s,%s)',
+          ['mergedinto', 1, 2, 3])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.issue_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testMergedIntoIDCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['mergedinto_id']
+    txt_cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [fd], ['b/1', 'b/2', 'b/3'], [])
+
+    left_joins, where, unsupported = ast2select._ProcessMergedIntoIDCond(
+        txt_cond, 'Cond1', 'Issue1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([txt_cond], unsupported)
+
+  def testMergedIntoIDCond_ExtIssues(self):
+    fd = BUILTIN_ISSUE_FIELDS['mergedinto_id']
+    eq_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], ['b/1', 'b/2'], [])
+    ne_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], ['b/1', 'b/2'], [])
+
+    for cond, expected in [(eq_cond, ['b/1', 'b/2']),
+                           (ne_cond, ['b/1', 'b/2'])]:
+      left_joins, where, unsupported = ast2select._ProcessMergedIntoIDCond(
+          cond, 'Cond1', 'Issue1', snapshot_mode=False)
+      self.assertEqual(
+          [('DanglingIssueRelation AS DIR ON Issue.id = DIR.issue_id AND '
+            'DIR.kind = %s AND DIR.ext_issue_identifier IN (%s,%s)',
+            ['mergedinto'] + expected)],
+          left_joins)
+      self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+      self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+      self.assertEqual([], unsupported)
+
+  def testMergedIntoIDCond_CombinedIssues(self):
+    fd = BUILTIN_ISSUE_FIELDS['mergedinto_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], ['b/1', 'b/2'], [1, 2])
+
+    left_joins, where, unsupported = ast2select._ProcessMergedIntoIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.kind = %s AND Cond1.dst_issue_id IN (%s,%s)',
+          ['mergedinto', 1, 2]),
+         ('DanglingIssueRelation AS DIR ON Issue.id = DIR.issue_id AND '
+          'DIR.kind = %s AND DIR.ext_issue_identifier IN (%s,%s)',
+          ['mergedinto', 'b/1', 'b/2'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.issue_id IS NOT NULL', []),
+         ('DIR.issue_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testHasBlockedCond(self):
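+    """IS_DEFINED on blockedon_id checks local and dangling relations."""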
+    for op, expected in ((ast_pb2.QueryOp.IS_DEFINED, 'IS NOT NULL'),
+                         (ast_pb2.QueryOp.IS_NOT_DEFINED, 'IS NULL')):
+      fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
+      cond = ast_pb2.MakeCond(op, [fd], [], [])
+
+      left_joins, where, unsupported = ast2select._ProcessBlockedOnIDCond(
+          cond, 'Cond1', None, snapshot_mode=False)
+      self.assertEqual(
+          ('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
+           'Cond1.kind = %s', ['blockedon']),
+          left_joins[0])
+      self.assertEqual(
+          ('DanglingIssueRelation AS DIR ON Issue.id = DIR.issue_id AND '
+           'DIR.kind = %s', ['blockedon']),
+          left_joins[1])
+      self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+      self.assertTrue(sql._IsValidJoin(left_joins[1][0]))
+      self.assertEqual([('(Cond1.issue_id %s OR DIR.issue_id %s)'
+          % (expected, expected), [])], where)
+      self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+      self.assertEqual([], unsupported)
+
+  def testHasBlockedCond_SnapshotMode(self):
+    op = ast_pb2.QueryOp.IS_DEFINED
+    fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
+    cond = ast_pb2.MakeCond(op, [fd], [], [])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockedOnIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testHasBlockingCond(self):
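+    """IS_DEFINED on blocking_id checks local and dangling relations."""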
+    for op, expected in ((ast_pb2.QueryOp.IS_DEFINED, 'IS NOT NULL'),
+                         (ast_pb2.QueryOp.IS_NOT_DEFINED, 'IS NULL')):
+      fd = BUILTIN_ISSUE_FIELDS['blocking_id']
+      cond = ast_pb2.MakeCond(op, [fd], [], [])
+
+      left_joins, where, unsupported = ast2select._ProcessBlockingIDCond(cond,
+          'Cond1', None, snapshot_mode=False)
+      self.assertEqual(
+          ('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
+           'Cond1.kind = %s', ['blockedon']),
+          left_joins[0])
+      self.assertEqual(
+          ('DanglingIssueRelation AS DIR ON Issue.id = DIR.issue_id AND '
+           'DIR.kind = %s', ['blocking']),
+          left_joins[1])
+      self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+      self.assertTrue(sql._IsValidJoin(left_joins[1][0]))
+      self.assertEqual([('(Cond1.dst_issue_id %s OR DIR.issue_id %s)'
+          % (expected, expected), [])], where)
+      self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+      self.assertEqual([], unsupported)
+
+  def testHasBlockingCond_SnapshotMode(self):
+    op = ast_pb2.QueryOp.IS_DEFINED
+    fd = BUILTIN_ISSUE_FIELDS['blocking_id']
+    cond = ast_pb2.MakeCond(op, [fd], [], [])
+
+    left_joins, where, unsupported = ast2select._ProcessBlockingIDCond(
+        cond, 'Cond1', 'Issue1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testProcessOwnerCond(self):
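+    """owner: matches either the explicit or the derived owner's email."""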
+    fd = BUILTIN_ISSUE_FIELDS['owner']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessOwnerCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('User AS Cond1 ON (Issue.owner_id = Cond1.user_id '
+          'OR Issue.derived_owner_id = Cond1.user_id)', [])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('(LOWER(Cond1.email) LIKE %s)', ['%example.com%'])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessOwnerCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['owner']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessOwnerCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('User AS Cond1 ON IssueSnapshot.owner_id = Cond1.user_id', [])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('(LOWER(Cond1.email) LIKE %s)', ['%example.com%'])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessOwnerIDCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['owner_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessOwnerIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual([], left_joins)
+    self.assertEqual(
+        [('(Issue.owner_id = %s OR Issue.derived_owner_id = %s)',
+          [111, 111])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessOwnerIDCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['owner_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessOwnerIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([('IssueSnapshot.owner_id = %s', [111])], where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessOwnerLastVisitCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['ownerlastvisit']
+    NOW = 1234567890
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.LT, [fd], [], [NOW])
+    left_joins, where, unsupported = ast2select._ProcessOwnerLastVisitCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('User AS Cond1 ON (Issue.owner_id = Cond1.user_id OR '
+          'Issue.derived_owner_id = Cond1.user_id)',
+          [])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.last_visit_timestamp < %s',
+          [NOW])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessOwnerLastVisitCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['ownerlastvisit']
+    NOW = 1234567890
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.LT, [fd], [], [NOW])
+    left_joins, where, unsupported = ast2select._ProcessOwnerLastVisitCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testProcessIsOwnerBouncing(self):
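+    """ownerbouncing matches owners with a nonzero bounce timestamp."""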
+    fd = BUILTIN_ISSUE_FIELDS['ownerbouncing']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [])
+    left_joins, where, unsupported = ast2select._ProcessIsOwnerBouncing(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('User AS Cond1 ON (Issue.owner_id = Cond1.user_id OR '
+          'Issue.derived_owner_id = Cond1.user_id)',
+          [])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('(Cond1.email_bounce_timestamp IS NOT NULL AND'
+          ' Cond1.email_bounce_timestamp != %s)',
+          [0])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessIsOwnerBouncing_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['ownerbouncing']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [])
+    left_joins, where, unsupported = ast2select._ProcessIsOwnerBouncing(
+        cond, 'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testProcessReporterCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['reporter']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessReporterCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('User AS Cond1 ON Issue.reporter_id = Cond1.user_id', [])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('(LOWER(Cond1.email) LIKE %s)', ['%example.com%'])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessReporterCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['reporter']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessReporterCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('User AS Cond1 ON IssueSnapshot.reporter_id = Cond1.user_id', [])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('(LOWER(Cond1.email) LIKE %s)', ['%example.com%'])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessReporterIDCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['reporter_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessReporterIDCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual([], left_joins)
+    self.assertEqual(
+        [('Issue.reporter_id = %s', [111])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessReporterIDCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['reporter_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessReporterIDCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual(
+        [('IssueSnapshot.reporter_id = %s', [111])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCcCond_SinglePositive(self):
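+    """cc: conds join Issue2Cc to User and match emails with LIKE."""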
+    fd = BUILTIN_ISSUE_FIELDS['cc']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessCcCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('(Issue2Cc AS Cond1 JOIN User AS Spare1 '
+          'ON Cond1.cc_id = Spare1.user_id AND (LOWER(Spare1.email) LIKE %s)) '
+          'ON Issue.id = Cond1.issue_id AND Issue.shard = Cond1.issue_shard',
+          ['%example.com%'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Spare1.email IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCcCond_SinglePositive_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['cc']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessCcCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('(IssueSnapshot2Cc AS Cond1 JOIN User AS Spare1 '
+          'ON Cond1.cc_id = Spare1.user_id AND (LOWER(Spare1.email) LIKE %s)) '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id',
+          ['%example.com%'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Spare1.email IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCcCond_MultiplePositive(self):
+    fd = BUILTIN_ISSUE_FIELDS['cc']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['.com', '.org'], [])
+    left_joins, where, unsupported = ast2select._ProcessCcCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('(Issue2Cc AS Cond1 JOIN User AS Spare1 '
+          'ON Cond1.cc_id = Spare1.user_id AND '
+          '(LOWER(Spare1.email) LIKE %s OR LOWER(Spare1.email) LIKE %s)) '
+          'ON Issue.id = Cond1.issue_id AND Issue.shard = Cond1.issue_shard',
+          ['%.com%', '%.org%'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Spare1.email IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCcCond_MultiplePositive_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['cc']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['.com', '.org'], [])
+    left_joins, where, unsupported = ast2select._ProcessCcCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('(IssueSnapshot2Cc AS Cond1 JOIN User AS Spare1 '
+          'ON Cond1.cc_id = Spare1.user_id AND '
+          '(LOWER(Spare1.email) LIKE %s OR LOWER(Spare1.email) LIKE %s)) '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id',
+          ['%.com%', '%.org%'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Spare1.email IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCcCond_SingleNegative(self):
+    fd = BUILTIN_ISSUE_FIELDS['cc']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.NOT_TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessCcCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('(Issue2Cc AS Cond1 JOIN User AS Spare1 '
+          'ON Cond1.cc_id = Spare1.user_id AND (LOWER(Spare1.email) LIKE %s)) '
+          'ON Issue.id = Cond1.issue_id AND Issue.shard = Cond1.issue_shard',
+          ['%example.com%'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Spare1.email IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCcCond_SingleNegative_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['cc']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.NOT_TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessCcCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('(IssueSnapshot2Cc AS Cond1 JOIN User AS Spare1 '
+          'ON Cond1.cc_id = Spare1.user_id AND (LOWER(Spare1.email) LIKE %s)) '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id',
+          ['%example.com%'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Spare1.email IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCcCond_MultipleNegative(self):
+    fd = BUILTIN_ISSUE_FIELDS['cc']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.NOT_TEXT_HAS, [fd], ['.com', '.org'], [])
+    left_joins, where, unsupported = ast2select._ProcessCcCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('(Issue2Cc AS Cond1 JOIN User AS Spare1 '
+          'ON Cond1.cc_id = Spare1.user_id AND '
+          '(LOWER(Spare1.email) LIKE %s OR LOWER(Spare1.email) LIKE %s)) '
+          'ON Issue.id = Cond1.issue_id AND Issue.shard = Cond1.issue_shard',
+          ['%.com%', '%.org%'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Spare1.email IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCcCond_MultipleNegative_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['cc']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.NOT_TEXT_HAS, [fd], ['.com', '.org'], [])
+    left_joins, where, unsupported = ast2select._ProcessCcCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('(IssueSnapshot2Cc AS Cond1 JOIN User AS Spare1 '
+          'ON Cond1.cc_id = Spare1.user_id AND '
+          '(LOWER(Spare1.email) LIKE %s OR LOWER(Spare1.email) LIKE %s)) '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id',
+          ['%.com%', '%.org%'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Spare1.email IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCcIDCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['cc_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessCcIDCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Issue2Cc AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.cc_id = %s',
+         [111])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.cc_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCcIDCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['cc_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessCcIDCond(cond, 'Cond1',
+        'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('IssueSnapshot2Cc AS Cond1 '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id '
+          'AND Cond1.cc_id = %s',
+         [111])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.cc_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessStarredByCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['starredby']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessStarredByCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('(IssueStar AS Cond1 JOIN User AS Spare1 '
+          'ON Cond1.user_id = Spare1.user_id AND '
+          '(LOWER(Spare1.email) LIKE %s)) '
+          'ON Issue.id = Cond1.issue_id', ['%example.com%'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Spare1.email IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessStarredByCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['starredby']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessStarredByCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testProcessStarredByIDCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['starredby_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessStarredByIDCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueStar AS Cond1 ON Issue.id = Cond1.issue_id '
+          'AND Cond1.user_id = %s', [111])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.user_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessStarredByIDCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['starredby_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessStarredByIDCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testProcessCommentByCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['commentby']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessCommentByCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('(Comment AS Cond1 JOIN User AS Spare1 '
+          'ON Cond1.commenter_id = Spare1.user_id '
+          'AND (LOWER(Spare1.email) LIKE %s)) '
+          'ON Issue.id = Cond1.issue_id AND Cond1.deleted_by IS NULL',
+          ['%example.com%'])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Spare1.email IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCommentByCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['commentby']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
+    left_joins, where, unsupported = ast2select._ProcessCommentByCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testProcessCommentByIDCond_EqualsUserID(self):
+    fd = BUILTIN_ISSUE_FIELDS['commentby_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessCommentByIDCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Comment AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.commenter_id = %s AND Cond1.deleted_by IS NULL',
+          [111])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.commenter_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCommentByIDCond_EqualsUserID_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['commentby_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessCommentByIDCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testProcessCommentByIDCond_QuickOr(self):
+    fd = BUILTIN_ISSUE_FIELDS['commentby_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111, 222])
+    left_joins, where, unsupported = ast2select._ProcessCommentByIDCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Comment AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.commenter_id IN (%s,%s) '
+          'AND Cond1.deleted_by IS NULL',
+          [111, 222])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.commenter_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCommentByIDCond_NotEqualsUserID(self):
+    fd = BUILTIN_ISSUE_FIELDS['commentby_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [111])
+    left_joins, where, unsupported = ast2select._ProcessCommentByIDCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Comment AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.commenter_id = %s AND Cond1.deleted_by IS NULL',
+          [111])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.commenter_id IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessStatusIDCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['status_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [2])
+    left_joins, where, unsupported = ast2select._ProcessStatusIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual([], left_joins)
+    self.assertEqual(
+        [('(Issue.status_id = %s OR Issue.derived_status_id = %s)', [2, 2])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessStatusIDCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['status_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [2])
+    left_joins, where, unsupported = ast2select._ProcessStatusIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([('IssueSnapshot.status_id = %s', [2])], where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessSummaryCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['summary']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], ['sum'], [])
+    left_joins, where, unsupported = ast2select._ProcessSummaryCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssueSummary AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.summary = %s', ['sum'])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.issue_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessSummaryCond_SnapshotMode(self):
+    """Issue summary is not currently included in issue snapshot, so ignore."""
+    fd = BUILTIN_ISSUE_FIELDS['summary']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], ['sum'], [])
+    left_joins, where, unsupported = ast2select._ProcessSummaryCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testProcessLabelIDCond_NoValue(self):
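+    """A label cond with no label IDs can never match any issue."""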
+    fd = BUILTIN_ISSUE_FIELDS['label_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [])
+    with self.assertRaises(ast2select.NoPossibleResults):
+      ast2select._ProcessLabelIDCond(cond, 'Cond1', 'Spare1',
+          snapshot_mode=False)
+
+  def testProcessLabelIDCond_SingleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['label_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1])
+    left_joins, where, unsupported = ast2select._ProcessLabelIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Issue2Label AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.label_id = %s', [1])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.label_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessLabelIDCond_SingleValue_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['label_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1])
+    left_joins, where, unsupported = ast2select._ProcessLabelIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('IssueSnapshot2Label AS Cond1 '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id AND '
+          'Cond1.label_id = %s', [1])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.label_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessLabelIDCond_MultipleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['label_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1, 2])
+    left_joins, where, unsupported = ast2select._ProcessLabelIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Issue2Label AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.label_id IN (%s,%s)', [1, 2])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.label_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessLabelIDCond_NegatedNoValue(self):
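+    """Negating an empty label cond matches everything: emit no SQL."""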
+    fd = BUILTIN_ISSUE_FIELDS['label_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [])
+    left_joins, where, unsupported = ast2select._ProcessLabelIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([], unsupported)
+
+  def testProcessLabelIDCond_NegatedSingleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['label_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1])
+    left_joins, where, unsupported = ast2select._ProcessLabelIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Issue2Label AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.label_id = %s', [1])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.label_id IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessLabelIDCond_NegatedSingleValue_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['label_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1])
+    left_joins, where, unsupported = ast2select._ProcessLabelIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('IssueSnapshot2Label AS Cond1 '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id AND '
+          'Cond1.label_id = %s', [1])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.label_id IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessLabelIDCond_NegatedMultipleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['label_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1, 2])
+    left_joins, where, unsupported = ast2select._ProcessLabelIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Issue2Label AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.label_id IN (%s,%s)', [1, 2])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.label_id IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessComponentIDCond(self):
+    fd = BUILTIN_ISSUE_FIELDS['component_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [101])
+    left_joins, where, unsupported = ast2select._ProcessComponentIDCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Issue2Component AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.component_id = %s', [101])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.component_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessComponentIDCond_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['component_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [101])
+    left_joins, where, unsupported = ast2select._ProcessComponentIDCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('IssueSnapshot2Component AS Cond1 '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id AND '
+          'Cond1.component_id = %s', [101])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.component_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessApprovalFieldCond_Status(self):
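+    """Approval status conds compare LOWER(status) to the query value."""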
+    approval_fd = tracker_pb2.FieldDef(
+        field_id=1, field_name='UXReview',
+        field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE)
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [approval_fd], ['Approved'], [],
+        key_suffix=query2ast.STATUS_SUFFIX)
+    left_joins, where, _unsupported = ast2select._ProcessApprovalFieldCond(
+        cond, 'Cond1', 'Spare1', False)
+    self.assertEqual(
+        [('Issue2ApprovalValue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.approval_id = %s AND LOWER(Cond1.status) = %s',
+          [1, 'approved'])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.approval_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+
+  def testProcessApprovalFieldCond_SetOn(self):
+    approval_fd = tracker_pb2.FieldDef(
+        field_id=1, field_name='UXReview',
+        field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE)
+    int_time = int(time.mktime(datetime.datetime(2016, 10, 5).timetuple()))
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.NOT_TEXT_HAS, [approval_fd], [], [int_time],
+        key_suffix=query2ast.SET_ON_SUFFIX)
+    left_joins, where, _unsupported = ast2select._ProcessApprovalFieldCond(
+        cond, 'Cond1', 'Spare1', False)
+    self.assertEqual(
+        [('Issue2ApprovalValue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.approval_id = %s AND Cond1.set_on = %s',
+          [1, int_time])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.approval_id IS NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+
+  def testProcessApprovalFieldCond_SetBy(self):
+    approval_fd = tracker_pb2.FieldDef(
+        field_id=1, field_name='UXReview',
+        field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE)
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [approval_fd], ['user2@email.com'], [],
+        key_suffix=query2ast.SET_BY_SUFFIX)
+    left_joins, where, _unsupported = ast2select._ProcessApprovalFieldCond(
+        cond, 'Cond1', 'Spare1', False)
+    self.assertEqual(
+        [('User AS Spare1 ON LOWER(Spare1.email) = %s', ['user2@email.com']),
+         ('Issue2ApprovalValue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.approval_id = %s AND Cond1.setter_id = Spare1.user_id',
+          [1])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.approval_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+
+  def testProcessApprovalFieldCond_ApproverID(self):
+    approval_fd = tracker_pb2.FieldDef(
+        field_id=1, field_name='UXReview',
+        field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE)
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [approval_fd], [], [111],
+        key_suffix=query2ast.APPROVER_SUFFIX)
+    left_joins, where, _unsupported = ast2select._ProcessApprovalFieldCond(
+        cond, 'Cond1', 'Spare1', False)
+    self.assertEqual(
+        [('IssueApproval2Approver AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.approval_id = %s AND Cond1.approver_id = %s',
+          [1, 111])], left_joins)
+    self.assertEqual(
+        [('Cond1.approval_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+
+  def testProcessApprovalFieldCond_IsDefined(self):
+    approval_fd = tracker_pb2.FieldDef(
+        field_id=1, field_name='UXReview',
+        field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE)
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.IS_DEFINED, [approval_fd], [], [])
+    left_joins, where, _unsupported = ast2select._ProcessApprovalFieldCond(
+        cond, 'Cond1', 'Spare1', False)
+    self.assertEqual(
+        [('Issue2ApprovalValue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.approval_id = %s',
+          [1])], left_joins)
+    self.assertEqual(
+        [('Cond1.approval_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+
+  def testProcessCustomFieldCond_IntType(self):
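+    """Custom field conds compare the type-specific value column."""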
+    fd = tracker_pb2.FieldDef(
+      field_id=1, project_id=789, field_name='EstDays',
+      field_type=tracker_pb2.FieldTypes.INT_TYPE)
+    val = 42
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [val])
+    left_joins, where, unsupported = ast2select._ProcessCustomFieldCond(
+        cond, 'Cond1', 'Spare1', 'Phase1', snapshot_mode=False)
+    self.assertEqual(
+        [('Issue2FieldValue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.field_id = %s AND '
+          'Cond1.int_value = %s', [1, val])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.field_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCustomFieldCond_StrType(self):
+    fd = tracker_pb2.FieldDef(
+      field_id=1, project_id=789, field_name='Nickname',
+      field_type=tracker_pb2.FieldTypes.STR_TYPE)
+    val = 'Fuzzy'
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [val], [])
+    left_joins, where, unsupported = ast2select._ProcessCustomFieldCond(
+        cond, 'Cond1', 'Spare1', 'Phase1', snapshot_mode=False)
+    self.assertEqual(
+        [('Issue2FieldValue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.field_id = %s AND '
+          'Cond1.str_value = %s', [1, val])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.field_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCustomFieldCond_StrType_SnapshotMode(self):
+    fd = tracker_pb2.FieldDef(
+      field_id=1, project_id=789, field_name='Nickname',
+      field_type=tracker_pb2.FieldTypes.STR_TYPE)
+    val = 'Fuzzy'
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [val], [])
+    left_joins, where, unsupported = ast2select._ProcessCustomFieldCond(
+        cond, 'Cond1', 'Spare1', 'Phase1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testProcessCustomFieldCond_UserType_ByID(self):
+    fd = tracker_pb2.FieldDef(
+      field_id=1, project_id=789, field_name='ExecutiveProducer',
+      field_type=tracker_pb2.FieldTypes.USER_TYPE)
+    val = 111
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [val])
+    left_joins, where, unsupported = ast2select._ProcessCustomFieldCond(
+        cond, 'Cond1', 'Spare1', 'Phase1', snapshot_mode=False)
+    self.assertEqual(
+        [('Issue2FieldValue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.field_id = %s AND '
+          'Cond1.user_id = %s', [1, val])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.field_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCustomFieldCond_UserType_ByEmail(self):
+    fd = tracker_pb2.FieldDef(
+      field_id=1, project_id=789, field_name='ExecutiveProducer',
+      field_type=tracker_pb2.FieldTypes.USER_TYPE)
+    val = 'exec@example.com'
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [val], [])
+    left_joins, where, unsupported = ast2select._ProcessCustomFieldCond(
+        cond, 'Cond1', 'Spare1', 'Phase1', snapshot_mode=False)
+    self.assertEqual(
+        [('User AS Spare1 ON '
+          'LOWER(Spare1.email) = %s', [val]),
+         ('Issue2FieldValue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.field_id = %s AND '
+          'Cond1.user_id = Spare1.user_id', [1])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertTrue(sql._IsValidJoin(left_joins[1][0]))
+    self.assertEqual(
+        [('Cond1.field_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCustomFieldCond_DateType(self):
+    fd = tracker_pb2.FieldDef(
+      field_id=1, project_id=789, field_name='Deadline',
+      field_type=tracker_pb2.FieldTypes.DATE_TYPE)
+    val = int(time.mktime(datetime.datetime(2016, 10, 5).timetuple()))
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [val])
+    left_joins, where, unsupported = ast2select._ProcessCustomFieldCond(
+        cond, 'Cond1', 'Spare1', 'Phase1', snapshot_mode=False)
+    self.assertEqual(
+        [('Issue2FieldValue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.field_id = %s AND '
+          'Cond1.date_value = %s', [1, val])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('Cond1.field_id IS NOT NULL', [])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessCustomFieldCond_PhaseName(self):
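+    """Phase-scoped conds add an IssuePhaseDef join matched by name."""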
+    fd = tracker_pb2.FieldDef(
+      field_id=1, project_id=789, field_name='Milestone',
+      field_type=tracker_pb2.FieldTypes.INT_TYPE)
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [72],
+                            phase_name='Canary')
+    left_joins, where, unsupported = ast2select._ProcessCustomFieldCond(
+        cond, 'Cond1', 'User1', 'Phase1', snapshot_mode=False)
+    self.assertEqual(
+        [('IssuePhaseDef AS Phase1 ON LOWER(Phase1.name) = %s', ['Canary']),
+         ('Issue2FieldValue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Issue.shard = Cond1.issue_shard AND '
+          'Cond1.field_id = %s AND Cond1.int_value = %s AND '
+          'Cond1.phase_id = Phase1.id', [1, 72])],
+        left_joins)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessAttachmentCond_HasAttachment(self):
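+    """has:attachment is answered from Issue.attachment_count, no join."""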
+    fd = BUILTIN_ISSUE_FIELDS['attachment']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.IS_DEFINED, [fd], [], [])
+    left_joins, where, unsupported = ast2select._ProcessAttachmentCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual([], left_joins)
+    self.assertEqual(
+        [('(Issue.attachment_count IS NOT NULL AND '
+          'Issue.attachment_count != %s)',
+          [0])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.IS_NOT_DEFINED, [fd], [], [])
+    left_joins, where, unsupported = ast2select._ProcessAttachmentCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual([], left_joins)
+    self.assertEqual(
+        [('(Issue.attachment_count IS NULL OR '
+          'Issue.attachment_count = %s)',
+          [0])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessAttachmentCond_HasAttachment_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['attachment']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.IS_DEFINED, [fd], [], [])
+    left_joins, where, unsupported = ast2select._ProcessAttachmentCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], where)
+    self.assertEqual([cond], unsupported)
+
+  def testProcessAttachmentCond_TextHas(self):
+    fd = BUILTIN_ISSUE_FIELDS['attachment']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.TEXT_HAS, [fd], ['jpg'], [])
+    left_joins, where, unsupported = ast2select._ProcessAttachmentCond(
+        cond, 'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Attachment AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.deleted = %s',
+          [False])],
+        left_joins)
+    self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
+    self.assertEqual(
+        [('(Cond1.filename LIKE %s)', ['%jpg%'])],
+        where)
+    self.assertTrue(sql._IsValidWhereCond(where[0][0]))
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistIDCond_MultiValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1, 2])
+    left_joins, where, unsupported = ast2select._ProcessHotlistIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Hotlist2Issue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.hotlist_id IN (%s,%s)', [1, 2])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.hotlist_id IS NOT NULL', [])],
+        where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistIDCond_MultiValue_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1, 2])
+    left_joins, where, unsupported = ast2select._ProcessHotlistIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('IssueSnapshot2Hotlist AS Cond1 '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id AND '
+          'Cond1.hotlist_id IN (%s,%s)', [1, 2])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.hotlist_id IS NOT NULL', [])],
+        where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistIDCond_SingleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1])
+    left_joins, where, unsupported = ast2select._ProcessHotlistIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Hotlist2Issue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.hotlist_id = %s', [1])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.hotlist_id IS NOT NULL', [])],
+        where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistIDCond_NegatedMultiValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1, 2])
+    left_joins, where, unsupported = ast2select._ProcessHotlistIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Hotlist2Issue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.hotlist_id IN (%s,%s)', [1, 2])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.hotlist_id IS NULL', [])],
+        where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistIDCond_NegatedMultiValue_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1, 2])
+    left_joins, where, unsupported = ast2select._ProcessHotlistIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('IssueSnapshot2Hotlist AS Cond1 '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id AND '
+          'Cond1.hotlist_id IN (%s,%s)', [1, 2])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.hotlist_id IS NULL', [])],
+        where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistIDCond_NegatedSingleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1])
+    left_joins, where, unsupported = ast2select._ProcessHotlistIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+        [('Hotlist2Issue AS Cond1 ON Issue.id = Cond1.issue_id AND '
+          'Cond1.hotlist_id = %s', [1])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.hotlist_id IS NULL', [])],
+        where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistIDCond_NegatedSingleValue_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist_id']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1])
+    left_joins, where, unsupported = ast2select._ProcessHotlistIDCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual(
+        [('IssueSnapshot2Hotlist AS Cond1 '
+          'ON IssueSnapshot.id = Cond1.issuesnapshot_id AND '
+          'Cond1.hotlist_id = %s', [1])],
+        left_joins)
+    self.assertEqual(
+        [('Cond1.hotlist_id IS NULL', [])],
+        where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistCond_SingleValue(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], ['invalid:spa'], [])
+    left_joins, where, unsupported = ast2select._ProcessHotlistCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+      [('(Hotlist2Issue JOIN Hotlist AS Cond1 ON '
+        'Hotlist2Issue.hotlist_id = Cond1.id AND (LOWER(Cond1.name) LIKE %s))'
+        ' ON Issue.id = Hotlist2Issue.issue_id', ['%spa%'])],
+      left_joins)
+    self.assertEqual([('Cond1.name IS NOT NULL', [])], where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistCond_SingleValue_SnapshotMode(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], ['invalid:spa'], [])
+    left_joins, where, unsupported = ast2select._ProcessHotlistCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=True)
+    self.assertEqual(
+      [('(IssueSnapshot2Hotlist JOIN Hotlist AS Cond1 ON '
+        'IssueSnapshot2Hotlist.hotlist_id = Cond1.id '
+        'AND (LOWER(Cond1.name) LIKE %s)) '
+        'ON IssueSnapshot.id = IssueSnapshot2Hotlist.issuesnapshot_id',
+        ['%spa%'])],
+      left_joins)
+    self.assertEqual([('Cond1.name IS NOT NULL', [])], where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistCond_SingleValue2(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd],
+                            ['invalid:spa', 'port', 'invalid2:barc'], [])
+    left_joins, where, unsupported = ast2select._ProcessHotlistCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+      [('(Hotlist2Issue JOIN Hotlist AS Cond1 ON '
+        'Hotlist2Issue.hotlist_id = Cond1.id AND (LOWER(Cond1.name) LIKE %s OR '
+        'LOWER(Cond1.name) LIKE %s OR LOWER(Cond1.name) LIKE %s)) ON '
+        'Issue.id = Hotlist2Issue.issue_id', ['%spa%', '%port%', '%barc%'])],
+      left_joins)
+    self.assertEqual([('Cond1.name IS NOT NULL', [])], where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistCond_SingleValue3(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], ['invalid:spa'], [])
+    left_joins, where, unsupported = ast2select._ProcessHotlistCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+      [('(Hotlist2Issue JOIN Hotlist AS Cond1 ON '
+        'Hotlist2Issue.hotlist_id = Cond1.id AND (LOWER(Cond1.name) LIKE %s))'
+        ' ON Issue.id = Hotlist2Issue.issue_id', ['%spa%'])],
+      left_joins)
+    self.assertEqual([('Cond1.name IS NULL', [])], where)
+    self.assertEqual([], unsupported)
+
+  def testProcessHotlistCond_SingleValue4(self):
+    fd = BUILTIN_ISSUE_FIELDS['hotlist']
+    cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NOT_TEXT_HAS, [fd],
+                            ['invalid:spa', 'port', 'invalid2:barc'], [])
+    left_joins, where, unsupported = ast2select._ProcessHotlistCond(cond,
+        'Cond1', 'Spare1', snapshot_mode=False)
+    self.assertEqual(
+      [('(Hotlist2Issue JOIN Hotlist AS Cond1 ON '
+        'Hotlist2Issue.hotlist_id = Cond1.id AND (LOWER(Cond1.name) LIKE %s OR '
+        'LOWER(Cond1.name) LIKE %s OR LOWER(Cond1.name) LIKE %s)) ON '
+        'Issue.id = Hotlist2Issue.issue_id', ['%spa%', '%port%', '%barc%'])],
+      left_joins)
+    self.assertEqual([('Cond1.name IS NULL', [])], where)
+    self.assertEqual([], unsupported)
+
+  def testProcessPhaseCond_HasGateEQ(self):
+    fd = BUILTIN_ISSUE_FIELDS['gate']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.EQ, [fd], ['canary', 'stable'], [])
+    left_joins, where, unsupported = ast2select._ProcessPhaseCond(
+        cond, 'Cond1', 'Phase1', snapshot_mode=False)
+    self.assertEqual(
+        [('(Issue2ApprovalValue AS Cond1 JOIN IssuePhaseDef AS Phase1 '
+          'ON Cond1.phase_id = Phase1.id AND '
+          'LOWER(Phase1.name) IN (%s,%s)) '
+          'ON Issue.id = Cond1.issue_id', ['canary', 'stable'])],
+        left_joins)
+    self.assertEqual([('Phase1.name IS NOT NULL', [])], where)
+    self.assertEqual([], unsupported)
+
+  def testProcessPhaseCond_NoGateTEXT(self):
+    fd = BUILTIN_ISSUE_FIELDS['gate']
+    cond = ast_pb2.MakeCond(
+        ast_pb2.QueryOp.NOT_TEXT_HAS, [fd], ['canary', 'stable'], [])
+    left_joins, where, unsupported = ast2select._ProcessPhaseCond(
+        cond, 'Cond1', 'Phase1', snapshot_mode=False)
+    self.assertEqual(
+        [('(Issue2ApprovalValue AS Cond1 JOIN IssuePhaseDef AS Phase1 '
+          'ON Cond1.phase_id = Phase1.id AND '
+          '(LOWER(Phase1.name) LIKE %s '
+          'OR LOWER(Phase1.name) LIKE %s)) '
+          'ON Issue.id = Cond1.issue_id', ['%canary%', '%stable%'])],
+        left_joins)
+    self.assertEqual([('Phase1.name IS NULL', [])], where)
+    self.assertEqual([], unsupported)
+
+  def testCompare_IntTypes(self):
+    val_type = tracker_pb2.FieldTypes.INT_TYPE
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.IS_DEFINED, val_type, 'col', [1, 2])
+    self.assertEqual('(Alias.col IS NOT NULL AND Alias.col != %s)', cond_str)
+    self.assertEqual([0], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.EQ, val_type, 'col', [1])
+    self.assertEqual('Alias.col = %s', cond_str)
+    self.assertEqual([1], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.EQ, val_type, 'col', [1, 2])
+    self.assertEqual('Alias.col IN (%s,%s)', cond_str)
+    self.assertEqual([1, 2], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.NE, val_type, 'col', [])
+    self.assertEqual('TRUE', cond_str)
+    self.assertEqual([], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.NE, val_type, 'col', [1])
+    self.assertEqual('(Alias.col IS NULL OR Alias.col != %s)', cond_str)
+    self.assertEqual([1], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.NE, val_type, 'col', [1, 2])
+    self.assertEqual('(Alias.col IS NULL OR Alias.col NOT IN (%s,%s))',
+                     cond_str)
+    self.assertEqual([1, 2], cond_args)
+
+  def testCompare_STRTypes(self):
+    val_type = tracker_pb2.FieldTypes.STR_TYPE
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.IS_DEFINED, val_type, 'col', ['a', 'b'])
+    self.assertEqual('(Alias.col IS NOT NULL AND Alias.col != %s)', cond_str)
+    self.assertEqual([''], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.EQ, val_type, 'col', ['a'])
+    self.assertEqual('Alias.col = %s', cond_str)
+    self.assertEqual(['a'], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.EQ, val_type, 'col', ['a', 'b'])
+    self.assertEqual('Alias.col IN (%s,%s)', cond_str)
+    self.assertEqual(['a', 'b'], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.NE, val_type, 'col', [])
+    self.assertEqual('TRUE', cond_str)
+    self.assertEqual([], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.NE, val_type, 'col', ['a'])
+    self.assertEqual('(Alias.col IS NULL OR Alias.col != %s)', cond_str)
+    self.assertEqual(['a'], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.NE, val_type, 'col', ['a', 'b'])
+    self.assertEqual('(Alias.col IS NULL OR Alias.col NOT IN (%s,%s))',
+                     cond_str)
+    self.assertEqual(['a', 'b'], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.TEXT_HAS, val_type, 'col', ['a'])
+    self.assertEqual('(Alias.col LIKE %s)', cond_str)
+    self.assertEqual(['%a%'], cond_args)
+
+    cond_str, cond_args = ast2select._Compare(
+        'Alias', ast_pb2.QueryOp.NOT_TEXT_HAS, val_type, 'col', ['a'])
+    self.assertEqual('(Alias.col IS NULL OR Alias.col NOT LIKE %s)', cond_str)
+    self.assertEqual(['%a%'], cond_args)
+
+  def testCompareAlreadyJoined(self):
+    cond_str, cond_args = ast2select._CompareAlreadyJoined(
+        'Alias', ast_pb2.QueryOp.EQ, 'col')
+    self.assertEqual('Alias.col IS NOT NULL', cond_str)
+    self.assertEqual([], cond_args)
+
+    cond_str, cond_args = ast2select._CompareAlreadyJoined(
+        'Alias', ast_pb2.QueryOp.NE, 'col')
+    self.assertEqual('Alias.col IS NULL', cond_str)
+    self.assertEqual([], cond_args)
diff --git a/search/test/ast2sort_test.py b/search/test/ast2sort_test.py
new file mode 100644
index 0000000..9d365e8
--- /dev/null
+++ b/search/test/ast2sort_test.py
@@ -0,0 +1,373 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Tests for the ast2sort module."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import unittest
+
+from proto import tracker_pb2
+from search import ast2sort
+from search import query2ast
+
+
+BUILTIN_ISSUE_FIELDS = query2ast.BUILTIN_ISSUE_FIELDS
+ANY_FIELD = query2ast.BUILTIN_ISSUE_FIELDS['any_field']
+
+
+class AST2SortTest(unittest.TestCase):
+
+  def setUp(self):
+    self.harmonized_labels = [
+        (101, 0, 'Hot'), (102, 1, 'Cold'), (103, None, 'Odd')]
+    self.harmonized_statuses = [
+        (201, 0, 'New'), (202, 1, 'Assigned'), (203, None, 'OnHold')]
+    self.harmonized_fields = []
+    self.fmt = lambda string, **kwords: string
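+    # Identity formatter: it leaves '{alias}' and '{sort_dir}' placeholders
+    # in the clause templates, so the tests below can assert on the raw
+    # format strings rather than on fully substituted SQL.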
+
+  def testBuildSortClauses_EmptySortDirectives(self):
+    left_joins, order_by = ast2sort.BuildSortClauses(
+        [], self.harmonized_labels, self.harmonized_statuses,
+        self.harmonized_fields)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], order_by)
+
+  def testBuildSortClauses_Normal(self):
+    left_joins, order_by = ast2sort.BuildSortClauses(
+        ['stars', 'status', 'pri', 'reporter', 'id'], self.harmonized_labels,
+        self.harmonized_statuses, self.harmonized_fields)
+    expected_left_joins = [
+        ('User AS Sort3 ON Issue.reporter_id = Sort3.user_id', [])]
+    expected_order_by = [
+        ('Issue.star_count ASC', []),
+        ('FIELD(IF(ISNULL(Issue.status_id), Issue.derived_status_id, '
+         'Issue.status_id), %s,%s) DESC', [201, 202]),
+        ('FIELD(IF(ISNULL(Issue.status_id), Issue.derived_status_id, '
+         'Issue.status_id), %s) DESC', [203]),
+        ('ISNULL(Sort3.email) ASC', []),
+        ('Sort3.email ASC', []),
+        ('Issue.local_id ASC', [])]
+    self.assertEqual(expected_left_joins, left_joins)
+    self.assertEqual(expected_order_by, order_by)
+
+  def testProcessProjectSD(self):
+    left_joins, order_by = ast2sort._ProcessProjectSD(self.fmt)
+    self.assertEqual([], left_joins)
+    self.assertEqual(
+        [('Issue.project_id {sort_dir}', [])],
+        order_by)
+
+  def testProcessReporterSD(self):
+    left_joins, order_by = ast2sort._ProcessReporterSD(self.fmt)
+    self.assertEqual(
+        [('User AS {alias} ON Issue.reporter_id = {alias}.user_id', [])],
+        left_joins)
+    self.assertEqual(
+        [('ISNULL({alias}.email) {sort_dir}', []),
+         ('{alias}.email {sort_dir}', [])],
+        order_by)
+
+  def testProcessOwnerSD(self):
+    left_joins, order_by = ast2sort._ProcessOwnerSD(self.fmt)
+    self.assertEqual(
+        [('User AS {alias}_exp ON Issue.owner_id = {alias}_exp.user_id', []),
+         ('User AS {alias}_der ON '
+          'Issue.derived_owner_id = {alias}_der.user_id', [])],
+        left_joins)
+    self.assertEqual(
+        [('(ISNULL({alias}_exp.email) AND ISNULL({alias}_der.email)) '
+          '{sort_dir}', []),
+         ('CONCAT({alias}_exp.email, {alias}_der.email) {sort_dir}', [])],
+        order_by)
+
+  def testProcessCcSD(self):
+    left_joins, order_by = ast2sort._ProcessCcSD(self.fmt)
+    self.assertEqual(
+        [('Issue2Cc AS {alias} ON Issue.id = {alias}.issue_id '
+          'LEFT JOIN User AS {alias}_user '
+          'ON {alias}.cc_id = {alias}_user.user_id', [])],
+        left_joins)
+    self.assertEqual(
+        [('ISNULL({alias}_user.email) {sort_dir}', []),
+         ('{alias}_user.email {sort_dir}', [])],
+        order_by)
+
+  def testProcessComponentSD(self):
+    left_joins, order_by = ast2sort._ProcessComponentSD(self.fmt)
+    self.assertEqual(
+        [('Issue2Component AS {alias} ON Issue.id = {alias}.issue_id '
+          'LEFT JOIN ComponentDef AS {alias}_component '
+          'ON {alias}.component_id = {alias}_component.id', [])],
+        left_joins)
+    self.assertEqual(
+        [('ISNULL({alias}_component.path) {sort_dir}', []),
+         ('{alias}_component.path {sort_dir}', [])],
+        order_by)
+
+  def testProcessSummarySD(self):
+    left_joins, order_by = ast2sort._ProcessSummarySD(self.fmt)
+    self.assertEqual(
+        [('IssueSummary AS {alias} ON Issue.id = {alias}.issue_id', [])],
+        left_joins)
+    self.assertEqual(
+        [('{alias}.summary {sort_dir}', [])],
+        order_by)
+
+  def testProcessStatusSD(self):
+    pass  # TODO(jrobbins): fill in this test case
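+    # Hedged sketch only; _ProcessStatusSD's exact signature is not shown in
+    # this change.  Based on testBuildSortClauses_Normal above, the helper
+    # should need no left joins and should emit FIELD(...) order-by clauses
+    # for the ranked statuses [201, 202] and then the odd status [203], e.g.:
+    #   left_joins, order_by = ast2sort._ProcessStatusSD(...)  # args TBD
+    #   self.assertEqual([], left_joins)
+    #   self.assertEqual(2, len(order_by))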
+
+  def testProcessBlockedSD(self):
+    left_joins, order_by = ast2sort._ProcessBlockedSD(self.fmt)
+    self.assertEqual(
+        [('IssueRelation AS {alias} ON Issue.id = {alias}.issue_id '
+          'AND {alias}.kind = %s', ['blockedon'])],
+        left_joins)
+    self.assertEqual(
+        [('ISNULL({alias}.dst_issue_id) {sort_dir}', [])],
+        order_by)
+
+  def testProcessBlockedOnSD(self):
+    left_joins, order_by = ast2sort._ProcessBlockedOnSD(self.fmt)
+    self.assertEqual(
+        [('IssueRelation AS {alias} ON Issue.id = {alias}.issue_id '
+          'AND {alias}.kind = %s', ['blockedon'])],
+        left_joins)
+    self.assertEqual(
+        [('ISNULL({alias}.dst_issue_id) {sort_dir}', []),
+         ('{alias}.dst_issue_id {sort_dir}', [])],
+        order_by)
+
+  def testProcessBlockingSD(self):
+    left_joins, order_by = ast2sort._ProcessBlockingSD(self.fmt)
+    self.assertEqual(
+        [('IssueRelation AS {alias} ON Issue.id = {alias}.dst_issue_id '
+          'AND {alias}.kind = %s', ['blockedon'])],
+        left_joins)
+    self.assertEqual(
+        [('ISNULL({alias}.issue_id) {sort_dir}', []),
+         ('{alias}.issue_id {sort_dir}', [])],
+        order_by)
+
+  def testProcessMergedIntoSD(self):
+    left_joins, order_by = ast2sort._ProcessMergedIntoSD(self.fmt)
+    self.assertEqual(
+        [('IssueRelation AS {alias} ON Issue.id = {alias}.issue_id '
+          'AND {alias}.kind = %s', ['mergedinto'])],
+        left_joins)
+    self.assertEqual(
+        [('ISNULL({alias}.dst_issue_id) {sort_dir}', []),
+         ('{alias}.dst_issue_id {sort_dir}', [])],
+        order_by)
+
+  def testProcessCustomAndLabelSD(self):
+    pass  # TODO(jrobbins): fill in this test case
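+    # Hedged sketch: _ProcessCustomAndLabelSD's signature is exercised in
+    # testProcessCustomAndLabelSD_PhaseField below.  A non-phase variant
+    # could call it with sd='dropbear' and compare the result against the
+    # combined _CustomFieldSortClauses and _LabelSortClauses output,
+    # mirroring that test but without the phase_name argument.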
+
+  def testProcessCustomAndLabelSD_PhaseField(self):
+    harmonized_labels = []
+    bear_fd = tracker_pb2.FieldDef(
+        field_id=1, field_name='DropBear', project_id=789,
+        field_type=tracker_pb2.FieldTypes.INT_TYPE)
+    bear2_fd = tracker_pb2.FieldDef(
+        field_id=2, field_name='DropBear', project_id=788,
+        field_type=tracker_pb2.FieldTypes.STR_TYPE)
+    koala_fd = tracker_pb2.FieldDef(
+        field_id=3, field_name='koala', project_id=789,
+        field_type=tracker_pb2.FieldTypes.INT_TYPE)
+    bear_app_fd = tracker_pb2.FieldDef(
+        field_id=4, field_name='dropbear', project_id=789,
+        field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE)
+    harmonized_fields = [bear_fd, bear2_fd, koala_fd, bear_app_fd]
+    phase_name = 'stable'
+    alias = 'Sort0'
+    sort_dir = 'DESC'
+    sd = 'stable.dropbear'
+    left_joins, order_by = ast2sort._ProcessCustomAndLabelSD(
+        sd, harmonized_labels, harmonized_fields, alias, sort_dir,
+        self.fmt)
+
+    expected_joins = []
+    expected_order = []
+    int_left_joins, int_order_by = ast2sort._CustomFieldSortClauses(
+        [bear_fd, bear2_fd], tracker_pb2.FieldTypes.INT_TYPE, 'int_value',
+        alias, sort_dir, phase_name=phase_name)
+    str_left_joins, str_order_by = ast2sort._CustomFieldSortClauses(
+        [bear_fd, bear2_fd], tracker_pb2.FieldTypes.STR_TYPE, 'str_value',
+        alias, sort_dir, phase_name=phase_name)
+    user_left_joins, user_order_by = ast2sort._CustomFieldSortClauses(
+        [bear_fd, bear2_fd], tracker_pb2.FieldTypes.USER_TYPE, 'user_id',
+        alias, sort_dir, phase_name=phase_name)
+    label_left_joins, label_order_by = ast2sort._LabelSortClauses(
+        sd, harmonized_labels, self.fmt)
+    expected_joins.extend(
+        int_left_joins + str_left_joins + user_left_joins + label_left_joins)
+    expected_order.extend(
+        int_order_by + str_order_by + user_order_by + label_order_by)
+    self.assertEqual(left_joins, expected_joins)
+    self.assertEqual(order_by, expected_order)
+
+  def testApprovalFieldSortClauses_Status(self):
+    approval_fd_list = [
+        tracker_pb2.FieldDef(field_id=2, project_id=789,
+                             field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE),
+        tracker_pb2.FieldDef(field_id=4, project_id=788,
+                             field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE)
+    ]
+    left_joins, order_by = ast2sort._ApprovalFieldSortClauses(
+        approval_fd_list, '-status', self.fmt)
+
+    self.assertEqual(
+        [('{tbl_name} AS {alias}_approval '
+          'ON Issue.id = {alias}_approval.issue_id '
+          'AND {alias}_approval.approval_id IN ({approval_ids_ph})', [2, 4])],
+        left_joins)
+
+    self.assertEqual(
+        [('FIELD({alias}_approval.status, {approval_status_ph}) {rev_sort_dir}',
+          ast2sort.APPROVAL_STATUS_SORT_ORDER)],
+        order_by)
+
+  def testApprovalFieldSortClauses_Approver(self):
+    approval_fd_list = [
+        tracker_pb2.FieldDef(field_id=2, project_id=789,
+                             field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE),
+        tracker_pb2.FieldDef(field_id=4, project_id=788,
+                             field_type=tracker_pb2.FieldTypes.APPROVAL_TYPE)
+    ]
+    left_joins, order_by = ast2sort._ApprovalFieldSortClauses(
+        approval_fd_list, '-approver', self.fmt)
+
+    self.assertEqual(
+        [('{tbl_name} AS {alias}_approval '
+          'ON Issue.id = {alias}_approval.issue_id '
+          'AND {alias}_approval.approval_id IN ({approval_ids_ph})', [2, 4]),
+         ('User AS {alias}_approval_user '
+          'ON {alias}_approval.approver_id = {alias}_approval_user.user_id',
+          [])],
+        left_joins)
+
+    self.assertEqual(
+        [('ISNULL({alias}_approval_user.email) {sort_dir}', []),
+         ('{alias}_approval_user.email {sort_dir}', [])],
+        order_by)
+
+  def testLabelSortClauses_NoSuchLabels(self):
+    sd = 'somethingelse'
+    harmonized_labels = [
+      (101, 0, 'Type-Defect'),
+      (102, 1, 'Type-Enhancement'),
+      (103, 2, 'Type-Task'),
+      (104, 0, 'Priority-High'),
+      (199, None, 'Type-Laundry'),
+      ]
+    left_joins, order_by = ast2sort._LabelSortClauses(
+      sd, harmonized_labels, self.fmt)
+    self.assertEqual([], left_joins)
+    self.assertEqual([], order_by)
+
+  def testLabelSortClauses_Normal(self):
+    sd = 'type'
+    harmonized_labels = [
+      (101, 0, 'Type-Defect'),
+      (102, 1, 'Type-Enhancement'),
+      (103, 2, 'Type-Task'),
+      (104, 0, 'Priority-High'),
+      (199, None, 'Type-Laundry'),
+      ]
+    left_joins, order_by = ast2sort._LabelSortClauses(
+      sd, harmonized_labels, self.fmt)
+    self.assertEqual(1, len(left_joins))
+    self.assertEqual(
+      ('Issue2Label AS {alias} ON Issue.id = {alias}.issue_id AND '
+       '{alias}.label_id IN ({all_label_ph})',
+       [101, 102, 103, 199]),
+      left_joins[0])
+    self.assertEqual(2, len(order_by))
+    self.assertEqual(
+      ('FIELD({alias}.label_id, {wk_label_ph}) {rev_sort_dir}',
+       [101, 102, 103]),
+      order_by[0])
+    self.assertEqual(
+      ('FIELD({alias}.label_id, {odd_label_ph}) {rev_sort_dir}',
+       [199]),
+      order_by[1])
+
+  def testCustomFieldSortClauses_Normal(self):
+    fd_list = [
+      tracker_pb2.FieldDef(field_id=1, project_id=789,
+                           field_type=tracker_pb2.FieldTypes.INT_TYPE),
+      tracker_pb2.FieldDef(field_id=2, project_id=788,
+                           field_type=tracker_pb2.FieldTypes.STR_TYPE),
+    ]
+    left_joins, order_by = ast2sort._CustomFieldSortClauses(
+        fd_list, tracker_pb2.FieldTypes.INT_TYPE, 'int_value', 'Sort0', 'DESC')
+
+    self.assertEqual(
+        left_joins, [
+            ('Issue2FieldValue AS Sort0_int_value '
+             'ON Issue.id = Sort0_int_value.issue_id '
+             'AND Sort0_int_value.field_id IN (%s)', [1]),
+        ])
+    self.assertEqual(
+        order_by, [
+            ('ISNULL(Sort0_int_value.int_value) DESC', []),
+            ('Sort0_int_value.int_value DESC', []),
+        ])
+
+  def testCustomFieldSortClauses_PhaseUser(self):
+    fd_list = [
+      tracker_pb2.FieldDef(field_id=1, project_id=789,
+                           field_type=tracker_pb2.FieldTypes.INT_TYPE),
+      tracker_pb2.FieldDef(field_id=2, project_id=788,
+                           field_type=tracker_pb2.FieldTypes.STR_TYPE),
+      tracker_pb2.FieldDef(field_id=3, project_id=788,
+                           field_type=tracker_pb2.FieldTypes.USER_TYPE),
+    ]
+    left_joins, order_by = ast2sort._CustomFieldSortClauses(
+        fd_list, tracker_pb2.FieldTypes.USER_TYPE, 'user_id', 'Sort0', 'DESC',
+        phase_name='Stable')
+
+    self.assertEqual(
+        left_joins, [
+            ('Issue2FieldValue AS Sort0_user_id '
+             'ON Issue.id = Sort0_user_id.issue_id '
+             'AND Sort0_user_id.field_id IN (%s)', [3]),
+            ('IssuePhaseDef AS Sort0_user_id_phase '
+             'ON Sort0_user_id.phase_id = Sort0_user_id_phase.id '
+             'AND LOWER(Sort0_user_id_phase.name) = LOWER(%s)', ['Stable']),
+            ('User AS Sort0_user_id_user '
+             'ON Sort0_user_id.user_id = Sort0_user_id_user.user_id', []),
+        ])
+    self.assertEqual(
+        order_by, [
+            ('ISNULL(Sort0_user_id_user.email) DESC', []),
+            ('Sort0_user_id_user.email DESC', []),
+        ])
+
+  def testOneSortDirective_NativeSortable(self):
+    left_joins, order_by = ast2sort._OneSortDirective(
+        1, 'opened', self.harmonized_labels, self.harmonized_statuses,
+        self.harmonized_fields)
+    self.assertEqual([], left_joins)
+    self.assertEqual([('Issue.opened ASC', [])], order_by)
+
+    left_joins, order_by = ast2sort._OneSortDirective(
+        1, 'stars', self.harmonized_labels, self.harmonized_statuses,
+        self.harmonized_fields)
+    self.assertEqual([], left_joins)
+    self.assertEqual([('Issue.star_count ASC', [])], order_by)
+
+    left_joins, order_by = ast2sort._OneSortDirective(
+        1, '-stars', self.harmonized_labels, self.harmonized_statuses,
+        self.harmonized_fields)
+    self.assertEqual([], left_joins)
+    self.assertEqual([('Issue.star_count DESC', [])], order_by)
+
+    left_joins, order_by = ast2sort._OneSortDirective(
+        1, 'componentmodified', self.harmonized_labels,
+        self.harmonized_statuses, self.harmonized_fields)
+    self.assertEqual([], left_joins)
+    self.assertEqual([('Issue.component_modified ASC', [])], order_by)
diff --git a/search/test/backendnonviewable_test.py b/search/test/backendnonviewable_test.py
new file mode 100644
index 0000000..6c50fb7
--- /dev/null
+++ b/search/test/backendnonviewable_test.py
@@ -0,0 +1,165 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Unittests for monorail.search.backendnonviewable."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import unittest
+import mox
+
+from google.appengine.api import memcache
+from google.appengine.ext import testbed
+
+from framework import permissions
+from search import backendnonviewable
+from services import service_manager
+from testing import fake
+from testing import testing_helpers
+
+
+class BackendNonviewableTest(unittest.TestCase):
+
+  def setUp(self):
+    self.services = service_manager.Services(
+        project=fake.ProjectService(),
+        config=fake.ConfigService(),
+        issue=fake.IssueService(),
+        )
+    self.project = self.services.project.TestAddProject(
+      'proj', project_id=789)
+    self.mr = testing_helpers.MakeMonorailRequest()
+    self.mr.specified_project_id = 789
+    self.mr.shard_id = 2
+    self.mr.invalidation_timestep = 12345
+
+    self.servlet = backendnonviewable.BackendNonviewable(
+        'req', 'res', services=self.services)
+
+    self.mox = mox.Mox()
+    self.testbed = testbed.Testbed()
+    self.testbed.activate()
+    self.testbed.init_memcache_stub()
+
+  def tearDown(self):
+    self.testbed.deactivate()
+    self.mox.UnsetStubs()
+    self.mox.ResetAll()
+
+  def testHandleRequest(self):
+    pass  # TODO(jrobbins): fill in this test.
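+    # Hedged sketch; the JSON response key below is assumed, not shown here:
+    #   self.mox.StubOutWithMock(self.servlet, 'GetNonviewableIIDs')
+    #   self.servlet.GetNonviewableIIDs(
+    #       self.mr.cnxn, self.mr.auth.user_pb, self.mr.auth.effective_ids,
+    #       self.project, self.mr.perms, self.mr.shard_id).AndReturn([404])
+    #   self.mox.ReplayAll()
+    #   json_data = self.servlet.HandleRequest(self.mr)
+    #   self.mox.VerifyAll()
+    #   self.assertEqual([404], json_data['nonviewable'])  # key assumed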
+
+  def testGetNonviewableIIDs_OwnerOrAdmin(self):
+    """Check the special case for users who are never restricted."""
+    perms = permissions.OWNER_ACTIVE_PERMISSIONSET
+    nonviewable_iids = self.servlet.GetNonviewableIIDs(
+      self.mr.cnxn, self.mr.auth.user_pb, {111}, self.project, perms, 2)
+    self.assertEqual([], nonviewable_iids)
+
+  def testGetNonviewableIIDs_RegularUser(self):
+    pass  # TODO(jrobbins)
+
+  def testGetNonviewableIIDs_Anon(self):
+    pass  # TODO(jrobbins)
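+    # Hedged sketch for the anon case (permission constant name assumed):
+    # anon visitors have no effective IDs, so no at-risk issue becomes
+    # viewable for them:
+    #   perms = permissions.READ_ONLY_PERMISSIONSET
+    #   nonviewable_iids = self.servlet.GetNonviewableIIDs(
+    #       self.mr.cnxn, self.mr.auth.user_pb, set(), self.project, perms, 2)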
+
+  def testGetAtRiskIIDs_NothingEverAtRisk(self):
+    """Handle the case where the site has no restriction labels."""
+    fake_restriction_label_rows = []
+    fake_restriction_label_ids = []
+    fake_at_risk_iids = []
+    self.mox.StubOutWithMock(self.services.config, 'GetLabelDefRowsAnyProject')
+    self.services.config.GetLabelDefRowsAnyProject(
+        self.mr.cnxn, where=[('LOWER(label) LIKE %s', ['restrict-view-%'])]
+        ).AndReturn(fake_restriction_label_rows)
+    self.mox.StubOutWithMock(self.services.issue, 'GetIIDsByLabelIDs')
+    self.services.issue.GetIIDsByLabelIDs(
+        self.mr.cnxn, fake_restriction_label_ids, 789, 2
+        ).AndReturn(fake_at_risk_iids)
+    self.mox.ReplayAll()
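+    # mox convention used throughout these tests: StubOutWithMock records
+    # expected calls, ReplayAll() switches the stubs into replay mode, and
+    # VerifyAll() below fails the test if any expectation went unmet.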
+
+    at_risk_iids = self.servlet.GetAtRiskIIDs(
+        self.mr.cnxn, self.mr.auth.user_pb, self.mr.auth.effective_ids,
+        self.project, self.mr.perms, self.mr.shard_id)
+    self.mox.VerifyAll()
+    self.assertEqual([], at_risk_iids)
+
+  def testGetAtRiskIIDs_NoIssuesAtRiskRightNow(self):
+    """Handle the case where the project has no restricted issues."""
+    fake_restriction_label_rows = [
+        (123, 789, 1, 'Restrict-View-A', 'doc', False),
+        (234, 789, 2, 'Restrict-View-B', 'doc', False),
+        ]
+    fake_restriction_label_ids = [123, 234]
+    fake_at_risk_iids = []
+    self.mox.StubOutWithMock(self.services.config, 'GetLabelDefRowsAnyProject')
+    self.services.config.GetLabelDefRowsAnyProject(
+        self.mr.cnxn, where=[('LOWER(label) LIKE %s', ['restrict-view-%'])]
+        ).AndReturn(fake_restriction_label_rows)
+    self.mox.StubOutWithMock(self.services.issue, 'GetIIDsByLabelIDs')
+    self.services.issue.GetIIDsByLabelIDs(
+        self.mr.cnxn, fake_restriction_label_ids, 789, 2
+        ).AndReturn(fake_at_risk_iids)
+    self.mox.ReplayAll()
+
+    at_risk_iids = self.servlet.GetAtRiskIIDs(
+        self.mr.cnxn, self.mr.auth.user_pb, self.mr.auth.effective_ids,
+        self.project, self.mr.perms, self.mr.shard_id)
+    self.mox.VerifyAll()
+    self.assertEqual([], at_risk_iids)
+
+  def testGetAtRiskIIDs_SomeAtRisk(self):
+    """Handle the case where the project has some restricted issues."""
+    fake_restriction_label_rows = [
+        (123, 789, 1, 'Restrict-View-A', 'doc', False),
+        (234, 789, 2, 'Restrict-View-B', 'doc', False),
+        ]
+    fake_restriction_label_ids = [123, 234]
+    fake_at_risk_iids = [432, 543]
+    self.mox.StubOutWithMock(self.services.config, 'GetLabelDefRowsAnyProject')
+    self.services.config.GetLabelDefRowsAnyProject(
+      self.mr.cnxn, where=[('LOWER(label) LIKE %s', ['restrict-view-%'])]
+      ).AndReturn(fake_restriction_label_rows)
+    self.mox.StubOutWithMock(self.services.issue, 'GetIIDsByLabelIDs')
+    self.services.issue.GetIIDsByLabelIDs(
+      self.mr.cnxn, fake_restriction_label_ids, 789, 2
+      ).AndReturn(fake_at_risk_iids)
+    self.mox.ReplayAll()
+
+    at_risk_iids = self.servlet.GetAtRiskIIDs(
+        self.mr.cnxn, self.mr.auth.user_pb, self.mr.auth.effective_ids,
+        self.project, self.mr.perms, self.mr.shard_id)
+    self.mox.VerifyAll()
+    self.assertEqual([432, 543], at_risk_iids)
+
+  def testGetViewableIIDs_Anon(self):
+    """Anon users are never participants in any issues."""
+    ok_iids = self.servlet.GetViewableIIDs(
+      self.mr.cnxn, set(), 789, 2)
+    self.assertEqual([], ok_iids)
+
+  def testGetViewableIIDs_NoIssues(self):
+    """This visitor does not participate in any issues."""
+    self.mox.StubOutWithMock(self.services.issue, 'GetIIDsByParticipant')
+    self.services.issue.GetIIDsByParticipant(
+      self.mr.cnxn, {111}, [789], 2).AndReturn([])
+    self.mox.ReplayAll()
+
+    ok_iids = self.servlet.GetViewableIIDs(
+      self.mr.cnxn, {111}, 789, 2)
+    self.mox.VerifyAll()
+    self.assertEqual([], ok_iids)
+
+  def testGetViewableIIDs_SomeIssues(self):
+    """This visitor  participates in some issues."""
+    self.mox.StubOutWithMock(self.services.issue, 'GetIIDsByParticipant')
+    self.services.issue.GetIIDsByParticipant(
+      self.mr.cnxn, {111}, [789], 2).AndReturn([543, 654])
+    self.mox.ReplayAll()
+
+    ok_iids = self.servlet.GetViewableIIDs(
+      self.mr.cnxn, {111}, 789, 2)
+    self.mox.VerifyAll()
+    self.assertEqual([543, 654], ok_iids)
diff --git a/search/test/backendsearch_test.py b/search/test/backendsearch_test.py
new file mode 100644
index 0000000..dd5ed18
--- /dev/null
+++ b/search/test/backendsearch_test.py
@@ -0,0 +1,126 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Unittests for monorail.search.backendsearch."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import unittest
+import mox
+
+import settings
+from search import backendsearch
+from search import backendsearchpipeline
+from services import service_manager
+from testing import fake
+from testing import testing_helpers
+
+
+class BackendSearchTest(unittest.TestCase):
+
+  def setUp(self):
+    self.services = service_manager.Services(
+        issue=fake.IssueService(),
+        )
+    self.mr = testing_helpers.MakeMonorailRequest(
+        path='/_backend/besearch?q=Priority:High&shard=2')
+    self.mr.query_project_names = ['proj']
+    self.mr.specified_logged_in_user_id = 111
+    self.mr.specified_me_user_ids = [222]
+    self.mr.shard_id = 2
+    self.servlet = backendsearch.BackendSearch(
+        'req', 'res', services=self.services)
+    self.mox = mox.Mox()
+
+  def tearDown(self):
+    self.mox.UnsetStubs()
+    self.mox.ResetAll()
+
+  def testHandleRequest_NoResults(self):
+    """Handle the case where the search has no results."""
+    pipeline = testing_helpers.Blank(
+        SearchForIIDs=lambda: None,
+        result_iids=[],
+        search_limit_reached=False,
+        error=None)
+    self.mox.StubOutWithMock(backendsearchpipeline, 'BackendSearchPipeline')
+    backendsearchpipeline.BackendSearchPipeline(
+      self.mr, self.services, 100, ['proj'], 111, [222]
+      ).AndReturn(pipeline)
+    self.mox.ReplayAll()
+
+    json_data = self.servlet.HandleRequest(self.mr)
+    self.mox.VerifyAll()
+    self.assertEqual([], json_data['unfiltered_iids'])
+    self.assertFalse(json_data['search_limit_reached'])
+    self.assertEqual(None, json_data['error'])
+
+  def testHandleRequest_ResultsInOnePaginationPage(self):
+    """Prefetch all result issues and return them."""
+    allowed_iids = [1, 2, 3, 4, 5, 6, 7, 8]
+    pipeline = testing_helpers.Blank(
+        SearchForIIDs=lambda: None,
+        result_iids=allowed_iids,
+        search_limit_reached=False,
+        error=None)
+    self.mox.StubOutWithMock(backendsearchpipeline, 'BackendSearchPipeline')
+    backendsearchpipeline.BackendSearchPipeline(
+      self.mr, self.services, 100, ['proj'], 111, [222]
+      ).AndReturn(pipeline)
+    self.mox.StubOutWithMock(self.services.issue, 'GetIssues')
+    # All issues are prefetched because they fit on the first pagination page.
+    self.services.issue.GetIssues(self.mr.cnxn, allowed_iids, shard_id=2)
+    self.mox.ReplayAll()
+
+    json_data = self.servlet.HandleRequest(self.mr)
+    self.mox.VerifyAll()
+    self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8], json_data['unfiltered_iids'])
+    self.assertFalse(json_data['search_limit_reached'])
+    self.assertEqual(None, json_data['error'])
+
+  def testHandleRequest_ResultsExceedPaginationPage(self):
+    """Return all result issue IDs, but only prefetch the first page."""
+    self.mr.num = 5
+    pipeline = testing_helpers.Blank(
+        SearchForIIDs=lambda: None,
+        result_iids=[1, 2, 3, 4, 5, 6, 7, 8],
+        search_limit_reached=False,
+        error=None)
+    self.mox.StubOutWithMock(backendsearchpipeline, 'BackendSearchPipeline')
+    backendsearchpipeline.BackendSearchPipeline(
+      self.mr, self.services, 100, ['proj'], 111, [222]
+      ).AndReturn(pipeline)
+    self.mox.StubOutWithMock(self.services.issue, 'GetIssues')
+    # First 5 issues are prefetched because num=5
+    self.services.issue.GetIssues(self.mr.cnxn, [1, 2, 3, 4, 5], shard_id=2)
+    self.mox.ReplayAll()
+
+    json_data = self.servlet.HandleRequest(self.mr)
+    self.mox.VerifyAll()
+    # All issue IDs are returned to the frontend.
+    self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8], json_data['unfiltered_iids'])
+    self.assertFalse(json_data['search_limit_reached'])
+    self.assertEqual(None, json_data['error'])
+
+  def testHandleRequest_QueryError(self):
+    """Handle the case where the search has no results."""
+    error = ValueError('Malformed query')
+    pipeline = testing_helpers.Blank(
+        SearchForIIDs=lambda: None,
+        result_iids=[],
+        search_limit_reached=False,
+        error=error)
+    self.mox.StubOutWithMock(backendsearchpipeline, 'BackendSearchPipeline')
+    backendsearchpipeline.BackendSearchPipeline(
+      self.mr, self.services, 100, ['proj'], 111, [222]
+      ).AndReturn(pipeline)
+    self.mox.ReplayAll()
+
+    json_data = self.servlet.HandleRequest(self.mr)
+    self.mox.VerifyAll()
+    self.assertEqual([], json_data['unfiltered_iids'])
+    self.assertFalse(json_data['search_limit_reached'])
+    self.assertEqual(error.message, json_data['error'])
diff --git a/search/test/backendsearchpipeline_test.py b/search/test/backendsearchpipeline_test.py
new file mode 100644
index 0000000..212f5a6
--- /dev/null
+++ b/search/test/backendsearchpipeline_test.py
@@ -0,0 +1,250 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Tests for the backendsearchpipeline module."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import mox
+import unittest
+
+from google.appengine.api import memcache
+from google.appengine.ext import testbed
+
+import settings
+from framework import framework_helpers
+from framework import sorting
+from framework import sql
+from proto import ast_pb2
+from proto import tracker_pb2
+from search import backendsearchpipeline
+from search import ast2ast
+from search import query2ast
+from services import service_manager
+from services import tracker_fulltext
+from testing import fake
+from testing import testing_helpers
+from tracker import tracker_bizobj
+
+
+class BackendSearchPipelineTest(unittest.TestCase):
+
+  def setUp(self):
+    self.cnxn = 'fake cnxn'
+    self.services = service_manager.Services(
+        user=fake.UserService(),
+        usergroup=fake.UserGroupService(),
+        project=fake.ProjectService(),
+        issue=fake.IssueService(),
+        config=fake.ConfigService(),
+        cache_manager=fake.CacheManager())
+    self.services.user.TestAddUser('a@example.com', 111)
+    self.project = self.services.project.TestAddProject('proj', project_id=789)
+    self.mr = testing_helpers.MakeMonorailRequest(
+      path='/p/proj/issues/list?q=Priority:High',
+      project=self.project)
+    self.mr.me_user_id = 999  # This value is not used by backend search
+    self.mr.shard_id = 2
+    self.mr.invalidation_timestep = 12345
+
+    self.mox = mox.Mox()
+    self.testbed = testbed.Testbed()
+    self.testbed.activate()
+    self.testbed.init_user_stub()
+    self.testbed.init_memcache_stub()
+    sorting.InitializeArtValues(self.services)
+
+  def tearDown(self):
+    self.testbed.deactivate()
+    self.mox.UnsetStubs()
+    self.mox.ResetAll()
+
+  def SetUpPromises(self, exp_query):
+    self.mox.StubOutWithMock(framework_helpers, 'Promise')
+    framework_helpers.Promise(
+        backendsearchpipeline._GetQueryResultIIDs, self.mr.cnxn,
+        self.services, 'is:open', exp_query, [789],
+        mox.IsA(tracker_pb2.ProjectIssueConfig), ['project', 'id'],
+        ('Issue.shard = %s', [2]), 2, self.mr.invalidation_timestep
+        ).AndReturn('fake promise 1')
+
+  def testMakePromises_Anon(self):
+    """A backend pipeline does not personalize the query of anon users."""
+    self.SetUpPromises('Priority:High')
+    self.mox.ReplayAll()
+    backendsearchpipeline.BackendSearchPipeline(
+      self.mr, self.services, 100, ['proj'], None, [])
+    self.mox.VerifyAll()
+
+  def testMakePromises_SignedIn(self):
+    """A backend pipeline immediately personalizes and runs the query."""
+    self.mr.query = 'owner:me'
+    self.SetUpPromises('owner:111')
+    self.mox.ReplayAll()
+    backendsearchpipeline.BackendSearchPipeline(
+      self.mr, self.services, 100, ['proj'], 111, [111])
+    self.mox.VerifyAll()
+
+  def testSearchForIIDs(self):
+    self.SetUpPromises('Priority:High')
+    self.mox.ReplayAll()
+    be_pipeline = backendsearchpipeline.BackendSearchPipeline(
+      self.mr, self.services, 100, ['proj'], 111, [111])
+    be_pipeline.result_iids_promise = testing_helpers.Blank(
+      WaitAndGetValue=lambda: ([10002, 10052], False, None))
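+    # Stand-in promise: WaitAndGetValue() returns the same
+    # (result_iids, search_limit_reached, error) triple that the real
+    # _GetQueryResultIIDs promise would produce.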
+    be_pipeline.SearchForIIDs()
+    self.mox.VerifyAll()
+    self.assertEqual([10002, 10052], be_pipeline.result_iids)
+    self.assertEqual(False, be_pipeline.search_limit_reached)
+
+
+class BackendSearchPipelineMethodsTest(unittest.TestCase):
+
+  def setUp(self):
+    self.cnxn = 'fake cnxn'
+    self.config = tracker_bizobj.MakeDefaultProjectIssueConfig(789)
+    self.services = service_manager.Services(
+        user=fake.UserService(),
+        usergroup=fake.UserGroupService(),
+        project=fake.ProjectService(),
+        issue=fake.IssueService(),
+        config=fake.ConfigService(),
+        cache_manager=fake.CacheManager())
+    self.services.user.TestAddUser('a@example.com', 111)
+    self.project = self.services.project.TestAddProject('proj', project_id=789)
+    self.mr = testing_helpers.MakeMonorailRequest(
+      path='/p/proj/issues/list?q=Priority:High',
+      project=self.project)
+
+    self.mox = mox.Mox()
+    self.testbed = testbed.Testbed()
+    self.testbed.activate()
+    self.testbed.init_user_stub()
+    self.testbed.init_memcache_stub()
+
+  def tearDown(self):
+    self.testbed.deactivate()
+    self.mox.UnsetStubs()
+    self.mox.ResetAll()
+
+  def testSearchProjectCan_Normal(self):
+    query_ast = query2ast.ParseUserQuery(
+      'Priority:High', 'is:open', query2ast.BUILTIN_ISSUE_FIELDS,
+      self.config)
+    simplified_query_ast = ast2ast.PreprocessAST(
+      self.cnxn, query_ast, [789], self.services, self.config)
+    conj = simplified_query_ast.conjunctions[0]
+    self.mox.StubOutWithMock(tracker_fulltext, 'SearchIssueFullText')
+    tracker_fulltext.SearchIssueFullText(
+      [789], conj, 2).AndReturn((None, False))
+    self.mox.StubOutWithMock(self.services.issue, 'RunIssueQuery')
+    self.services.issue.RunIssueQuery(
+      self.cnxn, mox.IsA(list), mox.IsA(list), mox.IsA(list),
+      shard_id=2).AndReturn(([10002, 10052], False))
+    self.mox.ReplayAll()
+    result, capped, err = backendsearchpipeline.SearchProjectCan(
+      self.cnxn, self.services, [789], query_ast, 2, self.config)
+    self.mox.VerifyAll()
+    self.assertEqual([10002, 10052], result)
+    self.assertFalse(capped)
+    self.assertEqual(None, err)
+
+  def testSearchProjectCan_DBCapped(self):
+    query_ast = query2ast.ParseUserQuery(
+      'Priority:High', 'is:open', query2ast.BUILTIN_ISSUE_FIELDS,
+      self.config)
+    simplified_query_ast = ast2ast.PreprocessAST(
+      self.cnxn, query_ast, [789], self.services, self.config)
+    conj = simplified_query_ast.conjunctions[0]
+    self.mox.StubOutWithMock(tracker_fulltext, 'SearchIssueFullText')
+    tracker_fulltext.SearchIssueFullText(
+      [789], conj, 2).AndReturn((None, False))
+    self.mox.StubOutWithMock(self.services.issue, 'RunIssueQuery')
+    self.services.issue.RunIssueQuery(
+      self.cnxn, mox.IsA(list), mox.IsA(list), mox.IsA(list),
+      shard_id=2).AndReturn(([10002, 10052], True))
+    self.mox.ReplayAll()
+    result, capped, err = backendsearchpipeline.SearchProjectCan(
+      self.cnxn, self.services, [789], query_ast, 2, self.config)
+    self.mox.VerifyAll()
+    self.assertEqual([10002, 10052], result)
+    self.assertTrue(capped)
+    self.assertEqual(None, err)
+
+  def testSearchProjectCan_FTSCapped(self):
+    query_ast = query2ast.ParseUserQuery(
+      'Priority:High', 'is:open', query2ast.BUILTIN_ISSUE_FIELDS,
+      self.config)
+    simplified_query_ast = ast2ast.PreprocessAST(
+      self.cnxn, query_ast, [789], self.services, self.config)
+    conj = simplified_query_ast.conjunctions[0]
+    self.mox.StubOutWithMock(tracker_fulltext, 'SearchIssueFullText')
+    tracker_fulltext.SearchIssueFullText(
+      [789], conj, 2).AndReturn(([10002, 10052], True))
+    self.mox.StubOutWithMock(self.services.issue, 'RunIssueQuery')
+    self.services.issue.RunIssueQuery(
+      self.cnxn, mox.IsA(list), mox.IsA(list), mox.IsA(list),
+      shard_id=2).AndReturn(([10002, 10052], False))
+    self.mox.ReplayAll()
+    result, capped, err = backendsearchpipeline.SearchProjectCan(
+      self.cnxn, self.services, [789], query_ast, 2, self.config)
+    self.mox.VerifyAll()
+    self.assertEqual([10002, 10052], result)
+    self.assertTrue(capped)
+    self.assertEqual(None, err)
+
+  def testGetQueryResultIIDs(self):
+    sd = ['project', 'id']
+    slice_term = ('Issue.shard = %s', [2])
+    query_ast = query2ast.ParseUserQuery(
+      'Priority:High', 'is:open', query2ast.BUILTIN_ISSUE_FIELDS,
+      self.config)
+    query_ast = backendsearchpipeline._FilterSpam(query_ast)
+
+    self.mox.StubOutWithMock(backendsearchpipeline, 'SearchProjectCan')
+    backendsearchpipeline.SearchProjectCan(
+      self.cnxn, self.services, [789], query_ast, 2, self.config,
+      sort_directives=sd, where=[slice_term],
+      query_desc='getting query issue IDs'
+      ).AndReturn(([10002, 10052], False, None))
+    self.mox.ReplayAll()
+    result, capped, err = backendsearchpipeline._GetQueryResultIIDs(
+      self.cnxn, self.services, 'is:open', 'Priority:High',
+      [789], self.config, sd, slice_term, 2, 12345)
+    self.mox.VerifyAll()
+    self.assertEqual([10002, 10052], result)
+    self.assertFalse(capped)
+    self.assertEqual(None, err)
+    self.assertEqual(
+      ([10002, 10052], 12345),
+      memcache.get('789;is:open;Priority:High;project id;2'))
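+    # Key layout, inferred from the arguments above (not a documented API):
+    # '<project_ids>;<canned_query>;<user_query>;<sort directives>;<shard>'.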
+
+  def testGetSpamQueryResultIIDs(self):
+    sd = ['project', 'id']
+    slice_term = ('Issue.shard = %s', [2])
+    query_ast = query2ast.ParseUserQuery(
+      'Priority:High is:spam', 'is:open', query2ast.BUILTIN_ISSUE_FIELDS,
+      self.config)
+
+    query_ast = backendsearchpipeline._FilterSpam(query_ast)
+
+    self.mox.StubOutWithMock(backendsearchpipeline, 'SearchProjectCan')
+    backendsearchpipeline.SearchProjectCan(
+      self.cnxn, self.services, [789], query_ast, 2, self.config,
+      sort_directives=sd, where=[slice_term],
+      query_desc='getting query issue IDs'
+      ).AndReturn(([10002, 10052], False, None))
+    self.mox.ReplayAll()
+    result, capped, err = backendsearchpipeline._GetQueryResultIIDs(
+      self.cnxn, self.services, 'is:open', 'Priority:High is:spam',
+      [789], self.config, sd, slice_term, 2, 12345)
+    self.mox.VerifyAll()
+    self.assertEqual([10002, 10052], result)
+    self.assertFalse(capped)
+    self.assertEqual(None, err)
+    self.assertEqual(
+      ([10002, 10052], 12345),
+      memcache.get('789;is:open;Priority:High is:spam;project id;2'))
diff --git a/search/test/frontendsearchpipeline_test.py b/search/test/frontendsearchpipeline_test.py
new file mode 100644
index 0000000..b2e7fb3
--- /dev/null
+++ b/search/test/frontendsearchpipeline_test.py
@@ -0,0 +1,1339 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Tests for the frontendsearchpipeline module."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import mox
+import unittest
+
+from google.appengine.api import memcache
+from google.appengine.api import modules
+from google.appengine.ext import testbed
+from google.appengine.api import urlfetch
+
+import settings
+from framework import framework_helpers
+from framework import sorting
+from framework import urls
+from proto import ast_pb2
+from proto import project_pb2
+from proto import tracker_pb2
+from search import frontendsearchpipeline
+from search import searchpipeline
+from search import query2ast
+from services import service_manager
+from testing import fake
+from testing import testing_helpers
+from tracker import tracker_bizobj
+
+
+# Just an example timestamp.  The value does not matter.
+NOW = 2444950132
+
+
+class FrontendSearchPipelineTest(unittest.TestCase):
+
+  def setUp(self):
+    self.config = tracker_bizobj.MakeDefaultProjectIssueConfig(789)
+    self.services = service_manager.Services(
+        user=fake.UserService(),
+        project=fake.ProjectService(),
+        issue=fake.IssueService(),
+        config=fake.ConfigService(),
+        cache_manager=fake.CacheManager())
+    self.services.user.TestAddUser('a@example.com', 111)
+    self.project = self.services.project.TestAddProject('proj', project_id=789)
+    self.mr = testing_helpers.MakeMonorailRequest(
+      path='/p/proj/issues/list', project=self.project)
+    self.mr.me_user_id = 111
+
+    self.issue_1 = fake.MakeTestIssue(
+      789, 1, 'one', 'New', 111, labels=['Priority-High'])
+    self.services.issue.TestAddIssue(self.issue_1)
+    self.issue_2 = fake.MakeTestIssue(
+      789, 2, 'two', 'New', 111, labels=['Priority-Low'])
+    self.services.issue.TestAddIssue(self.issue_2)
+    self.issue_3 = fake.MakeTestIssue(
+      789, 3, 'three', 'New', 111, labels=['Priority-Medium'])
+    self.services.issue.TestAddIssue(self.issue_3)
+    self.mr.sort_spec = 'Priority'
+
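+    # Local aliases for the MonorailRequest attributes that are passed
+    # explicitly to the FrontendSearchPipeline constructor in each test.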
+    self.cnxn = self.mr.cnxn
+    self.project = self.mr.project
+    self.auth = self.mr.auth
+    self.me_user_id = self.mr.me_user_id
+    self.query = self.mr.query
+    self.query_project_names = self.mr.query_project_names
+    self.items_per_page = self.mr.num  # defaults to 100
+    self.paginate_start = self.mr.start
+    self.paginate_end = self.paginate_start + self.items_per_page
+    self.can = self.mr.can
+    self.group_by_spec = self.mr.group_by_spec
+    self.sort_spec = self.mr.sort_spec
+    self.warnings = self.mr.warnings
+    self.errors = self.mr.errors
+    self.use_cached_searches = self.mr.use_cached_searches
+    self.profiler = self.mr.profiler
+
+    self.mox = mox.Mox()
+    self.testbed = testbed.Testbed()
+    self.testbed.activate()
+    self.testbed.init_user_stub()
+    self.testbed.init_memcache_stub()
+    sorting.InitializeArtValues(self.services)
+
+  def tearDown(self):
+    self.testbed.deactivate()
+    self.mox.UnsetStubs()
+    self.mox.ResetAll()
+
+  def testSearchForIIDs_AllResultsCached_AllAtRiskCached(self):
+    unfiltered_iids = {(1, 'p:v'): [1001, 1011]}
+    nonviewable_iids = {1: set()}
+    self.mox.StubOutWithMock(frontendsearchpipeline, '_StartBackendSearch')
+    frontendsearchpipeline._StartBackendSearch(
+        self.cnxn, ['proj'], [789], mox.IsA(tracker_pb2.ProjectIssueConfig),
+        unfiltered_iids, {}, nonviewable_iids, set(), self.services,
+        self.me_user_id, self.auth.user_id or 0, self.paginate_end,
+        self.query.split(' OR '), self.can, self.group_by_spec, self.sort_spec,
+        self.warnings, self.use_cached_searches).AndReturn([])
+    self.mox.StubOutWithMock(frontendsearchpipeline, '_FinishBackendSearch')
+    frontendsearchpipeline._FinishBackendSearch([])
+    self.mox.ReplayAll()
+
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    pipeline.unfiltered_iids = unfiltered_iids
+    pipeline.nonviewable_iids = nonviewable_iids
+    pipeline.SearchForIIDs()
+    self.mox.VerifyAll()
+    self.assertEqual(2, pipeline.total_count)
+    self.assertEqual([1001, 1011], pipeline.filtered_iids[(1, 'p:v')])
+
+  def testSearchForIIDs_CrossProject_AllViewable(self):
+    self.services.project.TestAddProject('other', project_id=790)
+    unfiltered_iids = {(1, 'p:v'): [1001, 1011, 2001]}
+    nonviewable_iids = {1: set()}
+    self.query_project_names = ['other']
+    self.mox.StubOutWithMock(frontendsearchpipeline, '_StartBackendSearch')
+    frontendsearchpipeline._StartBackendSearch(
+        self.cnxn, ['other', 'proj'], [789, 790],
+        mox.IsA(tracker_pb2.ProjectIssueConfig), unfiltered_iids, {},
+        nonviewable_iids, set(), self.services,
+        self.me_user_id, self.auth.user_id or 0, self.paginate_end,
+        self.query.split(' OR '), self.can, self.group_by_spec, self.sort_spec,
+        self.warnings, self.use_cached_searches).AndReturn([])
+    self.mox.StubOutWithMock(frontendsearchpipeline, '_FinishBackendSearch')
+    frontendsearchpipeline._FinishBackendSearch([])
+    self.mox.ReplayAll()
+
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+
+    pipeline.unfiltered_iids = unfiltered_iids
+    pipeline.nonviewable_iids = nonviewable_iids
+    pipeline.SearchForIIDs()
+    self.mox.VerifyAll()
+    self.assertEqual(3, pipeline.total_count)
+    self.assertEqual([1001, 1011, 2001], pipeline.filtered_iids[(1, 'p:v')])
+
+  def testSearchForIIDs_CrossProject_MembersOnlyOmitted(self):
+    self.services.project.TestAddProject(
+        'other', project_id=790, access=project_pb2.ProjectAccess.MEMBERS_ONLY)
+    unfiltered_iids = {(1, 'p:v'): [1001, 1011]}
+    nonviewable_iids = {1: set()}
+    # project 'other' gets filtered out before the backend call.
+    self.mr.query_project_names = ['other']
+    self.mox.StubOutWithMock(frontendsearchpipeline, '_StartBackendSearch')
+    frontendsearchpipeline._StartBackendSearch(
+        self.cnxn, ['proj'], [789], mox.IsA(tracker_pb2.ProjectIssueConfig),
+        unfiltered_iids, {}, nonviewable_iids, set(), self.services,
+        self.me_user_id, self.auth.user_id or 0, self.paginate_end,
+        self.query.split(' OR '), self.can, self.group_by_spec, self.sort_spec,
+        self.warnings, self.use_cached_searches).AndReturn([])
+    self.mox.StubOutWithMock(frontendsearchpipeline, '_FinishBackendSearch')
+    frontendsearchpipeline._FinishBackendSearch([])
+    self.mox.ReplayAll()
+
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    pipeline.unfiltered_iids = unfiltered_iids
+    pipeline.nonviewable_iids = nonviewable_iids
+    pipeline.SearchForIIDs()
+    self.mox.VerifyAll()
+    self.assertEqual(2, pipeline.total_count)
+    self.assertEqual([1001, 1011], pipeline.filtered_iids[(1, 'p:v')])
+
+  def testMergeAndSortIssues_EmptyResult(self):
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    pipeline.filtered_iids = {0: [], 1: [], 2: []}
+
+    pipeline.MergeAndSortIssues()
+    self.assertEqual([], pipeline.allowed_iids)
+    self.assertEqual([], pipeline.allowed_results)
+    self.assertEqual({}, pipeline.users_by_id)
+
+  def testMergeAndSortIssues_Normal(self):
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    # In this unit test we do not call SearchForIIDs(); instead we set
+    # pipeline.filtered_iids directly.
+    pipeline.filtered_iids = {
+      0: [],
+      1: [self.issue_1.issue_id],
+      2: [self.issue_2.issue_id],
+      3: [self.issue_3.issue_id]
+      }
+
+    pipeline.MergeAndSortIssues()
+    self.assertEqual(
+      [self.issue_1.issue_id, self.issue_2.issue_id, self.issue_3.issue_id],
+      pipeline.allowed_iids)
+    self.assertEqual(
+      [self.issue_1, self.issue_3, self.issue_2],  # high, medium, low.
+      pipeline.allowed_results)
+    self.assertEqual([0, 111], list(pipeline.users_by_id.keys()))
+
+  def testDetermineIssuePosition_Normal(self):
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    # In this unit test we do not call SearchForIIDs(); instead we set
+    # pipeline.filtered_iids directly.
+    pipeline.filtered_iids = {
+      0: [],
+      1: [self.issue_1.issue_id],
+      2: [self.issue_2.issue_id],
+      3: [self.issue_3.issue_id]
+      }
+
+    prev_iid, index, next_iid = pipeline.DetermineIssuePosition(self.issue_3)
+    # The total ordering is issue_1, issue_3, issue_2 for high, med, low.
+    self.assertEqual(self.issue_1.issue_id, prev_iid)
+    self.assertEqual(1, index)
+    self.assertEqual(self.issue_2.issue_id, next_iid)
+
+  def testDetermineIssuePosition_NotInResults(self):
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    # In this unit test we do not call SearchForIIDs(); instead we set
+    # pipeline.filtered_iids directly.
+    pipeline.filtered_iids = {
+      0: [],
+      1: [self.issue_1.issue_id],
+      2: [self.issue_2.issue_id],
+      3: []
+      }
+
+    prev_iid, index, next_iid = pipeline.DetermineIssuePosition(self.issue_3)
+    # The total ordering is issue_1, issue_3, issue_2 for high, med, low.
+    self.assertEqual(None, prev_iid)
+    self.assertEqual(None, index)
+    self.assertEqual(None, next_iid)
+
+  def testDetermineIssuePositionInShard_IssueIsInShard(self):
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    # Let's assume issues 1, 2, and 3 are all in the same shard.
+    pipeline.filtered_iids = {
+      0: [self.issue_1.issue_id, self.issue_2.issue_id, self.issue_3.issue_id],
+      }
+
+    # The total ordering is issue_1, issue_3, issue_2 for high, med, low.
+    prev_cand, index, next_cand = pipeline._DetermineIssuePositionInShard(
+      0, self.issue_1, {})
+    self.assertEqual(None, prev_cand)
+    self.assertEqual(0, index)
+    self.assertEqual(self.issue_3, next_cand)
+
+    prev_cand, index, next_cand = pipeline._DetermineIssuePositionInShard(
+      0, self.issue_3, {})
+    self.assertEqual(self.issue_1, prev_cand)
+    self.assertEqual(1, index)
+    self.assertEqual(self.issue_2, next_cand)
+
+    prev_cand, index, next_cand = pipeline._DetermineIssuePositionInShard(
+      0, self.issue_2, {})
+    self.assertEqual(self.issue_3, prev_cand)
+    self.assertEqual(2, index)
+    self.assertEqual(None, next_cand)
+
+  def testDetermineIssuePositionInShard_IssueIsNotInShard(self):
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+
+    # The total ordering is issue_1, issue_3, issue_2 for high, med, low.
+    pipeline.filtered_iids = {
+      0: [self.issue_2.issue_id, self.issue_3.issue_id],
+      }
+    prev_cand, index, next_cand = pipeline._DetermineIssuePositionInShard(
+      0, self.issue_1, {})
+    self.assertEqual(None, prev_cand)
+    self.assertEqual(0, index)
+    self.assertEqual(self.issue_3, next_cand)
+
+    pipeline.filtered_iids = {
+      0: [self.issue_1.issue_id, self.issue_2.issue_id],
+      }
+    prev_cand, index, next_cand = pipeline._DetermineIssuePositionInShard(
+      0, self.issue_3, {})
+    self.assertEqual(self.issue_1, prev_cand)
+    self.assertEqual(1, index)
+    self.assertEqual(self.issue_2, next_cand)
+
+    pipeline.filtered_iids = {
+      0: [self.issue_1.issue_id, self.issue_3.issue_id],
+      }
+    prev_cand, index, next_cand = pipeline._DetermineIssuePositionInShard(
+      0, self.issue_2, {})
+    self.assertEqual(self.issue_3, prev_cand)
+    self.assertEqual(2, index)
+    self.assertEqual(None, next_cand)
+
+  def testFetchAllSamples_Empty(self):
+    filtered_iids = {}
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    samples_by_shard, sample_iids_to_shard = pipeline._FetchAllSamples(
+        filtered_iids)
+    self.assertEqual({}, samples_by_shard)
+    self.assertEqual({}, sample_iids_to_shard)
+
+  def testFetchAllSamples_SmallResultsPerShard(self):
+    filtered_iids = {
+        0: [100, 110, 120],
+        1: [101, 111, 121],
+        }
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+
+    samples_by_shard, sample_iids_to_shard = pipeline._FetchAllSamples(
+        filtered_iids)
+    self.assertEqual(2, len(samples_by_shard))
+    self.assertEqual(0, len(sample_iids_to_shard))
+
+  def testFetchAllSamples_Normal(self):
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    issues = self.MakeIssues(23)
+    filtered_iids = {
+        0: [issue.issue_id for issue in issues],
+        }
+
+    samples_by_shard, sample_iids_to_shard = pipeline._FetchAllSamples(
+        filtered_iids)
+    self.assertEqual(1, len(samples_by_shard))
+    self.assertEqual(2, len(samples_by_shard[0]))
+    self.assertEqual(2, len(sample_iids_to_shard))
+    for sample_iid in sample_iids_to_shard:
+      shard_key = sample_iids_to_shard[sample_iid]
+      self.assertIn(sample_iid, filtered_iids[shard_key])
+
+  def testChooseSampleIssues_Empty(self):
+    """When the search gave no results, there cannot be any samples."""
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    issue_ids = []
+    on_hand_issues, needed_iids = pipeline._ChooseSampleIssues(issue_ids)
+    self.assertEqual({}, on_hand_issues)
+    self.assertEqual([], needed_iids)
+
+  def testChooseSampleIssues_Small(self):
+    """When the search gave few results, don't bother with samples."""
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    issue_ids = [78901, 78902]
+    on_hand_issues, needed_iids = pipeline._ChooseSampleIssues(issue_ids)
+    self.assertEqual({}, on_hand_issues)
+    self.assertEqual([], needed_iids)
+
+  def MakeIssues(self, num_issues):
+    issues = []
+    for i in range(num_issues):
+      issue = fake.MakeTestIssue(789, 100 + i, 'samp test', 'New', 111)
+      issues.append(issue)
+      self.services.issue.TestAddIssue(issue)
+    return issues
+
+  def testChooseSampleIssues_Normal(self):
+    """We will choose at least one sample for every 10 results in a shard."""
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    issues = self.MakeIssues(23)
+    issue_ids = [issue.issue_id for issue in issues]
+    on_hand_issues, needed_iids = pipeline._ChooseSampleIssues(issue_ids)
+    self.assertEqual({}, on_hand_issues)
+    self.assertEqual(2, len(needed_iids))
+    for sample_iid in needed_iids:
+      self.assertIn(sample_iid, issue_ids)
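+
+    # With 23 result IIDs we get 2 samples and with 2 IIDs we get none, so
+    # the sample count appears to be roughly len(issue_ids) // 10.  This is
+    # an inference from these tests, not a verified constant:
+    #
+    #   num_samples = len(issue_ids) // 10  # 23 -> 2, 2 -> 0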
+
+  def testLookupNeededUsers(self):
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+
+    pipeline._LookupNeededUsers([])
+    self.assertEqual([], list(pipeline.users_by_id.keys()))
+
+    pipeline._LookupNeededUsers([self.issue_1, self.issue_2, self.issue_3])
+    self.assertEqual([0, 111], list(pipeline.users_by_id.keys()))
+
+  def testPaginate_List(self):
+    pipeline = frontendsearchpipeline.FrontendSearchPipeline(
+        self.cnxn,
+        self.services,
+        self.auth,
+        self.me_user_id,
+        self.query,
+        self.query_project_names,
+        self.items_per_page,
+        self.paginate_start,
+        self.can,
+        self.group_by_spec,
+        self.sort_spec,
+        self.warnings,
+        self.errors,
+        self.use_cached_searches,
+        self.profiler,
+        project=self.project)
+    pipeline.allowed_iids = [
+      self.issue_1.issue_id, self.issue_2.issue_id, self.issue_3.issue_id]
+    pipeline.allowed_results = [self.issue_1, self.issue_2, self.issue_3]
+    pipeline.total_count = len(pipeline.allowed_results)
+    pipeline.Paginate()
+    self.assertEqual(
+      [self.issue_1, self.issue_2, self.issue_3],
+      pipeline.visible_results)
+    self.assertFalse(pipeline.pagination.limit_reached)
+
+
+class FrontendSearchPipelineMethodsTest(unittest.TestCase):
+
+  def setUp(self):
+    self.mox = mox.Mox()
+    self.testbed = testbed.Testbed()
+    self.testbed.activate()
+    self.testbed.init_user_stub()
+    self.testbed.init_memcache_stub()
+
+    self.project_id = 789
+    self.default_config = tracker_bizobj.MakeDefaultProjectIssueConfig(
+        self.project_id)
+    self.services = service_manager.Services(
+        project=fake.ProjectService())
+    self.project = self.services.project.TestAddProject(
+        'proj', project_id=self.project_id)
+
+  def tearDown(self):
+    self.testbed.deactivate()
+    self.mox.UnsetStubs()
+    self.mox.ResetAll()
+
+  def testMakeBackendCallback(self):
+    called_with = []
+
+    def func(a, b):
+      called_with.append((a, b))
+
+    callback = frontendsearchpipeline._MakeBackendCallback(func, 10, 20)
+    callback()
+    self.assertEqual([(10, 20)], called_with)
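+
+    # The behavior checked above is consistent with a minimal closure-based
+    # factory; a sketch, not necessarily the exact implementation in
+    # frontendsearchpipeline.py:
+    #
+    #   def _MakeBackendCallback(func, *args):
+    #     return lambda: func(*args)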
+
+  def testParseUserQuery_CheckQuery(self):
+    warnings = []
+    msg = frontendsearchpipeline._CheckQuery(
+        'cnxn', self.services, 'ok query', self.default_config,
+        [self.project_id], True, warnings=warnings)
+    self.assertIsNone(msg)
+    self.assertEqual([], warnings)
+
+    warnings = []
+    msg = frontendsearchpipeline._CheckQuery(
+        'cnxn', self.services, 'modified:0-0-0', self.default_config,
+        [self.project_id], True, warnings=warnings)
+    self.assertEqual(
+        'Could not parse date: 0-0-0',
+        msg)
+
+    warnings = []
+    msg = frontendsearchpipeline._CheckQuery(
+        'cnxn', self.services, 'blocking:3.14', self.default_config,
+        [self.project_id], True, warnings=warnings)
+    self.assertEqual(
+        'Could not parse issue reference: 3.14',
+        msg)
+    self.assertEqual([], warnings)
+
+  def testStartBackendSearch(self):
+    # TODO(jrobbins): write this test.
+    pass
+
+  def testFinishBackendSearch(self):
+    # TODO(jrobbins): write this test.
+    pass
+
+  def testGetProjectTimestamps_NoneSet(self):
+    project_shard_timestamps = frontendsearchpipeline._GetProjectTimestamps(
+      [], [])
+    self.assertEqual({}, project_shard_timestamps)
+
+    project_shard_timestamps = frontendsearchpipeline._GetProjectTimestamps(
+      [], [(0, (0, 'p:v')), (1, (1, 'p:v')), (2, (2, 'p:v'))])
+    self.assertEqual({}, project_shard_timestamps)
+
+    project_shard_timestamps = frontendsearchpipeline._GetProjectTimestamps(
+      [789], [(0, (0, 'p:v')), (1, (1, 'p:v')), (2, (2, 'p:v'))])
+    self.assertEqual({}, project_shard_timestamps)
+
+  def testGetProjectTimestamps_SpecificProjects(self):
+    memcache.set('789;0', NOW)
+    memcache.set('789;1', NOW - 1000)
+    memcache.set('789;2', NOW - 3000)
+    project_shard_timestamps = frontendsearchpipeline._GetProjectTimestamps(
+      [789], [(0, (0, 'p:v')), (1, (1, 'p:v')), (2, (2, 'p:v'))])
+    self.assertEqual(
+      { (789, 0): NOW,
+        (789, 1): NOW - 1000,
+        (789, 2): NOW - 3000,
+        },
+      project_shard_timestamps)
+
+    memcache.set('790;0', NOW)
+    memcache.set('790;1', NOW - 10000)
+    memcache.set('790;2', NOW - 30000)
+    project_shard_timestamps = frontendsearchpipeline._GetProjectTimestamps(
+      [789, 790], [(0, (0, 'p:v')), (1, (1, 'p:v')), (2, (2, 'p:v'))])
+    self.assertEqual(
+      { (789, 0): NOW,
+        (789, 1): NOW - 1000,
+        (789, 2): NOW - 3000,
+        (790, 0): NOW,
+        (790, 1): NOW - 10000,
+        (790, 2): NOW - 30000,
+        },
+      project_shard_timestamps)
+
+  def testGetProjectTimestamps_SiteWide(self):
+    memcache.set('all;0', NOW)
+    memcache.set('all;1', NOW - 10000)
+    memcache.set('all;2', NOW - 30000)
+    project_shard_timestamps = frontendsearchpipeline._GetProjectTimestamps(
+      [], [(0, (0, 'p:v')), (1, (1, 'p:v')), (2, (2, 'p:v'))])
+    self.assertEqual(
+      { ('all', 0): NOW,
+        ('all', 1): NOW - 10000,
+        ('all', 2): NOW - 30000,
+        },
+      project_shard_timestamps)
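+
+    # Judging from the memcache keys in these tests, the timestamp cache key
+    # scheme appears to be '<project_id>;<shard_id>' per project and
+    # 'all;<shard_id>' for site-wide searches (an inference, not verified):
+    #
+    #   key = '%s;%d' % (project_id or 'all', shard_id)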
+
+  def testGetNonviewableIIDs_SearchMissSoNoOp(self):
+    """If search cache missed, don't bother looking up nonviewable IIDs."""
+    unfiltered_iids_dict = {}  # No cached search results found.
+    rpc_tuples = []  # Nothing should accumulate here in this case.
+    nonviewable_iids = {}  # Nothing should accumulate here in this case.
+    processed_invalidations_up_to = 12345
+    frontendsearchpipeline._GetNonviewableIIDs(
+        [789], 111, list(unfiltered_iids_dict.keys()), rpc_tuples,
+        nonviewable_iids, {}, processed_invalidations_up_to, True)
+    self.assertEqual([], rpc_tuples)
+    self.assertEqual({}, nonviewable_iids)
+
+  def testGetNonviewableIIDs_SearchHitThenNonviewableHit(self):
+    """If search cache hit, get nonviewable info from cache."""
+    unfiltered_iids_dict = {
+      1: [10001, 10021],
+      2: ['the search result issue_ids do not matter'],
+      }
+    rpc_tuples = []  # Nothing should accumulate here in this case.
+    nonviewable_iids = {}  # Our mock results should end up here.
+    processed_invalidations_up_to = 12345
+    memcache.set('nonviewable:789;111;1',
+                 ([10001, 10031], processed_invalidations_up_to - 10))
+    memcache.set('nonviewable:789;111;2',
+                 ([10002, 10042], processed_invalidations_up_to - 30))
+
+    project_shard_timestamps = {
+      (789, 1): 0,  # not stale
+      (789, 2): 0,  # not stale
+      }
+    frontendsearchpipeline._GetNonviewableIIDs(
+        [789], 111, list(unfiltered_iids_dict.keys()), rpc_tuples,
+        nonviewable_iids, project_shard_timestamps,
+        processed_invalidations_up_to, True)
+    self.assertEqual([], rpc_tuples)
+    self.assertEqual({1: {10001, 10031}, 2: {10002, 10042}}, nonviewable_iids)
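+
+    # The nonviewable cache entries set above suggest the key scheme
+    # 'nonviewable:<project_id>;<user_id>;<shard_id>' with a value of
+    # (iid_list, timestamp); entries older than the corresponding
+    # project_shard_timestamps entry are treated as stale.  Sketch:
+    #
+    #   key = 'nonviewable:%d;%d;%d' % (project_id, user_id, shard_id)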
+
+  def testGetNonviewableIIDs_SearchHitNonviewableMissSoStartRPC(self):
+    """If search hit and n-v miss, create RPCs to get nonviewable info."""
+    self.mox.StubOutWithMock(
+        frontendsearchpipeline, '_StartBackendNonviewableCall')
+    unfiltered_iids_dict = {
+      2: ['the search result issue_ids do not matter'],
+      }
+    rpc_tuples = []  # One RPC object should accumulate here.
+    nonviewable_iids = {}  # This will stay empty until RPCs complete.
+    processed_invalidations_up_to = 12345
+    # Nothing is set in memcache for this case.
+    a_fake_rpc = testing_helpers.Blank(callback=None)
+    frontendsearchpipeline._StartBackendNonviewableCall(
+      789, 111, 2, processed_invalidations_up_to).AndReturn(a_fake_rpc)
+    self.mox.ReplayAll()
+
+    frontendsearchpipeline._GetNonviewableIIDs(
+        [789], 111, list(unfiltered_iids_dict.keys()), rpc_tuples,
+        nonviewable_iids, {}, processed_invalidations_up_to, True)
+    self.mox.VerifyAll()
+    _, sid_0, rpc_0 = rpc_tuples[0]
+    self.assertEqual(2, sid_0)
+    self.assertEqual({}, nonviewable_iids)
+    self.assertEqual(a_fake_rpc, rpc_0)
+    self.assertIsNotNone(a_fake_rpc.callback)
+
+  def testAccumulateNonviewableIIDs_MemcacheHitForProject(self):
+    processed_invalidations_up_to = 12345
+    cached_dict = {
+      '789;111;2': ([10002, 10042], processed_invalidations_up_to - 10),
+      '789;111;3': ([10003, 10093], processed_invalidations_up_to - 30),
+      }
+    rpc_tuples = []  # Nothing should accumulate here.
+    nonviewable_iids = {1: {10001}}  # This will gain the shard 2 values.
+    project_shard_timestamps = {
+      (789, 1): 0,  # not stale
+      (789, 2): 0,  # not stale
+      }
+    frontendsearchpipeline._AccumulateNonviewableIIDs(
+      789, 111, 2, cached_dict, nonviewable_iids, project_shard_timestamps,
+      rpc_tuples, processed_invalidations_up_to)
+    self.assertEqual([], rpc_tuples)
+    self.assertEqual({1: {10001}, 2: {10002, 10042}}, nonviewable_iids)
+
+  def testAccumulateNonviewableIIDs_MemcacheStaleForProject(self):
+    self.mox.StubOutWithMock(
+      frontendsearchpipeline, '_StartBackendNonviewableCall')
+    processed_invalidations_up_to = 12345
+    cached_dict = {
+      '789;111;2': ([10002, 10042], processed_invalidations_up_to - 10),
+      '789;111;3': ([10003, 10093], processed_invalidations_up_to - 30),
+      }
+    rpc_tuples = []  # Nothing should accumulate here.
+    nonviewable_iids = {1: {10001}}  # Nothing added until the RPC completes.
+    project_shard_timestamps = {
+      (789, 1): 0,  # not stale
+      (789, 2): processed_invalidations_up_to,  # stale!
+      }
+    a_fake_rpc = testing_helpers.Blank(callback=None)
+    frontendsearchpipeline._StartBackendNonviewableCall(
+      789, 111, 2, processed_invalidations_up_to).AndReturn(a_fake_rpc)
+    self.mox.ReplayAll()
+
+    frontendsearchpipeline._AccumulateNonviewableIIDs(
+      789, 111, 2, cached_dict, nonviewable_iids, project_shard_timestamps,
+      rpc_tuples, processed_invalidations_up_to)
+    self.mox.VerifyAll()
+    _, sid_0, rpc_0 = rpc_tuples[0]
+    self.assertEqual(2, sid_0)
+    self.assertEqual(a_fake_rpc, rpc_0)
+    self.assertIsNotNone(a_fake_rpc.callback)
+    self.assertEqual({1: {10001}}, nonviewable_iids)
+
+  def testAccumulateNonviewableIIDs_MemcacheHitForWholeSite(self):
+    processed_invalidations_up_to = 12345
+    cached_dict = {
+      'all;111;2': ([10002, 10042], processed_invalidations_up_to - 10),
+      'all;111;3': ([10003, 10093], processed_invalidations_up_to - 30),
+      }
+    rpc_tuples = []  # Nothing should accumulate here.
+    nonviewable_iids = {1: {10001}}  # This will gain the shard 2 values.
+    project_shard_timestamps = {
+      (None, 1): 0,  # not stale
+      (None, 2): 0,  # not stale
+      }
+    frontendsearchpipeline._AccumulateNonviewableIIDs(
+      None, 111, 2, cached_dict, nonviewable_iids, project_shard_timestamps,
+      rpc_tuples, processed_invalidations_up_to)
+    self.assertEqual([], rpc_tuples)
+    self.assertEqual({1: {10001}, 2: {10002, 10042}}, nonviewable_iids)
+
+  def testAccumulateNonviewableIIDs_MemcacheMissSoStartRPC(self):
+    self.mox.StubOutWithMock(
+        frontendsearchpipeline, '_StartBackendNonviewableCall')
+    cached_dict = {}  # Nothing here, so it is an at-risk cache miss.
+    rpc_tuples = []  # One RPC should accumulate here.
+    nonviewable_iids = {1: {10001}}  # Nothing added here until RPC completes.
+    processed_invalidations_up_to = 12345
+    a_fake_rpc = testing_helpers.Blank(callback=None)
+    frontendsearchpipeline._StartBackendNonviewableCall(
+      789, 111, 2, processed_invalidations_up_to).AndReturn(a_fake_rpc)
+    self.mox.ReplayAll()
+
+    frontendsearchpipeline._AccumulateNonviewableIIDs(
+      789, 111, 2, cached_dict, nonviewable_iids, {}, rpc_tuples,
+      processed_invalidations_up_to)
+    self.mox.VerifyAll()
+    _, sid_0, rpc_0 = rpc_tuples[0]
+    self.assertEqual(2, sid_0)
+    self.assertEqual(a_fake_rpc, rpc_0)
+    self.assertIsNotNone(a_fake_rpc.callback)
+    self.assertEqual({1: {10001}}, nonviewable_iids)
+
+  def testGetCachedSearchResults(self):
+    # TODO(jrobbins): Write this test.
+    pass
+
+  def testMakeBackendRequestHeaders(self):
+    headers = frontendsearchpipeline._MakeBackendRequestHeaders(False)
+    self.assertNotIn('X-AppEngine-FailFast', headers)
+    headers = frontendsearchpipeline._MakeBackendRequestHeaders(True)
+    self.assertEqual('Yes', headers['X-AppEngine-FailFast'])
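+
+    # A sketch consistent with both assertions; the real helper may set
+    # additional headers:
+    #
+    #   def _MakeBackendRequestHeaders(failfast):
+    #     headers = {}
+    #     if failfast:
+    #       headers['X-AppEngine-FailFast'] = 'Yes'
+    #     return headers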
+
+  def testStartBackendSearchCall(self):
+    self.mox.StubOutWithMock(urlfetch, 'create_rpc')
+    self.mox.StubOutWithMock(urlfetch, 'make_fetch_call')
+    self.mox.StubOutWithMock(modules, 'get_hostname')
+    a_fake_rpc = testing_helpers.Blank(callback=None)
+    urlfetch.create_rpc(deadline=settings.backend_deadline).AndReturn(
+      a_fake_rpc)
+    modules.get_hostname(module='besearch')
+    urlfetch.make_fetch_call(
+        a_fake_rpc,
+        mox.StrContains(
+            urls.BACKEND_SEARCH + '?groupby=cc&invalidation_timestep=12345&'
+            'logged_in_user_id=777&me_user_ids=555&'
+            'num=201&projects=proj&q=priority%3Dhigh&shard_id=2&start=0'),
+        follow_redirects=False,
+        headers=mox.IsA(dict))
+    self.mox.ReplayAll()
+
+    processed_invalidations_up_to = 12345
+    me_user_ids = [555]
+    logged_in_user_id = 777
+    new_url_num = 201
+    frontendsearchpipeline._StartBackendSearchCall(
+        ['proj'], (2, 'priority=high'),
+        processed_invalidations_up_to,
+        me_user_ids,
+        logged_in_user_id,
+        new_url_num,
+        group_by_spec='cc')
+    self.mox.VerifyAll()
+
+  def testStartBackendSearchCall_SortAndGroup(self):
+    self.mox.StubOutWithMock(urlfetch, 'create_rpc')
+    self.mox.StubOutWithMock(urlfetch, 'make_fetch_call')
+    self.mox.StubOutWithMock(modules, 'get_hostname')
+    a_fake_rpc = testing_helpers.Blank(callback=None)
+    urlfetch.create_rpc(deadline=settings.backend_deadline).AndReturn(
+      a_fake_rpc)
+    modules.get_hostname(module='besearch')
+    urlfetch.make_fetch_call(
+        a_fake_rpc,
+        mox.StrContains(
+            urls.BACKEND_SEARCH + '?groupby=bar&' +
+            'invalidation_timestep=12345&' +
+            'logged_in_user_id=777&me_user_ids=555&num=201&projects=proj&' +
+            'q=priority%3Dhigh&shard_id=2&sort=foo&start=0'),
+        follow_redirects=False,
+        headers=mox.IsA(dict))
+    self.mox.ReplayAll()
+
+    processed_invalidations_up_to = 12345
+    me_user_ids = [555]
+    logged_in_user_id = 777
+    new_url_num = 201
+    sort_spec = 'foo'
+    group_by_spec = 'bar'
+    frontendsearchpipeline._StartBackendSearchCall(
+        ['proj'], (2, 'priority=high'),
+        processed_invalidations_up_to,
+        me_user_ids,
+        logged_in_user_id,
+        new_url_num,
+        sort_spec=sort_spec,
+        group_by_spec=group_by_spec)
+    self.mox.VerifyAll()
+
+  def testStartBackendNonviewableCall(self):
+    self.mox.StubOutWithMock(urlfetch, 'create_rpc')
+    self.mox.StubOutWithMock(urlfetch, 'make_fetch_call')
+    self.mox.StubOutWithMock(modules, 'get_hostname')
+    a_fake_rpc = testing_helpers.Blank(callback=None)
+    urlfetch.create_rpc(deadline=settings.backend_deadline).AndReturn(
+      a_fake_rpc)
+    modules.get_hostname(module='besearch')
+    urlfetch.make_fetch_call(
+      a_fake_rpc, mox.StrContains(urls.BACKEND_NONVIEWABLE),
+      follow_redirects=False, headers=mox.IsA(dict))
+    self.mox.ReplayAll()
+
+    processed_invalidations_up_to = 12345
+    frontendsearchpipeline._StartBackendNonviewableCall(
+      789, 111, 2, processed_invalidations_up_to)
+    self.mox.VerifyAll()
+
+  def testHandleBackendSearchResponse_500(self):
+    response_str = 'There was a problem processing the query.'
+    rpc = testing_helpers.Blank(
+      get_result=lambda: testing_helpers.Blank(
+          content=response_str, status_code=500))
+    rpc_tuple = (NOW, 2, rpc)
+    rpc_tuples = []  # Nothing should be added for this case.
+    filtered_iids = {}  # Search results should accumulate here, per-shard.
+    search_limit_reached = {}  # Booleans accumulate here, per-shard.
+    processed_invalidations_up_to = 12345
+
+    me_user_ids = [111]
+    logged_in_user_id = 0
+    new_url_num = 100
+    error_responses = set()
+
+    self.mox.StubOutWithMock(frontendsearchpipeline, '_StartBackendSearchCall')
+    frontendsearchpipeline._HandleBackendSearchResponse(
+        ['proj'], rpc_tuple, rpc_tuples, 0, filtered_iids, search_limit_reached,
+        processed_invalidations_up_to, error_responses, me_user_ids,
+        logged_in_user_id, new_url_num, 1, None, None)
+    self.assertEqual([], rpc_tuples)
+    self.assertIn(2, error_responses)
+
+  def testHandleBackendSearchResponse_Error(self):
+    response_str = (
+      '})]\'\n'
+      '{'
+      ' "unfiltered_iids": [],'
+      ' "search_limit_reached": false,'
+      ' "error": "Invalid query"'
+      '}'
+      )
+    rpc = testing_helpers.Blank(
+      get_result=lambda: testing_helpers.Blank(
+          content=response_str, status_code=200))
+    rpc_tuple = (NOW, 2, rpc)
+    rpc_tuples = []  # Nothing should be added for this case.
+    filtered_iids = {}  # Search results should accumulate here, per-shard.
+    search_limit_reached = {}  # Booleans accumulate here, per-shard.
+    processed_invalidations_up_to = 12345
+
+    me_user_ids = [111]
+    logged_in_user_id = 0
+    new_url_num = 100
+    error_responses = set()
+    frontendsearchpipeline._HandleBackendSearchResponse(
+        ['proj'], rpc_tuple, rpc_tuples, 2, filtered_iids, search_limit_reached,
+        processed_invalidations_up_to, error_responses, me_user_ids,
+        logged_in_user_id, new_url_num, 1, None, None)
+    self.assertEqual([], rpc_tuples)
+    self.assertEqual({2: []}, filtered_iids)
+    self.assertEqual({2: False}, search_limit_reached)
+    self.assertEqual({2}, error_responses)
+
+  def testHandleBackendSearchResponse_Normal(self):
+    response_str = (
+      '})]\'\n'
+      '{'
+      ' "unfiltered_iids": [10002, 10042],'
+      ' "search_limit_reached": false'
+      '}'
+      )
+    rpc = testing_helpers.Blank(
+      get_result=lambda: testing_helpers.Blank(
+          content=response_str, status_code=200))
+    rpc_tuple = (NOW, 2, rpc)
+    rpc_tuples = []  # Nothing should be added for this case.
+    filtered_iids = {}  # Search results should accumulate here, per-shard.
+    search_limit_reached = {}  # Booleans accumulate here, per-shard.
+    processed_invalidations_up_to = 12345
+
+    me_user_ids = [111]
+    logged_in_user_id = 0
+    new_url_num = 100
+    error_responses = set()
+    frontendsearchpipeline._HandleBackendSearchResponse(
+        ['proj'], rpc_tuple, rpc_tuples, 2, filtered_iids, search_limit_reached,
+        processed_invalidations_up_to, error_responses, me_user_ids,
+        logged_in_user_id, new_url_num, 1, None, None)
+    self.assertEqual([], rpc_tuples)
+    self.assertEqual({2: [10002, 10042]}, filtered_iids)
+    self.assertEqual({2: False}, search_limit_reached)
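+
+    # The leading '})]\'' line in response_str above is an XSSI-defense
+    # prefix that must be stripped before parsing; a plausible sketch of the
+    # handler's parsing step (assumed, not the verified implementation):
+    #
+    #   import json
+    #   XSSI_PREFIX = '})]\'\n'
+    #   data = json.loads(content[len(XSSI_PREFIX):])
+    #   unfiltered_iids = data['unfiltered_iids']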
+
+  def testHandleBackendSearchResponse_TriggersRetry(self):
+    response_str = None
+    rpc = testing_helpers.Blank(
+      get_result=lambda: testing_helpers.Blank(content=response_str))
+    rpc_tuple = (NOW, 2, rpc)
+    rpc_tuples = []  # A new RPC should be appended here.
+    filtered_iids = {}  # No change here until retry completes.
+    search_limit_reached = {}  # No change here until retry completes.
+    processed_invalidations_up_to = 12345
+    error_responses = set()
+
+    me_user_ids = [111]
+    logged_in_user_id = 0
+    new_url_num = 100
+
+    self.mox.StubOutWithMock(frontendsearchpipeline, '_StartBackendSearchCall')
+    a_fake_rpc = testing_helpers.Blank(callback=None)
+    rpc = frontendsearchpipeline._StartBackendSearchCall(
+        ['proj'],
+        2,
+        processed_invalidations_up_to,
+        me_user_ids,
+        logged_in_user_id,
+        new_url_num,
+        can=1,
+        group_by_spec=None,
+        sort_spec=None,
+        failfast=False).AndReturn(a_fake_rpc)
+    self.mox.ReplayAll()
+
+    frontendsearchpipeline._HandleBackendSearchResponse(
+        ['proj'], rpc_tuple, rpc_tuples, 2, filtered_iids, search_limit_reached,
+        processed_invalidations_up_to, error_responses, me_user_ids,
+        logged_in_user_id, new_url_num, 1, None, None)
+    self.mox.VerifyAll()
+    _, retry_shard_id, retry_rpc = rpc_tuples[0]
+    self.assertEqual(2, retry_shard_id)
+    self.assertEqual(a_fake_rpc, retry_rpc)
+    self.assertIsNotNone(retry_rpc.callback)
+    self.assertEqual({}, filtered_iids)
+    self.assertEqual({}, search_limit_reached)
+
+  def testHandleBackendNonviewableResponse_Error(self):
+    response_str = 'There was an error.'
+    rpc = testing_helpers.Blank(
+      get_result=lambda: testing_helpers.Blank(
+          content=response_str,
+          status_code=500
+      ))
+    rpc_tuple = (NOW, 2, rpc)
+    rpc_tuples = []  # Nothing should be added for this case.
+    nonviewable_iids = {}  # At-risk issue IDs accumulate here, per-shard.
+    processed_invalidations_up_to = 12345
+
+    self.mox.StubOutWithMock(
+        frontendsearchpipeline, '_StartBackendNonviewableCall')
+    frontendsearchpipeline._HandleBackendNonviewableResponse(
+      789, 111, 2, rpc_tuple, rpc_tuples, 0, nonviewable_iids,
+      processed_invalidations_up_to)
+    self.assertEqual([], rpc_tuples)
+    self.assertNotEqual({2: {10002, 10042}}, nonviewable_iids)
+
+  def testHandleBackendNonviewableResponse_Normal(self):
+    response_str = (
+      '})]\'\n'
+      '{'
+      ' "nonviewable": [10002, 10042]'
+      '}'
+      )
+    rpc = testing_helpers.Blank(
+      get_result=lambda: testing_helpers.Blank(
+          content=response_str,
+          status_code=200
+      ))
+    rpc_tuple = (NOW, 2, rpc)
+    rpc_tuples = []  # Nothing should be added for this case.
+    nonviewable_iids = {}  # At-risk issue IDs accumulate here, per-shard.
+    processed_invalidations_up_to = 12345
+
+    frontendsearchpipeline._HandleBackendNonviewableResponse(
+      789, 111, 2, rpc_tuple, rpc_tuples, 2, nonviewable_iids,
+      processed_invalidations_up_to)
+    self.assertEqual([], rpc_tuples)
+    self.assertEqual({2: {10002, 10042}}, nonviewable_iids)
+
+  def testHandleBackendAtRiskResponse_TriggersRetry(self):
+    response_str = None
+    rpc = testing_helpers.Blank(
+      get_result=lambda: testing_helpers.Blank(content=response_str))
+    rpc_tuple = (NOW, 2, rpc)
+    rpc_tuples = []  # A new RPC should be appended here.
+    nonviewable_iids = {}  # No change here until retry completes.
+    processed_invalidations_up_to = 12345
+
+    self.mox.StubOutWithMock(
+      frontendsearchpipeline, '_StartBackendNonviewableCall')
+    a_fake_rpc = testing_helpers.Blank(callback=None)
+    rpc = frontendsearchpipeline._StartBackendNonviewableCall(
+      789, 111, 2, processed_invalidations_up_to, failfast=False
+      ).AndReturn(a_fake_rpc)
+    self.mox.ReplayAll()
+
+    frontendsearchpipeline._HandleBackendNonviewableResponse(
+      789, 111, 2, rpc_tuple, rpc_tuples, 2, nonviewable_iids,
+      processed_invalidations_up_to)
+    self.mox.VerifyAll()
+    _, retry_shard_id, retry_rpc = rpc_tuples[0]
+    self.assertEqual(2, retry_shard_id)
+    self.assertIsNotNone(retry_rpc.callback)
+    self.assertEqual(a_fake_rpc, retry_rpc)
+    self.assertEqual({}, nonviewable_iids)
+
+  def testSortIssues(self):
+    services = service_manager.Services(
+        cache_manager=fake.CacheManager())
+    sorting.InitializeArtValues(services)
+
+    issue_1 = fake.MakeTestIssue(
+      789, 1, 'one', 'New', 111, labels=['Priority-High'])
+    issue_2 = fake.MakeTestIssue(
+      789, 2, 'two', 'New', 111, labels=['Priority-Low'])
+    issue_3 = fake.MakeTestIssue(
+      789, 3, 'three', 'New', 111, labels=['Priority-Medium'])
+    issues = [issue_1, issue_2, issue_3]
+    config = tracker_bizobj.MakeDefaultProjectIssueConfig(789)
+
+    sorted_issues = frontendsearchpipeline._SortIssues(
+        issues, config, {}, '', 'priority')
+
+    self.assertEqual(
+      [issue_1, issue_3, issue_2],  # Order is high, medium, low.
+      sorted_issues)
+
+
+class FrontendSearchPipelineShardMethodsTest(unittest.TestCase):
+
+  def setUp(self):
+    self.sharded_iids = {
+      (0, 'p:v'): [10, 20, 30, 40, 50],
+      (1, 'p:v'): [21, 41, 61, 81],
+      (2, 'p:v'): [42, 52, 62, 72, 102],
+      (3, 'p:v'): [],
+      }
+
+  def testTotalLength_Empty(self):
+    """If there were no results, the length of the sharded list is zero."""
+    self.assertEqual(0, frontendsearchpipeline._TotalLength({}))
+
+  def testTotalLength_Normal(self):
+    """The length of the sharded list is the sum of the shard lengths."""
+    self.assertEqual(
+        14, frontendsearchpipeline._TotalLength(self.sharded_iids))
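+
+    # _TotalLength evidently sums the shard lengths; a one-line sketch:
+    #
+    #   sum(len(iids) for iids in sharded_iids.values())  # 5+4+5+0 == 14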
+
+  def testReverseShards_Empty(self):
+    """Reversing an empty sharded list is still empty."""
+    empty_sharded_iids = {}
+    frontendsearchpipeline._ReverseShards(empty_sharded_iids)
+    self.assertEqual({}, empty_sharded_iids)
+
+  def testReverseShards_Normal(self):
+    """Reversing a sharded list reverses each shard."""
+    frontendsearchpipeline._ReverseShards(self.sharded_iids)
+    self.assertEqual(
+        {(0, 'p:v'): [50, 40, 30, 20, 10],
+         (1, 'p:v'): [81, 61, 41, 21],
+         (2, 'p:v'): [102, 72, 62, 52, 42],
+         (3, 'p:v'): [],
+         },
+        self.sharded_iids)
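+
+    # _ReverseShards evidently reverses each shard list in place; sketch:
+    #
+    #   for iids in sharded_iids.values():
+    #     iids.reverse()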
+
+  def testTrimShardedIIDs_Empty(self):
+    """If the sharded list is empty, trimming it makes no change."""
+    empty_sharded_iids = {}
+    frontendsearchpipeline._TrimEndShardedIIDs(empty_sharded_iids, [], 12)
+    self.assertEqual({}, empty_sharded_iids)
+
+    frontendsearchpipeline._TrimEndShardedIIDs(
+        empty_sharded_iids,
+        [(100, (0, 'p:v')), (88, (8, 'p:v')), (99, (9, 'p:v'))],
+        12)
+    self.assertEqual({}, empty_sharded_iids)
+
+  def testTrimShardedIIDs_NoSamples(self):
+    """If there are no samples, we don't trim off any IIDs."""
+    orig_sharded_iids = {
+      shard_id: iids[:] for shard_id, iids in self.sharded_iids.items()}
+    num_trimmed = frontendsearchpipeline._TrimEndShardedIIDs(
+        self.sharded_iids, [], 12)
+    self.assertEqual(0, num_trimmed)
+    self.assertEqual(orig_sharded_iids, self.sharded_iids)
+
+    num_trimmed = frontendsearchpipeline._TrimEndShardedIIDs(
+        self.sharded_iids, [], 1)
+    self.assertEqual(0, num_trimmed)
+    self.assertEqual(orig_sharded_iids, self.sharded_iids)
+
+  def testTrimShardedIIDs_Normal(self):
+    """The first 3 samples contribute all needed IIDs, so trim off the rest."""
+    samples = [(30, (0, 'p:v')), (41, (1, 'p:v')), (62, (2, 'p:v')),
+               (40, (0, 'p:v')), (81, (1, 'p:v'))]
+    num_trimmed = frontendsearchpipeline._TrimEndShardedIIDs(
+        self.sharded_iids, samples, 5)
+    self.assertEqual(2 + 1 + 0 + 0, num_trimmed)
+    self.assertEqual(
+        {  # shard_id: iids before lower-bound + iids before 1st excess sample.
+         (0, 'p:v'): [10, 20] + [30],
+         (1, 'p:v'): [21] + [41, 61],
+         (2, 'p:v'): [42, 52] + [62, 72, 102],
+         (3, 'p:v'): [] + []},
+        self.sharded_iids)
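+
+    # Reading the expectation above: with a need of 5 IIDs, the first three
+    # samples (30, 41, 62) already cover it, so each shard is cut just
+    # before its first excess sample (40 in shard 0, 81 in shard 1),
+    # trimming 2 + 1 + 0 + 0 = 3 IIDs in total.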
+
+  def testCalcSamplePositions_Empty(self):
+    sharded_iids = {0: []}
+    samples = []
+    self.assertEqual(
+      [], frontendsearchpipeline._CalcSamplePositions(sharded_iids, samples))
+
+    sharded_iids = {0: [10, 20, 30, 40]}
+    samples = []
+    self.assertEqual(
+      [], frontendsearchpipeline._CalcSamplePositions(sharded_iids, samples))
+
+    sharded_iids = {0: []}
+    # E.g., the IIDs 2 and 4 might have been trimmed out in the forward phase.
+    # But we still have them in the list for the backwards phase, and they
+    # should just not contribute anything to the result.
+    samples = [(2, (2, 'p:v')), (4, (4, 'p:v'))]
+    self.assertEqual(
+      [], frontendsearchpipeline._CalcSamplePositions(sharded_iids, samples))
+
+  def testCalcSamplePositions_Normal(self):
+    samples = [(30, (0, 'p:v')), (41, (1, 'p:v')), (62, (2, 'p:v')),
+               (40, (0, 'p:v')), (81, (1, 'p:v'))]
+    self.assertEqual(
+      [(30, (0, 'p:v'), 2),
+       (41, (1, 'p:v'), 1),
+       (62, (2, 'p:v'), 2),
+       (40, (0, 'p:v'), 3),
+       (81, (1, 'p:v'), 3)],
+      frontendsearchpipeline._CalcSamplePositions(self.sharded_iids, samples))
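+
+    # A sketch consistent with all of these cases: each (iid, shard_key)
+    # sample maps to the iid's index within that shard's list, and samples
+    # whose shard or iid is absent are skipped.  Inferred from the tests,
+    # not the verified implementation:
+    #
+    #   def _CalcSamplePositions(sharded_iids, samples):
+    #     positions = []
+    #     for iid, shard_key in samples:
+    #       iids = sharded_iids.get(shard_key, [])
+    #       if iid in iids:
+    #         positions.append((iid, shard_key, iids.index(iid)))
+    #     return positions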
diff --git a/search/test/query2ast_test.py b/search/test/query2ast_test.py
new file mode 100644
index 0000000..fc92e72
--- /dev/null
+++ b/search/test/query2ast_test.py
@@ -0,0 +1,1041 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Tests for the query2ast module."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import datetime
+import time
+import unittest
+import mock
+
+from proto import ast_pb2
+from proto import tracker_pb2
+from search import query2ast
+from tracker import tracker_bizobj
+
+BOOL = query2ast.BOOL
+DATE = query2ast.DATE
+NUM = query2ast.NUM
+TXT = query2ast.TXT
+
+SUBQUERY = query2ast.SUBQUERY
+LEFT_PAREN = query2ast.LEFT_PAREN
+RIGHT_PAREN = query2ast.RIGHT_PAREN
+OR = query2ast.OR
+
+BUILTIN_ISSUE_FIELDS = query2ast.BUILTIN_ISSUE_FIELDS
+ANY_FIELD = query2ast.BUILTIN_ISSUE_FIELDS['any_field']
+
+EQ = query2ast.EQ
+NE = query2ast.NE
+LT = query2ast.LT
+GT = query2ast.GT
+LE = query2ast.LE
+GE = query2ast.GE
+TEXT_HAS = query2ast.TEXT_HAS
+NOT_TEXT_HAS = query2ast.NOT_TEXT_HAS
+IS_DEFINED = query2ast.IS_DEFINED
+IS_NOT_DEFINED = query2ast.IS_NOT_DEFINED
+KEY_HAS = query2ast.KEY_HAS
+
+MakeCond = ast_pb2.MakeCond
+NOW = 1277762224
+
+
+class QueryParsingUnitTest(unittest.TestCase):
+
+  def setUp(self):
+    self.project_id = 789
+    self.default_config = tracker_bizobj.MakeDefaultProjectIssueConfig(
+        self.project_id)
+
+  def testParseUserQuery_OrClause(self):
+    # an "OR" query, which should look like two separate simple querys
+    # joined together by a pipe.
+    ast = query2ast.ParseUserQuery(
+        'ham OR fancy', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    conj1 = ast.conjunctions[0]
+    conj2 = ast.conjunctions[1]
+    self.assertEqual([MakeCond(TEXT_HAS, [ANY_FIELD], ['ham'], [])],
+                     conj1.conds)
+    self.assertEqual([MakeCond(TEXT_HAS, [ANY_FIELD], ['fancy'], [])],
+                     conj2.conds)
+
+  def testParseUserQuery_Words(self):
+    # an "ORTerm" is actually anything appearing on either side of an
+    # "OR" operator. So this could be thought of as "simple" query parsing.
+
+    # a simple query with no spaces
+    ast = query2ast.ParseUserQuery(
+        'hamfancy', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    fulltext_cond = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD], ['hamfancy'], []), fulltext_cond)
+
+    # negative word
+    ast = query2ast.ParseUserQuery(
+        '-hamfancy', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    fulltext_cond = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        # Note: this is NOT_TEXT_HAS, not TEXT_HAS.
+        MakeCond(NOT_TEXT_HAS, [ANY_FIELD], ['hamfancy'], []),
+        fulltext_cond)
+
+    # invalid fulltext term
+    ast = query2ast.ParseUserQuery(
+        'ham=fancy\\', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    self.assertEqual([], ast.conjunctions[0].conds)
+
+    # an explicit "AND" query in the "featured" context
+    warnings = []
+    query2ast.ParseUserQuery(
+        'ham AND fancy', 'label:featured', BUILTIN_ISSUE_FIELDS,
+        self.default_config, warnings=warnings)
+    self.assertEqual(
+      ['The only supported boolean operator is OR (all capitals).'],
+      warnings)
+
+    # an implicit "AND" query
+    ast = query2ast.ParseUserQuery(
+        'ham fancy', '-label:deprecated', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    scope_cond1, ft_cond1, ft_cond2 = ast.conjunctions[0].conds
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['deprecated'], []),
+        scope_cond1)
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD], ['ham'], []), ft_cond1)
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD], ['fancy'], []), ft_cond2)
+
+    # Use word with non-operator prefix.
+    word_with_non_op_prefix = '%stest' % query2ast.NON_OP_PREFIXES[0]
+    ast = query2ast.ParseUserQuery(
+        word_with_non_op_prefix, '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    fulltext_cond = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD], ['"%s"' % word_with_non_op_prefix], []),
+        fulltext_cond)
+
+    # mix positive and negative words
+    ast = query2ast.ParseUserQuery(
+        'ham -fancy', '-label:deprecated', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    scope_cond1, ft_cond1, ft_cond2 = ast.conjunctions[0].conds
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['deprecated'], []),
+        scope_cond1)
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD], ['ham'], []), ft_cond1)
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [ANY_FIELD], ['fancy'], []), ft_cond2)
+
+    # converts terms to lower case
+    ast = query2ast.ParseUserQuery(
+        'AmDude', '-label:deprecated', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    scope_cond1, fulltext_cond = ast.conjunctions[0].conds
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['deprecated'], []),
+        scope_cond1)
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD], ['amdude'], []), fulltext_cond)
+
+  def testParseUserQuery_Phrases(self):
+    # positive phrases
+    ast = query2ast.ParseUserQuery(
+        '"one two"', '-label:deprecated', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    scope_cond1, fulltext_cond = ast.conjunctions[0].conds
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['deprecated'], []),
+        scope_cond1)
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD], ['"one two"'], []), fulltext_cond)
+
+    # negative phrases
+    ast = query2ast.ParseUserQuery(
+        '-"one two"', '-label:deprecated', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    scope_cond1, fulltext_cond = ast.conjunctions[0].conds
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['deprecated'], []),
+        scope_cond1)
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [ANY_FIELD], ['"one two"'], []), fulltext_cond)
+
+    # multiple phrases
+    ast = query2ast.ParseUserQuery(
+        '-"a b" "x y"', '-label:deprecated', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    scope_cond1, ft_cond1, ft_cond2 = ast.conjunctions[0].conds
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['deprecated'], []),
+        scope_cond1)
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [ANY_FIELD], ['"a b"'], []), ft_cond1)
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD], ['"x y"'], []), ft_cond2)
+
+  def testParseUserQuery_CodeSyntaxThatWeNeedToCopeWith(self):
+    # positive phrases
+    ast = query2ast.ParseUserQuery(
+        'Base::Tuple', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD],
+                 ['"base::tuple"'], []),
+        cond)
+
+    # stuff we just ignore
+    ast = query2ast.ParseUserQuery(
+        ':: - -- .', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    self.assertEqual([], ast.conjunctions[0].conds)
+
+  def testParseUserQuery_IsOperator(self):
+    """Test is:open, is:spam, and is:blocked."""
+    for keyword in ['open', 'spam', 'blocked']:
+      ast = query2ast.ParseUserQuery(
+          'is:' + keyword, '', BUILTIN_ISSUE_FIELDS, self.default_config)
+      cond1 = ast.conjunctions[0].conds[0]
+      self.assertEqual(
+          MakeCond(EQ, [BUILTIN_ISSUE_FIELDS[keyword]], [], []),
+          cond1)
+      ast = query2ast.ParseUserQuery(
+          '-is:' + keyword, '', BUILTIN_ISSUE_FIELDS, self.default_config)
+      cond1 = ast.conjunctions[0].conds[0]
+      self.assertEqual(
+          MakeCond(NE, [BUILTIN_ISSUE_FIELDS[keyword]], [], []),
+          cond1)
+
+  def testParseUserQuery_HasOperator(self):
+    # Search for issues with at least one attachment
+    ast = query2ast.ParseUserQuery(
+        'has:attachment', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(IS_DEFINED, [BUILTIN_ISSUE_FIELDS['attachment']], [], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        '-has:attachment', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(IS_NOT_DEFINED, [BUILTIN_ISSUE_FIELDS['attachment']], [], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'has=attachment', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(IS_DEFINED, [BUILTIN_ISSUE_FIELDS['attachment']], [], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        '-has=attachment', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(IS_NOT_DEFINED, [BUILTIN_ISSUE_FIELDS['attachment']], [], []),
+        cond1)
+
+    # Numeric fields also work with the 'has' prefix.
+    ast = query2ast.ParseUserQuery(
+        'has:attachments', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(IS_DEFINED, [BUILTIN_ISSUE_FIELDS['attachments']], [], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        '-has:attachments', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(IS_NOT_DEFINED, [BUILTIN_ISSUE_FIELDS['attachments']],
+                 [], []),
+        cond1)
+
+    # If it is not a field, look for any key-value label.
+    ast = query2ast.ParseUserQuery(
+        'has:Size', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(IS_DEFINED, [BUILTIN_ISSUE_FIELDS['label']], ['size'], []),
+        cond1)
+
+  def testParseUserQuery_Phase(self):
+    ast = query2ast.ParseUserQuery(
+        'gate:Canary,Stable', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [BUILTIN_ISSUE_FIELDS['gate']],
+                 ['canary', 'stable'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        '-gate:Canary,Stable', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [BUILTIN_ISSUE_FIELDS['gate']],
+                 ['canary', 'stable'], []),
+        cond1)
+
+  def testParseUserQuery_Components(self):
+    """Parse user queries for components"""
+    ast = query2ast.ParseUserQuery(
+        'component:UI', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [BUILTIN_ISSUE_FIELDS['component']],
+                 ['ui'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'Component:UI>AboutBox', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [BUILTIN_ISSUE_FIELDS['component']],
+                 ['ui>aboutbox'], []),
+        cond1)
+
+  def testParseUserQuery_OwnersReportersAndCc(self):
+    """Parse user queries for owner:, reporter: and cc:."""
+    ast = query2ast.ParseUserQuery(
+        'owner:user', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [BUILTIN_ISSUE_FIELDS['owner']],
+                 ['user'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'owner:user@example.com', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [BUILTIN_ISSUE_FIELDS['owner']],
+                 ['user@example.com'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'owner=user@example.com', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(EQ, [BUILTIN_ISSUE_FIELDS['owner']],
+                 ['user@example.com'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        '-reporter=user@example.com', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(NE, [BUILTIN_ISSUE_FIELDS['reporter']],
+                 ['user@example.com'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'cc=user@example.com,user2@example.com', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(EQ, [BUILTIN_ISSUE_FIELDS['cc']],
+                 ['user@example.com', 'user2@example.com'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'cc:user,user2', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [BUILTIN_ISSUE_FIELDS['cc']],
+                 ['user', 'user2'], []),
+        cond1)
+
+  def testParseUserQuery_SearchWithinFields(self):
+    # Search for issues with certain filenames
+    ast = query2ast.ParseUserQuery(
+        'attachment:filename', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [BUILTIN_ISSUE_FIELDS['attachment']],
+                 ['filename'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        '-attachment:filename', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(NOT_TEXT_HAS, [BUILTIN_ISSUE_FIELDS['attachment']],
+                 ['filename'], []),
+        cond1)
+
+    # Search for issues with a certain number of attachments
+    ast = query2ast.ParseUserQuery(
+        'attachments:2', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [BUILTIN_ISSUE_FIELDS['attachments']],
+                 ['2'], [2]),
+        cond1)
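+    # Note that the numeric operand is captured both as a string (['2'])
+    # and as a parsed int ([2]).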
+
+    # Searches with '=' syntax
+    ast = query2ast.ParseUserQuery(
+        'attachment=filename', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(EQ, [BUILTIN_ISSUE_FIELDS['attachment']],
+                 ['filename'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        '-attachment=filename', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(NE, [BUILTIN_ISSUE_FIELDS['attachment']],
+                 ['filename'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'milestone=2009', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(EQ, [BUILTIN_ISSUE_FIELDS['label']], ['milestone-2009'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        '-milestone=2009', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(NE, [BUILTIN_ISSUE_FIELDS['label']], ['milestone-2009'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'milestone=2009-Q1', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(EQ, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['milestone-2009-q1'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        '-milestone=2009-Q1', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(NE, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['milestone-2009-q1'], []),
+        cond1)
+
+    # Searches with ':' syntax
+    ast = query2ast.ParseUserQuery(
+        'summary:foo', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS,
+                 [BUILTIN_ISSUE_FIELDS['summary']], ['foo'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'summary:"greetings programs"', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS,
+                 [BUILTIN_ISSUE_FIELDS['summary']], ['greetings programs'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'summary:"&#1234;"', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS,
+                 [BUILTIN_ISSUE_FIELDS['summary']], ['&#1234;'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'priority:high', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(KEY_HAS,
+                 [BUILTIN_ISSUE_FIELDS['label']], ['priority-high'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'type:security', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(KEY_HAS,
+                 [BUILTIN_ISSUE_FIELDS['label']], ['type-security'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'label:priority-high', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS,
+                 [BUILTIN_ISSUE_FIELDS['label']], ['priority-high'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'blockedon:other:123', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [BUILTIN_ISSUE_FIELDS['blockedon']],
+                 ['other:123'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'cost=-2', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(EQ, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['cost--2'], []),
+        cond1)
+
+    # Searches with ':' and an email domain only.
+    ast = query2ast.ParseUserQuery(
+        'reporter:@google.com', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS,
+                 [BUILTIN_ISSUE_FIELDS['reporter']], ['@google.com'], []),
+        cond1)
+
+    # Search for issues in certain user hotlists.
+    ast = query2ast.ParseUserQuery(
+        'hotlist=gatsby@chromium.org:Hotlist1', '',
+        BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(
+            EQ, [BUILTIN_ISSUE_FIELDS['hotlist']],
+            ['gatsby@chromium.org:hotlist1'], []),
+        cond1)
+
+    # Search for 'Hotlist' labels.
+    ast = query2ast.ParseUserQuery(
+        'hotlist:sublabel', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(KEY_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['hotlist-sublabel'], []),
+        cond1)
+
+  def testParseUserQuery_SearchWithinCustomFields(self):
+    """Enums are treated as labels, other fields are kept as fields."""
+    fd1 = tracker_bizobj.MakeFieldDef(
+        1, self.project_id, 'Size', tracker_pb2.FieldTypes.ENUM_TYPE,
+        'applic', 'applic', False, False, False, None, None, None, False, None,
+        None, None, 'no_action', 'doc', False)
+    fd2 = tracker_bizobj.MakeFieldDef(
+        1, self.project_id, 'EstDays', tracker_pb2.FieldTypes.INT_TYPE,
+        'applic', 'applic', False, False, False, None, None, None, False, None,
+        None, None, 'no_action', 'doc', False)
+    self.default_config.field_defs.extend([fd1, fd2])
+    ast = query2ast.ParseUserQuery(
+        'Size:Small EstDays>3', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    cond2 = ast.conjunctions[0].conds[1]
+    self.assertEqual(
+        MakeCond(KEY_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['size-small'], []),
+        cond1)
+    self.assertEqual(
+        MakeCond(GT, [fd2], ['3'], [3]),
+        cond2)
+
+  @mock.patch('time.time', return_value=NOW)
+  def testParseUserQuery_Approvals(self, _mock_time):
+    """Test approval queries are parsed correctly."""
+    fd1 = tracker_bizobj.MakeFieldDef(
+        1, self.project_id, 'UIReview', tracker_pb2.FieldTypes.APPROVAL_TYPE,
+        'applic', 'applic', False, False, False, None, None, None, False, None,
+        None, None, 'no_action', 'doc', False)
+    fd2 = tracker_bizobj.MakeFieldDef(
+        2, self.project_id, 'EstDays', tracker_pb2.FieldTypes.INT_TYPE,
+        'applic', 'applic', False, False, False, None, None, None, False, None,
+        None, None, 'no_action', 'doc', False)
+    fd3 = tracker_bizobj.MakeFieldDef(
+        3, self.project_id, 'UXReview', tracker_pb2.FieldTypes.APPROVAL_TYPE,
+        'applic', 'applic', False, False, False, None, None, None, False, None,
+        None, None, 'no_action', 'doc', False)
+    self.default_config.field_defs.extend([fd1, fd2, fd3])
+    ast = query2ast.ParseUserQuery(
+        'UXReview-approver:user1@mail.com,user2@mail.com UIReview:Approved '
+        'UIReview-on>today-7', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    cond2 = ast.conjunctions[0].conds[1]
+    cond3 = ast.conjunctions[0].conds[2]
+    self.assertEqual(MakeCond(TEXT_HAS, [fd3],
+                              ['user1@mail.com', 'user2@mail.com'], [],
+                              key_suffix='-approver'), cond1)
+    self.assertEqual(MakeCond(TEXT_HAS, [fd1], ['approved'], []), cond2)
+    self.assertEqual(
+        cond3,
+        MakeCond(
+            GT, [fd1], [], [query2ast._CalculatePastDate(7, NOW)],
+            key_suffix='-on'))
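+
+    # Approval sub-queries are expressed via a key_suffix ('-approver',
+    # '-on') on the approval field's own condition rather than via
+    # separate field definitions.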
+
+  def testParseUserQuery_PhaseFields(self):
+    fd = tracker_bizobj.MakeFieldDef(
+        1, self.project_id, 'EstDays', tracker_pb2.FieldTypes.INT_TYPE,
+        'applic', 'applic', False, False, False, None, None, None, False, None,
+        None, None, 'no_action', 'doc', False, is_phase_field=True)
+    self.default_config.field_defs.append(fd)
+    ast = query2ast.ParseUserQuery(
+        'UXReview.EstDays>3', '', BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(GT, [fd], ['3'], [3], phase_name='uxreview'),
+        cond1)
+
+  def testParseUserQuery_QuickOr(self):
+    # Quick-or searches.
+    ast = query2ast.ParseUserQuery(
+        'milestone:2008,2009,2010', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(KEY_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['milestone-2008', 'milestone-2009', 'milestone-2010'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'label:milestone-2008,milestone-2009,milestone-2010', '',
+        BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['milestone-2008', 'milestone-2009', 'milestone-2010'], []),
+        cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'milestone=2008,2009,2010', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(EQ, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['milestone-2008', 'milestone-2009', 'milestone-2010'], []),
+        cond1)
+
+    # Doubled and trailing commas are ignored.
+    ast = query2ast.ParseUserQuery(
+        'milestone=2008,,2009,2010,', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    self.assertEqual(
+        MakeCond(EQ, [BUILTIN_ISSUE_FIELDS['label']],
+                 ['milestone-2008', 'milestone-2009', 'milestone-2010'], []),
+        cond1)
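+
+    # In each case above, the comma list collapses into a single condition
+    # whose values are matched disjunctively, not into several conditions.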
+
+  def testParseUserQuery_Dates(self):
+    # Query with a date range.
+    ast = query2ast.ParseUserQuery(
+        'modified>=2009-5-12', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    ts1 = int(time.mktime(datetime.datetime(2009, 5, 12).timetuple()))
+    self.assertEqual(
+        MakeCond(GE, [BUILTIN_ISSUE_FIELDS['modified']], [], [ts1]), cond1)
+
+    # Query with quick-or date values.
+    ast = query2ast.ParseUserQuery(
+        'modified=2009-5-12,2009-5-13', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config)
+    cond1 = ast.conjunctions[0].conds[0]
+    ts1 = int(time.mktime(datetime.datetime(2009, 5, 12).timetuple()))
+    ts2 = int(time.mktime(datetime.datetime(2009, 5, 13).timetuple()))
+    self.assertEqual(
+        MakeCond(EQ, [BUILTIN_ISSUE_FIELDS['modified']], [], [ts1, ts2]), cond1)
+
+    # Query with multiple date ranges.
+    ast = query2ast.ParseUserQuery(
+        'modified>=2009-5-12 opened<2008/1/1', '',
+        BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1, cond2 = ast.conjunctions[0].conds
+    ts1 = int(time.mktime(datetime.datetime(2009, 5, 12).timetuple()))
+    self.assertEqual(
+        MakeCond(GE, [BUILTIN_ISSUE_FIELDS['modified']], [], [ts1]), cond1)
+    ts2 = int(time.mktime(datetime.datetime(2008, 1, 1).timetuple()))
+    self.assertEqual(
+        MakeCond(LT, [BUILTIN_ISSUE_FIELDS['opened']], [], [ts2]), cond2)
+
+    # Query with multiple date ranges plus full-text search terms.
+    ast = query2ast.ParseUserQuery(
+        'one two modified>=2009-5-12 opened<2008/1/1', '',
+        BUILTIN_ISSUE_FIELDS, self.default_config)
+    ft_cond1, ft_cond2, cond1, cond2 = ast.conjunctions[0].conds
+    ts1 = int(time.mktime(datetime.datetime(2009, 5, 12).timetuple()))
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD], ['one'], []), ft_cond1)
+    self.assertEqual(
+        MakeCond(TEXT_HAS, [ANY_FIELD], ['two'], []), ft_cond2)
+    self.assertEqual(
+        MakeCond(GE, [BUILTIN_ISSUE_FIELDS['modified']], [], [ts1]), cond1)
+    ts2 = int(time.mktime(datetime.datetime(2008, 1, 1).timetuple()))
+    self.assertEqual(
+        MakeCond(LT, [BUILTIN_ISSUE_FIELDS['opened']], [], [ts2]), cond2)
+
+    # Query with a date field compared to "today".
+    ast = query2ast.ParseUserQuery(
+        'modified<today', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config, now=NOW)
+    cond1 = ast.conjunctions[0].conds[0]
+    ts1 = query2ast._CalculatePastDate(0, now=NOW)
+    self.assertEqual(MakeCond(LT, [BUILTIN_ISSUE_FIELDS['modified']],
+                              [], [ts1]),
+                     cond1)
+
+    # Query with a date range using the today-N alias.
+    ast = query2ast.ParseUserQuery(
+        'modified>=today-13', '', BUILTIN_ISSUE_FIELDS,
+        self.default_config, now=NOW)
+    cond1 = ast.conjunctions[0].conds[0]
+    ts1 = query2ast._CalculatePastDate(13, now=NOW)
+    self.assertEqual(MakeCond(GE, [BUILTIN_ISSUE_FIELDS['modified']],
+                              [], [ts1]),
+                     cond1)
+
+    ast = query2ast.ParseUserQuery(
+        'modified>today-13', '', BUILTIN_ISSUE_FIELDS, self.default_config,
+        now=NOW)
+    cond1 = ast.conjunctions[0].conds[0]
+    ts1 = query2ast._CalculatePastDate(13, now=NOW)
+    self.assertEqual(MakeCond(GT, [BUILTIN_ISSUE_FIELDS['modified']],
+                              [], [ts1]),
+                     cond1)
+
+    # Query with multiple legacy date terms (modified-after, etc.).
+    ast = query2ast.ParseUserQuery(
+        'modified-after:2009-5-12 opened-before:2008/1/1 '
+        'closed-after:2007-2-1', '',
+        BUILTIN_ISSUE_FIELDS, self.default_config)
+    cond1, cond2, cond3 = ast.conjunctions[0].conds
+    ts1 = int(time.mktime(datetime.datetime(2009, 5, 12).timetuple()))
+    self.assertEqual(
+        MakeCond(GT, [BUILTIN_ISSUE_FIELDS['modified']], [], [ts1]), cond1)
+    ts2 = int(time.mktime(datetime.datetime(2008, 1, 1).timetuple()))
+    self.assertEqual(
+        MakeCond(LT, [BUILTIN_ISSUE_FIELDS['opened']], [], [ts2]), cond2)
+    ts3 = int(time.mktime(datetime.datetime(2007, 2, 1).timetuple()))
+    self.assertEqual(
+        MakeCond(GT, [BUILTIN_ISSUE_FIELDS['closed']], [], [ts3]), cond3)
+
+  def testCalculatePastDate(self):
+    ts1 = query2ast._CalculatePastDate(0, now=NOW)
+    self.assertEqual(NOW, ts1)
+
+    ts2 = query2ast._CalculatePastDate(13, now=NOW)
+    self.assertEqual(ts2, NOW - 13 * 24 * 60 * 60)
+
+    # Try it once with time.time() instead of a known timestamp.
+    ts_system_clock = query2ast._CalculatePastDate(13)
+    self.assertTrue(ts_system_clock < int(time.time()))
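+
+    # A sketch of what the helper presumably computes, based only on the
+    # expectations above (illustrative, not the actual query2ast code):
+    #
+    #   def _CalculatePastDate(days_ago, now=None):
+    #     return int(now or time.time()) - days_ago * 24 * 60 * 60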
+
+  def testParseUserQuery_BadDates(self):
+    bad_dates = ['today-13h', 'yesterday', '2/2', 'm/y/d',
+                 '99/99/1999', '0-0-0']
+    for val in bad_dates:
+      with self.assertRaises(query2ast.InvalidQueryError) as cm:
+        query2ast.ParseUserQuery(
+            'modified>=' + val, '', BUILTIN_ISSUE_FIELDS,
+            self.default_config)
+      self.assertEqual('Could not parse date: ' + val, cm.exception.message)
+
+  def testQueryToSubqueries_BasicQuery(self):
+    self.assertEqual(['owner:me'], query2ast.QueryToSubqueries('owner:me'))
+
+  def testQueryToSubqueries_EmptyQuery(self):
+    self.assertEqual([''], query2ast.QueryToSubqueries(''))
+
+  def testQueryToSubqueries_UnmatchedParenthesesThrowsError(self):
+    with self.assertRaises(query2ast.InvalidQueryError):
+      self.assertEqual(['Pri=1'], query2ast.QueryToSubqueries('Pri=1))'))
+    with self.assertRaises(query2ast.InvalidQueryError):
+      self.assertEqual(
+          ['label:Hello'], query2ast.QueryToSubqueries('((label:Hello'))
+
+    with self.assertRaises(query2ast.InvalidQueryError):
+      self.assertEqual(
+          ['owner:me'], query2ast.QueryToSubqueries('((((owner:me)))'))
+
+    with self.assertRaises(query2ast.InvalidQueryError):
+      self.assertEqual(
+          ['test=What'], query2ast.QueryToSubqueries('(((test=What))))'))
+
+  def testQueryToSubqueries_IgnoresEmptyGroups(self):
+    self.assertEqual([''], query2ast.QueryToSubqueries('()(()(()))()()'))
+
+    self.assertEqual(
+        ['owner:me'], query2ast.QueryToSubqueries('()(()owner:me)()()'))
+
+  def testQueryToSubqueries_BasicOr(self):
+    self.assertEqual(
+        ['owner:me', 'status:New', 'Pri=1'],
+        query2ast.QueryToSubqueries('owner:me OR status:New OR Pri=1'))
+
+  def testQueryToSubqueries_OrAtStartOrEnd(self):
+    self.assertEqual(
+        ['owner:me OR'], query2ast.QueryToSubqueries('owner:me OR'))
+
+    self.assertEqual(
+        ['OR owner:me'], query2ast.QueryToSubqueries('OR owner:me'))
+
+  def testQueryToSubqueries_BasicParentheses(self):
+    self.assertEqual(
+        ['owner:me status:New'],
+        query2ast.QueryToSubqueries('owner:me (status:New)'))
+
+    self.assertEqual(
+        ['owner:me status:New'],
+        query2ast.QueryToSubqueries('(owner:me) status:New'))
+
+    self.assertEqual(
+        ['owner:me status:New'],
+        query2ast.QueryToSubqueries('((owner:me) (status:New))'))
+
+  def testQueryToSubqueries_ParenthesesWithOr(self):
+    self.assertEqual(
+        ['Pri=1 owner:me', 'Pri=1 status:New'],
+        query2ast.QueryToSubqueries('Pri=1 (owner:me OR status:New)'))
+
+    self.assertEqual(
+        ['owner:me component:OhNo', 'status:New component:OhNo'],
+        query2ast.QueryToSubqueries('(owner:me OR status:New) component:OhNo'))
+
+  def testQueryToSubqueries_ParenthesesWithOr_Multiple(self):
+    self.assertEqual(
+        [
+            'Pri=1 test owner:me', 'Pri=1 test status:New',
+            'Pri=2 test owner:me', 'Pri=2 test status:New'
+        ],
+        query2ast.QueryToSubqueries(
+            '(Pri=1 OR Pri=2)(test (owner:me OR status:New))'))
+
+  def testQueryToSubqueries_OrNextToParentheses(self):
+    self.assertEqual(['A', 'B'], query2ast.QueryToSubqueries('(A) OR (B)'))
+
+    self.assertEqual(
+        ['A B', 'A C E', 'A D E'],
+        query2ast.QueryToSubqueries('A (B OR (C OR D) E)'))
+
+    self.assertEqual(
+        ['A B C', 'A B D', 'A E'],
+        query2ast.QueryToSubqueries('A (B (C OR D) OR E)'))
+
+  def testQueryToSubqueries_ExtraSpaces(self):
+    self.assertEqual(
+        ['A', 'B'], query2ast.QueryToSubqueries(' ( A )   OR  ( B ) '))
+
+    self.assertEqual(
+        ['A B', 'A C E', 'A D E'],
+        query2ast.QueryToSubqueries(' A  ( B   OR   ( C  OR  D )  E )'))
+
+  def testQueryToSubqueries_OrAtEndOfParentheses(self):
+    self.assertEqual(['A B'], query2ast.QueryToSubqueries('(A OR )(B)'))
+    self.assertEqual(
+        ['A B', 'A C'], query2ast.QueryToSubqueries('( OR A)(B OR C)'))
+    self.assertEqual(
+        ['A B', 'A C'], query2ast.QueryToSubqueries(' OR A (B OR C)'))
+
+  def testQueryToSubqueries_EmptyOrGroup(self):
+    self.assertEqual(
+        ['A C', 'C', 'B C'], query2ast.QueryToSubqueries('(A OR  OR B)(C)'))
+
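+  # The _ParseQuery/_ParseOrGroup/_ParseAndGroup cases below exercise what
+  # looks like a small recursive-descent grammar over the token stream (a
+  # sketch inferred from these tests, not documentation of the parser):
+  #
+  #   Query    := OrGroup (OR OrGroup)*    # alternatives concatenate
+  #   OrGroup  := AndGroup AndGroup*       # adjacent groups multiply
+  #   AndGroup := SUBQUERY | '(' Query ')'
+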
+  def testParseQuery_Basic(self):
+    self.assertEqual(
+        [
+            'owner:me',
+        ],
+        query2ast._ParseQuery(
+            query2ast.PeekIterator(
+                [ast_pb2.QueryToken(token_type=SUBQUERY, value='owner:me')])))
+
+  def testParseQuery_Complex(self):
+    self.assertEqual(
+        [
+            'owner:me',
+            'Pri=1',
+            'label=test',
+        ],
+        query2ast._ParseQuery(
+            query2ast.PeekIterator(
+                [
+                    ast_pb2.QueryToken(token_type=SUBQUERY, value='owner:me'),
+                    ast_pb2.QueryToken(token_type=OR),
+                    ast_pb2.QueryToken(token_type=LEFT_PAREN),
+                    ast_pb2.QueryToken(token_type=SUBQUERY, value='Pri=1'),
+                    ast_pb2.QueryToken(token_type=RIGHT_PAREN),
+                    ast_pb2.QueryToken(token_type=OR),
+                    ast_pb2.QueryToken(token_type=SUBQUERY, value='label=test'),
+                ])))
+
+  def testParseOrGroup_Basic(self):
+    self.assertEqual(
+        [
+            'owner:me',
+        ],
+        query2ast._ParseOrGroup(
+            query2ast.PeekIterator(
+                [ast_pb2.QueryToken(token_type=SUBQUERY, value='owner:me')])))
+
+  def testParseOrGroup_TwoAdjacentAndGroups(self):
+    self.assertEqual(
+        [
+            'owner:me Pri=1',
+            'owner:me label=test',
+        ],
+        query2ast._ParseOrGroup(
+            query2ast.PeekIterator(
+                [
+                    ast_pb2.QueryToken(token_type=SUBQUERY, value='owner:me'),
+                    ast_pb2.QueryToken(token_type=LEFT_PAREN),
+                    ast_pb2.QueryToken(token_type=SUBQUERY, value='Pri=1'),
+                    ast_pb2.QueryToken(token_type=OR),
+                    ast_pb2.QueryToken(token_type=SUBQUERY, value='label=test'),
+                    ast_pb2.QueryToken(token_type=RIGHT_PAREN),
+                ])))
+
+  def testParseAndGroup_Subquery(self):
+    self.assertEqual(
+        [
+            'owner:me',
+        ],
+        query2ast._ParseAndGroup(
+            query2ast.PeekIterator(
+                [ast_pb2.QueryToken(token_type=SUBQUERY, value='owner:me')])))
+
+  def testParseAndGroup_ParenthesesGroup(self):
+    self.assertEqual(
+        [
+            'owner:me',
+            'Pri=1',
+        ],
+        query2ast._ParseAndGroup(
+            query2ast.PeekIterator(
+                [
+                    ast_pb2.QueryToken(token_type=LEFT_PAREN),
+                    ast_pb2.QueryToken(token_type=SUBQUERY, value='owner:me'),
+                    ast_pb2.QueryToken(token_type=OR),
+                    ast_pb2.QueryToken(token_type=SUBQUERY, value='Pri=1'),
+                    ast_pb2.QueryToken(token_type=RIGHT_PAREN),
+                ])))
+
+  def testParseAndGroup_Empty(self):
+    self.assertEqual([], query2ast._ParseAndGroup(query2ast.PeekIterator([])))
+
+  def testParseAndGroup_InvalidTokens(self):
+    with self.assertRaises(query2ast.InvalidQueryError):
+      query2ast._ParseAndGroup(
+          query2ast.PeekIterator(
+              [
+                  ast_pb2.QueryToken(token_type=OR),
+                  ast_pb2.QueryToken(token_type=SUBQUERY, value='Pri=1'),
+                  ast_pb2.QueryToken(token_type=RIGHT_PAREN),
+              ]))
+
+    with self.assertRaises(query2ast.InvalidQueryError):
+      query2ast._ParseAndGroup(
+          query2ast.PeekIterator(
+              [
+                  ast_pb2.QueryToken(token_type=RIGHT_PAREN),
+                  ast_pb2.QueryToken(token_type=OR),
+                  ast_pb2.QueryToken(token_type=SUBQUERY, value='Pri=1'),
+              ]))
+
+  def testValidateAndTokenizeQuery_Basic(self):
+    self.assertEqual(
+        [
+            ast_pb2.QueryToken(token_type=LEFT_PAREN),
+            ast_pb2.QueryToken(token_type=SUBQUERY, value='owner:me'),
+            ast_pb2.QueryToken(token_type=OR),
+            ast_pb2.QueryToken(token_type=SUBQUERY, value='Pri=1'),
+            ast_pb2.QueryToken(token_type=RIGHT_PAREN),
+        ], query2ast._ValidateAndTokenizeQuery('(owner:me OR Pri=1)'))
+
+  def testValidateAndTokenizeQuery_UnmatchedParentheses(self):
+    with self.assertRaises(query2ast.InvalidQueryError):
+      query2ast._ValidateAndTokenizeQuery('(owner:me')
+
+    with self.assertRaises(query2ast.InvalidQueryError):
+      query2ast._ValidateAndTokenizeQuery('owner:me)')
+
+    with self.assertRaises(query2ast.InvalidQueryError):
+      query2ast._ValidateAndTokenizeQuery('(()owner:me))')
+
+    with self.assertRaises(query2ast.InvalidQueryError):
+      query2ast._ValidateAndTokenizeQuery('(()owner:me)())')
+
+  def testTokenizeSubqueryOnOr_NoOrOperator(self):
+    self.assertEqual(
+        [ast_pb2.QueryToken(token_type=SUBQUERY, value='owner:me')],
+        query2ast._TokenizeSubqueryOnOr('owner:me'))
+
+  def testTokenizeSubqueryOnOr_BasicOrOperator(self):
+    self.assertEqual(
+        [
+            ast_pb2.QueryToken(token_type=SUBQUERY, value='A'),
+            ast_pb2.QueryToken(token_type=OR),
+            ast_pb2.QueryToken(token_type=SUBQUERY, value='B'),
+            ast_pb2.QueryToken(token_type=OR),
+            ast_pb2.QueryToken(token_type=SUBQUERY, value='C'),
+        ], query2ast._TokenizeSubqueryOnOr('A OR B OR C'))
+
+  def testTokenizeSubqueryOnOr_EmptyOrOperator(self):
+    self.assertEqual(
+        [ast_pb2.QueryToken(token_type=OR)],
+        query2ast._TokenizeSubqueryOnOr(' OR '))
+
+    self.assertEqual(
+        [
+            ast_pb2.QueryToken(token_type=SUBQUERY, value='A'),
+            ast_pb2.QueryToken(token_type=OR),
+        ], query2ast._TokenizeSubqueryOnOr('A OR '))
+
+  def testMultiplySubqueries_Basic(self):
+    self.assertEqual(
+        ['owner:me Pri=1', 'owner:me Pri=2', 'test Pri=1', 'test Pri=2'],
+        query2ast._MultiplySubqueries(['owner:me', 'test'], ['Pri=1', 'Pri=2']))
+
+  def testMultiplySubqueries_OneEmpty(self):
+    self.assertEqual(
+        ['Pri=1', 'Pri=2'],
+        query2ast._MultiplySubqueries([], ['Pri=1', 'Pri=2']))
+    self.assertEqual(
+        ['Pri=1', 'Pri=2'],
+        query2ast._MultiplySubqueries([''], ['Pri=1', 'Pri=2']))
+
+    self.assertEqual(
+        ['Pri=1', 'Pri=2'],
+        query2ast._MultiplySubqueries(['Pri=1', 'Pri=2'], []))
+    self.assertEqual(
+        ['Pri=1', 'Pri=2'],
+        query2ast._MultiplySubqueries(['Pri=1', 'Pri=2'], ['']))
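+
+    # A sketch of the cross-product behavior exercised above (an
+    # illustration, not the actual query2ast code): an empty list or ['']
+    # acts as the identity; otherwise each pair is joined with a space.
+    #
+    #   def _MultiplySubqueries(a, b):
+    #     if not a or a == ['']:
+    #       return b
+    #     if not b or b == ['']:
+    #       return a
+    #     return ['%s %s' % (x, y) for x in a for y in b]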
+
+  def testPeekIterator_Basic(self):
+    iterator = query2ast.PeekIterator([1, 2, 3])
+
+    self.assertEqual(1, iterator.peek())
+    self.assertEqual(1, iterator.next())
+
+    self.assertEqual(2, iterator.next())
+
+    self.assertEqual(3, iterator.peek())
+    self.assertEqual(3, iterator.next())
+
+    with self.assertRaises(StopIteration):
+      iterator.peek()
+
+    with self.assertRaises(StopIteration):
+      iterator.next()
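+
+    # A minimal sketch of an iterator with these peek semantics (an
+    # illustration, not the actual query2ast.PeekIterator):
+    #
+    #   class PeekIterator(object):
+    #     def __init__(self, source):
+    #       self.source = list(source)
+    #       self.pos = 0
+    #
+    #     def peek(self):  # Returns the next item without consuming it.
+    #       if self.pos >= len(self.source):
+    #         raise StopIteration()
+    #       return self.source[self.pos]
+    #
+    #     def next(self):  # Consumes and returns the next item.
+    #       item = self.peek()
+    #       self.pos += 1
+    #       return item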
diff --git a/search/test/search_helpers_test.py b/search/test/search_helpers_test.py
new file mode 100644
index 0000000..5905234
--- /dev/null
+++ b/search/test/search_helpers_test.py
@@ -0,0 +1,130 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Unit tests for monorail.search.search_helpers."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import mox
+import unittest
+
+from search import search_helpers
+
+from google.appengine.ext import testbed
+from framework import permissions
+from framework import sql
+from proto import user_pb2
+from services import chart_svc
+from services import service_manager
+from testing import fake
+
+
+def MakeChartService(my_mox, config):
+  chart_service = chart_svc.ChartService(config)
+  for table_var in ['issuesnapshot_tbl', 'labeldef_tbl']:
+    setattr(chart_service, table_var, my_mox.CreateMock(sql.SQLTableManager))
+  return chart_service
+
+
+class SearchHelpersTest(unittest.TestCase):
+  """Tests for functions in search_helpers.
+
+  Also covered by search.backendnonviewable.GetAtRiskIIDs cases.
+  """
+
+  def setUp(self):
+    self.testbed = testbed.Testbed()
+    self.testbed.activate()
+    self.testbed.init_memcache_stub()
+
+    self.mox = mox.Mox()
+    self.cnxn = self.mox.CreateMock(sql.MonorailConnection)
+    self.services = service_manager.Services()
+    self.services.chart = MakeChartService(self.mox, self.services.config)
+    self.config_service = fake.ConfigService()
+    self.user = user_pb2.User()
+
+  def testGetPersonalAtRiskLabelIDs_ReadOnly(self):
+    """Test returns risky IDs a read-only user cannot access."""
+    self.mox.StubOutWithMock(self.config_service, 'GetLabelDefRowsAnyProject')
+    self.config_service.GetLabelDefRowsAnyProject(
+      self.cnxn, where=[('LOWER(label) LIKE %s', ['restrict-view-%'])]
+    ).AndReturn([
+      (123, 789, 0, 'Restrict-View-Google', 'docstring', 0),
+      (124, 789, 0, 'Restrict-View-SecurityTeam', 'docstring', 0),
+    ])
+
+    self.mox.ReplayAll()
+    ids = search_helpers.GetPersonalAtRiskLabelIDs(
+      self.cnxn,
+      self.user,
+      self.config_service,
+      effective_ids=[10, 20],
+      project=fake.Project(project_id=789),
+      perms=permissions.READ_ONLY_PERMISSIONSET)
+    self.mox.VerifyAll()
+
+    self.assertEqual(ids, [123, 124])
+
+  def testGetPersonalAtRiskLabelIDs_LoggedInUser(self):
+    """Test returns restricted label IDs a logged in user cannot access."""
+    self.mox.StubOutWithMock(self.config_service, 'GetLabelDefRowsAnyProject')
+    self.config_service.GetLabelDefRowsAnyProject(
+      self.cnxn, where=[('LOWER(label) LIKE %s', ['restrict-view-%'])]
+    ).AndReturn([
+      (123, 789, 0, 'Restrict-View-Google', 'docstring', 0),
+      (124, 789, 0, 'Restrict-View-SecurityTeam', 'docstring', 0),
+    ])
+
+    self.mox.ReplayAll()
+    ids = search_helpers.GetPersonalAtRiskLabelIDs(
+      self.cnxn,
+      self.user,
+      self.config_service,
+      effective_ids=[10, 20],
+      project=fake.Project(project_id=789),
+      perms=permissions.USER_PERMISSIONSET)
+    self.mox.VerifyAll()
+
+    self.assertEqual(ids, [123, 124])
+
+  def testGetPersonalAtRiskLabelIDs_UserWithRVG(self):
+    """Test returns restricted label IDs a logged in user cannot access."""
+    self.mox.StubOutWithMock(self.config_service, 'GetLabelDefRowsAnyProject')
+    self.config_service.GetLabelDefRowsAnyProject(
+      self.cnxn, where=[('LOWER(label) LIKE %s', ['restrict-view-%'])]
+    ).AndReturn([
+      (123, 789, 0, 'Restrict-View-Google', 'docstring', 0),
+      (124, 789, 0, 'Restrict-View-SecurityTeam', 'docstring', 0),
+    ])
+
+    self.mox.ReplayAll()
+    perms = permissions.PermissionSet(['Google'])
+    ids = search_helpers.GetPersonalAtRiskLabelIDs(
+      self.cnxn,
+      self.user,
+      self.config_service,
+      effective_ids=[10, 20],
+      project=fake.Project(project_id=789),
+      perms=perms)
+    self.mox.VerifyAll()
+
+    self.assertEqual(ids, [124])
+
+  def testGetPersonalAtRiskLabelIDs_Admin(self):
+    """Test returns nothing for an admin (who can view everything)."""
+    self.user.is_site_admin = True
+    self.mox.ReplayAll()
+    ids = search_helpers.GetPersonalAtRiskLabelIDs(
+      self.cnxn,
+      self.user,
+      self.config_service,
+      effective_ids=[10, 20],
+      project=fake.Project(project_id=789),
+      perms=permissions.ADMIN_PERMISSIONSET)
+    self.mox.VerifyAll()
+
+    self.assertEqual(ids, [])
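+
+    # Taken together, these cases suggest the shape of the helper (a
+    # sketch under assumptions; the CanUsePerm call, row unpacking, and
+    # permission-name derivation are illustrative, not the actual code):
+    #
+    #   def GetPersonalAtRiskLabelIDs(
+    #       cnxn, user, config_svc, effective_ids, project, perms):
+    #     if user.is_site_admin:
+    #       return []  # Admins can view everything.
+    #     rows = config_svc.GetLabelDefRowsAnyProject(
+    #         cnxn, where=[('LOWER(label) LIKE %s', ['restrict-view-%'])])
+    #     return [
+    #         label_id
+    #         for label_id, _pid, _rank, label, _doc, _dep in rows
+    #         if not perms.CanUsePerm(
+    #             label.split('-', 2)[-1], effective_ids, project, [])]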
diff --git a/search/test/searchpipeline_test.py b/search/test/searchpipeline_test.py
new file mode 100644
index 0000000..5d23316
--- /dev/null
+++ b/search/test/searchpipeline_test.py
@@ -0,0 +1,121 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""Tests for the searchpipeline module."""
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import unittest
+
+from proto import ast_pb2
+from proto import tracker_pb2
+from search import searchpipeline
+from services import service_manager
+from testing import fake
+from tracker import tracker_bizobj
+
+
+class SearchPipelineTest(unittest.TestCase):
+
+  def setUp(self):
+    self.cnxn = 'fake cnxn'
+    self.config = tracker_bizobj.MakeDefaultProjectIssueConfig(789)
+    self.services = service_manager.Services(
+        user=fake.UserService(),
+        project=fake.ProjectService(),
+        issue=fake.IssueService(),
+        config=fake.ConfigService())
+    self.services.user.TestAddUser('a@example.com', 111)
+
+  def testIsStarredRE(self):
+    """IS_STARRED_RE matches only the is:starred term."""
+    input_output = {
+      'something:else': 'something:else',
+      'genesis:starred': 'genesis:starred',
+      'is:starred-in-bookmarks': 'is:starred-in-bookmarks',
+      'is:starred': 'foo',
+      'Is:starred': 'foo',
+      'is:STARRED': 'foo',
+      'is:starred is:open': 'foo is:open',
+      'is:open is:starred': 'is:open foo',
+      }
+    for i, o in input_output.items():
+      self.assertEqual(o, searchpipeline.IS_STARRED_RE.sub('foo', i))
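+
+    # From the cases above, the pattern evidently matches the standalone
+    # 'is:starred' term case-insensitively while rejecting longer terms.
+    # A regex consistent with these cases (a sketch, not the actual
+    # definition in searchpipeline.py):
+    #
+    #   IS_STARRED_RE = re.compile(r'\bis:starred\b(?![-@.:=])', re.I)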
+
+  def testMeRE(self):
+    """ME_RE matches only the 'me' value keyword."""
+    input_output = {
+      'something:else': 'something:else',
+      'else:some': 'else:some',
+      'me': 'me',  # Needs a ":" or "=" in front.
+      'cc:me-team': 'cc:me-team',
+      'cc:me=domain@otherdomain': 'cc:me=domain@otherdomain',
+      'cc:me@example.com': 'cc:me@example.com',
+      'me:the-boss': 'me:the-boss',
+      'cc:me': 'cc:foo',
+      'cc=me': 'cc=foo',
+      'owner:Me': 'owner:foo',
+      'reporter:ME': 'reporter:foo',
+      'cc:me is:open': 'cc:foo is:open',
+      'is:open cc:me': 'is:open cc:foo',
+      }
+    for i, o in input_output.items():
+      self.assertEqual(o, searchpipeline.ME_RE.sub('foo', i))
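+
+    # A regex consistent with these cases (a sketch, not the actual
+    # definition): 'me' must directly follow ':' or '=' and must end the
+    # search term, so only the keyword itself is replaced.
+    #
+    #   ME_RE = re.compile(r'(?<=[:=])me\b(?![-@.:=])', re.I)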
+
+  def testAccumulateIssueProjectsAndConfigs(self):
+    pass  # TODO(jrobbins): write tests
+
+  def testReplaceKeywordsWithUserIDs_IsStarred(self):
+    """The term is:starred is replaced with starredby:USERID."""
+    actual, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        [111], 'is:starred')
+    self.assertEqual('starredby:111', actual)
+    self.assertEqual([], warnings)
+
+    actual, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        [111], 'Pri=1 is:starred M=61')
+    self.assertEqual('Pri=1 starredby:111 M=61', actual)
+    self.assertEqual([], warnings)
+
+    actual, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        [], 'Pri=1 is:starred M=61')
+    self.assertEqual('Pri=1  M=61', actual)
+    self.assertEqual(
+        ['"is:starred" ignored because you are not signed in.'],
+        warnings)
+
+  def testReplaceKeywordsWithUserIDs_IsStarred_linked(self):
+    """is:starred is replaced by starredby:uid1,uid2 for linked accounts."""
+    actual, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        [111, 222], 'is:starred')
+    self.assertEqual('starredby:111,222', actual)
+    self.assertEqual([], warnings)
+
+  def testReplaceKeywordsWithUserIDs_Me(self):
+    """Terms like owner:me are replaced with owner:USERID."""
+    actual, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        [111], 'owner:me')
+    self.assertEqual('owner:111', actual)
+    self.assertEqual([], warnings)
+
+    actual, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        [111], 'Pri=1 cc:me M=61')
+    self.assertEqual('Pri=1 cc:111 M=61', actual)
+    self.assertEqual([], warnings)
+
+    actual, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        [], 'Pri=1 reporter:me M=61')
+    self.assertEqual('Pri=1  M=61', actual)
+    self.assertEqual(
+        ['"me" keyword ignored because you are not signed in.'],
+        warnings)
+
+  def testReplaceKeywordsWithUserIDs_Me_LinkedAccounts(self):
+    """owner:me is replaced with owner:uid,uid for each linked account."""
+    actual, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
+        [111, 222], 'owner:me')
+    self.assertEqual('owner:111,222', actual)
+    self.assertEqual([], warnings)