Merge branch 'main' into avm99963-monorail

Merged commit cd4b3b336f1f14afa02990fdc2eec5d9467a827e

GitOrigin-RevId: e67bbf185d5538e1472bb42e0abb2a141f88bac1
diff --git a/services/cachemanager_svc.py b/services/cachemanager_svc.py
index 8dc5753..02ad6dd 100644
--- a/services/cachemanager_svc.py
+++ b/services/cachemanager_svc.py
@@ -130,6 +130,7 @@
         cnxn, kind=kind, where=[('timestep < %s', [last_timestep])])
 
 
+# TODO: Change to FlaskInternalTask when converting to Flask.
 class RamCacheConsolidate(jsonfeed.InternalTask):
   """Drop old Invalidate rows when there are too many of them."""
 
@@ -164,3 +165,9 @@
       'old_count': old_count,
       'new_count': new_count,
       }
+
+  # def GetRamCacheConsolidate(self, **kwargs):
+  #   return self.handler(**kwargs)
+
+  # def PostRamCacheConsolidate(self, **kwargs):
+  #   return self.handler(**kwargs)
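
Note: the commented-out Get/Post wrappers above hint at how this task is meant to be exposed after the Flask conversion. Purely as a hedged sketch, assuming Flask and using stand-in names (the route, class name, and `handler` attribute are guesses based on the scaffolding, not the real FlaskInternalTask contract), the wiring could look roughly like this:

    # Illustrative only: stand-in names, not Monorail's actual task API.
    import flask

    app = flask.Flask(__name__)


    class RamCacheConsolidateStub(object):
      """Stand-in for the converted RamCacheConsolidate task."""

      def handler(self, **kwargs):
        # A real task would drop old Invalidate rows and report the counts.
        return flask.jsonify({'old_count': 0, 'new_count': 0})


    _task = RamCacheConsolidateStub()
    app.add_url_rule(
        '/_task/ramCacheConsolidate',  # assumed path
        view_func=_task.handler,
        methods=['GET', 'POST'])
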
diff --git a/services/caches.py b/services/caches.py
index 07702bf..35276a0 100644
--- a/services/caches.py
+++ b/services/caches.py
@@ -20,7 +20,6 @@
 from __future__ import absolute_import
 
 import logging
-import redis
 
 from protorpc import protobuf
 
@@ -28,8 +27,6 @@
 
 import settings
 from framework import framework_constants
-from framework import redis_utils
-from proto import tracker_pb2
 
 
 DEFAULT_MAX_SIZE = 10000
@@ -186,28 +183,12 @@
   # so as to avoid timeouts.
   _FETCH_BATCH_SIZE = 10000
 
-  def __init__(
-      self,
-      cache_manager,
-      kind,
-      prefix,
-      pb_class,
-      max_size=None,
-      use_redis=False,
-      redis_client=None):
+  def __init__(self, cache_manager, kind, prefix, pb_class, max_size=None):
 
     self.cache = self._MakeCache(cache_manager, kind, max_size=max_size)
     self.prefix = prefix
     self.pb_class = pb_class
 
-    if use_redis:
-      self.redis_client = redis_client or redis_utils.CreateRedisClient()
-      self.use_redis = redis_utils.VerifyRedisConnection(
-          self.redis_client, msg=kind)
-    else:
-      self.redis_client = None
-      self.use_redis = False
-
   def _MakeCache(self, cache_manager, kind, max_size=None):
     """Make the RAM cache and register it with the cache_manager."""
     return RamCache(cache_manager, kind, max_size=max_size)
@@ -215,7 +196,7 @@
   def CacheItem(self, key, value):
     """Add the given key-value pair to RAM and L2 cache."""
     self.cache.CacheItem(key, value)
-    self._WriteToCache({key: value})
+    self._WriteToMemcache({key: value})
 
   def HasItem(self, key):
     """Return True if the given key is in the RAM cache."""
@@ -258,7 +239,7 @@
 
     if missed_keys:
       if use_cache:
-        cache_hits, missed_keys = self._ReadFromCache(missed_keys)
+        cache_hits, missed_keys = self._ReadFromMemcache(missed_keys)
         result_dict.update(cache_hits)
         self.cache.CacheAll(cache_hits)
 
@@ -269,7 +250,7 @@
       result_dict.update(retrieved_dict)
       if use_cache:
         self.cache.CacheAll(retrieved_dict)
-        self._WriteToCache(retrieved_dict)
+        self._WriteToMemcache(retrieved_dict)
 
     still_missing_keys = [key for key in keys if key not in result_dict]
     return result_dict, still_missing_keys
@@ -283,7 +264,7 @@
   def InvalidateKeys(self, cnxn, keys):
     """Drop the given keys from both RAM and L2 cache."""
     self.cache.InvalidateKeys(cnxn, keys)
-    self._DeleteFromCache(keys)
+    self._DeleteFromMemcache(keys)
 
   def InvalidateAllKeys(self, cnxn, keys):
     """Drop the given keys from L2 cache and invalidate all keys in RAM.
@@ -292,7 +273,7 @@
     invalidating a large group of keys all at once. Only use when necessary.
     """
     self.cache.InvalidateAll(cnxn)
-    self._DeleteFromCache(keys)
+    self._DeleteFromMemcache(keys)
 
   def GetAllAlreadyInRam(self, keys):
     """Look only in RAM to return {key: values}, missed_keys."""
@@ -307,55 +288,6 @@
     """On RAM and L2 cache miss, hit the database."""
     raise NotImplementedError()
 
-  def _ReadFromCache(self, keys):
-    # type: (Sequence[int]) -> Mapping[str, Any], Sequence[int]
-    """Reads a list of keys from secondary caching service.
-
-    Redis will be used if Redis is enabled and connection is valid;
-    otherwise, memcache will be used.
-
-    Args:
-      keys: List of integer keys to look up in L2 cache.
-
-    Returns:
-      A pair: hits, misses.  Where hits is {key: value} and misses is
-        a list of any keys that were not found anywhere.
-    """
-    if self.use_redis:
-      return self._ReadFromRedis(keys)
-    else:
-      return self._ReadFromMemcache(keys)
-
-  def _WriteToCache(self, retrieved_dict):
-    # type: (Mapping[int, Any]) -> None
-    """Writes a set of key-value pairs to secondary caching service.
-
-    Redis will be used if Redis is enabled and connection is valid;
-    otherwise, memcache will be used.
-
-    Args:
-      retrieved_dict: Dictionary contains pairs of key-values to write to cache.
-    """
-    if self.use_redis:
-      return self._WriteToRedis(retrieved_dict)
-    else:
-      return self._WriteToMemcache(retrieved_dict)
-
-  def _DeleteFromCache(self, keys):
-    # type: (Sequence[int]) -> None
-    """Selects which cache to delete from.
-
-    Redis will be used if Redis is enabled and connection is valid;
-    otherwise, memcache will be used.
-
-    Args:
-      keys: List of integer keys to delete from cache.
-    """
-    if self.use_redis:
-      return self._DeleteFromRedis(keys)
-    else:
-      return self._DeleteFromMemcache(keys)
-
   def _ReadFromMemcache(self, keys):
     # type: (Sequence[int]) -> Mapping[str, Any], Sequence[int]
     """Read the given keys from memcache, return {key: value}, missing_keys."""
@@ -403,79 +335,6 @@
         key_prefix=self.prefix,
         namespace=settings.memcache_namespace)
 
-  def _WriteToRedis(self, retrieved_dict):
-    # type: (Mapping[int, Any]) -> None
-    """Write entries for each key-value pair to Redis.  Encode PBs.
-
-    Args:
-      retrieved_dict: Dictionary of key-value pairs to write to Redis.
-    """
-    try:
-      for key, value in retrieved_dict.items():
-        redis_key = redis_utils.FormatRedisKey(key, prefix=self.prefix)
-        redis_value = self._ValueToStr(value)
-
-        self.redis_client.setex(
-            redis_key, framework_constants.CACHE_EXPIRATION, redis_value)
-    except redis.RedisError as identifier:
-      logging.error(
-          'Redis error occurred during write operation: %s', identifier)
-      self._DeleteFromRedis(list(retrieved_dict.keys()))
-      return
-    logging.info(
-        'cached batch of %d values in redis %s', len(retrieved_dict),
-        self.prefix)
-
-  def _ReadFromRedis(self, keys):
-    # type: (Sequence[int]) -> Mapping[str, Any], Sequence[int]
-    """Read the given keys from Redis, return {key: value}, missing keys.
-
-    Args:
-      keys: List of integer keys to read from Redis.
-
-    Returns:
-      A pair: hits, misses.  Where hits is {key: value} and misses is
-        a list of any keys that were not found anywhere.
-    """
-    cache_hits = {}
-    missing_keys = []
-    try:
-      values_list = self.redis_client.mget(
-          [redis_utils.FormatRedisKey(key, prefix=self.prefix) for key in keys])
-    except redis.RedisError as identifier:
-      logging.error(
-          'Redis error occurred during read operation: %s', identifier)
-      values_list = [None] * len(keys)
-
-    for key, serialized_value in zip(keys, values_list):
-      if serialized_value:
-        value = self._StrToValue(serialized_value)
-        cache_hits[key] = value
-        self.cache.CacheItem(key, value)
-      else:
-        missing_keys.append(key)
-    logging.info(
-        'decoded %d values from redis %s, missing %d', len(cache_hits),
-        self.prefix, len(missing_keys))
-    return cache_hits, missing_keys
-
-  def _DeleteFromRedis(self, keys):
-    # type: (Sequence[int]) -> None
-    """Delete key-values from redis.
-
-    Args:
-      keys: List of integer keys to delete.
-    """
-    try:
-      self.redis_client.delete(
-          *[
-              redis_utils.FormatRedisKey(key, prefix=self.prefix)
-              for key in keys
-          ])
-    except redis.RedisError as identifier:
-      logging.error(
-          'Redis error occurred during delete operation %s', identifier)
-
   def _KeyToStr(self, key):
     # type: (int) -> str
     """Convert our int IDs to strings for use as memcache keys."""
@@ -489,26 +348,19 @@
   def _ValueToStr(self, value):
     # type: (Any) -> str
     """Serialize an application object so that it can be stored in L2 cache."""
-    if self.use_redis:
-      return redis_utils.SerializeValue(value, pb_class=self.pb_class)
+    if not self.pb_class:
+      return value
+    elif self.pb_class == int:
+      return str(value)
     else:
-      if not self.pb_class:
-        return value
-      elif self.pb_class == int:
-        return str(value)
-      else:
-        return protobuf.encode_message(value)
+      return protobuf.encode_message(value)
 
   def _StrToValue(self, serialized_value):
     # type: (str) -> Any
     """Deserialize L2 cache string into an application object."""
-    if self.use_redis:
-      return redis_utils.DeserializeValue(
-          serialized_value, pb_class=self.pb_class)
+    if not self.pb_class:
+      return serialized_value
+    elif self.pb_class == int:
+      return int(serialized_value)
     else:
-      if not self.pb_class:
-        return serialized_value
-      elif self.pb_class == int:
-        return int(serialized_value)
-      else:
-        return protobuf.decode_message(self.pb_class, serialized_value)
+      return protobuf.decode_message(self.pb_class, serialized_value)
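
With the Redis branch removed, the L2 serialization above reduces to the three pb_class cases. A minimal round-trip sketch of that logic, using standalone helper names for illustration and assuming the protorpc package Monorail already depends on:

    from protorpc import protobuf


    def value_to_str(value, pb_class=None):
      # Mirrors the simplified _ValueToStr: raw strings pass through,
      # ints become str, anything else is treated as a protorpc message.
      if not pb_class:
        return value
      elif pb_class == int:
        return str(value)
      return protobuf.encode_message(value)


    def str_to_value(serialized, pb_class=None):
      # Mirrors _StrToValue, the inverse of value_to_str.
      if not pb_class:
        return serialized
      elif pb_class == int:
        return int(serialized)
      return protobuf.decode_message(pb_class, serialized)


    assert str_to_value(value_to_str(42, pb_class=int), pb_class=int) == 42
    assert str_to_value(value_to_str('raw')) == 'raw'
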
diff --git a/services/client_config_svc.py b/services/client_config_svc.py
index c0acf03..ce85a95 100644
--- a/services/client_config_svc.py
+++ b/services/client_config_svc.py
@@ -12,7 +12,7 @@
 import logging
 import os
 import time
-import urllib
+from six.moves import urllib
 import webapp2
 
 from google.appengine.api import app_identity
@@ -46,6 +46,7 @@
 
 
 # Note: The cron job must have hit the servlet before this will work.
+# When converting to Flask, replace webapp2.RequestHandler with object.
 class LoadApiClientConfigs(webapp2.RequestHandler):
 
   config_loads = ts_mon.CounterMetric(
@@ -117,6 +118,36 @@
 
     return content_text
 
+  # def GetLoadApiClientConfigs(self):
+  #   global service_account_map
+  #   global qpm_dict
+  #   authorization_token, _ = app_identity.get_access_token(
+  #     framework_constants.OAUTH_SCOPE)
+  #   response = urlfetch.fetch(
+  #     LUCI_CONFIG_URL,
+  #     method=urlfetch.GET,
+  #     follow_redirects=False,
+  #     headers={'Content-Type': 'application/json; charset=UTF-8',
+  #             'Authorization': 'Bearer ' + authorization_token})
+
+  #   if response.status_code != 200:
+  #     logging.error('Invalid response from luci-config: %r', response)
+  #     self.config_loads.increment({'success': False, 'type': 'luci-cfg-error'})
+  #     flask.abort(500, 'Invalid response from luci-config')
+
+  #   try:
+  #     content_text = self._process_response(response)
+  #   except Exception as e:
+  #     flask.abort(500, str(e))
+
+  #   logging.info('luci-config content decoded: %r.', content_text)
+  #   configs = ClientConfig(configs=content_text,
+  #                           key_name='api_client_configs')
+  #   configs.put()
+  #   service_account_map = None
+  #   qpm_dict = None
+  #   self.config_loads.increment({'success': True, 'type': 'success'})
+
 
 class ClientConfigService(object):
   """The persistence layer for client config data."""
diff --git a/services/issue_svc.py b/services/issue_svc.py
index eab85ab..8e5a45f 100644
--- a/services/issue_svc.py
+++ b/services/issue_svc.py
@@ -24,7 +24,7 @@
 
 from google.appengine.api import app_identity
 from google.appengine.api import images
-from third_party import cloudstorage
+from google.cloud import storage
 
 import settings
 from features import filterrules_helpers
@@ -1659,12 +1659,11 @@
       # MakeIssueComments expects a list of [(filename, contents, mimetype),...]
       attachments = []
       for attachment in initial_summary_comment.attachments:
-        object_path = ('/' + app_identity.get_default_gcs_bucket_name() +
-                       attachment.gcs_object_id)
-        with cloudstorage.open(object_path, 'r') as f:
-          content = f.read()
-          attachments.append(
-              [attachment.filename, content, attachment.mimetype])
+        client = storage.Client()
+        bucket = client.get_bucket(app_identity.get_default_gcs_bucket_name())
+        blob = bucket.get_blob(attachment.gcs_object_id)
+        content = blob.download_as_bytes()
+        attachments.append([attachment.filename, content, attachment.mimetype])
 
       if attachments:
         new_issue.attachment_count = len(attachments)
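
The attachment copy above now reads objects through google.cloud.storage instead of the bundled cloudstorage library. A hedged sketch of that read path in isolation; the bucket and object names are placeholders, and `get_blob` returning None for a missing object is checked explicitly:

    from google.cloud import storage


    def read_gcs_object(bucket_name, object_name):
      # Client() picks up credentials from the App Engine / ADC environment.
      client = storage.Client()
      bucket = client.get_bucket(bucket_name)
      blob = bucket.get_blob(object_name)
      if blob is None:
        raise ValueError('GCS object not found: %s' % object_name)
      return blob.download_as_bytes()


    # Placeholder names for illustration:
    # content = read_gcs_object('my-app.appspot.com', 'attachments/logo.png')
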
diff --git a/services/ml_helpers.py b/services/ml_helpers.py
index c4650b4..d05a582 100644
--- a/services/ml_helpers.py
+++ b/services/ml_helpers.py
@@ -8,6 +8,7 @@
 feature extraction, so that the serving code and training code both use the same
 set of features.
 """
+# TODO(crbug.com/monorail/7515): DELETE THIS FILE and all references.
 
 from __future__ import division
 from __future__ import print_function
diff --git a/services/spam_svc.py b/services/spam_svc.py
index 9a62cb9..e916830 100644
--- a/services/spam_svc.py
+++ b/services/spam_svc.py
@@ -12,12 +12,9 @@
 import collections
 import logging
 import settings
-import sys
 
 from collections import defaultdict
-from features import filterrules_helpers
 from framework import sql
-from framework import framework_constants
 from infra_libs import ts_mon
 from services import ml_helpers
 
@@ -110,7 +107,7 @@
     """
     return self.LookupIssuesFlaggers(cnxn, [issue_id])[issue_id]
 
-  def LookupIssueFlagCounts(self, cnxn, issue_ids):
+  def _LookupIssueFlagCounts(self, cnxn, issue_ids):
     """Returns a map of issue_id to flag counts"""
     rows = self.report_tbl.Select(cnxn, cols=['issue_id', 'COUNT(*)'],
                                   issue_id=issue_ids, group_by=['issue_id'])
@@ -193,7 +190,7 @@
 
     # Now record new verdicts and update issue.is_spam, if they've changed.
     ids = [issue.issue_id for issue in issues]
-    counts = self.LookupIssueFlagCounts(cnxn, ids)
+    counts = self._LookupIssueFlagCounts(cnxn, ids)
     previous_verdicts = self.LookupIssueVerdicts(cnxn, ids)
 
     for issue_id in counts:
@@ -467,62 +464,6 @@
     return {'confidence_is_spam': 0.0,
             'failed_open': False}
 
-  def GetIssueClassifierQueue(
-      self, cnxn, _issue_service, project_id, offset=0, limit=10):
-    """Returns list of recent issues with spam verdicts,
-     ranked in ascending order of confidence (so uncertain items are first).
-     """
-    # TODO(seanmccullough): Optimize pagination. This query probably gets
-    # slower as the number of SpamVerdicts grows, regardless of offset
-    # and limit values used here.  Using offset,limit in general may not
-    # be the best way to do this.
-    issue_results = self.verdict_tbl.Select(
-        cnxn,
-        cols=[
-            'issue_id', 'is_spam', 'reason', 'classifier_confidence', 'created'
-        ],
-        where=[
-            ('project_id = %s', [project_id]),
-            (
-                'classifier_confidence <= %s',
-                [settings.classifier_moderation_thresh]),
-            ('overruled = %s', [False]),
-            ('issue_id IS NOT NULL', []),
-        ],
-        order_by=[
-            ('classifier_confidence ASC', []),
-            ('created ASC', []),
-        ],
-        group_by=['issue_id'],
-        offset=offset,
-        limit=limit,
-    )
-
-    ret = []
-    for row in issue_results:
-      ret.append(
-          ModerationItem(
-              issue_id=int(row[0]),
-              is_spam=row[1] == 1,
-              reason=row[2],
-              classifier_confidence=row[3],
-              verdict_time='%s' % row[4],
-          ))
-
-    count = self.verdict_tbl.SelectValue(
-        cnxn,
-        col='COUNT(*)',
-        where=[
-            ('project_id = %s', [project_id]),
-            (
-                'classifier_confidence <= %s',
-                [settings.classifier_moderation_thresh]),
-            ('overruled = %s', [False]),
-            ('issue_id IS NOT NULL', []),
-        ])
-
-    return ret, count
-
   def GetIssueFlagQueue(
       self, cnxn, _issue_service, project_id, offset=0, limit=10):
     """Returns list of recent issues that have been flagged by users"""
diff --git a/services/test/caches_test.py b/services/test/caches_test.py
index 4ced369..cd401be 100644
--- a/services/test/caches_test.py
+++ b/services/test/caches_test.py
@@ -8,13 +8,11 @@
 from __future__ import division
 from __future__ import absolute_import
 
-import fakeredis
 import unittest
 
 from google.appengine.api import memcache
 from google.appengine.ext import testbed
 
-import settings
 from services import caches
 from testing import fake
 
@@ -145,21 +143,9 @@
 
 class TestableTwoLevelCache(caches.AbstractTwoLevelCache):
 
-  def __init__(
-      self,
-      cache_manager,
-      kind,
-      max_size=None,
-      use_redis=False,
-      redis_client=None):
+  def __init__(self, cache_manager, kind, max_size=None):
     super(TestableTwoLevelCache, self).__init__(
-        cache_manager,
-        kind,
-        'testable:',
-        None,
-        max_size=max_size,
-        use_redis=use_redis,
-        redis_client=redis_client)
+        cache_manager, kind, 'testable:', None, max_size=max_size)
 
   # pylint: disable=unused-argument
   def FetchItems(self, cnxn, keys, **kwargs):
@@ -295,124 +281,3 @@
     self.testable_2lc.InvalidateAllRamEntries(self.cnxn)
     self.assertFalse(self.testable_2lc.HasItem(123))
     self.assertFalse(self.testable_2lc.HasItem(124))
-
-
-class AbstractTwoLevelCacheTest_Redis(unittest.TestCase):
-
-  def setUp(self):
-    self.cnxn = 'fake connection'
-    self.cache_manager = fake.CacheManager()
-
-    self.server = fakeredis.FakeServer()
-    self.fake_redis_client = fakeredis.FakeRedis(server=self.server)
-    self.testable_2lc = TestableTwoLevelCache(
-        self.cache_manager,
-        'issue',
-        use_redis=True,
-        redis_client=self.fake_redis_client)
-
-  def tearDown(self):
-    self.fake_redis_client.flushall()
-
-  def testCacheItem(self):
-    self.testable_2lc.CacheItem(123, 12300)
-    self.assertEqual(12300, self.testable_2lc.cache.cache[123])
-
-  def testHasItem(self):
-    self.testable_2lc.CacheItem(123, 12300)
-    self.assertTrue(self.testable_2lc.HasItem(123))
-    self.assertFalse(self.testable_2lc.HasItem(444))
-    self.assertFalse(self.testable_2lc.HasItem(999))
-
-  def testWriteToRedis_Normal(self):
-    retrieved_dict = {123: 12300, 124: 12400}
-    self.testable_2lc._WriteToRedis(retrieved_dict)
-    actual_123, _ = self.testable_2lc._ReadFromRedis([123])
-    self.assertEqual(12300, actual_123[123])
-    actual_124, _ = self.testable_2lc._ReadFromRedis([124])
-    self.assertEqual(12400, actual_124[124])
-
-  def testWriteToRedis_str(self):
-    retrieved_dict = {111: 'foo', 222: 'bar'}
-    self.testable_2lc._WriteToRedis(retrieved_dict)
-    actual_111, _ = self.testable_2lc._ReadFromRedis([111])
-    self.assertEqual('foo', actual_111[111])
-    actual_222, _ = self.testable_2lc._ReadFromRedis([222])
-    self.assertEqual('bar', actual_222[222])
-
-  def testWriteToRedis_ProtobufInt(self):
-    self.testable_2lc.pb_class = int
-    retrieved_dict = {123: 12300, 124: 12400}
-    self.testable_2lc._WriteToRedis(retrieved_dict)
-    actual_123, _ = self.testable_2lc._ReadFromRedis([123])
-    self.assertEqual(12300, actual_123[123])
-    actual_124, _ = self.testable_2lc._ReadFromRedis([124])
-    self.assertEqual(12400, actual_124[124])
-
-  def testWriteToRedis_List(self):
-    retrieved_dict = {123: [1, 2, 3], 124: [1, 2, 4]}
-    self.testable_2lc._WriteToRedis(retrieved_dict)
-    actual_123, _ = self.testable_2lc._ReadFromRedis([123])
-    self.assertEqual([1, 2, 3], actual_123[123])
-    actual_124, _ = self.testable_2lc._ReadFromRedis([124])
-    self.assertEqual([1, 2, 4], actual_124[124])
-
-  def testWriteToRedis_Dict(self):
-    retrieved_dict = {123: {'ham': 2, 'spam': 3}, 124: {'eggs': 2, 'bean': 4}}
-    self.testable_2lc._WriteToRedis(retrieved_dict)
-    actual_123, _ = self.testable_2lc._ReadFromRedis([123])
-    self.assertEqual({'ham': 2, 'spam': 3}, actual_123[123])
-    actual_124, _ = self.testable_2lc._ReadFromRedis([124])
-    self.assertEqual({'eggs': 2, 'bean': 4}, actual_124[124])
-
-  def testGetAll_FetchGetsIt(self):
-    self.testable_2lc.CacheItem(123, 12300)
-    self.testable_2lc.CacheItem(124, 12400)
-    # Clear the RAM cache so that we find items in redis.
-    self.testable_2lc.cache.LocalInvalidateAll()
-    self.testable_2lc.CacheItem(125, 12500)
-    hits, misses = self.testable_2lc.GetAll(self.cnxn, [123, 124, 333, 444])
-    self.assertEqual({123: 12300, 124: 12400, 333: 333, 444: 444}, hits)
-    self.assertEqual([], misses)
-    # The RAM cache now has items found in redis and DB.
-    self.assertItemsEqual(
-        [123, 124, 125, 333, 444], list(self.testable_2lc.cache.cache.keys()))
-
-  def testGetAll_FetchGetsItFromDB(self):
-    self.testable_2lc.CacheItem(123, 12300)
-    self.testable_2lc.CacheItem(124, 12400)
-    hits, misses = self.testable_2lc.GetAll(self.cnxn, [123, 124, 333, 444])
-    self.assertEqual({123: 12300, 124: 12400, 333: 333, 444: 444}, hits)
-    self.assertEqual([], misses)
-
-  def testGetAll_FetchDoesNotFindIt(self):
-    self.testable_2lc.CacheItem(123, 12300)
-    self.testable_2lc.CacheItem(124, 12400)
-    hits, misses = self.testable_2lc.GetAll(self.cnxn, [123, 124, 999])
-    self.assertEqual({123: 12300, 124: 12400}, hits)
-    self.assertEqual([999], misses)
-
-  def testInvalidateKeys(self):
-    self.testable_2lc.CacheItem(123, 12300)
-    self.testable_2lc.CacheItem(124, 12400)
-    self.testable_2lc.CacheItem(125, 12500)
-    self.testable_2lc.InvalidateKeys(self.cnxn, [124])
-    self.assertEqual(2, len(self.testable_2lc.cache.cache))
-    self.assertNotIn(124, self.testable_2lc.cache.cache)
-    self.assertEqual(self.cache_manager.last_call,
-                     ('StoreInvalidateRows', self.cnxn, 'issue', [124]))
-
-  def testGetAllAlreadyInRam(self):
-    self.testable_2lc.CacheItem(123, 12300)
-    self.testable_2lc.CacheItem(124, 12400)
-    hits, misses = self.testable_2lc.GetAllAlreadyInRam(
-        [123, 124, 333, 444, 999])
-    self.assertEqual({123: 12300, 124: 12400}, hits)
-    self.assertEqual([333, 444, 999], misses)
-
-  def testInvalidateAllRamEntries(self):
-    self.testable_2lc.CacheItem(123, 12300)
-    self.testable_2lc.CacheItem(124, 12400)
-    self.testable_2lc.InvalidateAllRamEntries(self.cnxn)
-    self.assertFalse(self.testable_2lc.HasItem(123))
-    self.assertFalse(self.testable_2lc.HasItem(124))
diff --git a/services/test/spam_svc_test.py b/services/test/spam_svc_test.py
index 3aeba13..67b53cf 100644
--- a/services/test/spam_svc_test.py
+++ b/services/test/spam_svc_test.py
@@ -233,86 +233,6 @@
     self.assertNotIn(issue, self.issue_service.updated_issues)
     self.assertEqual(True, issue.is_spam)
 
-  def testGetIssueClassifierQueue_noVerdicts(self):
-    self.mock_verdict_tbl.Select(self.cnxn,
-        cols=['issue_id', 'is_spam', 'reason', 'classifier_confidence',
-              'created'],
-        where=[
-             ('project_id = %s', [789]),
-             ('classifier_confidence <= %s',
-                 [settings.classifier_moderation_thresh]),
-             ('overruled = %s', [False]),
-             ('issue_id IS NOT NULL', []),
-        ],
-        order_by=[
-             ('classifier_confidence ASC', []),
-             ('created ASC', [])
-        ],
-        group_by=['issue_id'],
-        offset=0,
-        limit=10,
-    ).AndReturn([])
-
-    self.mock_verdict_tbl.SelectValue(self.cnxn,
-        col='COUNT(*)',
-        where=[
-            ('project_id = %s', [789]),
-            ('classifier_confidence <= %s',
-                [settings.classifier_moderation_thresh]),
-            ('overruled = %s', [False]),
-            ('issue_id IS NOT NULL', []),
-        ]).AndReturn(0)
-
-    self.mox.ReplayAll()
-    res, count = self.spam_service.GetIssueClassifierQueue(
-        self.cnxn, self.issue_service, 789)
-    self.mox.VerifyAll()
-
-    self.assertEqual([], res)
-    self.assertEqual(0, count)
-
-  def testGetIssueClassifierQueue_someVerdicts(self):
-    self.mock_verdict_tbl.Select(self.cnxn,
-        cols=['issue_id', 'is_spam', 'reason', 'classifier_confidence',
-              'created'],
-        where=[
-             ('project_id = %s', [789]),
-             ('classifier_confidence <= %s',
-                 [settings.classifier_moderation_thresh]),
-             ('overruled = %s', [False]),
-             ('issue_id IS NOT NULL', []),
-        ],
-        order_by=[
-             ('classifier_confidence ASC', []),
-             ('created ASC', [])
-        ],
-        group_by=['issue_id'],
-        offset=0,
-        limit=10,
-    ).AndReturn([[78901, 0, "classifier", 0.9, "2015-12-10 11:06:24"]])
-
-    self.mock_verdict_tbl.SelectValue(self.cnxn,
-        col='COUNT(*)',
-        where=[
-            ('project_id = %s', [789]),
-            ('classifier_confidence <= %s',
-                [settings.classifier_moderation_thresh]),
-            ('overruled = %s', [False]),
-            ('issue_id IS NOT NULL', []),
-        ]).AndReturn(10)
-
-    self.mox.ReplayAll()
-    res, count  = self.spam_service.GetIssueClassifierQueue(
-        self.cnxn, self.issue_service, 789)
-    self.mox.VerifyAll()
-    self.assertEqual(1, len(res))
-    self.assertEqual(10, count)
-    self.assertEqual(78901, res[0].issue_id)
-    self.assertEqual(False, res[0].is_spam)
-    self.assertEqual("classifier", res[0].reason)
-    self.assertEqual(0.9, res[0].classifier_confidence)
-    self.assertEqual("2015-12-10 11:06:24", res[0].verdict_time)
-
   def testIsExempt_RegularUser(self):
     author = user_pb2.MakeUser(111, email='test@example.com')
     self.assertFalse(self.spam_service._IsExempt(author, False))