# Copyright 2018 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A service for querying data for charts.

Functions for querying the IssueSnapshot table and associated join tables.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

import logging
import settings
import time

from features import hotlist_helpers
from framework import framework_helpers
from framework import sql
from search import search_helpers
from tracker import tracker_bizobj
from tracker import tracker_helpers
from search import query2ast
from search import ast2select
from search import ast2ast


ISSUESNAPSHOT_TABLE_NAME = 'IssueSnapshot'
ISSUESNAPSHOT2CC_TABLE_NAME = 'IssueSnapshot2Cc'
ISSUESNAPSHOT2COMPONENT_TABLE_NAME = 'IssueSnapshot2Component'
ISSUESNAPSHOT2LABEL_TABLE_NAME = 'IssueSnapshot2Label'

ISSUESNAPSHOT_COLS = ['id', 'issue_id', 'shard', 'project_id', 'local_id',
    'reporter_id', 'owner_id', 'status_id', 'period_start', 'period_end',
    'is_open']
ISSUESNAPSHOT2CC_COLS = ['issuesnapshot_id', 'cc_id']
ISSUESNAPSHOT2COMPONENT_COLS = ['issuesnapshot_id', 'component_id']
ISSUESNAPSHOT2LABEL_COLS = ['issuesnapshot_id', 'label_id']

class ChartService(object):
  """Class for querying chart data."""

  def __init__(self, config_service):
    """Constructor for ChartService.

    Args:
      config_service (ConfigService): An instance of ConfigService.
    """
    self.config_service = config_service

    # Set up SQL table objects.
    self.issuesnapshot_tbl = sql.SQLTableManager(ISSUESNAPSHOT_TABLE_NAME)
    self.issuesnapshot2cc_tbl = sql.SQLTableManager(
        ISSUESNAPSHOT2CC_TABLE_NAME)
    self.issuesnapshot2component_tbl = sql.SQLTableManager(
        ISSUESNAPSHOT2COMPONENT_TABLE_NAME)
    self.issuesnapshot2label_tbl = sql.SQLTableManager(
        ISSUESNAPSHOT2LABEL_TABLE_NAME)

  def QueryIssueSnapshots(self, cnxn, services, unixtime, effective_ids,
      project, perms, group_by=None, label_prefix=None,
      query=None, canned_query=None, hotlist=None):
    """Queries historical issue counts grouped by label or component.

    Args:
      cnxn: A MonorailConnection instance.
      services: A Services instance.
      unixtime: An integer representing the Unix time in seconds.
      effective_ids: The effective User IDs associated with the current user.
      project: A project object representing the current project.
      perms: A permissions object associated with the current user.
      group_by (str, optional): Which dimension to group by. Values can
        be 'label', 'component', or None, in which case no grouping will
        be applied.
      label_prefix: Required when group_by is 'label'. Limits the query to
        only labels with the specified prefix (for example 'Pri').
      query (str, optional): A query string from the request to apply to
        the snapshot query.
      canned_query (str, optional): Parsed canned query applied to the query
        scope.
      hotlist (Hotlist, optional): Hotlist to search under (in lieu of project).

    Returns:
      1. A dict of {'2nd dimension or "total"': number of occurrences}.
      2. A list of any unsupported query conditions in query.
      3. A boolean that is true if any results were capped.
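
    Example (illustrative only; `cnxn`, `services`, `project`, and `perms`
    stand in for real instances, and 111 is a made-up user ID):
      counts, unsupported, capped = chart_service.QueryIssueSnapshots(
          cnxn, services, int(time.time()), {111}, project, perms,
          group_by='label', label_prefix='Pri')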
    """
    if hotlist:
      # TODO(jeffcarp): Get project_ids in a more efficient manner. We can
      # query for "SELECT DISTINCT(project_id)" for all issues in hotlist.
      issues_list = services.issue.GetIssues(cnxn,
          [hotlist_issue.issue_id for hotlist_issue in hotlist.items])
      hotlist_issues_project_ids = hotlist_helpers.GetAllProjectsOfIssues(
          [issue for issue in issues_list])
      config_list = hotlist_helpers.GetAllConfigsOfProjects(
          cnxn, hotlist_issues_project_ids, services)
      project_config = tracker_bizobj.HarmonizeConfigs(config_list)
    else:
      project_config = services.config.GetProjectConfig(cnxn,
          project.project_id)

    if project:
      project_ids = [project.project_id]
    else:
      project_ids = hotlist_issues_project_ids

    try:
      query_left_joins, query_where, unsupported_conds = self._QueryToWhere(
          cnxn, services, project_config, query, canned_query, project_ids)
    except ast2select.NoPossibleResults:
      return {}, ['Invalid query.'], False

    restricted_label_ids = search_helpers.GetPersonalAtRiskLabelIDs(
        cnxn, None, self.config_service, effective_ids, project, perms)

    left_joins = [
        ('Issue ON IssueSnapshot.issue_id = Issue.id', []),
    ]

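    # Permission filtering: LEFT JOIN against any at-risk label IDs so the
    # WHERE clause built below can keep only rows with no matching forbidden
    # label (Forbidden_label.label_id IS NULL), i.e. an anti-join.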
    if restricted_label_ids:
      left_joins.append(
          (('Issue2Label AS Forbidden_label'
            ' ON Issue.id = Forbidden_label.issue_id'
            ' AND Forbidden_label.label_id IN (%s)' % (
              sql.PlaceHolders(restricted_label_ids)
          )), restricted_label_ids))

    if effective_ids:
      left_joins.append(
          ('Issue2Cc AS I2cc'
           ' ON Issue.id = I2cc.issue_id'
           ' AND I2cc.cc_id IN (%s)' % sql.PlaceHolders(effective_ids),
           effective_ids))

    # TODO(jeffcarp): Handle case where there are issues with no labels.
    where = [
        ('IssueSnapshot.period_start <= %s', [unixtime]),
        ('IssueSnapshot.period_end > %s', [unixtime]),
        ('Issue.is_spam = %s', [False]),
        ('Issue.deleted = %s', [False]),
    ]
    if project_ids:
      where.append(
          ('IssueSnapshot.project_id IN (%s)' % sql.PlaceHolders(project_ids),
           project_ids))

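    # Visibility clause: a signed-in user can see a snapshot if they are the
    # reporter, owner, or CC'd on the issue, or (when restricted labels exist)
    # if the issue carries none of them; anonymous users only get the bare
    # forbidden-label check.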
    forbidden_label_clause = 'Forbidden_label.label_id IS NULL'
    if effective_ids:
      if restricted_label_ids:
        forbidden_label_clause = ' OR %s' % forbidden_label_clause
      else:
        forbidden_label_clause = ''

      where.append(
          ((
            '(Issue.reporter_id IN (%s)'
            ' OR Issue.owner_id IN (%s)'
            ' OR I2cc.cc_id IS NOT NULL'
            '%s)'
          ) % (
            sql.PlaceHolders(effective_ids), sql.PlaceHolders(effective_ids),
            forbidden_label_clause
          ),
          list(effective_ids) + list(effective_ids)
          ))
    else:
      where.append((forbidden_label_clause, []))

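    # Pick the SELECT columns, extra joins, and GROUP BY terms for the
    # requested dimension; group_by is rewritten into a list of SQL terms.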
    if group_by == 'component':
      cols = ['Comp.path', 'COUNT(IssueSnapshot.issue_id)']
      left_joins.extend([
          (('IssueSnapshot2Component AS Is2c ON'
            ' Is2c.issuesnapshot_id = IssueSnapshot.id'), []),
          ('ComponentDef AS Comp ON Comp.id = Is2c.component_id', []),
      ])
      group_by = ['Comp.path']
    elif group_by == 'label':
      cols = ['Lab.label', 'COUNT(IssueSnapshot.issue_id)']
      left_joins.extend([
          (('IssueSnapshot2Label AS Is2l'
            ' ON Is2l.issuesnapshot_id = IssueSnapshot.id'), []),
          ('LabelDef AS Lab ON Lab.id = Is2l.label_id', []),
      ])

      if not label_prefix:
        raise ValueError('`label_prefix` required when grouping by label.')

      # TODO(jeffcarp): If LookupIDsOfLabelsMatching() is called on output,
      # ensure regex is case-insensitive.
      where.append(('LOWER(Lab.label) LIKE %s', [label_prefix.lower() + '-%']))
      group_by = ['Lab.label']
    elif group_by == 'open':
      cols = ['IssueSnapshot.is_open',
          'COUNT(IssueSnapshot.issue_id) AS issue_count']
      group_by = ['IssueSnapshot.is_open']
    elif group_by == 'status':
      left_joins.append(('StatusDef AS Stats ON '
          'Stats.id = IssueSnapshot.status_id', []))
      cols = ['Stats.status', 'COUNT(IssueSnapshot.issue_id)']
      group_by = ['Stats.status']
    elif group_by == 'owner':
      cols = ['IssueSnapshot.owner_id', 'COUNT(IssueSnapshot.issue_id)']
      group_by = ['IssueSnapshot.owner_id']
    elif not group_by:
      cols = ['IssueSnapshot.issue_id']
    else:
      raise ValueError('`group_by` must be label, component, '
          'open, status, owner or None.')

    if query_left_joins:
      left_joins.extend(query_left_joins)

    if query_where:
      where.extend(query_where)

    if hotlist:
      left_joins.extend([
          (('IssueSnapshot2Hotlist AS Is2h'
            ' ON Is2h.issuesnapshot_id = IssueSnapshot.id'
            ' AND Is2h.hotlist_id = %s'), [hotlist.hotlist_id]),
      ])
      where.append(
          ('Is2h.hotlist_id = %s', [hotlist.hotlist_id]))

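    # Fan the query out across the logical shards: build one statement per
    # shard, issue each through a Promise so the shard queries can run
    # concurrently, then merge the per-shard rows below.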
    promises = []

    for shard_id in range(settings.num_logical_shards):
      count_stmt, stmt_args = self._BuildSnapshotQuery(cols=cols,
          where=where, joins=left_joins, group_by=group_by,
          shard_id=shard_id)
      promises.append(framework_helpers.Promise(cnxn.Execute,
          count_stmt, stmt_args, shard_id=shard_id))

    shard_values_dict = {}

    search_limit_reached = False

    for promise in promises:
      # Wait for each query to complete and add it to the dict.
      shard_values = list(promise.WaitAndGetValue())

      if not shard_values:
        continue
      if group_by:
        for name, count in shard_values:
          if count >= settings.chart_query_max_rows:
            search_limit_reached = True

          shard_values_dict.setdefault(name, 0)
          shard_values_dict[name] += count
      else:
        if shard_values[0][0] >= settings.chart_query_max_rows:
          search_limit_reached = True

        shard_values_dict.setdefault('total', 0)
        shard_values_dict['total'] += shard_values[0][0]

    unsupported_field_names = list(set([
        field.field_name
        for cond in unsupported_conds
        for field in cond.field_defs
    ]))

    return shard_values_dict, unsupported_field_names, search_limit_reached

  def StoreIssueSnapshots(self, cnxn, issues, commit=True):
    """Adds an IssueSnapshot and updates the previous one for each issue."""
    for issue in issues:
      right_now = self._currentTime()

      # Update previous snapshot of current issue's end time to right now.
      self.issuesnapshot_tbl.Update(cnxn,
          delta={'period_end': right_now},
          where=[('IssueSnapshot.issue_id = %s', [issue.issue_id]),
              ('IssueSnapshot.period_end = %s',
                [settings.maximum_snapshot_period_end])],
          commit=commit)

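      # The new snapshot is written with period_end set to the
      # maximum_snapshot_period_end sentinel, marking it as the currently
      # active snapshot until a later update closes it off.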
      config = self.config_service.GetProjectConfig(cnxn, issue.project_id)
      period_end = settings.maximum_snapshot_period_end
      is_open = tracker_helpers.MeansOpenInProject(
          tracker_bizobj.GetStatus(issue), config)
      shard = issue.issue_id % settings.num_logical_shards
      status = tracker_bizobj.GetStatus(issue)
      status_id = self.config_service.LookupStatusID(
          cnxn, issue.project_id, status) or None
      owner_id = tracker_bizobj.GetOwnerId(issue) or None

      issuesnapshot_rows = [(issue.issue_id, shard, issue.project_id,
          issue.local_id, issue.reporter_id, owner_id, status_id, right_now,
          period_end, is_open)]

      ids = self.issuesnapshot_tbl.InsertRows(
          cnxn, ISSUESNAPSHOT_COLS[1:],
          issuesnapshot_rows,
          replace=True, commit=commit,
          return_generated_ids=True)
      issuesnapshot_id = ids[0]

      # Add all labels to IssueSnapshot2Label.
      label_rows = [
          (issuesnapshot_id,
           self.config_service.LookupLabelID(cnxn, issue.project_id, label))
          for label in tracker_bizobj.GetLabels(issue)
      ]
      self.issuesnapshot2label_tbl.InsertRows(
          cnxn, ISSUESNAPSHOT2LABEL_COLS,
          label_rows, replace=True, commit=commit)

      # Add all CCs to IssueSnapshot2Cc.
      cc_rows = [
          (issuesnapshot_id, cc_id)
          for cc_id in tracker_bizobj.GetCcIds(issue)
      ]
      self.issuesnapshot2cc_tbl.InsertRows(
          cnxn, ISSUESNAPSHOT2CC_COLS,
          cc_rows,
          replace=True, commit=commit)

      # Add all components to IssueSnapshot2Component.
      component_rows = [
          (issuesnapshot_id, component_id)
          for component_id in issue.component_ids
      ]
      self.issuesnapshot2component_tbl.InsertRows(
          cnxn, ISSUESNAPSHOT2COMPONENT_COLS,
          component_rows,
          replace=True, commit=commit)

      # Add all hotlist memberships to IssueSnapshot2Hotlist.
      # This is raw SQL to obviate passing FeaturesService down through
      # the call stack wherever this function is called.
      # TODO(jrobbins): sort out dependencies between service classes.
      cnxn.Execute('''
        INSERT INTO IssueSnapshot2Hotlist (issuesnapshot_id, hotlist_id)
        SELECT %s, hotlist_id FROM Hotlist2Issue WHERE issue_id = %s
      ''', [issuesnapshot_id, issue.issue_id])

  def ExpungeHotlistsFromIssueSnapshots(self, cnxn, hotlist_ids, commit=True):
    """Expunge the existence of hotlists from issue snapshots.

    Args:
      cnxn: connection to SQL database.
      hotlist_ids: list of hotlist_ids for hotlists we want to delete.
      commit: set to False to skip the DB commit and do it in a caller.
    """
    vals_ph = sql.PlaceHolders(hotlist_ids)
    cnxn.Execute(
        'DELETE FROM IssueSnapshot2Hotlist '
        'WHERE hotlist_id IN ({vals_ph})'.format(vals_ph=vals_ph),
        hotlist_ids,
        commit=commit)

  def _currentTime(self):
    """This is a separate method so it can be mocked by tests."""
    return time.time()

  def _QueryToWhere(self, cnxn, services, project_config, query, canned_query,
      project_ids):
    """Parses a query string into LEFT JOIN and WHERE conditions.

    Args:
      cnxn: A MonorailConnection instance.
      services: A Services instance.
      project_config: The configuration for the given project.
      query (string): The query to parse.
      canned_query (string): The supplied canned query.
      project_ids: The current project ID(s).

    Returns:
      1. A list of LEFT JOIN clauses for the SQL query.
      2. A list of WHERE clauses for the SQL query.
      3. A list of query conditions that are unsupported with snapshots.
    """
    if not (query or canned_query):
      return [], [], []

    query = query or ''
    scope = canned_query or ''

    query_ast = query2ast.ParseUserQuery(query, scope,
        query2ast.BUILTIN_ISSUE_FIELDS, project_config)
    query_ast = ast2ast.PreprocessAST(cnxn, query_ast, project_ids,
        services, project_config)
    left_joins, where, unsupported = ast2select.BuildSQLQuery(query_ast,
        snapshot_mode=True)

    return left_joins, where, unsupported

  def _BuildSnapshotQuery(self, cols, where, joins, group_by, shard_id):
    """Given SQL arguments, builds a snapshot COUNT query for one shard."""
    stmt = sql.Statement.MakeSelect('IssueSnapshot', cols, distinct=True)
    stmt.AddJoinClauses(joins, left=True)
    stmt.AddWhereTerms(where + [('IssueSnapshot.shard = %s', [shard_id])])
    if group_by:
      stmt.AddGroupByTerms(group_by)
    stmt.SetLimitAndOffset(limit=settings.chart_query_max_rows, offset=0)
    stmt_str, stmt_args = stmt.Generate()
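    # Grouped queries already produce (name, count) rows; the is_open grouping
    # is rewritten to human-readable "Opened"/"Closed" labels. An ungrouped
    # query is wrapped in an outer COUNT so it returns a single total.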
    if group_by:
      if group_by[0] == 'IssueSnapshot.is_open':
        count_stmt = ('SELECT IF(results.is_open = 1, "Opened", "Closed") '
            'AS bool_open, results.issue_count '
            'FROM (%s) AS results' % stmt_str)
      else:
        count_stmt = stmt_str
    else:
      count_stmt = 'SELECT COUNT(results.issue_id) FROM (%s) AS results' % (
          stmt_str)
    return count_stmt, stmt_args