Coverage for postrfp/buyer/api/endpoints/scoring.py: 98%

220 statements  

coverage.py v7.12.0, created at 2025-12-03 01:35 +0000

1""" 

2Scoring endpoints: manage raw scores, comments, autoscores, summaries and calculated / 

3weighted aggregations. Empty scoreset_id ("") = agreed (consensus) set; non‑empty = individual. 

4""" 

5 

6from datetime import datetime 

7 

8from typing import NamedTuple 

9from decimal import Decimal 

10 

11from sqlalchemy.orm import Session 

12from sqlalchemy.orm.exc import NoResultFound 

13 

14from postrfp.authorisation import perms 

15from postrfp.shared import fetch, update, serial 

16from postrfp.shared.decorators import http 

17from postrfp.model import ( 

18 ScoreComment, 

19 AuditEvent, 

20 Score, 

21 Project, 

22 Issue, 

23 User, 

24 QuestionInstance, 

25) 

26from postrfp.shared.serial.common import ScoringModel 

27from postrfp.model.questionnaire.b36 import from_b36 

28 

29from postrfp.buyer.api import authorise 

30from postrfp.shared.exceptions import AuthorizationFailure 

31 

32 


@http
def get_project_scores(
    session: Session, user: User, project_id: int, scoreset_id: str = "__default__"
) -> serial.ScoringData:
    """
    Bulk export of all scores for a project in one score set (agreed or individual) for
    analytic use. "__default__" resolves to the agreed set. Permission: ISSUE_VIEW_AGREED_SCORES.
    """
    project = fetch.project(session, project_id)
    authorise.check(
        user, perms.ISSUE_VIEW_AGREED_SCORES, project=project, deny_restricted=True
    )
    scoreset_id = scoreset_id if scoreset_id != "__default__" else ""
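    # Editor's note on the export loop below: rows from fetch.scoring_data are named
    # tuples, converted to plain dicts and given an integer "number" decoded from the
    # base-36 b36_number field (a reading of this code, not a documented API guarantee).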

    score_list = []
    for s in fetch.scoring_data(project, scoreset_id=scoreset_id):
        score_dict = s._asdict()
        score_dict["number"] = from_b36(s.b36_number)
        score_list.append(score_dict)

    return serial.ScoringData(
        scores=score_list,
        scoreset_id=scoreset_id,
    )


@http
def get_question_scores(
    session: Session, user: User, question_id: int, scoreset_id: str = ""
):
    """
    Scores for one question across all Issues in a specified score set (agreed or individual).
    Permission determined by score set context.
    """
    question = fetch.question(session, question_id, with_project=True)
    section = question.section

    question_filter = QuestionInstance.id == question.id

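    # Which view permission applies depends on the score set being read: the agreed
    # set ("") and an individual scorer's set are governed by different permissions.
    # The exact mapping lives in fetch.get_permission_for_scoreset.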

    score_permission = fetch.get_permission_for_scoreset(user, scoreset_id)
    authorise.check(
        user, score_permission, project=question.project, section_id=section.id
    )

    return fetch.scores(
        session, section.project, section, scoreset_id, user, question_filter
    )


@http
def post_question_score(
    session: Session, user: User, question_id: int, score_doc: serial.Score
) -> serial.Id:
    """
    Create or update a single score (question + issue + score set). Validates range,
    enforces save permission, emits audit event (create/change).
    """
    question = fetch.question(session, question_id, with_project=True)
    authorise.check(
        user,
        perms.PROJECT_VIEW_QUESTIONNAIRE,
        project=question.project,
        section_id=question.section_id,
        deny_restricted=False,
    )
    project = question.project
    score, created = fetch.or_create_score(session, project, question, score_doc)
    initial_score_value = score.score
    score_value = score_doc.score_value
    Score.check_score_value(score_value, project)
    score.score = score_value

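    # Viewing the questionnaire is not enough to write a score: a second,
    # scoreset-specific save permission (own set vs the agreed set) is checked below.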

    score_perm = fetch.get_permission_for_scoreset(
        user, score_doc.scoreset_id, to_save=True
    )
    authorise.check(
        user,
        score_perm,
        project=question.project,
        section_id=question.section_id,
        deny_restricted=False,
    )

    # Need the Score record's ID for the audit event record, so flush
    session.flush()
    update.log_score_event(session, score, initial_score_value, created, project, user)
    return serial.Id(id=score.id)


@http
def get_project_section_issue_scores(
    session: Session,
    user: User,
    project_id: int,
    section_id: int,
    issue_id: int,
    scoreset_id: str = "",
):
    """
    Scores for a single Issue within the scope of a Section (and underlying fetch rules),
    filtered by score set. Permission: derived from score set.
    """
    project = fetch.project(session, project_id)
    section = fetch.section_of_project(project, section_id)
    issue = project.get_issue(issue_id)

    score_permission = fetch.get_permission_for_scoreset(user, scoreset_id)
    authorise.check(
        user, score_permission, project=project, issue=issue, section_id=section.id
    )

    issue_filter = Issue.id == issue.id
    return fetch.scores(session, project, section, scoreset_id, user, issue_filter)


@http
def post_question_score_comment(
    session: Session, user: User, question_id: int, score_doc: serial.Score
) -> None:
    """
    Add a comment to (and optionally update) a score. Creates the score if needed.
    Emits SCORE_COMMENT_ADDED, plus a score change audit event if the value is updated.
    """
    question = fetch.question(session, question_id, with_project=True)
    authorise.check(
        user,
        perms.PROJECT_VIEW_QUESTIONNAIRE,
        project=question.project,
        section_id=question.section_id,
    )
    project = question.project

    score, created = fetch.or_create_score(session, project, question, score_doc)

    # Saving a value needs the stronger "save" permission; a comment alone does not
    to_save = score_doc.score_value is not None

    score_permission = fetch.get_permission_for_scoreset(
        user, score_doc.scoreset_id, to_save=to_save
    )

    authorise.check(
        user,
        score_permission,
        issue=score.issue,
        section_id=question.section_id,
        project=question.project,
    )

    if score_doc.score_value is not None:
        initial_score_value = score.score
        score_value = score_doc.score_value
        Score.check_score_value(score_value, project)
        score.score = score_value
        # Need the Score record's ID for the audit event record, so flush
        session.flush()
        update.log_score_event(
            session, score, initial_score_value, created, project, user
        )

    if score_doc.comment is not None:
        # Make the comment and add it to the database
        comment = ScoreComment(
            score=score,
            comment_time=datetime.now(),
            user_id=user.id,
            comment_text=score_doc.comment,
        )
        session.add(comment)
        session.flush()
        evt = AuditEvent.create(
            session,
            "SCORE_COMMENT_ADDED",
            object_id=comment.id,
            user=user,
            project=project,
            issue_id=score_doc.issue_id,
            question_id=question.id,
        )
        evt.add_change("Comment", "", comment.comment_text)

        session.add(evt)


def check_autoscore_permissions(
    project: Project, initiating_user: User, target_user: User
):
    """
    Shared preconditions for autoscore generation: the project must use multiple score
    sets, the target user must belong to a participating organisation and hold
    ISSUE_SAVE_SCORES, and the initiating user must hold ISSUE_SAVE_AGREED_SCORES.
    """
    if not project.multiscored:
        raise ValueError("Project must be using Multiple Score Sets")
    if target_user.organisation not in project.participants:
        m = f"User {target_user.id} not a participant in project {project.id}"
        raise AuthorizationFailure(m)
    target_user.check_permission(perms.ISSUE_SAVE_SCORES)
    initiating_user.check_permission(perms.ISSUE_SAVE_AGREED_SCORES)


@http
def get_project_calcautoscores(
    session: Session, user: User, project_id: int, target_user: User
):
    """
    Preview autoscores (not persisted) for a target scorer's set. Requires: Live project,
    multiscored, participant target, proper save/agreed permissions.
    """
    project = fetch.project(session, project_id)

    if project.status_name != "Live":
        raise ValueError("Project must be live to generate autoscores")

    check_autoscore_permissions(project, user, target_user)
    ascores = fetch.generate_autoscores(project, session, target_user)
    return list(ascores.values())


@http
def post_project_calcautoscores(
    session: Session, user: User, project_id: int, target_user: User
):
    """
    Persist autoscores into a target scorer's set. Skips unchanged values. Emits create/change
    audit events (autoscore flagged). Same preconditions as preview.
    """
    project = fetch.project(session, project_id)

    if project.status_name != "Live":
        raise ValueError("Project must be live to generate autoscores")

    check_autoscore_permissions(project, user, target_user)
    existing_scores = fetch.scores_dict_scoreset(project, target_user.id)

    # Collect all scores for batch processing
    scores_to_update = []
    scores_to_create = []

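    # generate_autoscores and scores_dict_scoreset appear to share a key scheme
    # (presumably question/issue within the target score set), so in the loop below a
    # key hit means "update the existing Score" and a miss means "create a new one".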

    for auto_key, autoscore_entry in fetch.generate_autoscores(
        project, session, target_user
    ).items():
        if auto_key in existing_scores:
            score = existing_scores[auto_key]
            # Skip autoscores that match the existing value; only changed scores are updated
            new_score = autoscore_entry.score
            if score.score is not None and score.score == new_score:
                continue
            initial_score_value = score.score
            score.score = Decimal(new_score)
            scores_to_update.append((score, initial_score_value))
        else:
            score = Score(
                question_instance_id=autoscore_entry.question_id,
                scoreset_id=autoscore_entry.scoreset_id,
                issue_id=autoscore_entry.issue_id,
                score=autoscore_entry.score,
            )
            scores_to_create.append(score)

    # Batch add all new scores
    if scores_to_create:
        session.add_all(scores_to_create)

    # Need ID values for newly created score objects
    session.flush()

    # Batch create audit events and comments
    update.log_score_events_batch(
        session, scores_to_update, scores_to_create, project, target_user
    )


@http
def get_section_scoresummaries(
    session: Session, user: User, section_id: int, scoreset_id: str
) -> serial.ScoreSummary:
    """
    Coverage & subtotal summary for a Section: per-subsection progress (questions vs scored)
    and per-question score snapshot. Used for completion dashboards. Permission: score set view.
    """
    section = fetch.section(session, section_id)
    project = section.project
    permission = fetch.get_permission_for_scoreset(user, scoreset_id)
    authorise.check(user, permission, project=project, section_id=section.id)
    sub = fetch.subsection_scoressummary(session, user, project, section, scoreset_id)
    return serial.ScoreSummary(
        subsections=[
            serial.SectionScore.model_validate(row)
            for row in fetch.section_scoresummary(session, user, project, section, sub)
        ],
        questions=[
            serial.QuestionScore.model_validate(q)
            for q in fetch.question_scoresummary(
                session, user, project, section, scoreset_id
            )
        ],
    )


@http
def get_section_scores(
    session: Session,
    user: User,
    section_id: int,
    scoreset_id: str | None = None,
    weightset_id: int | None = None,
    scoring_model: str | None = None,
) -> serial.CalculatedScores:
    """
    Calculated scores (immediate child subsections & questions) applying optional weighting
    and scoring model (default "Unweighted"). Returns per-Issue breakdown + totals.
    """
    from postrfp.shared.fetch.view_scoring import get_child_scores
    from postrfp.model.questionnaire.score_views import QuestionScoreComponent

    section = fetch.section(session, section_id)
    project = section.project
    authorise.check(
        user,
        perms.ISSUE_VIEW_AGREED_SCORES,
        project=project,
        section_id=section.id,
        deny_restricted=True,
    )

    # Set defaults
    scoreset_id = scoreset_id or ""
    scoring_model = scoring_model or "Unweighted"

    # Get scores for immediate child questions and sections
    scores_data = get_child_scores(
        session=session,
        section=section,
        target_types=["question", "section"],
        scoreset_id=scoreset_id,
        weighting_set_id=weightset_id,
        scoring_model=scoring_model,
    )

    # Type the components properly - accept both view and direct query results
    question_components: list[QuestionScoreComponent] = [
        comp
        for comp in scores_data.get("question", [])
        if isinstance(comp, QuestionScoreComponent)
    ]
    # Accept any object with the right interface (view or direct query)
    section_components = scores_data.get("section", [])

    # Build the response structure
    score_dict = {}

    # Group by issue_id
    all_issues = set()
    for q_comp in question_components:
        all_issues.add(q_comp.issue_id)
    for s_comp in section_components:
        all_issues.add(s_comp.issue_id)

    for issue_id in all_issues:
        # Get question scores for this issue - keep as Decimal until final conversion
        issue_question_decimals = {
            q_comp.question_id: q_comp.get_calculated_score(scoring_model)
            for q_comp in question_components
            if q_comp.issue_id == issue_id and q_comp.raw_score is not None
        }

        # Get section scores for this issue - keep as Decimal until final conversion
        issue_section_decimals = {
            s_comp.section_id: s_comp.get_calculated_score(scoring_model)
            for s_comp in section_components
            if s_comp.issue_id == issue_id and s_comp.raw_total is not None
        }

        # Calculate total score using Decimal arithmetic
        total_decimal = (
            sum(issue_question_decimals.values(), Decimal("0"))
            + sum(issue_section_decimals.values(), Decimal("0"))
        ).quantize(Decimal("0.0001"))
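        # Illustrative arithmetic (made-up values): question contributions
        # {101: Decimal("2.5"), 102: Decimal("1.25")} plus a child-section
        # contribution {7: Decimal("3.0")} give total_decimal == Decimal("6.7500").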

        # Convert to float only at the very end for serialization
        issue_question_scores = {
            qid: float(score) for qid, score in issue_question_decimals.items()
        }
        issue_section_scores = {
            sid: float(score) for sid, score in issue_section_decimals.items()
        }

        # Create the IssueScores object
        score_dict[issue_id] = serial.IssueScores(
            total_score=float(total_decimal),
            section_scores=issue_section_scores,
            question_scores=issue_question_scores,
        )

    return serial.CalculatedScores(
        scoring_model=ScoringModel(scoring_model), scores=score_dict
    )


@http
def get_project_scoresets(
    session: Session, user: User, project_id: int, scoreset_id: str = ""
) -> list[serial.ScoreSet]:
    """
    List accessible score sets. With ISSUE_VIEW_AGREED_SCORES, returns all distinct user
    sets plus the synthetic agreed set (__default__). Otherwise returns only the caller's
    own set.
    """
    project = fetch.project(session, project_id)

    # The ISSUE_VIEW_AGREED_SCORES permission allows a user to view other users' score sets
    if user.has_permission(perms.ISSUE_VIEW_AGREED_SCORES):
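        # Individual score sets are keyed by the scoring user's id (see the own-set
        # branch below), so User is outer-joined on scoreset_id to pick up a display
        # name; fullname comes back NULL for any set whose user row is missing.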

        sq = (
            session.query(Score.scoreset_id, User.fullname)
            .join(Issue)
            .outerjoin(User, Score.scoreset_id == User.id)
            .filter(Issue.project == project, Score.scoreset_id != "")
            .distinct()
        )

        sc = [serial.ScoreSet.model_validate(row) for row in sq]
        sc.append(
            serial.ScoreSet(scoreset_id="__default__", fullname="Agreed Scoring Set")
        )
        return sc

    else:
        user.check_permission(perms.ISSUE_VIEW_SCORES)
        return [serial.ScoreSet(scoreset_id=user.id, fullname=user.fullname)]


@http
def get_question_issue_comments(
    session: Session, user: User, question_id: int, issue_id: int, scoreset_id: str = ""
):
    """
    All comments (chronological) for a specific score (question + issue + score set).
    Empty list if score not yet created. Requires view permissions.
    """
    question = fetch.question(session, question_id, with_project=True)
    authorise.check(
        user,
        perms.PROJECT_VIEW_QUESTIONNAIRE,
        project=question.project,
        section_id=question.section_id,
    )
    issue = question.project.get_issue(issue_id)

    score_permission = fetch.get_permission_for_scoreset(user, scoreset_id)

    authorise.check(
        user,
        score_permission,
        issue=issue,
        section_id=question.section_id,
        project=question.project,
    )

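    # A Score row may not exist yet for this (question, issue, score set) combination;
    # per the docstring, that case is reported as an empty list rather than an error.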

    try:
        score = (
            session.query(Score)
            .filter(
                Score.question_instance_id == question.id,
                Score.issue_id == issue.id,
                Score.scoreset_id == scoreset_id,
            )
            .one()
        )
    except NoResultFound:
        return []

    return [comment.as_dict() for comment in score.comments]


class ScoreData(NamedTuple):
    """Bookkeeping for one upserted score, used to emit audit events afterwards."""

    score: Score
    initial_score_value: Decimal | None
    created: bool


@http
def post_section_scoreset_scores(
    session: Session,
    user: User,
    section_id: int,
    scoreset_id: str,
    section_score_docs: serial.SectionScoreDocs,
) -> list[serial.Id]:
    """
    Bulk upsert of multiple scores across questions & issues within a Section for one score set.
    Emits per-score audit events. Fails fast on permission or value validation errors.
    """
    section = fetch.section(session, section_id)
    project = section.project
    authorise.check(
        user,
        perms.PROJECT_VIEW_QUESTIONNAIRE,
        project=project,
        section_id=section_id,
        deny_restricted=False,
    )

    score_perm = fetch.get_permission_for_scoreset(user, scoreset_id, to_save=True)
    authorise.check(
        user, score_perm, project=project, section_id=section_id, deny_restricted=False
    )

    data: list[ScoreData] = []

    # Collect all score requests for batch processing
    score_requests = []
    score_docs_mapping = []

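    # Expected payload shape for the loop below (field names from this code; the
    # values are made up for illustration):
    #   [{"question_id": 17, "scores": [{"issue_id": 3, "score_value": 4}, ...]}, ...]
    # Each (question, issue) pair becomes one ScoreTuple in score_requests.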

    for doc in section_score_docs.root:
        question = fetch.question_of_section(session, section_id, doc.question_id)
        for score_doc in doc.scores:
            issue = project.get_issue(score_doc.issue_id)
            sd = fetch.ScoreTuple(
                issue_id=issue.id,
                score_value=score_doc.score_value,
                scoreset_id=scoreset_id,
            )
            score_requests.append((question, sd))
            score_docs_mapping.append(sd)

    # Process all scores in batch
    batch_results = fetch.or_create_scores_batch(session, project, score_requests)

    # Build data list with results
    for i, (score, created) in enumerate(batch_results):
        sd = score_docs_mapping[i]
        initial_score_value = score.score
        score_value = sd.score_value
        Score.check_score_value(score_value, project)
        score.score = score_value
        data.append(
            ScoreData(
                score=score,
                initial_score_value=initial_score_value,
                created=created,
            )
        )

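    # Flush so newly created Score rows receive ids before the audit events (and the
    # returned serial.Id list) reference them.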

    session.flush()

    # Separate scores by created vs updated for batch processing
    scores_to_create = []
    scores_to_update = []

    for item in data:
        if item.created:
            scores_to_create.append(item.score)
        else:
            scores_to_update.append((item.score, item.initial_score_value))

    # Use batch processing only if there are multiple scores, otherwise use individual logging
    total_scores = len(scores_to_create) + len(scores_to_update)
    if total_scores > 3:  # Threshold for batch processing
        # Batch create audit events for all scores
        update.log_score_events_batch(
            session,
            scores_to_update,
            scores_to_create,
            project,
            user,
            add_comments=False,
        )
    else:
        # Use individual logging for small batches to avoid overhead
        for item in data:
            update.log_score_event(
                session,
                item.score,
                item.initial_score_value,
                item.created,
                project,
                user,
            )

    # Build return list of score IDs
    score_ids: list[serial.Id] = []
    for item in data:
        score_ids.append(serial.Id(id=item.score.id))

    return score_ids