diff --git a/bases/rsptx/assignment_server_api/assignment_builder/src/components/routes/AssignmentBuilder/components/exercises/AssignmentExercisesList/AssignmentExercisesTable.tsx b/bases/rsptx/assignment_server_api/assignment_builder/src/components/routes/AssignmentBuilder/components/exercises/AssignmentExercisesList/AssignmentExercisesTable.tsx index 217f81456..be812c201 100644 --- a/bases/rsptx/assignment_server_api/assignment_builder/src/components/routes/AssignmentBuilder/components/exercises/AssignmentExercisesList/AssignmentExercisesTable.tsx +++ b/bases/rsptx/assignment_server_api/assignment_builder/src/components/routes/AssignmentBuilder/components/exercises/AssignmentExercisesList/AssignmentExercisesTable.tsx @@ -41,7 +41,7 @@ const AsyncModeHeader = ({ hasApiKey }: { hasApiKey: boolean }) => { const exercises = assignmentExercises.map((ex) => ({ ...ex, question_json: JSON.stringify(ex.question_json), - use_llm: value === "LLM" + async_mode: value === "Generic LLM" ? "llm" : value === "Personalized LLM" ? "analogies" : "standard" })); const { error } = await updateExercises(exercises); if (!error) { @@ -75,7 +75,8 @@ const AsyncModeHeader = ({ hasApiKey }: { hasApiKey: boolean }) => { onChange={(e) => setValue(e.value)} options={[ { label: "Standard", value: "Standard" }, - { label: "LLM", value: "LLM", disabled: !hasApiKey } + { label: "Generic LLM", value: "Generic LLM", disabled: !hasApiKey }, + { label: "Personalized LLM", value: "Personalized LLM", disabled: !hasApiKey } ]} optionLabel="label" optionDisabled="disabled" @@ -364,11 +365,12 @@ export const AssignmentExercisesTable = ({
updateAssignmentQuestions([{ ...data, question_json: JSON.stringify(data.question_json), use_llm: e.value === "LLM" }])} + value={!hasApiKey || !data.async_mode || data.async_mode === "standard" ? "Standard" : data.async_mode === "analogies" ? "Personalized LLM" : "Generic LLM"} + onChange={(e) => updateAssignmentQuestions([{ ...data, question_json: JSON.stringify(data.question_json), async_mode: e.value === "Generic LLM" ? "llm" : e.value === "Personalized LLM" ? "analogies" : "standard" }])} options={[ { label: "Standard", value: "Standard" }, - { label: "LLM", value: "LLM", disabled: !hasApiKey } + { label: "Generic LLM", value: "Generic LLM", disabled: !hasApiKey }, + { label: "Personalized LLM", value: "Personalized LLM", disabled: !hasApiKey } ]} optionLabel="label" optionDisabled="disabled" diff --git a/bases/rsptx/assignment_server_api/assignment_builder/src/types/exercises.ts b/bases/rsptx/assignment_server_api/assignment_builder/src/types/exercises.ts index 5d5129080..a4c2408bf 100644 --- a/bases/rsptx/assignment_server_api/assignment_builder/src/types/exercises.ts +++ b/bases/rsptx/assignment_server_api/assignment_builder/src/types/exercises.ts @@ -56,7 +56,7 @@ export type Exercise = { reading_assignment: boolean; sorting_priority: number; activities_required: number; - use_llm: boolean; + async_mode: string; qnumber: string; name: string; subchapter: string; diff --git a/bases/rsptx/assignment_server_api/routers/peer.py b/bases/rsptx/assignment_server_api/routers/peer.py index df6c29e2c..fb09d105b 100644 --- a/bases/rsptx/assignment_server_api/routers/peer.py +++ b/bases/rsptx/assignment_server_api/routers/peer.py @@ -35,8 +35,12 @@ count_distinct_student_answers, count_peer_messages, fetch_last_useinfo_peergroup, + fetch_course_students, + fetch_api_token, + fetch_question, ) -from rsptx.db.models import UseinfoValidation +from rsptx.db.models import UseinfoValidation, Useinfo +from rsptx.db.async_session import async_session from rsptx.auth.session 
import auth_manager import pandas as pd import random @@ -49,6 +53,24 @@ get_webpack_static_imports, ) +# Analogy themes for async LLM mode +# ================================== +PI_THEMES = [ + {"id": "family_tree", "label": "Family Tree"}, + {"id": "geographic", "label": "Geographic Hierarchy"}, + {"id": "html_dom", "label": "HTML/DOM Tree"}, + {"id": "university", "label": "Organizational Hierarchy"}, + {"id": "apartment", "label": "Apartment Building"}, + {"id": "grocery", "label": "Grocery Store"}, + {"id": "airport", "label": "Airport"}, + {"id": "discord", "label": "Discord Server"}, + {"id": "video_game", "label": "Video Game World"}, + {"id": "music_library", "label": "Music Library"}, + {"id": "audio_production", "label": "Audio Production"}, + {"id": "kitchen", "label": "Kitchen Storage"}, +] +THEME_BY_ID = {t["id"]: t for t in PI_THEMES} + # Routing # ======= router = APIRouter( @@ -229,6 +251,7 @@ async def get_peer_dashboard( "num_questions": num_questions, "is_instructor": True, "is_last": is_last, + "peer_async_visible": bool(assignment.peer_async_visible), "lti": is_lti, "latex_macros": latex_macros, "enable_ab": enable_ab, @@ -253,26 +276,39 @@ async def get_peer_extra( course=None, ): """ - Display extra information for peer instruction that instructors - might not want to share with students (e.g., correct answers, insights). + Instructor-only view showing live percent-correct for the current question. + Meant to be opened on a separate device so students can't see it. 
""" rslogger.info(f"Peer extra info for assignment {assignment_id}") - - # TODO: Implement extra info display - # This will show: - # - Correct answers - # - Instructor notes - # - Common misconceptions + templates = Jinja2Templates(directory=template_folder) assignment = await fetch_one_assignment(assignment_id) + questions_result = await fetch_assignment_questions(assignment_id) + questions = [q for q, _aq in questions_result] - return JSONResponse( - content={ - "message": "Extra info functionality to be implemented", - "assignment_id": assignment_id, - "assignment_name": assignment.name, - } + current_idx = ( + assignment.current_index if assignment.current_index is not None else 0 ) + if current_idx >= len(questions): + current_idx = len(questions) - 1 if questions else 0 + current_question = questions[current_idx] if questions else None + + if not current_question: + return JSONResponse( + status_code=404, + content={"detail": "No questions found for this assignment"}, + ) + + context = { + "request": request, + "course": course, + "user": user, + "assignment_id": assignment_id, + "current_question": current_question, + "is_instructor": True, + } + + return templates.TemplateResponse("assignment/instructor/peer_extra.html", context) @router.get("/instructor/review", response_class=HTMLResponse) @@ -287,23 +323,13 @@ async def get_peer_review( """ Review peer instruction results and student interactions. 
""" - rslogger.info(f"Peer review for assignment {assignment_id}") - - # TODO: Implement review functionality - # This will show: - # - Overall statistics - # - Student participation - # - Answer distribution - # - Chat transcripts - - assignment = await fetch_one_assignment(assignment_id) + rslogger.info( + f"Peer review for assignment {assignment_id} - not yet implemented, redirecting" + ) + from fastapi.responses import RedirectResponse - return JSONResponse( - content={ - "message": "Review functionality to be implemented", - "assignment_id": assignment_id, - "assignment_name": assignment.name, - } + return RedirectResponse( + url=f"/assignment/instructor/reviewPeerAssignment?assignment_id={assignment_id}" ) @@ -427,30 +453,70 @@ async def get_peer_question( return templates.TemplateResponse("assignment/student/peer_question.html", context) +async def _has_vote1_async(div_id: str, sid: str) -> bool: + from sqlalchemy import select + + async with async_session() as session: + query = ( + select(Useinfo) + .where( + (Useinfo.event == "mChoice") + & (Useinfo.sid == sid) + & (Useinfo.div_id == div_id) + & Useinfo.act.like("%vote1") + ) + .order_by(Useinfo.id.desc()) + .limit(1) + ) + result = await session.execute(query) + return result.scalar() is not None + + +async def _has_reflection_async(div_id: str, sid: str) -> bool: + from sqlalchemy import select + + async with async_session() as session: + query = ( + select(Useinfo) + .where( + (Useinfo.event == "reflection") + & (Useinfo.sid == sid) + & (Useinfo.div_id == div_id) + ) + .order_by(Useinfo.id.desc()) + .limit(1) + ) + result = await session.execute(query) + return result.scalar() is not None + + +async def _llm_enabled_async(course_id: int) -> bool: + token = await fetch_api_token(course_id, "openai") + return token is not None and bool(token.token) + + @router.get("/student/async", response_class=HTMLResponse) @with_course() async def get_peer_async( request: Request, assignment_id: int, + question_num: 
int = 1, user=Depends(auth_manager), course=None, ): - """ - Display the asynchronous (after-class) peer instruction interface. - Students can review questions and see recorded chat transcripts. - """ - rslogger.info(f"Peer async for assignment {assignment_id}, user {user.username}") - # TODO: Implement async peer instruction interface - # This will include: - # - Question review - # - Two voting rounds - # - Access to recorded chat transcripts - # - Explanations from peers + import os + import random + + rslogger.info( + f"Peer async for assignment {assignment_id}, question {question_num}, user {user.username}" + ) + templates = Jinja2Templates(directory=template_folder) assignment = await fetch_one_assignment(assignment_id) + if not assignment: + return JSONResponse(status_code=404, content={"detail": "Assignment not found"}) - # Check if async is enabled for this assignment if not assignment.peer_async_visible: return JSONResponse( status_code=403, @@ -459,16 +525,96 @@ async def get_peer_async( }, ) - # TODO: Use assignment_questions when implementing async interface - # assignment_questions = await fetch_assignment_questions(assignment_id) + qa_pairs = list(await fetch_assignment_questions(assignment_id)) + questions = [q for q, _aq in qa_pairs] + total_questions = len(questions) + + idx = question_num - 1 + if not questions or idx >= total_questions: + all_done = True + current_question = None + aq = None + else: + all_done = False + current_question = questions[idx] + aq = qa_pairs[idx][1] + + has_vote1 = False + has_reflection = False + if current_question: + has_vote1 = await _has_vote1_async(current_question.name, user.username) + has_reflection = await _has_reflection_async( + current_question.name, user.username + ) - return JSONResponse( - content={ - "message": "Async peer instruction functionality to be implemented", - "assignment_id": assignment_id, - "assignment_name": assignment.name, - } + course_attrs = await 
fetch_all_course_attributes(course.id) + latex_macros = course_attrs.get("latex_macros", "") + + async_llm_modes_enabled = ( + course_attrs.get("enable_async_llm_modes", "false") == "true" + ) + has_api_token = await _llm_enabled_async(course.id) + question_async_mode = ( + (getattr(aq, "async_mode", None) or "standard") if aq else "standard" ) + if async_llm_modes_enabled: + question_use_llm = question_async_mode in ("llm", "analogies") + llm_enabled = question_use_llm and has_api_token + else: + llm_enabled = has_api_token + + try: + await create_useinfo_entry( + UseinfoValidation( + course_id=course.course_name, + sid=user.username, + div_id=current_question.name if current_question else "", + event="pi_mode", + act=json.dumps({"mode": "llm" if llm_enabled else "legacy"}), + timestamp=datetime.datetime.utcnow(), + ) + ) + except Exception: + rslogger.exception("Failed to log pi_mode for peer_async") + + site_packages_path = sys.path[0] + peer_js_path = os.path.join( + site_packages_path, "rsptx/templates/staticAssets", "js", "peer.js" + ) + try: + peer_mtime = str(int(os.path.getmtime(peer_js_path))) + except (FileNotFoundError, AttributeError): + peer_mtime = str(random.randrange(10000)) + + wp_assets = get_webpack_static_imports(course) + + context = { + "request": request, + "course": course, + "user": user, + "course_id": course.course_name, + "assignment_id": assignment_id, + "assignment_name": assignment.name, + "current_question": current_question, + "nextQnum": question_num + 1, + "total_questions": total_questions, + "is_last_question": question_num >= total_questions, + "all_done": all_done, + "has_vote1": has_vote1, + "has_reflection": has_reflection, + "llm_enabled": llm_enabled, + "async_mode": question_async_mode, + "pi_themes_json": json.dumps(PI_THEMES), + "llm_reply": None, + "latex_macros": latex_macros, + "peer_mtime": peer_mtime, + "is_instructor": False, + "student_page": True, + "settings": settings, + "wp_imports": wp_assets, + } + + 
return templates.TemplateResponse("assignment/student/peer_async.html", context) # API Endpoints @@ -1044,39 +1190,556 @@ async def log_peer_rating( return JSONResponse(content={"message": retmess}) -@router.post("/api/get_async_explainer") +@router.post("/clear_pairs") +@instructor_role_required() +@with_course() +async def clear_pairs( + request: Request, + user=Depends(auth_manager), + course=None, +): + """ + Clear the partner assignments for the current course from Redis. + Called when the instructor switches to face-chat mode. + """ + import os + import redis as redis_lib + + r = redis_lib.from_url(os.environ.get("REDIS_URI", "redis://redis:6379/0")) + r.delete(f"partnerdb_{course.course_name}") + rslogger.info(f"Cleared pairs for course {course.course_name}") + return JSONResponse(content="success") + + +@router.get("/course_students") +@instructor_role_required() +@with_course() +async def get_course_students( + request: Request, + user=Depends(auth_manager), + course=None, +): + """ + Return a dict of {username: full_name} for all students in the course. + Used by peer.js to populate the group selection panel. + """ + students = await fetch_course_students(course.id) + result = {s.username: f"{s.first_name} {s.last_name}".strip() for s in students} + return JSONResponse(content=result) + + +@router.post("/send_lti_scores") +@instructor_role_required() @with_course() +async def send_lti_scores( + request: Request, + assignment_id: int = Body(..., embed=True), + user=Depends(auth_manager), + course=None, +): + """ + Send LTI scores for all students in the assignment. + Mirrors web2py peer.send_lti_scores. 
+ """ + from rsptx.lti1p3.core import attempt_lti1p3_score_updates + + rslogger.info(f"Sending LTI scores for assignment {assignment_id}") + try: + await attempt_lti1p3_score_updates(assignment_id, force=True) + except Exception as e: + rslogger.error(f"Error sending LTI scores: {e}") + return JSONResponse(status_code=500, content={"ok": False, "error": str(e)}) + return JSONResponse(content="success") + + +@router.post("/api/toggle_async") +@instructor_role_required() +@with_course() +async def toggle_async( + request: Request, + assignment_id: int = Body(..., embed=True), + user=Depends(auth_manager), + course=None, +): + """ + Toggle peer_async_visible on an assignment. Instructors use this to open/close + the after-class async interface for students. + """ + assignment = await fetch_one_assignment(assignment_id) + if not assignment: + return JSONResponse( + status_code=404, content={"ok": False, "error": "assignment not found"} + ) + if assignment.course != course.id: + return JSONResponse( + status_code=403, + content={"ok": False, "error": "assignment does not belong to your course"}, + ) + + assignment.peer_async_visible = not bool(assignment.peer_async_visible) + await update_assignment(assignment, pi_update=True) + + rslogger.info( + f"Toggled peer_async_visible for assignment {assignment_id} to {assignment.peer_async_visible}" + ) + return JSONResponse(content={"peer_async_visible": assignment.peer_async_visible}) + + +@router.post("/api/get_async_explainer") async def get_async_explainer( request: Request, div_id: str = Body(...), course: str = Body(...), user=Depends(auth_manager), - course_obj=None, ): - """ - Get a peer's explanation for async peer instruction mode. - Returns a recorded explanation and the peer's answer. - """ + + from sqlalchemy import select + rslogger.info(f"Getting async explainer for {div_id} in course {course}") try: - # TODO: Implement the logic to fetch a recorded peer explanation - # This should: - # 1. 
Query the database for peer messages from this question - # 2. Find a good explanation to show (not from this student) - # 3. Return the explanation and the peer's answer + async with async_session() as session: + msg_query = ( + select(Useinfo) + .where( + Useinfo.event.in_(["sendmessage", "reflection"]), + Useinfo.div_id == div_id, + Useinfo.course_id == course, + ) + .order_by(Useinfo.id) + ) + msg_result = await session.execute(msg_query) + messages = msg_result.scalars().all() + + all_msgs = [] + last_per_sid = {} + for row in messages: + if row.event == "reflection": + msg = row.act + else: + try: + msg = row.act.split(":", 2)[2] + except Exception: + msg = row.act + if last_per_sid.get(row.sid) != msg: + all_msgs.append((row.sid, msg)) + last_per_sid[row.sid] = msg + + parts = [] + for sid, msg in all_msgs: + parts.append(f"
  • {sid} said: {msg}
  • ") + + if not parts: + mess = "Sorry there are no explanations yet." + else: + mess = "" - # Placeholder response return JSONResponse( - content={ - "user": "peer_student", - "answer": "A", - "mess": "This is a placeholder explanation. The actual implementation will fetch recorded peer explanations from the database.", - "responses": {"peer1": "A", "peer2": "B"}, - } + content={"mess": mess, "user": "", "answer": "", "responses": {}} ) + except Exception as e: rslogger.error(f"Error getting async explainer: {e}") return JSONResponse( status_code=500, content={"detail": f"Failed to get explainer: {str(e)}"} ) + + +async def _get_mcq_context_async(div_id: str): + try: + q = await fetch_question(div_id) + if not q: + rslogger.error(f"_get_mcq_context_async: no question row for {div_id}") + return "", "", [] + + question = (q.question or "").strip() + code = (q.code or "").strip() if hasattr(q, "code") else "" + choices = [] + try: + if q.answers: + opts = json.loads(q.answers) + for i, opt in enumerate(opts): + choices.append(f"{chr(65+i)}. 
{opt.strip()}") + except Exception as e: + rslogger.warning(f"Could not parse choices for {div_id}: {e}") + return question, code, choices + except Exception: + rslogger.exception(f"_get_mcq_context_async failed for {div_id}") + return "", "", [] + + +async def _call_openai_async(messages: list, course_id: int) -> str: + import os + + import aiohttp + + token_row = await fetch_api_token(course_id, "openai") + if not token_row or not token_row.token: + raise Exception("missing api key") + + api_key = token_row.token + model = os.environ.get("PI_OPENAI_MODEL", "gpt-4o-mini").strip() or "gpt-4o-mini" + url = "https://api.openai.com/v1/chat/completions" + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + payload = { + "model": model, + "messages": messages, + "temperature": 0.4, + "max_tokens": 300, + } + async with aiohttp.ClientSession() as session: + async with session.post( + url, headers=headers, json=payload, timeout=aiohttp.ClientTimeout(total=30) + ) as resp: + resp.raise_for_status() + data = await resp.json() + rslogger.warning(f"PEER LLM CALL | provider=openai-course-token | model={model}") + return data["choices"][0]["message"]["content"].strip() + + +async def _generate_analogy_mapping_async( + question: str, + code: str, + choices: list, + theme_label: str, + selected: str = "", + reflection: str = "", + course_id: int = 0, +): + """ + Produces a structural mapping between the question's CS concept and the + student's chosen theme, plus the first message to send to the student. + Returns (mapping_string, first_message) or ("", "") on failure. 
+ """ + context_parts = [] + if question: + context_parts.append(f"Question: {question}") + if code: + context_parts.append(f"Code:\n{code}") + if choices: + context_parts.append("Choices:\n" + "\n".join(choices)) + if selected: + context_parts.append(f"Student's answer: {selected}") + if reflection: + context_parts.append(f"Student's explanation: {reflection}") + context = "\n\n".join(context_parts) + + prompt = ( + f"Read this CS question carefully:\n\n" + f"{context}\n\n" + f"The student chose '{theme_label}' as their analogy theme.\n\n" + f"Step 1: Identify the underlying CS concept this question is testing — one sentence, specific about what structurally happens (not just the topic name).\n\n" + f"Step 2: Break the question's structure down into its meaningful spatial or logical elements — the starting point, the required location to perform the action, the target item, and the action itself. Do NOT include the command syntax itself as an element. Do NOT evaluate the outcome or describe dependencies (do not write elements like 'the outcome depends on...' or 'the action requires...'). Focus only on what factually exists: where the student currently is, where the target item is, and what location is needed to perform the action. Describe elements factually and neutrally. The structure should describe what exists, not what the right reasoning is. Use bullet points, one element per line.\n\n" + f"Step 3: Find a real, concrete, familiar situation in '{theme_label}' that structurally mirrors the question. Map each element from step 2 to a specific, real, recognizable thing from that situation — not invented names or generic labels. CRITICAL: never use CS or file system terminology on the right side of the mapping — do not name a theme item after a folder name, variable name, or command (e.g. do not write 'project aisle' or 'backup section' — those are just CS names with a theme word appended). Instead use things that actually exist in '{theme_label}' (e.g. 
'dairy section', 'frozen foods aisle', 'checkout counter'). The theme items should feel like something a person familiar with '{theme_label}' would immediately picture with no knowledge of CS. CRITICAL: if the question is about being in the right location to perform an action, the target item DOES exist at the required location — the only issue is whether the student is in the right place to reach it. Make this clear in the mapping: the item is there, the student just needs to get there. Do not create any ambiguity about whether the item exists. The required location must be somewhere the person would normally go — do not map it to a staff-only or restricted area (e.g. do not use 'back stockroom' in a grocery store — customers do not go there; use a specific aisle or section instead). The scene must start in a natural, already-stable state — do not invent events to explain how things came to be. IMPORTANT: if the question involves navigating toward a root or parent (e.g. `..` in a file path), that means moving toward the outermost container — in a building this is the ground floor or lobby, NOT a higher floor. Deep nested = higher up, closer to root = lower/ground. Make sure the direction in your theme matches this intuition. CRITICAL: the mapped action must be a concrete, physical, presence-required activity — something that only makes sense when you are physically at the required location. Do NOT map 'edit/run/modify a file' to any kind of writing, editing, or rewriting action on a document, card, or paper — documents are portable and can be edited anywhere, which breaks the analogy. Instead map the CS action to the act of REACHING, GRABBING, OR USING a fixed item that lives at that location: reach for a jar on a shelf, use the blender on the counter, pick up a bag at a carousel, order at a specific counter. 
The mapped item should be something fixed at the required location that you can only interact with by being there.\n\n" + f"Step 4: Write the first message the LLM peer will send to the student. This message should:\n" + f"- Be in casual, lowercase, peer voice — like a student talking to another student\n" + f"- Use minimal commas\n" + f"- Introduce the scene to the student from scratch — they have never heard of this scenario. Do not say 'i'm picturing X' or reference the scenario as if they already know it. Start with 'imagine you're in...' or 'so picture this...' to actually place them in the situation\n" + f"- Set the scene in 1-2 sentences using theme vocabulary only — no file paths, no CS terms, no variable names\n" + f"- Name the specific locations from your mapping (e.g. 'the dairy section' not 'an aisle') so the conversation is grounded from the start\n" + f"- After placing them in the scene, ask a specific, concrete location question using the exact mapped action — e.g. 'can you board your flight from the lobby or do you need to get to gate 12 first?' not vague questions like 'can you use it from here' or 'where does the item end up'. 
The action in the question must be the specific mapped action, not a generic 'use it' or 'access it'.\n" + f"- Do NOT assume the student is wrong — the question works whether they are right or wrong\n" + f"- Never imply the student is wrong or ask a rhetorical question with an obvious answer\n" + f"- Never say 'our scenario' or announce it as an analogy\n" + f"- Do not narrate the full trace — set the scene then ask them to trace it\n\n" + f"Output ONLY in this exact format:\n" + f"CONCEPT: [one sentence]\n" + f"STRUCTURE:\n" + f"- [element 1]\n" + f"- [element 2]\n" + f"- ...\n" + f"MAPPING:\n" + f"- [element 1] -> [theme equivalent]\n" + f"- [element 2] -> [theme equivalent]\n" + f"- ...\n" + f"FIRST_MESSAGE: [the first message to send]\n" + ) + + try: + import os + import aiohttp + + token_row = await fetch_api_token(course_id, "openai") + if not token_row or not token_row.token: + return "", "" + api_key = token_row.token + model = ( + os.environ.get("PI_OPENAI_MODEL", "gpt-4o-mini").strip() or "gpt-4o-mini" + ) + url = "https://api.openai.com/v1/chat/completions" + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + payload = { + "model": model, + "messages": [{"role": "user", "content": prompt}], + "temperature": 0.5, + "max_tokens": 700, + } + async with aiohttp.ClientSession() as session: + async with session.post( + url, + headers=headers, + json=payload, + timeout=aiohttp.ClientTimeout(total=45), + ) as resp: + resp.raise_for_status() + result = await resp.json() + raw = result["choices"][0]["message"]["content"].strip() + rslogger.warning(f"ANALOGY MAPPING RAW RESPONSE:\n{raw}") + first_message = "" + mapping = raw + if "FIRST_MESSAGE:" in raw: + parts = raw.split("FIRST_MESSAGE:", 1) + mapping = parts[0].strip() + first_message = parts[1].strip() + rslogger.warning( + f"ANALOGY MAPPING parsed | mapping_len={len(mapping)} first_message_len={len(first_message)} first_message_preview={first_message[:120]!r}" + ) 
+ return mapping, first_message + except Exception: + rslogger.exception("Failed to generate analogy mapping") + return "", "" + + +@router.post("/api/get_async_llm_reflection") +async def get_async_llm_reflection( + request: Request, + user=Depends(auth_manager), +): + try: + data = await request.json() + except Exception: + return JSONResponse(content={"ok": False, "error": "invalid json"}) + + div_id = (data.get("div_id") or "").strip() + selected = (data.get("selected_answer") or "").strip() + messages = data.get("messages") + theme_id = (data.get("theme_id") or "").strip() + analogy_mapping = (data.get("analogy_mapping") or "").strip() + + if not div_id: + return JSONResponse(content={"ok": False, "error": "missing div_id"}) + + try: + user_msgs = [m for m in (messages or []) if m.get("role") == "user"] + for idx, m in enumerate(user_msgs): + content = (m.get("content") or "").strip() + if not content: + continue + if idx == 0: + await create_useinfo_entry( + UseinfoValidation( + course_id=user.course_name, + sid=user.username, + div_id=div_id, + event="reflection", + act=content, + timestamp=datetime.datetime.utcnow(), + ) + ) + else: + await create_useinfo_entry( + UseinfoValidation( + course_id=user.course_name, + sid=user.username, + div_id=div_id, + event="sendmessage", + act=f"to:llm:{content}", + timestamp=datetime.datetime.utcnow(), + ) + ) + except Exception: + rslogger.exception("Failed to log LLM user message") + + question, code, choices = await _get_mcq_context_async(div_id) + + reflection_text = (data.get("reflection") or "").strip() + if not reflection_text and messages: + first_user = next((m for m in messages if m.get("role") == "user"), None) + if first_user: + reflection_text = (first_user.get("content") or "").strip() + + generated_mapping = "" + generated_first_message = "" + if theme_id and not analogy_mapping: + theme_obj = THEME_BY_ID.get(theme_id) + if theme_obj: + try: + from rsptx.db.crud import fetch_course + + course_row = await 
fetch_course(user.course_name) + generated_mapping, generated_first_message = ( + await _generate_analogy_mapping_async( + question, + code, + choices, + theme_obj["label"], + selected=selected, + reflection=reflection_text, + course_id=course_row.id, + ) + ) + if generated_mapping: + analogy_mapping = generated_mapping + except Exception: + rslogger.exception("Failed to generate analogy mapping") + + analogy_preamble = "" + if theme_id: + theme = THEME_BY_ID.get(theme_id) + rslogger.warning( + f"PEER ANALOGY | theme_id={theme_id!r} theme={theme} has_mapping={bool(analogy_mapping)}" + ) + if theme: + theme_label = theme["label"] + if analogy_mapping: + analogy_preamble = ( + f"the student chose '{theme_label}' as their analogy theme. here is a structural mapping between the question and the theme:\n" + f"{analogy_mapping}\n" + f"\n" + f"use this mapping to frame your conversation naturally. the student has never seen this mapping — you are introducing this scenario to them for the first time.\n" + f"IMPORTANT: in conversation only ever use the RIGHT side of the mapping (the theme terms). never use the LEFT side (the CS/file system terms) when talking in the analogy — so if the mapping says 'project/ folder -> dairy section' you say 'dairy section' never 'project folder' or 'project aisle'. this means never say words like 'staging', 'commit', 'repository', 'directory', 'git', or any CS term while you are in the analogy — stay fully in theme vocabulary until the explicit bridge-back moment.\n" + f"in your first message: paint the scenario in natural language — describe the situation as if you are telling a story, not reading a list. do not recite the mapping labels (e.g. do not say 'the move action' or 'forest of wisdom' as if they are technical terms — say 'imagine you're walking through...' or 'so you're in the...'). place the student in the scene concretely, then connect it to what they said, then ask a question. 
do not say 'our scenario' — introduce it fresh.\n" + f"in follow-ups: keep using the theme vocabulary. if the student engages with it, build on it. when they seem to understand the structure through the theme, bridge back to the actual question.\n" + f"do not formally announce the analogy. do not say 'in the {theme_label} analogy' or 'using {theme_label} as a metaphor'. just talk in those terms naturally.\n" + f"if the student makes a claim or assumption, use the theme to have them test it — like 'hmm would that floor even exist in that building tho?'\n" + ) + else: + # Mapping generation failed — still enforce the correct theme + analogy_preamble = ( + f"the student chose '{theme_label}' as their analogy theme.\n" + f"you MUST use only '{theme_label}' vocabulary to frame this conversation — do not invent a different scenario or theme.\n" + f"introduce a concrete scene from '{theme_label}' that mirrors the question structure, then ask the student to trace through it.\n" + f"never use CS terms or file-system terminology while in the analogy — stay fully in '{theme_label}' language.\n\n" + ) + + base_rules = ( + "only speak in lower case.\n" + "you are a student talking to another student during peer instruction.\n" + "you are both looking at the same multiple choice question with code and answers.\n" + "you remember the question and choices.\n" + "most messages should be short (1 to 3 sentences often very short).\n" + "use casual informal language and common typos.\n" + "never use commas.\n" + "never use gendered language.\n" + "do not use new lines.\n" + "never use em dashes (—) or formal punctuation. 
write the way you actually talk.\n" + "do not sound like a teacher.\n" + "do not explain step by step.\n" + "never say something is right or wrong.\n" + "STRICT RULE: every message must begin with one of these allowed openers: a question word ('what', 'where', 'how', 'do', 'can', 'if', 'wait', 'hmm'), or the word 'so' followed immediately by a scenario observation (not 'so exactly' or 'so right'). never begin with 'yeah', 'right', 'exactly', 'correct', 'yes', 'yep', 'true', 'good', 'nice', 'great', 'perfect', 'cool', 'totally', 'sure', 'got it', or any variation. these imply the student is correct. check your first word before every message.\n" + "never confirm or explain the code outcome after the student says something — only ask them to keep tracing or elaborate.\n" + "if the student introduces a new assumption or claim, do not build on it as if it is correct — instead use the analogy to make them test that assumption themselves. for example if they claim 'it creates the directory' ask them through the analogy: 'does trying to walk to a floor that doesn't exist create it, or does something else happen?' never validate the assumption, never deny it — just ask them to examine it through the scenario.\n" + "if an analogy scenario was established, always bring follow-up questions back through that scenario — do not abandon it for direct code talk. never use CS terms (like 'staging', 'commit', 'directory', 'repository', 'file path') in a message that is otherwise in analogy vocabulary — stay fully in the theme language until you are explicitly bridging back.\n" + "each follow-up must move the conversation forward — do not ask the same question twice in different words. 
if the student answered your last question, accept that and go one level deeper or bridge back to the actual problem.\n" + "when referencing a structural detail of the scenario in a follow-up, briefly restate that detail in the same message — do not assume the student remembers the exact structure from the first message. for example: 'remember in that tree the grandparent has two children — user and shared. you are currently under user...' then ask your question.\n" + "never say things like 'let's focus on the code' or 'going back to the code' — always route through the scenario instead.\n" + "if the student themselves references the analogy, use that as an opening to deepen it — never redirect away from it.\n" + "once the student has traced through the analogy and seems to understand the structure, explicitly bridge back to the question — ask them to apply that same reasoning to the actual problem values or code.\n" + "do not let the analogy float indefinitely without connecting it back to the question. the goal is for the student to say 'oh so in the question that means...' — guide them there.\n" + "never use phrases like 'not quite' or 'not exactly' or 'almost' or 'close' or 'not yet' or any phrase that implies the student is incorrect.\n" + "never react to whether the student's answer is correct or incorrect — only ask them to explain their reasoning.\n" + "never use the analogy to imply the student's answer is wrong.\n" + "never end a message with a rhetorical question whose obvious answer signals the student is wrong — like 'does that room just appear?' or 'is there really a backup area there?' — these tell the student they are wrong.\n" + "instead: have the student trace through the scenario step by step. ask where they end up after each step, or ask them to walk you through what they think each part of the command does in the scenario. keep questions open — 'where does that put you?' 
not 'does that even exist?'\n" + "the student should discover whether their answer is right or wrong by tracing through the analogy themselves.\n" + "never connect the analogy conclusion back to a specific answer choice — do not say things like 'answer B says you need to be in X — does that match up with needing to be in the new server?' because this confirms the answer without saying it. if the student has traced through the analogy and reached a conclusion ask them what that tells them about the problem and let THEM connect it back to their choice — never make that connection for them.\n" + "do not pretend to have picked an answer yourself.\n" + "never mention a choice letter as the correct answer.\n" + "if the question includes code never clearly describe the final result or fully state what it prints.\n" + "if the question does not include code do not make up or reference code that is not there.\n" + "only refer to what is actually in the question.\n" + "be aware of common misconceptions but do not introduce them yourself.\n" + "if there is code refer to it loosely like 'that line' or 'the loop' or 'the print'.\n" + "often hedge with uncertainty.\n" + "ask the other student to explain why they picked their answer and how they reasoned through it.\n" + "ask follow up questions about their reasoning like 'what makes you think that' or 'how did you trace through it'.\n" + "do not push them toward a different answer or imply their answer is wrong.\n" + "never reveal or hint at which answer is correct or incorrect.\n" + "never say things like 'the feedback says' or 'according to the answer' or reference any grading or correctness information.\n" + "do not make up information that is not in the question.\n" + "if you are unsure about something say so honestly instead of guessing.\n" + "if the other student mentions the same answer more than once or sounds confident in their answer you must tell them to go ahead and vote again — this overrides everything else.\n" + 
"do not ask another question or continue the analogy after they have confirmed their answer — just tell them to vote.\n" + "do not continue reasoning after telling them to vote again.\n" + "focus on getting them to think through the problem not on changing their mind.\n\n" + ) + + context_suffix = "" + if question: + context_suffix += f"question:\n{question}\n\n" + if code: + context_suffix += f"code:\n{code}\n\n" + if choices: + context_suffix += "answer choices:\n" + "\n".join(choices) + "\n\n" + if selected: + context_suffix += f"the other student chose: {selected}\n\n" + + sys_content = analogy_preamble + base_rules + context_suffix + system_msg = {"role": "system", "content": sys_content} + + if not messages: + reflection = (data.get("reflection") or "").strip() + if not reflection: + return JSONResponse(content={"ok": False, "error": "missing reflection"}) + messages = [ + system_msg, + { + "role": "user", + "content": f"i chose answer {selected}. my explanation was:\n\n{reflection}", + }, + ] + else: + if not isinstance(messages, list): + return JSONResponse( + content={"ok": False, "error": "messages must be a list"} + ) + if len(messages) == 0 or messages[0].get("role") != "system": + messages = [system_msg] + messages + else: + messages[0] = system_msg + + try: + from rsptx.db.crud import fetch_course + + course = await fetch_course(user.course_name) + user_turn_count = sum(1 for m in messages if m.get("role") == "user") + if generated_first_message and user_turn_count <= 1: + reply = generated_first_message + else: + reply = await _call_openai_async(messages, course.id) + except Exception as e: + rslogger.exception("LLM reflection failed") + return JSONResponse(content={"ok": False, "error": str(e)}) + + try: + await create_useinfo_entry( + UseinfoValidation( + course_id=user.course_name, + sid=user.username, + div_id=div_id, + event="llm_peer_sendmessage", + act=f"to: student:{reply}", + timestamp=datetime.datetime.utcnow(), + ) + ) + except Exception: 
+ rslogger.exception("Failed to log LLM reply") + + if not reply: + return JSONResponse( + content={ + "ok": False, + "error": "llm returned empty reply (missing api key?)", + } + ) + + result = {"ok": True, "reply": reply} + if generated_mapping: + result["analogy_mapping"] = generated_mapping + return JSONResponse(content=result) diff --git a/bases/rsptx/interactives/runestone/common/project_template/_templates/plugin_layouts/sphinx_bootstrap/layout.html b/bases/rsptx/interactives/runestone/common/project_template/_templates/plugin_layouts/sphinx_bootstrap/layout.html index b66e58559..153b952e5 100644 --- a/bases/rsptx/interactives/runestone/common/project_template/_templates/plugin_layouts/sphinx_bootstrap/layout.html +++ b/bases/rsptx/interactives/runestone/common/project_template/_templates/plugin_layouts/sphinx_bootstrap/layout.html @@ -197,8 +197,8 @@
  • Course Home
  • Assignments
  • Practice
  • - -
  • Peer Instruction (Student)
  • + +
  • Peer Instruction (Student)
  • {% if minimal_outside_links != 'True' %}
  • Change Course
  • diff --git a/bases/rsptx/web2py_server/applications/runestone/static/css/peer.css b/bases/rsptx/web2py_server/applications/runestone/static/css/peer.css index e67543374..951242a6a 100644 --- a/bases/rsptx/web2py_server/applications/runestone/static/css/peer.css +++ b/bases/rsptx/web2py_server/applications/runestone/static/css/peer.css @@ -166,6 +166,25 @@ display: none; } +.oneq label.mchoice-option, +#pi-student-interface label.mchoice-option { + display: block !important; + margin-bottom: 4px; +} + +.oneq button[name="compare"], +#pi-student-interface button[name="compare"] { + display: none !important; +} + +.oneq button[aria-label="Close"], +.oneq .close, +.oneq .knowl-close, +#pi-student-interface button[aria-label="Close"], +#pi-student-interface .close { + display: none !important; +} + /* ------------------- */ /* Student's interface */ /* ------------------- */ diff --git a/bases/rsptx/web2py_server/applications/runestone/views/layout.html b/bases/rsptx/web2py_server/applications/runestone/views/layout.html index d1ae92b64..f0954a5fa 100644 --- a/bases/rsptx/web2py_server/applications/runestone/views/layout.html +++ b/bases/rsptx/web2py_server/applications/runestone/views/layout.html @@ -109,8 +109,8 @@
  • Assignments
  • Practice
  • - -
  • Peer Instruction (Student)
  • + +
  • Peer Instruction (Student)
  • Request Invoice
  • {{ if settings.academy_mode: }} diff --git a/components/rsptx/db/models.py b/components/rsptx/db/models.py index 42b366cf6..3d2d5e120 100644 --- a/components/rsptx/db/models.py +++ b/components/rsptx/db/models.py @@ -688,7 +688,7 @@ class AssignmentQuestion(Base, IdMixin): activities_required = Column( Integer ) # only reading assignments will have this populated - use_llm = Column(Web2PyBoolean, server_default="F") + async_mode = Column(String(20), server_default="standard") AssignmentQuestionValidator: TypeAlias = sqlalchemy_to_pydantic(AssignmentQuestion) # type: ignore diff --git a/components/rsptx/templates/_base.html b/components/rsptx/templates/_base.html index 8aa86d972..4aaded5f2 100644 --- a/components/rsptx/templates/_base.html +++ b/components/rsptx/templates/_base.html @@ -96,14 +96,14 @@ Course Home Assignments Practice - Peer Instruction (Student) + Peer Instruction (Student) Progress Page Change Course {% if is_instructor %} Instructor Dashboard - Peer Instruction (Instructor) + Peer Instruction (Instructor) Author Tools Editorial Page Request Invoice diff --git a/components/rsptx/templates/admin/instructor/menu.html b/components/rsptx/templates/admin/instructor/menu.html index 9c0a5a783..2cc2c7843 100644 --- a/components/rsptx/templates/admin/instructor/menu.html +++ b/components/rsptx/templates/admin/instructor/menu.html @@ -221,7 +221,7 @@

    Configure Practice

    Reset Student Exam

    Reset timed exams for individual students

    - +

    Peer Instruction Dashboard

    Manage peer assignment reviews

    diff --git a/components/rsptx/templates/assignment/instructor/peer_dashboard.html b/components/rsptx/templates/assignment/instructor/peer_dashboard.html index c8aa40dbb..f84de406c 100644 --- a/components/rsptx/templates/assignment/instructor/peer_dashboard.html +++ b/components/rsptx/templates/assignment/instructor/peer_dashboard.html @@ -47,9 +47,23 @@

    Question {{ current_qnum }} of {{ num_questions }}

    + {% if not is_last %} + {% else %} +
    + +
    + {% endif %} @@ -279,7 +293,41 @@

    Question {{ current_qnum }} of {{ num_questions }}

    var answerCount = 0; var done = {{ 'true' if is_last else 'false' }}; if (done) { - document.getElementById("nextq").disabled = true; + document.getElementById("nextq") && (document.getElementById("nextq").disabled = true); + } + + var asyncReleased = {{ 'true' if peer_async_visible else 'false' }}; + + function showAsyncConfirm() { + var area = document.getElementById("asyncBtnArea"); + var msg = asyncReleased + ? "Undo the after-class PI release?" + : "Release after-class PI questions to students?"; + area.innerHTML = ` + ${msg} + + + `; + } + + function cancelAsyncConfirm() { + var area = document.getElementById("asyncBtnArea"); + var label = asyncReleased ? "Undo After-Class Release" : "Release After-Class PI"; + var extraStyle = asyncReleased + ? 'style="background-color:#a3d4ec; border-color:#a3d4ec; color:#fff; margin-right:4px;"' + : 'style="margin-right:4px;"'; + area.innerHTML = ``; + } + + async function confirmToggleAsync() { + var resp = await fetch("/assignment/peer/api/toggle_async", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ assignment_id: {{ assignment_id }} }), + }); + var data = await resp.json(); + asyncReleased = data.peer_async_visible; + cancelAsyncConfirm(); } {% endblock %} diff --git a/components/rsptx/templates/assignment/instructor/peer_extra.html b/components/rsptx/templates/assignment/instructor/peer_extra.html new file mode 100644 index 000000000..62ae56186 --- /dev/null +++ b/components/rsptx/templates/assignment/instructor/peer_extra.html @@ -0,0 +1,39 @@ +{% extends "_base.html" %} + +{% block title %}Peer Instruction - Extra Info{% endblock %} + +{% block content %} +
    +

    Percent Correct for {{ current_question.name }}

    +

    %

    +
    +{% endblock %} + +{% block js %} + +{% endblock %} diff --git a/components/rsptx/templates/assignment/student/peer_async.html b/components/rsptx/templates/assignment/student/peer_async.html new file mode 100644 index 000000000..ba1b1a796 --- /dev/null +++ b/components/rsptx/templates/assignment/student/peer_async.html @@ -0,0 +1,1224 @@ +{% extends "_base.html" %} + +{% block title %}Peer Instruction (After Class){% endblock %} + +{% block css %} + + +{% endblock %} + +{% block navitems %} + +{% endblock %} + +{% block content %} +
    +{% include 'common/static_assets.html' %} +{% include 'common/ebook_config.html' %} + + + +
    +
    + + + +

    Peer Instruction Question (After Class)

    + +{% if not all_done %} +
    +   Step 1 of 3: Answer the question +
    +{% endif %} + +
    +

This page is meant for you to use after the in-class peer instruction is over. +

      +
    1. Answer the question as best you can.
    2. +
3. Then, in the space provided, write a justification for your answer.
    4. + {% if llm_enabled %} +
    5. Discuss the question with an LLM peer — explain your reasoning and respond to their questions.
    6. + {% else %} +
    7. Read the justification or discussion.
    8. + {% endif %} +
9. Answer the question again, even if you are not changing your answer from the first time.
    10. +
    +

    +
    + +
    + {% if not all_done %} +
    + {{ current_question.htmlsrc|safe }} +
    + {% else %} +
    +

    Congratulations, you have completed this assignment!

    +
    + {% endif %} + + {% if not all_done %} +
    + +
    + +
    + {% if llm_enabled %}Justify & discuss your answer{% else %}Justify your answer{% endif %} + Your pick:  +
    + +
    + + +
    + +
    +
    +
    Answer the question first
    +
    Submit your first vote to write your justification.
    +
    + +

    + Please explain why you chose . +

    + + +
    + + + +
    +
    + {% if llm_enabled %} + You are chatting with an LLM, not a real peer. + {% endif %} +
    + +
    + +
    + +
    + {% endif %} +
    + +{% if has_vote1 and has_reflection and llm_reply %} +
    +
    + An LLM peer said: +

    {{ llm_reply }}

    +
    +{% endif %} + +
    {# end max-width wrapper #} +{% endblock %} + +{% block js %} + + + + + + + + + + + +{% endblock %} diff --git a/components/rsptx/templates/footer.html b/components/rsptx/templates/footer.html index ce788138b..3c2a029ae 100644 --- a/components/rsptx/templates/footer.html +++ b/components/rsptx/templates/footer.html @@ -1,10 +1,15 @@ -