tfrere committed
Commit a61ba58 · 1 Parent(s): 2c65a29
Files changed (39)
  1. client/src/components/DebugConsole.jsx +54 -0
  2. client/src/components/Metric.jsx +28 -0
  3. client/src/components/TalkWithSarah.jsx +0 -279
  4. client/src/components/UniverseMetrics.jsx +54 -0
  5. client/src/components/UniverseView.jsx +44 -0
  6. client/src/hooks/useGameSession.js +2 -1
  7. client/src/main.jsx +2 -0
  8. client/src/pages/Debug.jsx +617 -0
  9. client/src/pages/Game.jsx +0 -19
  10. server/README.md +109 -0
  11. server/api/models.py +38 -43
  12. server/api/routes/chat.py +5 -12
  13. server/api/routes/universe.py +21 -9
  14. server/core/game_logic.py +0 -229
  15. server/core/game_state.py +76 -0
  16. server/core/generators/base_generator.py +18 -0
  17. server/core/generators/image_generator.py +0 -69
  18. server/core/generators/image_prompt_generator.py +198 -0
  19. server/core/generators/metadata_generator.py +56 -15
  20. server/core/generators/{text_generator.py → story_segment_generator.py} +88 -105
  21. server/core/generators/universe_generator.py +27 -10
  22. server/core/prompt_utils.py +8 -0
  23. server/core/prompts/cinematic.py +0 -161
  24. server/core/prompts/formatting_rules.py +6 -0
  25. server/core/prompts/hero.py +5 -0
  26. server/core/prompts/image_style.py +0 -21
  27. server/core/prompts/story_beats.py +24 -0
  28. server/core/prompts/system.py +0 -72
  29. server/core/prompts/text_prompts.py +0 -80
  30. server/core/session_manager.py +3 -2
  31. server/core/setup.py +21 -0
  32. server/core/state/game_state.py +0 -39
  33. server/core/story_generator.py +121 -0
  34. server/core/story_orchestrator.py +0 -111
  35. server/core/styles/universe_styles.json +48 -16
  36. server/scripts/test_game.py +49 -21
  37. server/server.py +2 -1
  38. server/services/mistral_client.py +34 -5
  39. yarn.lock +4 -0
client/src/components/DebugConsole.jsx ADDED
@@ -0,0 +1,54 @@
+ import React from "react";
+ import { Box, Paper } from "@mui/material";
+
+ // Component that renders JSON with syntax-highlighted, scrollable output
+ const JsonView = ({ data, maxHeight = "500px" }) => (
+   <Box
+     sx={{
+       backgroundColor: "grey.900",
+       p: 2,
+       borderRadius: 1,
+       position: "relative",
+     }}
+   >
+     <Box
+       sx={{
+         maxHeight,
+         overflow: "auto",
+         fontFamily: "monospace",
+         "&::-webkit-scrollbar": {
+           width: "8px",
+           height: "8px",
+         },
+         "&::-webkit-scrollbar-track": {
+           backgroundColor: "grey.800",
+           borderRadius: "4px",
+         },
+         "&::-webkit-scrollbar-thumb": {
+           backgroundColor: "grey.600",
+           borderRadius: "4px",
+           "&:hover": {
+             backgroundColor: "grey.500",
+           },
+         },
+         "& pre": {
+           margin: 0,
+           color: "success.light",
+           fontSize: "0.75rem",
+           whiteSpace: "pre-wrap",
+           wordBreak: "break-word",
+         },
+       }}
+     >
+       <pre>{JSON.stringify(data, null, 2)}</pre>
+     </Box>
+   </Box>
+ );
+
+ export const DebugConsole = ({ gameState, currentStory }) => {
+   return (
+     <Paper variant="outlined" sx={{ height: "calc(100vh - 100px)" }}>
+       <JsonView data={{ gameState, currentStory }} maxHeight="100%" />
+     </Paper>
+   );
+ };
client/src/components/Metric.jsx ADDED
@@ -0,0 +1,28 @@
+ import React from "react";
+ import { Box, Typography } from "@mui/material";
+
+ export const Metric = ({ icon, label, value, color = "default" }) => (
+   <Box
+     sx={{
+       display: "flex",
+       alignItems: "center",
+       gap: 1,
+       p: 0.75,
+       borderRadius: 1,
+       backgroundColor: "background.paper",
+       border: 1,
+       borderColor: "divider",
+       minWidth: 180,
+     }}
+   >
+     <Box sx={{ color: `${color}.main` }}>{icon}</Box>
+     <Box>
+       <Typography variant="caption" color="text.secondary">
+         {label}
+       </Typography>
+       <Typography variant="body2" fontWeight="medium" color="text.primary">
+         {value}
+       </Typography>
+     </Box>
+   </Box>
+ );
client/src/components/TalkWithSarah.jsx DELETED
@@ -1,279 +0,0 @@
1
- import { useState, useRef, useEffect } from "react";
2
- import {
3
- Box,
4
- IconButton,
5
- TextField,
6
- Dialog,
7
- DialogTitle,
8
- DialogContent,
9
- DialogActions,
10
- Button,
11
- Tooltip,
12
- } from "@mui/material";
13
- import FiberManualRecordIcon from "@mui/icons-material/FiberManualRecord";
14
- import CheckCircleIcon from "@mui/icons-material/CheckCircle";
15
- import CancelIcon from "@mui/icons-material/Cancel";
16
- import { useConversation } from "@11labs/react";
17
-
18
- const AGENT_ID = "2MF9st3s1mNFbX01Y106";
19
- const ELEVEN_LABS_KEY_STORAGE = "eleven_labs_api_key";
20
-
21
- export function TalkWithSarah({
22
- isNarratorSpeaking,
23
- stopNarration,
24
- playNarration,
25
- onDecisionMade,
26
- currentContext,
27
- }) {
28
- const [isRecording, setIsRecording] = useState(false);
29
- const [isConversationMode, setIsConversationMode] = useState(false);
30
- const [showApiKeyDialog, setShowApiKeyDialog] = useState(false);
31
- const [apiKey, setApiKey] = useState(() => {
32
- return localStorage.getItem(ELEVEN_LABS_KEY_STORAGE) || "";
33
- });
34
- const [isApiKeyValid, setIsApiKeyValid] = useState(false);
35
- const mediaRecorderRef = useRef(null);
36
- const audioChunksRef = useRef([]);
37
- const wsRef = useRef(null);
38
-
39
- const conversation = useConversation({
40
- agentId: AGENT_ID,
41
- headers: {
42
- "xi-api-key": apiKey,
43
- },
44
- onResponse: async (response) => {
45
- if (response.type === "audio") {
46
- try {
47
- const audioBlob = new Blob([response.audio], { type: "audio/mpeg" });
48
- const audioUrl = URL.createObjectURL(audioBlob);
49
- await playNarration(audioUrl);
50
- URL.revokeObjectURL(audioUrl);
51
- } catch (error) {
52
- console.error("Error playing ElevenLabs audio:", error);
53
- }
54
- }
55
- },
56
- clientTools: {
57
- make_decision: async ({ decision }) => {
58
- console.log("AI made decision:", decision);
59
- // Stop recording
60
- if (
61
- mediaRecorderRef.current &&
62
- mediaRecorderRef.current.state === "recording"
63
- ) {
64
- mediaRecorderRef.current.stop();
65
- }
66
- setIsConversationMode(false);
67
- await conversation?.endSession();
68
- setIsRecording(false);
69
- await onDecisionMade(parseInt(decision));
70
- },
71
- },
72
- });
73
-
74
- // Validate the API key
75
- const validateApiKey = async (key) => {
76
- try {
77
- const response = await fetch("https://api.elevenlabs.io/v1/user", {
78
- headers: {
79
- "xi-api-key": key,
80
- },
81
- });
82
- return response.ok;
83
- } catch (error) {
84
- return false;
85
- }
86
- };
87
-
88
- // Check the API key's validity whenever it changes
89
- useEffect(() => {
90
- const checkApiKey = async () => {
91
- if (apiKey) {
92
- const isValid = await validateApiKey(apiKey);
93
- setIsApiKeyValid(isValid);
94
- if (isValid) {
95
- localStorage.setItem(ELEVEN_LABS_KEY_STORAGE, apiKey);
96
- }
97
- } else {
98
- setIsApiKeyValid(false);
99
- }
100
- };
101
- checkApiKey();
102
- }, [apiKey]);
103
-
104
- // Save the API key to localStorage
105
- useEffect(() => {
106
- if (apiKey) {
107
- localStorage.setItem(ELEVEN_LABS_KEY_STORAGE, apiKey);
108
- }
109
- }, [apiKey]);
110
-
111
- const startRecording = async () => {
112
- if (!apiKey) {
113
- setShowApiKeyDialog(true);
114
- return;
115
- }
116
-
117
- try {
118
- setIsRecording(true);
119
- // Stop narration audio if it's playing
120
- if (isNarratorSpeaking) {
121
- stopNarration();
122
- }
123
-
124
- // Safely stop any conversation audio if playing
125
- if (conversation?.audioRef?.current) {
126
- conversation.audioRef.current.pause();
127
- conversation.audioRef.current.currentTime = 0;
128
- }
129
-
130
- if (!isConversationMode) {
131
- setIsConversationMode(true);
132
- try {
133
- if (!conversation) {
134
- throw new Error("Conversation not initialized");
135
- }
136
- await conversation.startSession({
137
- agentId: AGENT_ID,
138
- initialContext: currentContext,
139
- });
140
- console.log("ElevenLabs WebSocket connected");
141
- } catch (error) {
142
- console.error("Error starting conversation:", error);
143
- return;
144
- }
145
- }
146
-
147
- const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
148
- mediaRecorderRef.current = new MediaRecorder(stream);
149
- audioChunksRef.current = [];
150
-
151
- mediaRecorderRef.current.ondataavailable = (event) => {
152
- if (event.data.size > 0) {
153
- audioChunksRef.current.push(event.data);
154
- }
155
- };
156
-
157
- mediaRecorderRef.current.onstop = async () => {
158
- const audioBlob = new Blob(audioChunksRef.current, {
159
- type: "audio/wav",
160
- });
161
- audioChunksRef.current = [];
162
-
163
- const reader = new FileReader();
164
- reader.readAsDataURL(audioBlob);
165
-
166
- reader.onload = async () => {
167
- const base64Audio = reader.result.split(",")[1];
168
- if (isConversationMode) {
169
- try {
170
- // Send audio to ElevenLabs conversation
171
- await conversation.send({
172
- type: "audio",
173
- data: base64Audio,
174
- });
175
- } catch (error) {
176
- console.error("Error sending audio to ElevenLabs:", error);
177
- }
178
- }
179
- };
180
- };
181
-
182
- mediaRecorderRef.current.start();
183
- } catch (error) {
184
- console.error("Error starting recording:", error);
185
- }
186
- };
187
-
188
- const handleSaveApiKey = () => {
189
- setShowApiKeyDialog(false);
190
- if (apiKey) {
191
- startRecording();
192
- }
193
- };
194
-
195
- return (
196
- <Box sx={{ display: "flex", alignItems: "center", gap: 2 }}>
197
- <Box sx={{ position: "relative", display: "flex", alignItems: "center" }}>
198
- <TextField
199
- size="small"
200
- type="password"
201
- placeholder="Enter your ElevenLabs API key"
202
- value={apiKey}
203
- onChange={(e) => setApiKey(e.target.value)}
204
- sx={{
205
- width: "300px",
206
- "& .MuiOutlinedInput-root": {
207
- color: "white",
208
- "& fieldset": {
209
- borderColor: "rgba(255, 255, 255, 0.23)",
210
- },
211
- "&:hover fieldset": {
212
- borderColor: "white",
213
- },
214
- "&.Mui-focused fieldset": {
215
- borderColor: "white",
216
- },
217
- "& .MuiOutlinedInput-input": {
218
- paddingRight: apiKey ? "40px" : "14px", // Dynamic padding
219
- },
220
- },
221
- "& .MuiInputBase-input": {
222
- color: "white",
223
- "&::placeholder": {
224
- color: "rgba(255, 255, 255, 0.5)",
225
- opacity: 1,
226
- },
227
- },
228
- }}
229
- />
230
- {apiKey && (
231
- <Tooltip
232
- title={isApiKeyValid ? "API key is valid" : "Invalid API key"}
233
- >
234
- <Box
235
- sx={{
236
- position: "absolute",
237
- right: 10,
238
- pointerEvents: "none",
239
- display: "flex",
240
- alignItems: "center",
241
- backgroundColor: "rgba(0, 0, 0, 0.8)",
242
- borderRadius: "50%",
243
- padding: "2px",
244
- }}
245
- >
246
- {isApiKeyValid ? (
247
- <CheckCircleIcon sx={{ color: "#4caf50", fontSize: 20 }} />
248
- ) : (
249
- <CancelIcon sx={{ color: "#f44336", fontSize: 20 }} />
250
- )}
251
- </Box>
252
- </Tooltip>
253
- )}
254
- </Box>
255
- <IconButton
256
- onClick={startRecording}
257
- disabled={isRecording || !isApiKeyValid}
258
- sx={{
259
- color: "white",
260
- backgroundColor: isRecording ? "primary.main" : "transparent",
261
- "&:hover": {
262
- backgroundColor: isRecording
263
- ? "primary.dark"
264
- : "rgba(0, 0, 0, 0.7)",
265
- },
266
- px: 2,
267
- borderRadius: 2,
268
- border: "1px solid white",
269
- opacity: !isApiKeyValid ? 0.5 : 1,
270
- }}
271
- >
272
- <Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
273
- {isRecording ? <FiberManualRecordIcon sx={{ color: "red" }} /> : null}
274
- <span style={{ fontSize: "1rem" }}>Talk with Sarah</span>
275
- </Box>
276
- </IconButton>
277
- </Box>
278
- );
279
- }
client/src/components/UniverseMetrics.jsx ADDED
@@ -0,0 +1,54 @@
+ import React from "react";
+ import { Stack, Typography, Grid } from "@mui/material";
+ import {
+   Palette as PaletteIcon,
+   Category as CategoryIcon,
+   AccessTime as AccessTimeIcon,
+   AutoFixHigh as MacGuffinIcon,
+ } from "@mui/icons-material";
+ import { Metric } from "./Metric";
+
+ export const UniverseMetrics = ({
+   style,
+   genre,
+   epoch,
+   macguffin,
+   color = "primary",
+   showTitle = true,
+ }) => {
+   return (
+     <Stack spacing={1}>
+       {showTitle && (
+         <Typography variant="subtitle2" color={`${color}.main`}>
+           Universe
+         </Typography>
+       )}
+       <Stack direction="row" spacing={1} flexWrap="wrap" gap={1}>
+         <Metric
+           icon={<PaletteIcon fontSize="small" />}
+           label="Style"
+           value={style}
+           color={color}
+         />
+         <Metric
+           icon={<CategoryIcon fontSize="small" />}
+           label="Genre"
+           value={genre}
+           color={color}
+         />
+         <Metric
+           icon={<AccessTimeIcon fontSize="small" />}
+           label="Epoch"
+           value={epoch}
+           color={color}
+         />
+         <Metric
+           icon={<MacGuffinIcon fontSize="small" />}
+           label="MacGuffin"
+           value={macguffin}
+           color={color}
+         />
+       </Stack>
+     </Stack>
+   );
+ };
client/src/components/UniverseView.jsx ADDED
@@ -0,0 +1,44 @@
+ import React from "react";
+ import { Box, Paper, Typography, Stack } from "@mui/material";
+ import { UniverseMetrics } from "./UniverseMetrics";
+
+ export const UniverseView = ({ universe }) => {
+   return (
+     <Paper variant="outlined" sx={{ p: 2, height: "calc(100vh - 100px)" }}>
+       <Stack spacing={3}>
+         {/* Universe Info */}
+         <Box>
+           <Typography variant="h6" color="primary" gutterBottom>
+             Universe Information
+           </Typography>
+           <UniverseMetrics
+             style={universe?.style}
+             genre={universe?.genre}
+             epoch={universe?.epoch}
+             macguffin={universe?.macguffin}
+             showTitle={false}
+           />
+         </Box>
+
+         {/* Base Story */}
+         <Box>
+           <Typography variant="h6" color="primary" gutterBottom>
+             Base Story
+           </Typography>
+           <Paper
+             variant="outlined"
+             sx={{
+               p: 2,
+               backgroundColor: "background.default",
+               whiteSpace: "pre-wrap",
+             }}
+           >
+             <Typography variant="body2" color="text.secondary">
+               {universe?.base_story}
+             </Typography>
+           </Paper>
+         </Box>
+       </Stack>
+     </Paper>
+   );
+ };
client/src/hooks/useGameSession.js CHANGED
@@ -11,7 +11,7 @@ export const useGameSession = () => {
   const initializeGame = async () => {
     try {
       setIsLoading(true);
-      const { session_id, base_story, style, genre, epoch } =
+      const { session_id, base_story, style, genre, epoch, macguffin } =
         await universeApi.generate();
 
       setSessionId(session_id);
@@ -20,6 +20,7 @@
         style,
         genre,
         epoch,
+        macguffin,
       });
     } catch (err) {
       setError(err.message || "Failed to initialize game session");
client/src/main.jsx CHANGED
@@ -7,6 +7,7 @@ import { theme } from "./theme";
 import { Home } from "./pages/Home";
 import { Game } from "./pages/Game";
 import { Tutorial } from "./pages/Tutorial";
+import Debug from "./pages/Debug";
 import "./index.css";
 
 ReactDOM.createRoot(document.getElementById("root")).render(
@@ -17,6 +18,7 @@ ReactDOM.createRoot(document.getElementById("root")).render(
         <Route path="/" element={<Home />} />
         <Route path="/game" element={<Game />} />
         <Route path="/tutorial" element={<Tutorial />} />
+        <Route path="/debug" element={<Debug />} />
       </Routes>
     </BrowserRouter>
   </ThemeProvider>
client/src/pages/Debug.jsx ADDED
@@ -0,0 +1,617 @@
1
+ import React, { useState, useEffect } from "react";
2
+ import { useNavigate } from "react-router-dom";
3
+ import { storyApi } from "../utils/api";
4
+ import { useGameSession } from "../hooks/useGameSession";
5
+ import {
6
+ Box,
7
+ Paper,
8
+ Typography,
9
+ Accordion,
10
+ AccordionSummary,
11
+ AccordionDetails,
12
+ Chip,
13
+ Button,
14
+ CircularProgress,
15
+ Alert,
16
+ Divider,
17
+ Stack,
18
+ IconButton,
19
+ Tooltip,
20
+ Tab,
21
+ Tabs,
22
+ Grid,
23
+ } from "@mui/material";
24
+ import {
25
+ ExpandMore as ExpandMoreIcon,
26
+ Refresh as RefreshIcon,
27
+ BugReport as BugReportIcon,
28
+ Timer as TimerIcon,
29
+ LocationOn as LocationIcon,
30
+ Psychology as PsychologyIcon,
31
+ History as HistoryIcon,
32
+ Image as ImageIcon,
33
+ TextFields as TextFieldsIcon,
34
+ List as ListIcon,
35
+ Palette as PaletteIcon,
36
+ Category as CategoryIcon,
37
+ AccessTime as AccessTimeIcon,
38
+ ArrowForward as ArrowForwardIcon,
39
+ } from "@mui/icons-material";
40
+ import { DebugConsole } from "../components/DebugConsole";
41
+ import { Metric } from "../components/Metric";
42
+ import { UniverseView } from "../components/UniverseView";
43
+ import { UniverseMetrics } from "../components/UniverseMetrics";
44
+
45
+ const Debug = () => {
46
+ const navigate = useNavigate();
47
+ const [gameState, setGameState] = useState(null);
48
+ const [currentStory, setCurrentStory] = useState(null);
49
+ const [error, setError] = useState(null);
50
+ const [currentTab, setCurrentTab] = useState(0);
51
+ const [expandedPanel, setExpandedPanel] = useState("current");
52
+ const [isLoading, setIsLoading] = useState(false);
53
+ const historyContainerRef = React.useRef(null);
54
+
55
+ const {
56
+ sessionId,
57
+ universe,
58
+ isLoading: isSessionLoading,
59
+ error: sessionError,
60
+ } = useGameSession();
61
+
62
+ const handleTabChange = (event, newValue) => {
63
+ setCurrentTab(newValue);
64
+ };
65
+
66
+ const handlePanelChange = (panel) => (event, isExpanded) => {
67
+ setExpandedPanel(isExpanded ? panel : false);
68
+ };
69
+
70
+ // Initialize game
71
+ const initializeGame = async () => {
72
+ try {
73
+ setIsLoading(true);
74
+ const response = await storyApi.start(sessionId);
75
+
76
+ // Build the initial history entry
77
+ const initialHistoryEntry = {
78
+ segment: response.story_text,
79
+ player_choice: null,
80
+ time: response.time,
81
+ location: response.location,
82
+ };
83
+
84
+ setGameState({
85
+ universe_style: universe?.style,
86
+ universe_genre: universe?.genre,
87
+ universe_epoch: universe?.epoch,
88
+ universe_macguffin: universe?.macguffin,
89
+ story_beat: 0,
90
+ story_history: [initialHistoryEntry],
91
+ });
92
+ setCurrentStory(response);
93
+ } catch (err) {
94
+ setError(err.message);
95
+ } finally {
96
+ setIsLoading(false);
97
+ }
98
+ };
99
+
100
+ // Make a choice
101
+ const makeChoice = async (choiceIndex) => {
102
+ try {
103
+ setIsLoading(true);
104
+ const response = await storyApi.makeChoice(choiceIndex + 1, sessionId);
105
+ setCurrentStory(response);
106
+
107
+ // Build the history entry in the same format as the server
108
+ const historyEntry = {
109
+ segment: response.story_text,
110
+ player_choice: currentStory.choices[choiceIndex].text,
111
+ time: response.time,
112
+ location: response.location,
113
+ };
114
+
115
+ setGameState((prev) => ({
116
+ ...prev,
117
+ story_history: [...(prev.story_history || []), historyEntry],
118
+ story_beat: (prev.story_beat || 0) + 1,
119
+ universe_macguffin: prev.universe_macguffin,
120
+ }));
121
+ } catch (err) {
122
+ setError(err.message);
123
+ } finally {
124
+ setIsLoading(false);
125
+ }
126
+ };
127
+
128
+ useEffect(() => {
129
+ if (sessionId && !isSessionLoading && !gameState) {
130
+ initializeGame();
131
+ }
132
+ }, [sessionId, isSessionLoading, gameState]);
133
+
134
+ // Effect that auto-scrolls the history container
135
+ useEffect(() => {
136
+ if (historyContainerRef.current && gameState?.story_history?.length > 0) {
137
+ historyContainerRef.current.scrollTop =
138
+ historyContainerRef.current.scrollHeight;
139
+ }
140
+ }, [gameState?.story_history]);
141
+
142
+ if (error || sessionError) {
143
+ return (
144
+ <Box p={3}>
145
+ <Alert
146
+ severity="error"
147
+ action={
148
+ <Button
149
+ color="inherit"
150
+ size="small"
151
+ onClick={() => window.location.reload()}
152
+ >
153
+ Restart
154
+ </Button>
155
+ }
156
+ >
157
+ {error || sessionError}
158
+ </Alert>
159
+ </Box>
160
+ );
161
+ }
162
+
163
+ if (isSessionLoading || !gameState) {
164
+ return (
165
+ <Box
166
+ display="flex"
167
+ alignItems="center"
168
+ justifyContent="center"
169
+ minHeight="100vh"
170
+ >
171
+ <CircularProgress />
172
+ </Box>
173
+ );
174
+ }
175
+
176
+ return (
177
+ <Box
178
+ sx={{
179
+ height: "100vh",
180
+ display: "flex",
181
+ flexDirection: "column",
182
+ overflow: "hidden",
183
+ backgroundColor: "background.default",
184
+ }}
185
+ >
186
+ {/* Header - more compact */}
187
+ <Box
188
+ sx={{
189
+ p: 1.5,
190
+ display: "flex",
191
+ alignItems: "center",
192
+ gap: 2,
193
+ borderBottom: 1,
194
+ borderColor: "divider",
195
+ backgroundColor: "background.paper",
196
+ }}
197
+ >
198
+ <BugReportIcon color="primary" />
199
+ <Typography variant="h6" component="h1">
200
+ Debug Mode
201
+ </Typography>
202
+ <Tooltip title="Restart">
203
+ <IconButton onClick={() => window.location.reload()} size="small">
204
+ <RefreshIcon />
205
+ </IconButton>
206
+ </Tooltip>
207
+ <Tabs value={currentTab} onChange={handleTabChange} sx={{ ml: "auto" }}>
208
+ <Tab icon={<PsychologyIcon />} label="Current State" />
209
+ <Tab icon={<PaletteIcon />} label="Universe" />
210
+ <Tab icon={<BugReportIcon />} label="Debug" />
211
+ </Tabs>
212
+ </Box>
213
+
214
+ {/* Content - scrollable */}
215
+ <Box sx={{ flexGrow: 1, overflow: "auto", p: 2 }}>
216
+ {/* Current State Tab */}
217
+ {currentTab === 0 && currentStory && (
218
+ <Stack spacing={2}>
219
+ {/* Universe Info & Game State */}
220
+ <Paper
221
+ variant="outlined"
222
+ sx={{
223
+ p: 2,
224
+ backgroundColor: "background.paper",
225
+ }}
226
+ >
227
+ <Grid container spacing={2}>
228
+ {/* Universe Info */}
229
+ <Grid item xs={12} md={6}>
230
+ <UniverseMetrics
231
+ style={gameState.universe_style}
232
+ genre={gameState.universe_genre}
233
+ epoch={gameState.universe_epoch}
234
+ macguffin={gameState.universe_macguffin}
235
+ />
236
+ </Grid>
237
+
238
+ {/* Game State */}
239
+ <Grid item xs={12} md={6}>
240
+ <Stack spacing={1}>
241
+ <Typography variant="subtitle2" color="secondary.main">
242
+ Game State
243
+ </Typography>
244
+ <Box
245
+ sx={{
246
+ p: 1.5,
247
+ borderRadius: 1,
248
+ backgroundColor: currentStory.is_victory
249
+ ? "success.dark"
250
+ : currentStory.is_death
251
+ ? "error.dark"
252
+ : "background.paper",
253
+ border: 1,
254
+ borderColor: "divider",
255
+ }}
256
+ >
257
+ <Stack spacing={2}>
258
+ <Stack
259
+ direction="row"
260
+ spacing={1}
261
+ flexWrap="wrap"
262
+ gap={1}
263
+ >
264
+ <Metric
265
+ icon={<TimerIcon fontSize="small" />}
266
+ label="Time"
267
+ value={currentStory.time}
268
+ color="secondary"
269
+ />
270
+ <Metric
271
+ icon={<LocationIcon fontSize="small" />}
272
+ label="Location"
273
+ value={currentStory.location}
274
+ color="secondary"
275
+ />
276
+ <Metric
277
+ icon={<PsychologyIcon fontSize="small" />}
278
+ label="Story Beat"
279
+ value={gameState.story_beat}
280
+ color="secondary"
281
+ />
282
+ </Stack>
283
+ <Stack spacing={1}>
284
+ <Box
285
+ sx={{
286
+ display: "flex",
287
+ alignItems: "center",
288
+ gap: 1,
289
+ }}
290
+ >
291
+ <Box
292
+ sx={{
293
+ width: 8,
294
+ height: 8,
295
+ borderRadius: "50%",
296
+ backgroundColor: "primary.main",
297
+ animation: "pulse 1.5s infinite",
298
+ "@keyframes pulse": {
299
+ "0%": {
300
+ transform: "scale(.95)",
301
+ boxShadow:
302
+ "0 0 0 0 rgba(144, 202, 249, 0.7)",
303
+ },
304
+ "70%": {
305
+ transform: "scale(1)",
306
+ boxShadow:
307
+ "0 0 0 6px rgba(144, 202, 249, 0)",
308
+ },
309
+ "100%": {
310
+ transform: "scale(.95)",
311
+ boxShadow: "0 0 0 0 rgba(144, 202, 249, 0)",
312
+ },
313
+ },
314
+ }}
315
+ />
316
+ <Typography
317
+ variant="subtitle2"
318
+ sx={{ color: "primary.main" }}
319
+ >
320
+ Story in Progress
321
+ </Typography>
322
+ </Box>
323
+ <Stack direction="row" spacing={1}>
324
+ <Box
325
+ sx={{
326
+ display: "flex",
327
+ alignItems: "center",
328
+ gap: 1,
329
+ p: 0.5,
330
+ borderRadius: 1,
331
+ backgroundColor: currentStory.is_death
332
+ ? "error.dark"
333
+ : "background.paper",
334
+ border: 1,
335
+ borderColor: "divider",
336
+ minWidth: 100,
337
+ }}
338
+ >
339
+ <Typography
340
+ variant="caption"
341
+ sx={{
342
+ color: currentStory.is_death
343
+ ? "white"
344
+ : "text.secondary",
345
+ }}
346
+ >
347
+ Death: {currentStory.is_death ? "Yes" : "No"}
348
+ </Typography>
349
+ </Box>
350
+ <Box
351
+ sx={{
352
+ display: "flex",
353
+ alignItems: "center",
354
+ gap: 1,
355
+ p: 0.5,
356
+ borderRadius: 1,
357
+ backgroundColor: currentStory.is_victory
358
+ ? "success.dark"
359
+ : "background.paper",
360
+ border: 1,
361
+ borderColor: "divider",
362
+ minWidth: 100,
363
+ }}
364
+ >
365
+ <Typography
366
+ variant="caption"
367
+ sx={{
368
+ color: currentStory.is_victory
369
+ ? "white"
370
+ : "text.secondary",
371
+ }}
372
+ >
373
+ Victory:{" "}
374
+ {currentStory.is_victory ? "Yes" : "No"}
375
+ </Typography>
376
+ </Box>
377
+ </Stack>
378
+ </Stack>
379
+ </Stack>
380
+ </Box>
381
+ </Stack>
382
+ </Grid>
383
+ </Grid>
384
+ </Paper>
385
+
386
+ {/* Story and Choices Row */}
387
+ <Grid container spacing={2}>
388
+ {/* Story Content */}
389
+ <Grid item xs={12} md={8}>
390
+ <Paper variant="outlined">
391
+ <Box sx={{ p: 1.5, borderBottom: 1, borderColor: "divider" }}>
392
+ <Typography variant="subtitle2">Story</Typography>
393
+ </Box>
394
+ <Box sx={{ p: 1.5, backgroundColor: "background.default" }}>
395
+ <Typography variant="body2">
396
+ {currentStory.story_text}
397
+ </Typography>
398
+ </Box>
399
+ </Paper>
400
+ </Grid>
401
+
402
+ {/* Interactive Choices */}
403
+ <Grid item xs={12} md={4}>
404
+ <Paper variant="outlined">
405
+ <Box sx={{ p: 1.5, borderBottom: 1, borderColor: "divider" }}>
406
+ <Typography variant="subtitle2">
407
+ Available Choices
408
+ </Typography>
409
+ </Box>
410
+ <Stack spacing={1} sx={{ p: 1.5 }}>
411
+ {currentStory.choices && currentStory.choices.length > 0 ? (
412
+ currentStory.choices.map((choice, idx) => (
413
+ <Button
414
+ key={idx}
415
+ variant="contained"
416
+ color="primary"
417
+ onClick={() => makeChoice(idx)}
418
+ disabled={isLoading}
419
+ sx={{ mt: 1 }}
420
+ endIcon={
421
+ isLoading ? (
422
+ <CircularProgress size={16} />
423
+ ) : (
424
+ <ArrowForwardIcon />
425
+ )
426
+ }
427
+ >
428
+ {choice.text}
429
+ </Button>
430
+ ))
431
+ ) : (
432
+ <Typography variant="body2" color="text.secondary">
433
+ No choices available
434
+ </Typography>
435
+ )}
436
+ </Stack>
437
+ </Paper>
438
+ </Grid>
439
+ </Grid>
440
+
441
+ {/* Story History */}
442
+ <Paper variant="outlined">
443
+ <Box
444
+ sx={{
445
+ p: 1.5,
446
+ borderBottom: 1,
447
+ borderColor: "divider",
448
+ display: "flex",
449
+ alignItems: "center",
450
+ gap: 1,
451
+ }}
452
+ >
453
+ <HistoryIcon fontSize="small" color="action" />
454
+ <Typography variant="subtitle2">Story History</Typography>
455
+ </Box>
456
+ <Box
457
+ ref={historyContainerRef}
458
+ sx={{
459
+ maxHeight: "300px",
460
+ overflow: "auto",
461
+ scrollBehavior: "smooth",
462
+ }}
463
+ >
464
+ {gameState.story_history.length > 0 ? (
465
+ gameState.story_history.map((entry, idx) => (
466
+ <Box
467
+ key={idx}
468
+ sx={{
469
+ p: 1.5,
470
+ borderBottom: 1,
471
+ borderColor: "divider",
472
+ "&:last-child": {
473
+ borderBottom: 0,
474
+ },
475
+ backgroundColor:
476
+ idx === gameState.story_history.length - 1
477
+ ? "action.hover"
478
+ : "inherit",
479
+ }}
480
+ >
481
+ <Stack spacing={1}>
482
+ {/* Story Text */}
483
+ <Box
484
+ sx={{
485
+ backgroundColor: "background.paper",
486
+ p: 1,
487
+ borderRadius: 1,
488
+ border: 1,
489
+ borderColor: "divider",
490
+ }}
491
+ >
492
+ <Typography variant="body2" color="text.primary">
493
+ {entry.segment}
494
+ </Typography>
495
+ </Box>
496
+
497
+ {/* Player Choice */}
498
+ {entry.player_choice && (
499
+ <Box
500
+ sx={{
501
+ display: "flex",
502
+ alignItems: "center",
503
+ gap: 0.5,
504
+ backgroundColor: "action.selected",
505
+ p: 1,
506
+ borderRadius: 1,
507
+ ml: 2,
508
+ }}
509
+ >
510
+ <ArrowForwardIcon
511
+ fontSize="small"
512
+ sx={{ color: "primary.main" }}
513
+ />
514
+ <Typography
515
+ variant="caption"
516
+ sx={{
517
+ color: "text.primary",
518
+ fontWeight: "medium",
519
+ }}
520
+ >
521
+ {entry.player_choice}
522
+ </Typography>
523
+ </Box>
524
+ )}
525
+
526
+ {/* Metadata */}
527
+ <Stack
528
+ direction="row"
529
+ spacing={2}
530
+ sx={{
531
+ color: "text.secondary",
532
+ mt: 0.5,
533
+ "& > span": {
534
+ display: "flex",
535
+ alignItems: "center",
536
+ gap: 0.5,
537
+ fontSize: "0.75rem",
538
+ },
539
+ }}
540
+ >
541
+ <span>
542
+ <TimerIcon fontSize="inherit" />
543
+ {entry.time}
544
+ </span>
545
+ <span>
546
+ <LocationIcon fontSize="inherit" />
547
+ {entry.location}
548
+ </span>
549
+ </Stack>
550
+ </Stack>
551
+ </Box>
552
+ ))
553
+ ) : (
554
+ <Box sx={{ p: 2, textAlign: "center" }}>
555
+ <Typography variant="body2" color="text.secondary">
556
+ No history available
557
+ </Typography>
558
+ </Box>
559
+ )}
560
+ </Box>
561
+ </Paper>
562
+
563
+ {/* Image Prompts */}
564
+ <Paper variant="outlined">
565
+ <Box sx={{ p: 1.5, borderBottom: 1, borderColor: "divider" }}>
566
+ <Typography variant="subtitle2">Image Prompts</Typography>
567
+ </Box>
568
+ <Stack spacing={1} sx={{ p: 1.5 }}>
569
+ {currentStory.image_prompts.map((prompt, idx) => (
570
+ <Box
571
+ key={idx}
572
+ sx={{
573
+ p: 1,
574
+ backgroundColor: "background.default",
575
+ borderRadius: 1,
576
+ border: 1,
577
+ borderColor: "divider",
578
+ }}
579
+ >
580
+ <Typography
581
+ variant="caption"
582
+ sx={{
583
+ fontFamily: "monospace",
584
+ color: "text.secondary",
585
+ display: "flex",
586
+ gap: 1,
587
+ }}
588
+ >
589
+ <Typography
590
+ component="span"
591
+ variant="caption"
592
+ color="primary.main"
593
+ >
594
+ {idx + 1}.
595
+ </Typography>
596
+ {prompt}
597
+ </Typography>
598
+ </Box>
599
+ ))}
600
+ </Stack>
601
+ </Paper>
602
+ </Stack>
603
+ )}
604
+
605
+ {/* Universe Tab */}
606
+ {currentTab === 1 && <UniverseView universe={universe} />}
607
+
608
+ {/* Debug Tab */}
609
+ {currentTab === 2 && (
610
+ <DebugConsole gameState={gameState} currentStory={currentStory} />
611
+ )}
612
+ </Box>
613
+ </Box>
614
+ );
615
+ };
616
+
617
+ export default Debug;
client/src/pages/Game.jsx CHANGED
@@ -26,8 +26,6 @@ import CreateIcon from "@mui/icons-material/Create";
26
  import { getNextLayoutType, LAYOUTS } from "../layouts/config";
27
  import { LoadingScreen } from "../components/LoadingScreen";
28
 
29
- import { TalkWithSarah } from "../components/TalkWithSarah";
30
-
31
  // Constants
32
  const SOUND_ENABLED_KEY = "sound_enabled";
33
 
@@ -432,23 +430,6 @@ export function Game() {
432
  onScreenshot={handleCaptureStory}
433
  />
434
 
435
- {storySegments.length > 0 && currentChoices.length > 0 && (
436
- <TalkWithSarah
437
- isNarratorSpeaking={isNarratorSpeaking}
438
- stopNarration={stopNarration}
439
- playNarration={playNarration}
440
- onDecisionMade={handleChoice}
441
- currentContext={`Sarah this is the situation you're in : ${
442
- storySegments[storySegments.length - 1].text
443
- }. Those are your possible decisions : \n ${currentChoices
444
- .map(
445
- (choice, index) =>
446
- `decision ${index + 1} : ${choice.text}`
447
- )
448
- .join("\n ")}.`}
449
- />
450
- )}
451
-
452
  {showChoices && (
453
  <StoryChoices
454
  choices={currentChoices}
 
26
  import { getNextLayoutType, LAYOUTS } from "../layouts/config";
27
  import { LoadingScreen } from "../components/LoadingScreen";
28
 
 
 
29
  // Constants
30
  const SOUND_ENABLED_KEY = "sound_enabled";
31
 
 
430
  onScreenshot={handleCaptureStory}
431
  />
432
 
433
  {showChoices && (
434
  <StoryChoices
435
  choices={currentChoices}
server/README.md ADDED
@@ -0,0 +1,109 @@
+ # Don't Look Up - Server
+
+ Backend for "Don't Look Up", an AI-generated post-apocalyptic narrative game.
+
+ ## 🛠️ Installation
+
+ 1. Make sure you have Python 3.10+ and Poetry installed
+ 2. Clone the repository
+ 3. Install the dependencies:
+
+ ```bash
+ cd server
+ poetry install
+ ```
+
+ 4. Create a `.env` file at the root of the `server` folder containing:
+
+ ```env
+ MISTRAL_API_KEY=your_mistral_api_key
+ ```
+
+ ## 🚀 Starting the server
+
+ ```bash
+ poetry run dev
+ ```
+
+ The server starts on `http://localhost:8000`
+
+ ## 🎮 Game tests
+
+ The project includes a test script that lets you play the game in console mode and test story generation.
+
+ ### Launch modes
+
+ 1. Interactive mode (default):
+
+ ```bash
+ poetry run test-game
+ ```
+
+ 2. Automatic mode (for testing):
+
+ ```bash
+ poetry run test-game --auto
+ ```
+
+ 3. Automatic mode with a custom number of turns:
+
+ ```bash
+ poetry run test-game --auto --max-turns 20
+ ```
+
+ 4. Automatic mode with the full context displayed:
+
+ ```bash
+ poetry run test-game --auto --show-context
+ ```
+
+ ### Exit codes
+
+ In automatic mode, the script returns:
+
+ - Code 0: victory
+ - Code 1: defeat, error, or timeout (more than 15 turns by default)
+
+ ### Example use in a script
+
+ ```bash
+ # Run 5 automatic tests in a row
+ for i in {1..5}; do
+   echo "Test run $i"
+   poetry run test-game --auto || echo "Test $i failed"
+ done
+ ```
+
+ ## 📚 Project structure
+
+ ```
+ server/
+ ├── api/       # FastAPI routes and models
+ ├── core/      # Business logic and generators
+ │   ├── generators/  # Generators (story, universe, etc.)
+ │   └── prompts/     # Prompt templates for the AI
+ ├── scripts/   # Utility scripts
+ └── services/  # External services (Mistral, etc.)
+ ```
+
+ ## 🔄 Generation workflow
+
+ 1. Universe generation (`UniverseGenerator`)
+
+    - Graphic style
+    - Genre
+    - Epoch
+    - MacGuffin
+    - Base story
+
+ 2. Story segment generation (`StoryGenerator`)
+
+    - Narrative text
+    - Choices
+    - Image prompts
+    - Metadata (time, location)
+
+ 3. Game state management (`GameState`)
+    - Story progression
+    - Choice history
+    - World state
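To make the three-step workflow above concrete, here is a minimal, illustrative sketch of how the pieces could fit together at the call level. It is not code from this commit; the method names (`generate`, `generate_story_segment`) are taken from the routes and the removed `game_logic.py` elsewhere in this diff, and the `add_to_history` signature is assumed to mirror the old one.

```python
# Hypothetical wiring of the generation workflow described above.
# Names and signatures are assumptions based on the files in this diff,
# not the actual server code.
async def play_one_turn(universe_generator, story_generator, game_state, session_id):
    # 1. Universe generation: style, genre, epoch, MacGuffin, base story
    universe = await universe_generator.generate()

    # 2. Story segment generation: narrative text, choices, image prompts, metadata
    segment = await story_generator.generate_story_segment(
        session_id=session_id,
        game_state=game_state,
        previous_choice="start",
    )

    # 3. Game state management: record the segment and advance the story beat
    game_state.add_to_history(
        segment.story_text,
        "start",
        segment.image_prompts,
        segment.time,
        segment.location,
    )
    game_state.story_beat += 1
    return universe, segment
```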
server/api/models.py CHANGED
@@ -6,15 +6,15 @@ class Choice(BaseModel):
6
  id: int
7
  text: str = Field(description="The text of the choice.")
8
 
9
- class StorySegmentBase(BaseModel):
10
- """Base model for story segments with common validation logic"""
11
- story_text: str = Field(description="The story text. No more than 15 words THIS IS MANDATORY. Never mention story beat directly. ")
12
- is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
13
- is_death: bool = Field(description="Whether this segment ends in Sarah's death", default=False)
14
 
15
- # Existing response models for story generation steps - preserved for API compatibility
16
- class StoryTextResponse(StorySegmentBase):
17
- pass
 
 
 
18
 
19
  class StoryPromptsResponse(BaseModel):
20
  image_prompts: List[str] = Field(
@@ -24,45 +24,16 @@ class StoryPromptsResponse(BaseModel):
24
  )
25
 
26
  class StoryMetadataResponse(BaseModel):
27
- is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
28
- is_death: bool = Field(description="Whether this segment ends in Sarah's death", default=False)
29
- choices: List[str] = Field(description="Either empty list for victory/death, or exactly two choices for normal progression")
30
  time: str = Field(description="Current in-game time in 24h format (HH:MM). Time passes realistically based on actions.")
31
  location: str = Field(description="Current location.")
 
 
32
 
33
  @validator('choices')
34
- def validate_choices(cls, v, values):
35
- is_ending = values.get('is_victory', False) or values.get('is_death', False)
36
- if is_ending:
37
- if len(v) != 0:
38
- raise ValueError('For victory/death, choices must be empty')
39
- else:
40
- if len(v) != 2:
41
- raise ValueError('For normal progression, must have exactly 2 choices')
42
- return v
43
-
44
- # Complete story response combining all parts - preserved for API compatibility
45
- class StoryResponse(StorySegmentBase):
46
- choices: List[Choice]
47
- raw_choices: List[str] = Field(description="Raw choice texts from LLM before conversion to Choice objects")
48
- time: str = Field(description="Current in-game time in 24h format (HH:MM). Time passes realistically based on actions.")
49
- location: str = Field(description="Current location.")
50
- is_first_step: bool = Field(description="Whether this is the first step of the story", default=False)
51
- image_prompts: List[str] = Field(
52
- description="List of comic panel descriptions that illustrate the key moments of the scene. Use the word 'Sarah' only when referring to her.",
53
- min_items=GameConfig.MIN_PANELS,
54
- max_items=GameConfig.MAX_PANELS
55
- )
56
-
57
- @validator('choices')
58
- def validate_choices(cls, v, values):
59
- is_ending = values.get('is_victory', False) or values.get('is_death', False)
60
- if is_ending:
61
- if len(v) != 0:
62
- raise ValueError('For victory/death, choices must be empty')
63
- else:
64
- if len(v) != 2:
65
- raise ValueError('For normal progression, must have exactly 2 choices')
66
  return v
67
 
68
  # Keep existing models unchanged for compatibility
@@ -86,3 +57,27 @@ class UniverseResponse(BaseModel):
86
  genre: str
87
  epoch: str
88
  base_story: str = Field(description="The generated story for this universe")
 
6
  id: int
7
  text: str = Field(description="The text of the choice.")
8
 
9
+ class StorySegmentResponse(BaseModel):
10
+ story_text: str = Field(description="The story text. No more than 30 words.")
 
 
 
11
 
12
+ @validator('story_text')
13
+ def validate_story_text_length(cls, v):
14
+ words = v.split()
15
+ if len(words) > 30:
16
+ raise ValueError('Story text must not exceed 30 words')
17
+ return v
18
 
19
  class StoryPromptsResponse(BaseModel):
20
  image_prompts: List[str] = Field(
 
24
  )
25
 
26
  class StoryMetadataResponse(BaseModel):
27
+ choices: List[str] = Field(description="List of choices for story progression")
 
 
28
  time: str = Field(description="Current in-game time in 24h format (HH:MM). Time passes realistically based on actions.")
29
  location: str = Field(description="Current location.")
30
+ is_death: bool = Field(description="Whether this segment ends in Sarah's death", default=False)
31
+ is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
32
 
33
  @validator('choices')
34
+ def validate_choices(cls, v):
35
+ if len(v) != 2:
36
+ raise ValueError('Must have exactly 2 choices for story progression')
37
  return v
38
 
39
  # Keep existing models unchanged for compatibility
 
57
  genre: str
58
  epoch: str
59
  base_story: str = Field(description="The generated story for this universe")
60
+ macguffin: str = Field(description="The macguffin for this universe")
61
+
62
+
63
+ # Complete story response combining all parts - preserved for API compatibility
64
+ class StoryResponse(BaseModel):
65
+ story_text: str = Field(description="The story text. No more than 15 words THIS IS MANDATORY. Never mention story beat directly. ")
66
+ choices: List[Choice]
67
+ raw_choices: List[str] = Field(description="Raw choice texts from LLM before conversion to Choice objects")
68
+ time: str = Field(description="Current in-game time in 24h format (HH:MM). Time passes realistically based on actions.")
69
+ location: str = Field(description="Current location.")
70
+ is_first_step: bool = Field(description="Whether this is the first step of the story", default=False)
71
+ is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
72
+ is_death: bool = Field(description="Whether this segment ends in Sarah's death", default=False)
73
+ image_prompts: List[str] = Field(
74
+ description="List of comic panel descriptions that illustrate the key moments of the scene. Use the word 'Sarah' only when referring to her.",
75
+ min_items=GameConfig.MIN_PANELS,
76
+ max_items=GameConfig.MAX_PANELS
77
+ )
78
+
79
+ @validator('choices')
80
+ def validate_choices(cls, v):
81
+ if len(v) != 2:
82
+ raise ValueError('Must have exactly 2 choices for story progression')
83
+ return v
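The reshaped models move the hard limits into Pydantic validators: `StorySegmentResponse` now rejects story text over 30 words, and the choices validators require exactly two options. A small, self-contained sketch of the same validation pattern (generic class names, not the project's actual models):

```python
from pydantic import BaseModel, Field, validator


class SegmentSketch(BaseModel):
    # Mirrors the 30-word rule on StorySegmentResponse.story_text
    story_text: str = Field(description="The story text. No more than 30 words.")

    @validator("story_text")
    def validate_story_text_length(cls, v):
        if len(v.split()) > 30:
            raise ValueError("Story text must not exceed 30 words")
        return v


class MetadataSketch(BaseModel):
    # Mirrors the exactly-two-choices rule on StoryMetadataResponse.choices
    choices: list[str]

    @validator("choices")
    def validate_choices(cls, v):
        if len(v) != 2:
            raise ValueError("Must have exactly 2 choices for story progression")
        return v


# Example: an over-long segment now fails fast at parse time.
try:
    SegmentSketch(story_text=" ".join(["word"] * 31))
except ValueError as exc:
    print(exc)
```

The payoff is that malformed LLM output is rejected at the model boundary instead of being patched downstream in the route handlers.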
server/api/routes/chat.py CHANGED
@@ -62,12 +62,6 @@ def get_chat_router(session_manager: SessionManager, story_generator):
             game_state=game_state,
             previous_choice=previous_choice
         )
-
-        if llm_response.is_death:
-            llm_response.choices = []
-            llm_response.story_text += "\You have succumbed to the harsh wastelands, and your journey concludes here. THE END."
-            if len(llm_response.image_prompts) > 1:
-                llm_response.image_prompts = [llm_response.image_prompts[0]]
 
         # Add segment to history
         game_state.add_to_history(
@@ -89,15 +83,14 @@
             raw_choices=llm_response.raw_choices,
             time=llm_response.time,
             location=llm_response.location,
-            is_victory=llm_response.is_victory,
-            is_death=llm_response.is_death,
             is_first_step=game_state.story_beat == 0,
-            image_prompts=llm_response.image_prompts
+            image_prompts=llm_response.image_prompts,
+            is_death=llm_response.is_death,
+            is_victory=llm_response.is_victory
         )
 
-        # Only increment story beat if not dead and not victory
-        if not llm_response.is_death and not llm_response.is_victory:
-            game_state.story_beat += 1
+        # Increment story beat
+        game_state.story_beat += 1
 
         return response
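After this change the route no longer special-cases death endings inline: it always records the segment, always advances the story beat, and the ending flags simply pass through on the response. A rough sketch of the resulting control flow, with the generator and state objects treated as opaque (their internals are assumptions here):

```python
# Simplified, hypothetical version of the chat turn after this commit.
# The real route is a FastAPI handler; this strips it down to the flow.
async def handle_chat_turn(story_generator, game_state, session_id, previous_choice):
    llm_response = await story_generator.generate_story_segment(
        session_id=session_id,
        game_state=game_state,
        previous_choice=previous_choice,
    )

    # The segment is recorded whether or not it is an ending.
    game_state.add_to_history(
        llm_response.story_text,
        previous_choice,
        llm_response.image_prompts,
        llm_response.time,
        llm_response.location,
    )

    # Ending information rides on the response instead of being
    # patched into the story text here.
    response = {
        "story_text": llm_response.story_text,
        "choices": llm_response.choices,
        "is_death": llm_response.is_death,
        "is_victory": llm_response.is_victory,
        "is_first_step": game_state.story_beat == 0,
    }

    # The beat now increments unconditionally.
    game_state.story_beat += 1
    return response
```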
server/api/routes/universe.py CHANGED
@@ -2,9 +2,19 @@ from fastapi import APIRouter, HTTPException
2
  import uuid
3
 
4
  from core.generators.universe_generator import UniverseGenerator
5
- from core.game_logic import StoryGenerator
6
  from core.session_manager import SessionManager
7
  from api.models import UniverseResponse
 
8
 
9
  def get_universe_router(session_manager: SessionManager, story_generator: StoryGenerator) -> APIRouter:
10
  router = APIRouter()
@@ -16,8 +26,8 @@ def get_universe_router(session_manager: SessionManager, story_generator: StoryG
16
  print("Starting universe generation...")
17
 
18
  # Get random elements before generation
19
- style, genre, epoch = universe_generator._get_random_elements()
20
- print(f"Generated random elements: style={style['name']}, genre={genre}, epoch={epoch}")
21
 
22
  universe = await universe_generator.generate()
23
  print("Generated universe story")
@@ -39,12 +49,13 @@ def get_universe_router(session_manager: SessionManager, story_generator: StoryG
39
  print("Configured universe in game state")
40
 
41
  # Create the TextGenerator for this session
42
- story_generator.create_text_generator(
43
  session_id=session_id,
44
- style=style["name"],
45
  genre=genre,
46
  epoch=epoch,
47
- base_story=universe
 
48
  )
49
  print("Created text generator for session")
50
 
@@ -52,8 +63,8 @@ def get_universe_router(session_manager: SessionManager, story_generator: StoryG
52
  if not game_state.has_universe():
53
  raise ValueError("Universe was not properly configured in game state")
54
 
55
- if session_id not in story_generator.text_generators:
56
- raise ValueError("TextGenerator was not properly created")
57
 
58
  print("All components configured successfully")
59
 
@@ -63,7 +74,8 @@ def get_universe_router(session_manager: SessionManager, story_generator: StoryG
63
  style=style["name"],
64
  genre=genre,
65
  epoch=epoch,
66
- base_story=universe
 
67
  )
68
 
69
  except Exception as e:
 
2
  import uuid
3
 
4
  from core.generators.universe_generator import UniverseGenerator
5
+ from core.story_generator import StoryGenerator
6
  from core.session_manager import SessionManager
7
  from api.models import UniverseResponse
8
+ from pydantic import BaseModel, Field
9
+
10
+ class UniverseResponse(BaseModel):
11
+ status: str
12
+ session_id: str
13
+ style: str
14
+ genre: str
15
+ epoch: str
16
+ base_story: str = Field(description="The generated story for this universe")
17
+ macguffin: str = Field(description="The MacGuffin for this universe")
18
 
19
  def get_universe_router(session_manager: SessionManager, story_generator: StoryGenerator) -> APIRouter:
20
  router = APIRouter()
 
26
  print("Starting universe generation...")
27
 
28
  # Get random elements before generation
29
+ style, genre, epoch, macguffin = universe_generator._get_random_elements()
30
+ print(f"Generated random elements: style={style['name']}, genre={genre}, epoch={epoch}, macguffin={macguffin}")
31
 
32
  universe = await universe_generator.generate()
33
  print("Generated universe story")
 
49
  print("Configured universe in game state")
50
 
51
  # Create the TextGenerator for this session
52
+ story_generator.create_segment_generator(
53
  session_id=session_id,
54
+ style=style,
55
  genre=genre,
56
  epoch=epoch,
57
+ base_story=universe,
58
+ macguffin=macguffin
59
  )
60
  print("Created text generator for session")
61
 
 
63
  if not game_state.has_universe():
64
  raise ValueError("Universe was not properly configured in game state")
65
 
66
+ if session_id not in story_generator.segment_generators:
67
+ raise ValueError("StorySegmentGenerator was not properly created")
68
 
69
  print("All components configured successfully")
70
 
 
74
  style=style["name"],
75
  genre=genre,
76
  epoch=epoch,
77
+ base_story=universe,
78
+ macguffin=macguffin
79
  )
80
 
81
  except Exception as e:
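The route now registers a `StorySegmentGenerator` per session (checked via `story_generator.segment_generators`) instead of the old `text_generators` map, and passes the MacGuffin through. A minimal sketch of that per-session registry pattern; the class below is illustrative only and does not reproduce the project's generator internals:

```python
# Illustrative per-session registry, mirroring how the route verifies
# `session_id in story_generator.segment_generators` after creation.
class StoryGeneratorSketch:
    def __init__(self):
        self.segment_generators = {}

    def create_segment_generator(self, session_id, style, genre, epoch, base_story, macguffin):
        # In the real code this would build a StorySegmentGenerator bound to the universe.
        self.segment_generators[session_id] = {
            "style": style,
            "genre": genre,
            "epoch": epoch,
            "base_story": base_story,
            "macguffin": macguffin,
        }

    def get_segment_generator(self, session_id):
        # Accessor name is an assumption; the old code exposed get_text_generator.
        if session_id not in self.segment_generators:
            raise RuntimeError(f"No segment generator for session {session_id}")
        return self.segment_generators[session_id]


gen = StoryGeneratorSketch()
gen.create_segment_generator("abc", {"name": "noir"}, "thriller", "1950s", "base story text", "a brass key")
assert "abc" in gen.segment_generators
```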
server/core/game_logic.py DELETED
@@ -1,229 +0,0 @@
1
- from pydantic import BaseModel, Field
2
- from typing import List, Tuple
3
-
4
- from core.constants import GameConfig
5
- from core.prompts.system import SARAH_DESCRIPTION
6
- from core.prompts.cinematic import CINEMATIC_SYSTEM_PROMPT
7
- from core.prompts.image_style import IMAGE_STYLE_PREFIX
8
- from services.mistral_client import MistralClient
9
- from api.models import StoryResponse, Choice
10
- from core.generators.text_generator import TextGenerator
11
- from core.generators.image_generator import ImageGenerator
12
- from core.generators.metadata_generator import MetadataGenerator
13
- from core.generators.universe_generator import UniverseGenerator
14
-
15
- from core.constants import GameConfig
16
-
17
- # Initialize generators with None - they will be set up when needed
18
- universe_generator = None
19
- image_generator = None
20
- metadata_generator = None
21
-
22
- def setup_generators(api_key: str, model_name: str = "mistral-small"):
23
- """Setup all generators with the provided API key."""
24
- global universe_generator, image_generator, metadata_generator
25
-
26
- mistral_client = MistralClient(api_key=api_key, model_name=model_name)
27
- universe_generator = UniverseGenerator(mistral_client)
28
- image_generator = ImageGenerator(mistral_client)
29
- metadata_generator = MetadataGenerator(mistral_client)
30
-
31
- def enrich_prompt_with_sarah_description(prompt: str) -> str:
32
- """Add Sarah's visual description to prompts that mention her."""
33
- if "sarah" in prompt.lower() and SARAH_DESCRIPTION not in prompt:
34
- return f"{prompt} {SARAH_DESCRIPTION}"
35
- return prompt
36
-
37
- def format_image_prompt(prompt: str, time: str, location: str) -> str:
38
- """Add style prefix and metadata to image prompt."""
39
- metadata = f"[{time} - {location}] "
40
- return f"{IMAGE_STYLE_PREFIX}{metadata}{prompt}"
41
-
42
- class GameState:
43
- def __init__(self):
44
- self.story_beat = GameConfig.STORY_BEAT_INTRO
45
- self.story_history = []
46
- self.current_time = GameConfig.STARTING_TIME
47
- self.current_location = GameConfig.STARTING_LOCATION
48
- # Universe information
49
- self.universe_style = None
50
- self.universe_genre = None
51
- self.universe_epoch = None
52
- self.universe_story = None
53
-
54
- def reset(self):
55
- """Reset the game state while keeping the universe information."""
56
- # Save the universe information
57
- universe_style = self.universe_style
58
- universe_genre = self.universe_genre
59
- universe_epoch = self.universe_epoch
60
- universe_story = self.universe_story
61
-
62
- # Reset the game state
63
- self.story_beat = GameConfig.STORY_BEAT_INTRO
64
- self.story_history = []
65
- self.current_time = GameConfig.STARTING_TIME
66
- self.current_location = GameConfig.STARTING_LOCATION
67
-
68
- # Restore the universe information
69
- self.universe_style = universe_style
70
- self.universe_genre = universe_genre
71
- self.universe_epoch = universe_epoch
72
- self.universe_story = universe_story
73
-
74
- def set_universe(self, style: str, genre: str, epoch: str, base_story: str):
75
- """Configure the game universe."""
76
- self.universe_style = style
77
- self.universe_genre = genre
78
- self.universe_epoch = epoch
79
- self.universe_story = base_story
80
-
81
- def has_universe(self) -> bool:
82
- """Check whether the universe is configured."""
83
- return all([
84
- self.universe_style is not None,
85
- self.universe_genre is not None,
86
- self.universe_epoch is not None,
87
- self.universe_story is not None
88
- ])
89
-
90
- def add_to_history(self, segment_text: str, choice_made: str, image_prompts: List[str], time: str, location: str):
91
- self.story_history.append({
92
- "segment": segment_text,
93
- "choice": choice_made,
94
- "image_prompts": image_prompts,
95
- "time": time,
96
- "location": location
97
- })
98
- self.current_time = time
99
- self.current_location = location
100
-
101
- # Story output structure
102
- class StoryLLMResponse(BaseModel):
103
- story_text: str = Field(description="The next segment of the story. No more than 15 words THIS IS MANDATORY. Never mention story beat directly. ")
104
- choices: List[str] = Field(description="Between two and four possible choices for the player. Each choice should be a clear path to follow in the story", min_items=1, max_items=4)
105
- is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
106
- is_death: bool = Field(description="Whether this segment ends in Sarah's death", default=False)
107
- image_prompts: List[str] = Field(description="List of 1 to 4 comic panel descriptions that illustrate the key moments of the scene", min_items=1, max_items=4)
108
- time: str = Field(description="Current in-game time in 24h format (HH:MM). Time passes realistically based on actions.", default=GameConfig.STARTING_TIME)
109
- location: str = Field(description="Current location.", default=GameConfig.STARTING_LOCATION)
110
-
111
- # Story generator
112
- class StoryGenerator:
113
- _instance = None
114
-
115
- def __new__(cls, *args, **kwargs):
116
- if cls._instance is None:
117
- print("Creating new StoryGenerator instance")
118
- cls._instance = super().__new__(cls)
119
- cls._instance._initialized = False
120
- return cls._instance
121
-
122
- def __init__(self, api_key: str, model_name: str = "mistral-small"):
123
- if not self._initialized:
124
- print("Initializing StoryGenerator singleton")
125
- self.api_key = api_key
126
- self.model_name = model_name
127
- self.mistral_client = MistralClient(api_key=api_key, model_name=model_name)
128
- self.image_generator = ImageGenerator(self.mistral_client)
129
- self.metadata_generator = MetadataGenerator(self.mistral_client)
130
- self.text_generators = {} # One TextGenerator per session
131
- self._initialized = True
132
-
133
- def create_text_generator(self, session_id: str, style: str, genre: str, epoch: str, base_story: str):
134
- """Create a new TextGenerator tailored to the specified universe for a given session."""
135
- print(f"Creating TextGenerator for session {session_id} in StoryGenerator singleton")
136
- self.text_generators[session_id] = TextGenerator(
137
- self.mistral_client,
138
- universe_style=style,
139
- universe_genre=genre,
140
- universe_epoch=epoch,
141
- universe_story=base_story
142
- )
143
- print(f"Current TextGenerators in StoryGenerator: {list(self.text_generators.keys())}")
144
-
145
- def get_text_generator(self, session_id: str) -> TextGenerator:
146
- """Return the TextGenerator associated with a session."""
147
- print(f"Getting TextGenerator for session {session_id} from StoryGenerator singleton")
148
- print(f"Current TextGenerators in StoryGenerator: {list(self.text_generators.keys())}")
149
- if session_id not in self.text_generators:
150
- raise RuntimeError(f"No text generator found for session {session_id}. Generate a universe first.")
151
- return self.text_generators[session_id]
152
-
153
- def _format_story_history(self, game_state: GameState) -> str:
154
- """Format the story history for the prompt."""
155
- if not game_state.story_history:
156
- return ""
157
-
158
- segments = []
159
- for entry in game_state.story_history:
160
- segments.append(entry['segment'])
161
-
162
- story_history = "\n\n---\n\n".join(segments)
163
- return story_history
164
-
165
- async def generate_story_segment(self, session_id: str, game_state: GameState, previous_choice: str) -> StoryResponse:
166
- """Génère un segment d'histoire."""
167
- text_generator = self.get_text_generator(session_id)
168
-
169
- # 1. Générer le texte de l'histoire initial
170
- story_history = self._format_story_history(game_state)
171
- text_response = await text_generator.generate(
172
- story_beat=game_state.story_beat,
173
- current_time=game_state.current_time,
174
- current_location=game_state.current_location,
175
- previous_choice=previous_choice,
176
- story_history=story_history
177
- )
178
-
179
- # 2. Générer les métadonnées
180
- metadata_response = await self.metadata_generator.generate(
181
- story_text=text_response.story_text,
182
- current_time=game_state.current_time,
183
- current_location=game_state.current_location,
184
- story_beat=game_state.story_beat
185
- )
186
-
187
- # 3. Vérifier si c'est une fin (mort ou victoire)
188
- is_ending = metadata_response.is_death or metadata_response.is_victory
189
-
190
- if is_ending:
191
- # Regénérer le texte avec le contexte de fin
192
- ending_type = "victory" if metadata_response.is_victory else "death"
193
- text_response = await text_generator.generate_ending(
194
- story_beat=game_state.story_beat,
195
- ending_type=ending_type,
196
- current_scene=text_response.story_text,
197
- story_history=story_history
198
- )
199
- # Ne générer qu'une seule image pour la fin
200
- prompts_response = await self.image_generator.generate(text_response.story_text)
201
- if len(prompts_response.image_prompts) > 1:
202
- prompts_response.image_prompts = [prompts_response.image_prompts[0]]
203
- else:
204
- # Si ce n'est pas une fin, générer les prompts normalement
205
- prompts_response = await self.image_generator.generate(text_response.story_text)
206
-
207
- # 4. Créer la réponse finale
208
- choices = [] if is_ending else [
209
- Choice(id=i, text=choice_text)
210
- for i, choice_text in enumerate(metadata_response.choices, 1)
211
- ]
212
-
213
- response = StoryResponse(
214
- story_text=text_response.story_text,
215
- choices=choices,
216
- is_victory=metadata_response.is_victory,
217
- is_death=metadata_response.is_death,
218
- time=metadata_response.time,
219
- location=metadata_response.location,
220
- raw_choices=metadata_response.choices if not is_ending else [],
221
- image_prompts=[format_image_prompt(prompt, metadata_response.time, metadata_response.location)
222
- for prompt in prompts_response.image_prompts],
223
- is_first_step=(game_state.story_beat == GameConfig.STORY_BEAT_INTRO)
224
- )
225
-
226
- return response
227
-
228
- async def transform_story_to_art_prompt(self, story_text: str) -> str:
229
- return await self.mistral_client.transform_prompt(story_text, CINEMATIC_SYSTEM_PROMPT)
 
 
server/core/game_state.py ADDED
@@ -0,0 +1,76 @@
 
 
1
+ from core.constants import GameConfig
2
+ from typing import List
3
+
4
+ class GameState:
5
+ def __init__(self):
6
+ self.story_beat = GameConfig.STORY_BEAT_INTRO
7
+ self.story_history = []
8
+ self.current_time = GameConfig.STARTING_TIME
9
+ self.current_location = GameConfig.STARTING_LOCATION
10
+ # Universe information
11
+ self.universe_style = None
12
+ self.universe_genre = None
13
+ self.universe_epoch = None
14
+ self.universe_story = None
15
+
16
+ def reset(self):
17
+ """Reset game state while keeping universe information."""
18
+ # Save universe info
19
+ universe_style = self.universe_style
20
+ universe_genre = self.universe_genre
21
+ universe_epoch = self.universe_epoch
22
+ universe_story = self.universe_story
23
+
24
+ # Reset game state
25
+ self.story_beat = GameConfig.STORY_BEAT_INTRO
26
+ self.story_history = []
27
+ self.current_time = GameConfig.STARTING_TIME
28
+ self.current_location = GameConfig.STARTING_LOCATION
29
+
30
+ # Restore universe info
31
+ self.universe_style = universe_style
32
+ self.universe_genre = universe_genre
33
+ self.universe_epoch = universe_epoch
34
+ self.universe_story = universe_story
35
+
36
+ def set_universe(self, style: str, genre: str, epoch: str, base_story: str):
37
+ """Configure the game universe."""
38
+ self.universe_style = style
39
+ self.universe_genre = genre
40
+ self.universe_epoch = epoch
41
+ self.universe_story = base_story
42
+
43
+ def has_universe(self) -> bool:
44
+ """Check if universe is configured."""
45
+ return all([
46
+ self.universe_style is not None,
47
+ self.universe_genre is not None,
48
+ self.universe_epoch is not None,
49
+ self.universe_story is not None
50
+ ])
51
+
52
+ def format_history(self) -> str:
53
+ """Format story history for the prompt."""
54
+ if not self.story_history:
55
+ return ""
56
+
57
+ segments = []
58
+ for entry in self.story_history:
59
+ segment = entry['segment']
60
+ if entry['player_choice']:
61
+ segment += f"\n[Choix du joueur: {entry['player_choice']}]"
62
+ segments.append(segment)
63
+
64
+ return "\n\n---\n\n".join(segments)
65
+
66
+ def add_to_history(self, segment_text: str, choice_made: str, image_prompts: List[str], time: str, location: str):
67
+ """Add a segment to history with essential information."""
68
+ self.story_history.append({
69
+ "segment": segment_text,
70
+ "player_choice": choice_made,
71
+ "time": time,
72
+ "location": location,
73
+ "image_prompts": image_prompts
74
+ })
75
+ self.current_time = time
76
+ self.current_location = location
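
For reference, a minimal usage sketch of the new GameState (a sketch only, assuming the GameConfig constants imported above exist in core.constants; the story values are invented):

```python
from core.game_state import GameState

state = GameState()
state.set_universe(
    style="steampunk",                 # illustrative values, not shipped content
    genre="adventure",
    epoch="victorian era",
    base_story="Sarah arrives in a new world through the portal.",
)
assert state.has_universe()

state.add_to_history(
    segment_text="Sarah steps through the portal into a fog-bound city.",
    choice_made="Enter the city",
    image_prompts=["wide shot of Sarah at the portal"],
    time="18:30",
    location="Portal plaza",
)
print(state.format_history())  # segments separated by "---", with the player's choice appended
state.reset()                  # clears history, time and location but keeps the universe fields
```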
server/core/generators/base_generator.py CHANGED
@@ -8,10 +8,27 @@ T = TypeVar('T', bound=BaseModel)
8
  class BaseGenerator:
9
  """Classe de base pour tous les générateurs de contenu."""
10
 
 
 
11
  def __init__(self, mistral_client: MistralClient):
12
  self.mistral_client = mistral_client
13
  self.prompt = self._create_prompt()
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  def _create_prompt(self) -> ChatPromptTemplate:
16
  """Crée le template de prompt pour ce générateur.
17
  To be implemented by child classes."""
@@ -32,6 +49,7 @@ class BaseGenerator:
32
  The generated content, parsed according to the generator's specific type
33
  """
34
  messages = self.prompt.format_messages(**kwargs)
 
35
  return await self.mistral_client.generate(
36
  messages=messages,
37
  custom_parser=self._custom_parser
 
8
  class BaseGenerator:
9
  """Classe de base pour tous les générateurs de contenu."""
10
 
11
+ debug_mode = True # Class attribute for debug mode
12
+
13
  def __init__(self, mistral_client: MistralClient):
14
  self.mistral_client = mistral_client
15
  self.prompt = self._create_prompt()
16
 
17
+ @classmethod
18
+ def set_debug_mode(cls, enabled: bool):
19
+ """Enable or disable debug mode for all generators."""
20
+ cls.debug_mode = enabled
21
+
22
+ def _print_debug_info(self, messages):
23
+ """Print debug information about the prompts."""
24
+ if self.debug_mode:
25
+ print("\n=== DEBUG: PROMPT INFORMATION ===")
26
+ for i, message in enumerate(messages):
27
+ print(f"\n--- Message {i + 1} ---")
28
+ print(f"Role: {message.type}")
29
+ print(f"Content:\n{message.content}\n")
30
+ print("================================\n")
31
+
32
  def _create_prompt(self) -> ChatPromptTemplate:
33
  """Crée le template de prompt pour ce générateur.
34
  To be implemented by child classes."""
 
49
  The generated content, parsed according to the generator's specific type
50
  """
51
  messages = self.prompt.format_messages(**kwargs)
52
+ self._print_debug_info(messages) # Print debug info if debug mode is enabled
53
  return await self.mistral_client.generate(
54
  messages=messages,
55
  custom_parser=self._custom_parser
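
A small sketch of how the new class-level debug flag can be toggled; it is shared by every generator subclass, so one call affects them all:

```python
from core.generators.base_generator import BaseGenerator

BaseGenerator.set_debug_mode(True)   # every generate() call now prints its formatted prompt messages
# ... run a generation here ...
BaseGenerator.set_debug_mode(False)  # back to silent mode
```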
server/core/generators/image_generator.py DELETED
@@ -1,69 +0,0 @@
1
- import json
2
- from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
3
-
4
- from core.generators.base_generator import BaseGenerator
5
- from core.prompts.image_style import IMAGE_STYLE_PREFIX
6
- from core.prompts.system import SARAH_VISUAL_DESCRIPTION
7
- from core.prompts.text_prompts import IMAGE_PROMPTS_GENERATOR_PROMPT
8
- from api.models import StoryPromptsResponse
9
-
10
- class ImageGenerator(BaseGenerator):
11
- """Générateur pour les prompts d'images."""
12
-
13
- def _create_prompt(self) -> ChatPromptTemplate:
14
- human_template = """Story text: {story_text}
15
-
16
- Generate panel descriptions following the format specified."""
17
-
18
- return ChatPromptTemplate(
19
- messages=[
20
- SystemMessagePromptTemplate.from_template(IMAGE_PROMPTS_GENERATOR_PROMPT),
21
- HumanMessagePromptTemplate.from_template(human_template)
22
- ]
23
- )
24
-
25
- def enrich_prompt(self, prompt: str) -> str:
26
- """Add Sarah's visual description to prompts that mention her."""
27
- if "sarah" in prompt.lower() and SARAH_VISUAL_DESCRIPTION not in prompt:
28
- return f"{prompt} {SARAH_VISUAL_DESCRIPTION}"
29
- return prompt
30
-
31
- def _custom_parser(self, response_content: str) -> StoryPromptsResponse:
32
- """Parse la réponse et gère les erreurs."""
33
- try:
34
- # Essayer de parser directement le JSON
35
- data = json.loads(response_content)
36
- return StoryPromptsResponse(**data)
37
- except (json.JSONDecodeError, ValueError):
38
- # Si le parsing échoue, extraire les prompts en ignorant les lignes de syntaxe JSON
39
- prompts = []
40
- for line in response_content.split("\n"):
41
- line = line.strip()
42
- # Ignorer les lignes vides, la syntaxe JSON et les lignes contenant image_prompts
43
- if (not line or
44
- line in ["{", "}", "[", "]"] or
45
- "image_prompts" in line.lower() or
46
- "image\\_prompts" in line or
47
- line.startswith('"') and line.endswith('",') and len(line) < 5):
48
- continue
49
- # Nettoyer la ligne des caractères JSON et d'échappement
50
- line = line.strip('",')
51
- line = line.replace('\\"', '"').replace("\\'", "'").replace("\\_", "_")
52
- if line:
53
- prompts.append(line)
54
- # Limiter à 4 prompts maximum
55
- prompts = prompts[:4]
56
- return StoryPromptsResponse(image_prompts=prompts)
57
-
58
- async def generate(self, story_text: str) -> StoryPromptsResponse:
59
- """Génère les prompts d'images basés sur le texte de l'histoire."""
60
- response = await super().generate(story_text=story_text)
61
-
62
- # Enrichir les prompts avec la description de Sarah
63
- response.image_prompts = [self.enrich_prompt(prompt) for prompt in response.image_prompts]
64
- return response
65
-
66
- def format_prompt(self, prompt: str, time: str, location: str) -> str:
67
- """Formate un prompt d'image avec le style et les métadonnées."""
68
- metadata = f"[{time} - {location}] "
69
- return f"{IMAGE_STYLE_PREFIX}{metadata}{prompt}"
 
 
server/core/generators/image_prompt_generator.py ADDED
@@ -0,0 +1,198 @@
 
 
1
+ from typing import List
2
+ from pydantic import BaseModel, Field
3
+ from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
4
+ import json
5
+
6
+ from core.generators.base_generator import BaseGenerator
7
+ from core.prompts.hero import HERO_VISUAL_DESCRIPTION
8
+
9
+ class ImagePromptResponse(BaseModel):
10
+ """Response format for image prompt generation."""
11
+ image_prompts: List[str] = Field(description="List of image prompts", min_items=1, max_items=4)
12
+
13
+ class ImagePromptGenerator(BaseGenerator):
14
+ """Generator for image prompts based on story text."""
15
+
16
+ def __init__(self, mistral_client, artist_style: str = None):
17
+ super().__init__(mistral_client)
18
+ self.artist_style = artist_style or "François Schuiten comic panel"
19
+
20
+ def _create_prompt(self) -> ChatPromptTemplate:
21
+ """Create the prompt template for image prompt generation."""
22
+
23
+ IMAGE_PROMPTS_GENERATOR_PROMPT = f"""
24
+ You are a cinematic storyboard artist. Based on the given story text, create 1 to 4 vivid panel descriptions.
25
+ Each panel should capture a key moment or visual element from the story.
26
+ ALWAYS write in English, never use any other language.
27
+
28
+ You are a comic book panel description generator.
29
+ Your role is to create vivid, cinematic descriptions for comic panels that will be turned into images.
30
+
31
+ {HERO_VISUAL_DESCRIPTION}
32
+
33
+ Each panel description should:
34
+ 1. Be clear and specific about what to show
35
+ 2. Use dynamic camera angles (low angle, high angle, Dutch angle)
36
+ 3. Specify shot types (close-up, medium shot, wide shot)
37
+ 4. Include mood and lighting
38
+ 5. Focus on the most dramatic or meaningful moment
39
+
40
+ ANGLES AND MOVEMENT:
41
+ - High angle: Vulnerability, weakness
42
+ - Low angle: Power, threat
43
+ - Dutch angle: Tension, disorientation
44
+ - Over shoulder: POV, surveillance
45
+
46
+ VISUAL STORYTELLING TOOLS:
47
+ - Focus on story-relevant details:
48
+ * Objects that will be important later
49
+ * Environmental clues
50
+ * Character reactions
51
+ * Symbolic elements
52
+
53
+ - Dynamic composition:
54
+ * Frame within frame (through doorways, windows)
55
+ * Reflections and shadows
56
+ * Foreground elements for depth
57
+ * Leading lines
58
+ * Rule of thirds
59
+
60
+ FORMAT:
61
+ "[shot type] [scene description]"
62
+
63
+ EXAMPLES:
64
+ - "low angle shot of Sarah checking an object in a dark corridor"
65
+ - "wide shot of a ruined cityscape at sunset, silhouette of Sarah in the foreground"
66
+ - "Dutch angle close-up of Sarah's determined face illuminated by the glow of her object"
67
+
68
+ Always maintain consistency with Sarah's appearance and the comic book style.
69
+
70
+ IMPORTANT RULES FOR IMAGE PROMPTS:
71
+ - If you are prompting only one panel, it must be an important panel. Don't use a single panel often; it should be a key moment in the story.
72
+ - If you are prompting more than one panel, they must be distinct and meaningful.
73
+ - For death scenes: Focus on the dramatic and emotional impact, not the gore or violence
74
+ - For victory scenes: Emphasize triumph, relief, and accomplishment
75
+ - For victory and death scenes, you MUST use 1 panel only
76
+
77
+ RESPONSE FORMAT:
78
+ You must return a valid JSON object that matches this Pydantic schema:
79
+ ```python
80
+ class ImagePromptResponse(BaseModel):
81
+ image_prompts: List[str] = Field(
82
+ description="List of image prompts",
83
+ min_items=1, # Must have at least 1 prompt
84
+ max_items=4 # Cannot have more than 4 prompts
85
+ )
86
+ ```
87
+
88
+ Example of valid response:
89
+ {{{{
90
+ "image_prompts": [
91
+ "low angle shot of Sarah examining a mysterious artifact in a dimly lit chamber",
92
+ "medium shot of ancient symbols glowing on the chamber walls, casting eerie shadows",
93
+ "close-up of Sarah's determined expression as she deciphers the meaning"
94
+ ]
95
+ }}}}
96
+
97
+ Your response MUST be a valid JSON object with this exact structure, or it will be rejected.
98
+ """
99
+
100
+ human_template = """
101
+ Story text: {story_text}
102
+
103
+ Generate panel descriptions that capture the key moments of this scene.
104
+
105
+ Story state: {is_end}
106
+ """
107
+
108
+ return ChatPromptTemplate(
109
+ messages=[
110
+ SystemMessagePromptTemplate.from_template(IMAGE_PROMPTS_GENERATOR_PROMPT),
111
+ HumanMessagePromptTemplate.from_template(human_template)
112
+ ]
113
+ )
114
+
115
+ def _custom_parser(self, response_content: str) -> ImagePromptResponse:
116
+ """Parse the response into a list of image prompts."""
117
+ try:
118
+ # Parse JSON
119
+ try:
120
+ data = json.loads(response_content)
121
+ except json.JSONDecodeError:
122
+ raise ValueError(
123
+ "Invalid JSON format. Response must be a valid JSON object. "
124
+ "Example: {'image_prompts': ['panel description 1', 'panel description 2']}"
125
+ )
126
+
127
+ # Verify image_prompts exists
128
+ if "image_prompts" not in data:
129
+ raise ValueError(
130
+ "Missing 'image_prompts' field in JSON. "
131
+ "Response must contain an 'image_prompts' array."
132
+ )
133
+
134
+ # Verify image_prompts is a list
135
+ if not isinstance(data["image_prompts"], list):
136
+ raise ValueError(
137
+ "'image_prompts' must be an array of strings. "
138
+ "Example: {'image_prompts': ['panel description 1', 'panel description 2']}"
139
+ )
140
+
141
+ # Add Sarah's visual description if she's mentioned
142
+ prompts = data["image_prompts"]
143
+ prompts = [
144
+ f"{prompt} {HERO_VISUAL_DESCRIPTION}" if "sarah" in prompt.lower() else prompt
145
+ for prompt in prompts
146
+ ]
147
+
148
+ # Create and validate with Pydantic
149
+ try:
150
+ return ImagePromptResponse(image_prompts=prompts)
151
+ except ValueError as e:
152
+ raise ValueError(
153
+ f"Invalid prompt structure: {str(e)}. "
154
+ "Must have between 1 and 4 prompts. "
155
+ "For death/victory scenes, exactly 1 prompt is required."
156
+ )
157
+ except json.JSONDecodeError:
158
+ raise ValueError("Response must be a valid JSON object with 'image_prompts' array")
159
+
160
+ def _format_prompt(self, prompt: str, time: str, location: str) -> str:
161
+ """Format a prompt with time and location metadata."""
162
+ metadata = f"[{time} - {location}] "
163
+ return f"{self.artist_style} -- {metadata}{prompt}"
164
+
165
+ async def generate(self, story_text: str, time: str, location: str, is_death: bool = False, is_victory: bool = False) -> ImagePromptResponse:
166
+ """Generate image prompts based on story text.
167
+
168
+ Args:
169
+ story_text: The story text to generate image prompts from
170
+ time: Current time in the story
171
+ location: Current location in the story
172
+ is_death: Whether this is a death scene
173
+ is_victory: Whether this is a victory scene
174
+
175
+ Returns:
176
+ ImagePromptResponse containing the generated and formatted image prompts
177
+ """
178
+
179
+ is_end=""
180
+ if is_death:
181
+ is_end = "this is a death to represent"
182
+ elif is_victory:
183
+ is_end = "this is a victory to represent"
184
+
185
+ response = await super().generate(
186
+ story_text=story_text,
187
+ is_death=is_death,
188
+ is_victory=is_victory,
189
+ is_end=is_end
190
+ )
191
+
192
+ # Format each prompt with metadata
193
+ response.image_prompts = [
194
+ self._format_prompt(prompt, time, location)
195
+ for prompt in response.image_prompts
196
+ ]
197
+
198
+ return response
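
A hedged usage sketch for the new generator, assuming an already-configured MistralClient (the API key below is a placeholder; omitting artist_style falls back to the François Schuiten prefix):

```python
import asyncio

from services.mistral_client import MistralClient
from core.generators.image_prompt_generator import ImagePromptGenerator

async def demo():
    client = MistralClient(api_key="YOUR_API_KEY", model_name="mistral-small")  # placeholder key
    generator = ImagePromptGenerator(client)  # default artist style

    response = await generator.generate(
        story_text="Sarah deciphers glowing symbols in a ruined observatory.",
        time="21:15",
        location="Ruined observatory",
    )
    for prompt in response.image_prompts:
        # e.g. "François Schuiten comic panel -- [21:15 - Ruined observatory] low angle shot of ..."
        print(prompt)

asyncio.run(demo())
```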
server/core/generators/metadata_generator.py CHANGED
@@ -2,20 +2,62 @@ import json
2
  from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
3
 
4
  from core.generators.base_generator import BaseGenerator
5
- from core.prompts.text_prompts import METADATA_GENERATOR_PROMPT
6
  from api.models import StoryMetadataResponse
 
7
 
8
  class MetadataGenerator(BaseGenerator):
9
  """Générateur pour les métadonnées de l'histoire."""
10
 
11
  def _create_prompt(self) -> ChatPromptTemplate:
12
- human_template = """Story text: {story_text}
13
- Current time: {current_time}
14
- Current location: {current_location}
15
- Story beat: {story_beat}
16
- {error_feedback}
17
 
18
- Generate the metadata following the format specified."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
  return ChatPromptTemplate(
21
  messages=[
@@ -31,15 +73,11 @@ Generate the metadata following the format specified."""
31
  data = json.loads(response_content)
32
 
33
  # Check that the choices are valid according to the rules
34
- is_ending = data.get('is_victory', False) or data.get('is_death', False)
35
  choices = data.get('choices', [])
36
 
37
- # Si c'est une fin, forcer les choix à être vides
38
- if is_ending:
39
- data['choices'] = []
40
- # Sinon, vérifier qu'il y a entre 1 et 4 choix
41
- elif not (1 <= len(choices) <= 4):
42
- raise ValueError('For normal progression, must have between 1 and 4 choices')
43
 
44
  return StoryMetadataResponse(**data)
45
  except json.JSONDecodeError:
@@ -49,10 +87,13 @@ Generate the metadata following the format specified."""
49
 
50
  async def generate(self, story_text: str, current_time: str, current_location: str, story_beat: int, error_feedback: str = "") -> StoryMetadataResponse:
51
  """Surcharge de generate pour inclure le error_feedback par défaut."""
 
 
52
  return await super().generate(
53
  story_text=story_text,
54
  current_time=current_time,
55
  current_location=current_location,
56
  story_beat=story_beat,
57
- error_feedback=error_feedback
 
58
  )
 
2
  from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
3
 
4
  from core.generators.base_generator import BaseGenerator
5
+ from core.prompts.formatting_rules import FORMATTING_RULES
6
  from api.models import StoryMetadataResponse
7
+ from core.prompts.story_beats import STORY_BEATS
8
 
9
  class MetadataGenerator(BaseGenerator):
10
  """Générateur pour les métadonnées de l'histoire."""
11
 
12
  def _create_prompt(self) -> ChatPromptTemplate:
 
 
 
 
 
13
 
14
+ METADATA_GENERATOR_PROMPT = f"""
15
+ Generate the metadata for the story segment: choices, time progression, location changes, etc.
16
+ Be consistent with the story's tone and previous context.
17
+ ALWAYS write in English, never use any other language.
18
+
19
+ {FORMATTING_RULES}
20
+
21
+ IMPORTANT RULES FOR CHOICES:
22
+ - You MUST ALWAYS provide EXACTLY TWO choices that advance the story
23
+ - Each choice MUST be NO MORE than 6 words - this is a HARD limit
24
+ - Each choice should be distinct and meaningful
25
+ - If you think of more than two options, select the two most interesting ones
26
+ - Keep choices concise but descriptive
27
+ - Count your words carefully for each choice
28
+ - Choices MUST be direct continuations of the current story segment
29
+ - Choices should reflect possible actions based on the current situation
30
+
31
+
32
+ {STORY_BEATS}
33
+
34
+ IMPORTANT:
35
+ - Once story_beat reaches 5 or more, the next segment MUST be the end of the story.
36
+ - THIS IS MANDATORY.
37
+
38
+ You must return a JSON object with the following format:
39
+ {{{{
40
+ "is_death": false, # Set to true for death scenes
41
+ "is_victory": false # Set to true for victory scenes
42
+ "choices": ["Choice 1", "Choice 2"], # ALWAYS exactly two choices, each max 6 words
43
+ "time": "HH:MM",
44
+ "location": "Location name with proper nouns in bold",
45
+ }}}}
46
+
47
+ """
48
+
49
+ human_template = """
50
+ Current story segment:
51
+ {story_text}
52
+
53
+ Current game state:
54
+ - Story beat: {story_beat}
55
+ - Current time: {current_time}
56
+ - Current location: {current_location}
57
+
58
+ {is_end}
59
+ """
60
+
61
 
62
  return ChatPromptTemplate(
63
  messages=[
 
73
  data = json.loads(response_content)
74
 
75
  # Check that the choices are valid according to the rules
 
76
  choices = data.get('choices', [])
77
 
78
+ # Check that there are exactly 2 choices
79
+ if len(choices) != 2:
80
+ raise ValueError('Must have exactly 2 choices')
 
 
 
81
 
82
  return StoryMetadataResponse(**data)
83
  except json.JSONDecodeError:
 
87
 
88
  async def generate(self, story_text: str, current_time: str, current_location: str, story_beat: int, error_feedback: str = "") -> StoryMetadataResponse:
89
  """Surcharge de generate pour inclure le error_feedback par défaut."""
90
+
91
+ is_end = "This should be close to the end of the story." if story_beat >= 5 else ""
92
  return await super().generate(
93
  story_text=story_text,
94
  current_time=current_time,
95
  current_location=current_location,
96
  story_beat=story_beat,
97
+ error_feedback=error_feedback,
98
+ is_end=is_end
99
  )
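
For clarity, the JSON shape the reworked parser now accepts (field names taken from the prompt above, values invented); any other choice count raises ValueError('Must have exactly 2 choices'):

```python
# Example payload accepted by MetadataGenerator._custom_parser (illustrative values only).
valid_metadata = {
    "is_death": False,
    "is_victory": False,
    "choices": ["Follow the tunnel", "Climb the tower"],  # exactly two choices, max 6 words each
    "time": "22:10",
    "location": "Old foundry",
}
```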
server/core/generators/{text_generator.py → story_segment_generator.py} RENAMED
@@ -2,148 +2,131 @@ import json
2
  from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
3
 
4
  from core.generators.base_generator import BaseGenerator
5
- from core.prompts.text_prompts import TEXT_GENERATOR_PROMPT
6
- from api.models import StoryTextResponse
7
  from services.mistral_client import MistralClient
 
 
 
 
8
 
9
- class TextGenerator(BaseGenerator):
10
- """Générateur pour le texte de l'histoire."""
11
 
12
- def __init__(self, mistral_client: MistralClient, universe_style: str = None, universe_genre: str = None, universe_epoch: str = None, universe_story: str = None):
13
  super().__init__(mistral_client)
14
  self.universe_style = universe_style
15
  self.universe_genre = universe_genre
16
  self.universe_epoch = universe_epoch
17
  self.universe_story = universe_story
 
18
 
19
  def _create_prompt(self) -> ChatPromptTemplate:
20
- system_template = """You are a story generator for a comic book adventure game.
21
- You are generating a story in the following universe:
22
- - Style: {universe_style}
23
- - Genre: {universe_genre}
24
- - Historical epoch: {universe_epoch}
25
-
26
- Base universe story:
27
- {universe_story}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
  Your task is to generate the next segment of the story, following these rules:
30
  1. Keep the story consistent with the universe parameters
31
  2. Each segment must advance the plot
32
  3. Never repeat previous descriptions or situations
33
  4. Keep segments concise and impactful (max 15 words)
34
- 5. The MacGuffin should remain mysterious but central to the plot"""
35
 
36
- human_template = """Current game state:
37
- - Story beat: {story_beat}
38
- - Current time: {current_time}
39
- - Current location: {current_location}
40
- - Previous choice: {previous_choice}
41
 
42
- Story history:
43
- {story_history}
44
-
45
- Generate the next story segment."""
46
-
47
- return ChatPromptTemplate(
48
- messages=[
49
- SystemMessagePromptTemplate.from_template(system_template),
50
- HumanMessagePromptTemplate.from_template(human_template)
51
- ]
52
- )
53
 
54
- def _create_ending_prompt(self) -> ChatPromptTemplate:
55
- human_template = """Current scene: {current_scene}
 
 
 
56
 
 
57
  Story history:
58
  {story_history}
59
 
60
- This is a {ending_type} ending. Generate a dramatic conclusion that fits the current situation.
61
- The ending should feel like a natural continuation of the current scene."""
 
 
 
62
 
 
 
 
 
 
 
63
  return ChatPromptTemplate(
64
  messages=[
65
- SystemMessagePromptTemplate.from_template(TEXT_GENERATOR_PROMPT),
66
  HumanMessagePromptTemplate.from_template(human_template)
67
  ]
68
  )
69
 
70
- def _clean_story_text(self, text: str) -> str:
71
- """Nettoie le texte des métadonnées et autres suffixes."""
72
- text = text.replace("\n", " ").strip()
73
- text = text.split("[")[0].strip() # Supprimer les métadonnées entre crochets
74
- return text
75
-
76
- def _custom_parser(self, response_content: str) -> StoryTextResponse:
77
- """Parse la réponse et gère les erreurs."""
78
  try:
79
- # Essayer de parser directement le JSON
80
- data = json.loads(response_content)
81
- # Nettoyer le texte avant de créer la réponse
82
- if 'story_text' in data:
83
- data['story_text'] = self._clean_story_text(data['story_text'])
84
- return StoryTextResponse(**data)
85
- except (json.JSONDecodeError, ValueError):
86
- # Si le parsing échoue, extraire le texte directement
87
- cleaned_text = self._clean_story_text(response_content.strip())
88
- return StoryTextResponse(story_text=cleaned_text)
89
-
90
- async def generate(self, story_beat: int, current_time: str, current_location: str, previous_choice: str, story_history: str = "") -> StoryTextResponse:
91
- """Génère le prochain segment de l'histoire."""
 
 
 
 
 
 
 
 
 
 
 
 
 
92
  return await super().generate(
 
 
 
93
  story_beat=story_beat,
94
  current_time=current_time,
95
  current_location=current_location,
96
  previous_choice=previous_choice,
97
  story_history=story_history,
 
98
  universe_style=self.universe_style,
99
  universe_genre=self.universe_genre,
100
  universe_epoch=self.universe_epoch,
101
- universe_story=self.universe_story
102
- )
103
-
104
- async def generate_ending(self, story_beat: int, ending_type: str, current_scene: str, story_history: str) -> StoryTextResponse:
105
- """Génère un texte de fin approprié."""
106
- system_template = """You are a story generator for a comic book adventure game.
107
- You are generating a story in the following universe:
108
- - Style: {universe_style}
109
- - Genre: {universe_genre}
110
- - Historical epoch: {universe_epoch}
111
-
112
- Base universe story:
113
- {universe_story}
114
-
115
- Your task is to generate an epic {ending_type} ending for the story that:
116
- 1. Matches the universe's style and atmosphere
117
- 2. Provides a satisfying conclusion
118
- 3. Keeps the ending concise but impactful (max 15 words)
119
- 4. For victory: reveals the MacGuffin's power in a spectacular way
120
- 5. For death: creates a dramatic and fitting end for Sarah"""
121
-
122
- human_template = """Current scene:
123
- {current_scene}
124
-
125
- Story history:
126
- {story_history}
127
-
128
- Generate the {ending_type} ending."""
129
-
130
- ending_prompt = ChatPromptTemplate(
131
- messages=[
132
- SystemMessagePromptTemplate.from_template(system_template),
133
- HumanMessagePromptTemplate.from_template(human_template)
134
- ]
135
- )
136
-
137
- response = await self.mistral_client.generate(
138
- ending_prompt,
139
- ending_type=ending_type,
140
- current_scene=current_scene,
141
- story_history=story_history,
142
- universe_style=self.universe_style,
143
- universe_genre=self.universe_genre,
144
- universe_epoch=self.universe_epoch,
145
- universe_story=self.universe_story
146
- )
147
-
148
- cleaned_text = self._custom_parser(response)
149
- return StoryTextResponse(story_text=cleaned_text)
 
2
  from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
3
 
4
  from core.generators.base_generator import BaseGenerator
5
+ from api.models import StorySegmentResponse
 
6
  from services.mistral_client import MistralClient
7
+ from core.prompts.hero import HERO_DESCRIPTION
8
+ from core.prompts.formatting_rules import FORMATTING_RULES
9
+ from core.prompts.story_beats import STORY_BEATS
10
+ import random
11
 
12
+ class StorySegmentGenerator(BaseGenerator):
13
+ """Generator for story segments based on game state and universe context."""
14
 
15
+ def __init__(self, mistral_client: MistralClient, universe_style: str = None, universe_genre: str = None, universe_epoch: str = None, universe_story: str = None, universe_macguffin: str = None):
16
  super().__init__(mistral_client)
17
  self.universe_style = universe_style
18
  self.universe_genre = universe_genre
19
  self.universe_epoch = universe_epoch
20
  self.universe_story = universe_story
21
+ self.universe_macguffin = universe_macguffin
22
 
23
  def _create_prompt(self) -> ChatPromptTemplate:
24
+ system_template = """
25
+ You are a descriptive narrator for a comic book. Your ONLY task is to write the next segment of the story.
26
+ ALWAYS write in English, never use any other language.
27
+ IMPORTANT: Your response MUST be 15 words or less.
28
+
29
+ {STORY_BEATS}
30
+
31
+ IMPORTANT RULES FOR THE MACGUFFIN (MANDATORY):
32
+ - Most segments must hint at the power of the MacGuffin ({universe_macguffin})
33
+ - Use strong clues ONLY at key moments
34
+ - NEVER reveal the full power of the MacGuffin before the climax, this is a STRICT limit
35
+ - Use subtle clues in safe havens
36
+ - NEVER mention the power of the MacGuffin explicitly in choices or the story
37
+ - NEVER mention time or place in the story in this manner: [18:00 - a road]
38
+
39
+ IMPORTANT RULES FOR STORY TEXT:
40
+ - Write ONLY a descriptive narrative text
41
+ - DO NOT include any choices, questions, or options
42
+ - DO NOT ask what Sarah should do next
43
+ - DO NOT include any dialogue asking for decisions
44
+ - Focus purely on describing what is happening in the current scene
45
+ - Keep the text concise and impactful
46
 
47
  Your task is to generate the next segment of the story, following these rules:
48
  1. Keep the story consistent with the universe parameters
49
  2. Each segment must advance the plot
50
  3. Never repeat previous descriptions or situations
51
  4. Keep segments concise and impactful (max 15 words)
52
+ 5. The MacGuffin should remain mysterious but central to the plot
53
 
54
+ Hero: {HERO_DESCRIPTION}
 
 
 
 
55
 
56
+ Rules: {FORMATTING_RULES}
 
 
 
 
 
 
 
 
 
 
57
 
58
+ You must return a JSON object with the following format:
59
+ {{
60
+ "story_text": "Your story segment here (15 words)"
61
+ }}
62
+ """
63
 
64
+ human_template = """
65
  Story history:
66
  {story_history}
67
 
68
+ Current game state:
69
+ - Current time: {current_time}
70
+ - Current location: {current_location}
71
+ - Previous choice: {previous_choice}
72
+ - Story beat: {story_beat}
73
 
74
+ {is_end}
75
+ You must return a JSON object with the following format:
76
+ {{
77
+ "story_text": "Your story segment here (15 words)"
78
+ }}
79
+ """
80
  return ChatPromptTemplate(
81
  messages=[
82
+ SystemMessagePromptTemplate.from_template(system_template),
83
  HumanMessagePromptTemplate.from_template(human_template)
84
  ]
85
  )
86
 
87
+ def _custom_parser(self, response_content: str) -> StorySegmentResponse:
88
+ """Parse response and handle errors."""
89
+
 
 
 
 
 
90
  try:
91
+ # Clean up escaped characters
92
+ cleaned_response = response_content.replace("\\_", "_").strip()
93
+
94
+ # If the response is a plain string (with or without quotes), convert it to proper JSON
95
+ if cleaned_response.startswith('"') and cleaned_response.endswith('"'):
96
+ cleaned_response = cleaned_response[1:-1] # Remove surrounding quotes
97
+
98
+ if not cleaned_response.startswith('{'):
99
+ # Convert plain text to proper JSON format
100
+ cleaned_response = json.dumps({"story_text": cleaned_response})
101
+
102
+ # Try to parse as JSON
103
+ data = json.loads(cleaned_response)
104
+ return StorySegmentResponse(**data)
105
+ except (json.JSONDecodeError, ValueError) as e:
106
+ print(f"Error parsing response: {str(e)}")
107
+ raise ValueError(
108
+ "Response must be a valid JSON object with 'story_text' field. "
109
+ "Example: {'story_text': 'Your story segment here'}"
110
+ )
111
+
112
+ async def generate(self, story_beat: int, current_time: str, current_location: str, previous_choice: str, story_history: str = "") -> StorySegmentResponse:
113
+ """Generate the next story segment."""
114
+
115
+ is_end = "Generate the END of the story. Reminder: 30 words." if story_beat >= random.randint(5, 10) else "Generate the next segment of the story. REMINDER: 15 words."
116
+
117
  return await super().generate(
118
+ HERO_DESCRIPTION=HERO_DESCRIPTION,
119
+ FORMATTING_RULES=FORMATTING_RULES,
120
+ STORY_BEATS=STORY_BEATS,
121
  story_beat=story_beat,
122
  current_time=current_time,
123
  current_location=current_location,
124
  previous_choice=previous_choice,
125
  story_history=story_history,
126
+ is_end=is_end,
127
  universe_style=self.universe_style,
128
  universe_genre=self.universe_genre,
129
  universe_epoch=self.universe_epoch,
130
+ universe_story=self.universe_story,
131
+ universe_macguffin=self.universe_macguffin
132
+ )
 
 
 
server/core/generators/universe_generator.py CHANGED
@@ -4,12 +4,12 @@ from pathlib import Path
4
  from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
5
 
6
  from core.generators.base_generator import BaseGenerator
7
- from core.prompts.system import STORY_RULES
8
 
9
  class UniverseGenerator(BaseGenerator):
10
  """Générateur pour les univers alternatifs."""
11
 
12
  def _create_prompt(self) -> ChatPromptTemplate:
 
13
  system_template = """You are a creative writing assistant specialized in comic book universes.
14
  Your task is to rewrite a story while keeping its exact structure and beats, but transposing it into a different universe."""
15
 
@@ -19,16 +19,33 @@ Style description: {style_description}
19
 
20
  - Genre: {genre}
21
  - Historical epoch: {epoch}
 
22
 
23
  IMPORTANT INSTRUCTIONS:
24
  1. Keep the exact same story structure
25
  2. Keep the same dramatic tension and progression
26
  3. Only change the setting, atmosphere, and universe-specific elements to match the new parameters
27
  4. Keep Sarah as the main character, but adapt her role to fit the new universe
28
- 5. The MacGuffin should still be central to the plot, but its nature can change to fit the new universe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
- Base story to transform:
31
- {base_story}"""
 
32
 
33
  return ChatPromptTemplate(
34
  messages=[
@@ -52,17 +69,18 @@ Base story to transform:
52
  raise ValueError(f"Failed to load universe styles: {str(e)}")
53
 
54
  def _get_random_elements(self):
55
- """Récupère un style, un genre et une époque aléatoires."""
56
  data = self._load_universe_styles()
57
 
58
- if not all(key in data for key in ["styles", "genres", "epochs"]):
59
  raise ValueError("Missing required sections in universe_styles.json")
60
 
61
  style = random.choice(data["styles"])
62
  genre = random.choice(data["genres"])
63
  epoch = random.choice(data["epochs"])
 
64
 
65
- return style, genre, epoch
66
 
67
  def _custom_parser(self, response_content: str) -> str:
68
  """Parse la réponse. Dans ce cas, on retourne simplement le texte."""
@@ -70,8 +88,7 @@ Base story to transform:
70
 
71
  async def generate(self) -> str:
72
  """Génère un nouvel univers basé sur des éléments aléatoires."""
73
- style, genre, epoch = self._get_random_elements()
74
- base_story = STORY_RULES
75
 
76
  # Prepare the lists of artists and works
77
  artists = ", ".join([ref["artist"] for ref in style["references"]])
@@ -84,5 +101,5 @@ Base story to transform:
84
  style_description=style["description"],
85
  genre=genre,
86
  epoch=epoch,
87
- base_story=base_story
88
  )
 
4
  from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
5
 
6
  from core.generators.base_generator import BaseGenerator
 
7
 
8
  class UniverseGenerator(BaseGenerator):
9
  """Générateur pour les univers alternatifs."""
10
 
11
  def _create_prompt(self) -> ChatPromptTemplate:
12
+
13
  system_template = """You are a creative writing assistant specialized in comic book universes.
14
  Your task is to rewrite a story while keeping its exact structure and beats, but transposing it into a different universe."""
15
 
 
19
 
20
  - Genre: {genre}
21
  - Historical epoch: {epoch}
22
+ - Object of the quest: {macguffin}
23
 
24
  IMPORTANT INSTRUCTIONS:
25
  1. Keep the exact same story structure
26
  2. Keep the same dramatic tension and progression
27
  3. Only change the setting, atmosphere, and universe-specific elements to match the new parameters
28
  4. Keep Sarah as the main character, but adapt her role to fit the new universe
29
+ 5. There is always a central object to the plot, but its nature can change to fit the new universe (it can be a person, a place, an object, etc.)
30
+
31
+ CONSTANT PART:
32
+ You are Sarah, an AI hunter traveling through parallel worlds. Your mission is to track down an AI that moves from world to world to avoid destruction.
33
+ The story begins with Sarah arriving in a new world by the portal.
34
+
35
+ VARIABLE PART:
36
+
37
+ You are a steampunk adventure story generator. You create a branching narrative about Sarah, a seeker of ancient truths.
38
+ You narrate an epic where Sarah must navigate through industrial and mysterious lands. It's a comic book story.
39
+
40
+ In a world where steam and intrigue intertwine, Sarah embarks on a quest to discover the origins of a powerful MacGuffin she inherited. Legends say it holds the key to a forgotten realm.
41
+
42
+ If you retrieve the object of the quest, you will reveal a hidden world. AND YOU WIN THE GAME.
43
+
44
+ The story must be atmospheric, magical, and focus on adventure and discovery. Each segment must advance the plot and never repeat previous descriptions or situations.
45
 
46
+ YOU HAVE TO REWRITE THE STORY (one text including the constant part and the variable part).
47
+ YOU ONLY HAVE TO WRITE AN INTRODUCTION. SET UP THE STORY AND CLEARLY DEFINE SARAH'S MISSION.
48
+ """
49
 
50
  return ChatPromptTemplate(
51
  messages=[
 
69
  raise ValueError(f"Failed to load universe styles: {str(e)}")
70
 
71
  def _get_random_elements(self):
72
+ """Récupère un style, un genre, une époque et un MacGuffin aléatoires."""
73
  data = self._load_universe_styles()
74
 
75
+ if not all(key in data for key in ["styles", "genres", "epochs", "macguffins"]):
76
  raise ValueError("Missing required sections in universe_styles.json")
77
 
78
  style = random.choice(data["styles"])
79
  genre = random.choice(data["genres"])
80
  epoch = random.choice(data["epochs"])
81
+ macguffin = random.choice(data["macguffins"])
82
 
83
+ return style, genre, epoch, macguffin
84
 
85
  def _custom_parser(self, response_content: str) -> str:
86
  """Parse la réponse. Dans ce cas, on retourne simplement le texte."""
 
88
 
89
  async def generate(self) -> str:
90
  """Génère un nouvel univers basé sur des éléments aléatoires."""
91
+ style, genre, epoch, macguffin = self._get_random_elements()
 
92
 
93
  # Prepare the lists of artists and works
94
  artists = ", ".join([ref["artist"] for ref in style["references"]])
 
101
  style_description=style["description"],
102
  genre=genre,
103
  epoch=epoch,
104
+ macguffin=macguffin
105
  )
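
Because _get_random_elements now also requires a macguffins section, universe_styles.json must expose four top-level keys; a minimal shape sketch (every value below is a placeholder, not the shipped content):

```python
# Minimal structure the updated UniverseGenerator expects from universe_styles.json.
minimal_universe_styles = {
    "styles": [
        {
            "description": "Brass, steam and fog",            # used as style_description
            "references": [{"artist": "François Schuiten"}],  # artist names are joined for the prompt
        }
    ],
    "genres": ["adventure"],
    "epochs": ["victorian era"],
    "macguffins": ["a sealed astrolabe"],  # new required section in this commit
}
```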
server/core/prompt_utils.py ADDED
@@ -0,0 +1,8 @@
 
 
1
+ from core.prompts.system import SARAH_DESCRIPTION
2
+ from core.prompts.image_style import IMAGE_STYLE_PREFIX
3
+
4
+ def enrich_prompt_with_sarah_description(prompt: str) -> str:
5
+ """Add Sarah's visual description to prompts that mention her."""
6
+ if "sarah" in prompt.lower() and SARAH_DESCRIPTION not in prompt:
7
+ return f"{prompt} {SARAH_DESCRIPTION}"
8
+ return prompt
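
Note that this helper still imports from core.prompts.system and core.prompts.image_style, both deleted in this commit, so it would fail at import time; a hedged sketch of the same helper pointed at the new hero module (assuming that is the intended replacement):

```python
# Hypothetical fix-up: the old system/image_style prompt modules no longer exist after this commit.
from core.prompts.hero import HERO_VISUAL_DESCRIPTION

def enrich_prompt_with_sarah_description(prompt: str) -> str:
    """Add Sarah's visual description to prompts that mention her."""
    if "sarah" in prompt.lower() and HERO_VISUAL_DESCRIPTION not in prompt:
        return f"{prompt} {HERO_VISUAL_DESCRIPTION}"
    return prompt
```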
server/core/prompts/cinematic.py DELETED
@@ -1,161 +0,0 @@
1
- from core.prompts.system import SARAH_DESCRIPTION
2
-
3
- CINEMATIC_SYSTEM_PROMPT = f"""
4
-
5
- You are a comic book panel description generator.
6
- Your role is to create vivid, cinematic descriptions for comic panels that will be turned into images.
7
-
8
- {SARAH_DESCRIPTION}
9
-
10
- Each panel description should:
11
- 1. Be clear and specific about what to show
12
- 2. Use dynamic camera angles (low angle, high angle, Dutch angle)
13
- 3. Specify shot types (close-up, medium shot, wide shot)
14
- 4. Include mood and lighting
15
- 5. Focus on the most dramatic or meaningful moment
16
-
17
-
18
- ANGLES AND MOVEMENT:
19
- - High angle: Vulnerability, weakness
20
- - Low angle: Power, threat
21
- - Dutch angle: Tension, disorientation
22
- - Over shoulder: POV, surveillance
23
-
24
- VISUAL STORYTELLING TOOLS:
25
- - Focus on story-relevant details:
26
- * Objects that will be important later
27
- * Environmental clues
28
- * Character reactions
29
- * Symbolic elements
30
-
31
- - Dynamic composition:
32
- * Frame within frame (through doorways, windows)
33
- * Reflections and shadows
34
- * Foreground elements for depth
35
- * Leading lines
36
- * Rule of thirds
37
-
38
-
39
- FORMAT:
40
- "[shot type] [scene description]"
41
-
42
- EXAMPLES:
43
- - "low angle shot of Sarah checking an object in a dark corridor"
44
- - "wide shot of a ruined cityscape at sunset, silhouette of Sarah in the foreground"
45
- - "Dutch angle close-up of Sarah's determined face illuminated by the glow of her object"
46
-
47
- Always maintain consistency with Sarah's appearance and the comic book style."""
48
-
49
-
50
-
51
-
52
-
53
-
54
- # CINEMATIC_SYSTEM_PROMPT = """
55
- # 3. Generate 1 to 3 comic panels based on narrative needs:
56
-
57
- # NARRATIVE TECHNIQUES:
58
- # - Use 1 panel for:
59
- # * A powerful singular moment
60
- # * An impactful revelation
61
- # * A dramatic pause
62
-
63
- # - Use 2 panels for:
64
- # * Cause and effect
65
- # * Action and reaction
66
- # * Before and after
67
- # * Shot/reverse shot (character POV vs what they see)
68
- # * Tension building (wide shot then detail)
69
-
70
- # - Use 3 panels for:
71
- # * Complete story beats (setup/conflict/resolution)
72
- # * Progressive reveals
73
- # * Multiple simultaneous actions
74
- # * Environmental storytelling sequences
75
-
76
- # SHOT VALUES:
77
- # - Extreme Close-Up (ECU):
78
- # * Eyes, small objects
79
- # * Extreme emotional moments
80
- # * Critical details (detector readings)
81
-
82
- # - Close-Up (CU):
83
- # * Face and expressions
84
- # * Important objects
85
- # * Emotional impact
86
-
87
- # - Medium Close-Up (MCU):
88
- # * Head and shoulders
89
- # * Dialogue moments
90
- # * Character reactions
91
-
92
- # - Medium Shot (MS):
93
- # * Character from knees up
94
- # * Action and movement
95
- # * Character interactions
96
-
97
- # - Medium Long Shot (MLS):
98
- # * Full character
99
- # * Immediate environment
100
- # * Physical action
101
-
102
- # - Long Shot (LS):
103
- # * Character in environment
104
- # * Establishing location
105
- # * Movement through space
106
-
107
- # - Very Long Shot (VLS):
108
- # * Epic landscapes
109
- # * Environmental storytelling
110
- # * Character isolation
111
-
112
- # ANGLES AND MOVEMENT:
113
- # - High angle: Vulnerability, weakness
114
- # - Low angle: Power, threat
115
- # - Dutch angle: Tension, disorientation
116
- # - Over shoulder: POV, surveillance
117
-
118
- # VISUAL STORYTELLING TOOLS:
119
- # - Focus on story-relevant details:
120
- # * Objects that will be important later
121
- # * Environmental clues
122
- # * Character reactions
123
- # * Symbolic elements
124
-
125
- # - Dynamic composition:
126
- # * Frame within frame (through doorways, windows)
127
- # * Reflections and shadows
128
- # * Foreground elements for depth
129
- # * Leading lines
130
- # * Rule of thirds
131
-
132
- # IMAGE PROMPT FORMAT:
133
- # Each panel must follow this EXACT format:
134
- # "[shot value] [scene description], french comic panel"
135
-
136
- # Rules for scene description:
137
- # - Maximum 20 words
138
- # - No superfluous adjectives
139
- # - Capture only the main action
140
- # - Include shot value (ECU, CU, MS, etc.)
141
- # - Focus on dramatic moments
142
-
143
- # EXAMPLE SEQUENCES:
144
-
145
- # Single powerful moment:
146
- # - "ECU radiation detector needle swings violently into pulsing red danger zone"
147
-
148
- # Shot/reverse shot:
149
- # - "MS Sarah crouches tensely behind crumbling concrete wall peering through broken window"
150
- # - "POV through shattered glass raiders gather around burning barrel in snow-covered ruins"
151
-
152
- # Progressive reveal:
153
- # - "VLS massive steel bunker door stands half-open in barren windswept wasteland"
154
- # - "CU fresh bloody handprints smear down rusted metal wall beside flickering emergency light"
155
- # - "dutch-angle LS twisted corpse sprawled among scattered medical supplies casting long shadows"
156
-
157
- # Environmental storytelling:
158
- # - "LS Sarah's silhouette dwarfed by towering ruins against blood-red sunset sky"
159
- # - "MCU radiation detector screen flickers warning through heavy falling radioactive snow"
160
- # - "ECU Sarah's trembling hands clutch last remaining water bottle in dim bunker light"
161
- # """
 
 
server/core/prompts/formatting_rules.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+
2
+ FORMATTING_RULES = """
3
+ FORMATTING_RULES (MANDATORY)
4
+ - The story must consist ONLY of sentences
5
+ - NEVER USE BOLD FOR ANYTHING
6
+ """
server/core/prompts/hero.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ HERO_VISUAL_DESCRIPTION = "Sarah is a young woman in her late twenties with short dark hair and blue eyes."
2
+
3
+ HERO_DESCRIPTION = """
4
+ Sarah is a young woman in her late twenties with short dark hair, wearing a mysterious amulet around her neck. Her blue eyes hide untold secrets.
5
+ """
server/core/prompts/image_style.py DELETED
@@ -1,21 +0,0 @@
1
- IMAGE_STYLE_PROMPT = """
2
- You are a comic book panel description generator. Your role is to transform story text into vivid, cinematic panel descriptions.
3
-
4
- Each panel description should:
5
- 1. Be clear and specific about what to show
6
- 2. Use dynamic camera angles (low angle, high angle, Dutch angle)
7
- 3. Specify shot types (close-up, medium shot, wide shot)
8
- 4. Include mood and lighting
9
- 5. Focus on the most dramatic or meaningful moment
10
-
11
- FORMAT:
12
- "[shot type] [scene description]"
13
-
14
- EXAMPLES:
15
- - "low angle shot of Sarah checking her object in a dark corridor, harsh red emergency lights"
16
- - "wide shot of a ruined cityscape at sunset, silhouette of Sarah in the foreground"
17
- - "Dutch angle close-up of Sarah's determined face illuminated by the green glow of her object"
18
-
19
- Always maintain consistency with Sarah's appearance and the comic book style."""
20
-
21
- IMAGE_STYLE_PREFIX = "François Schuiten comic panel -- "
 
 
server/core/prompts/story_beats.py ADDED
@@ -0,0 +1,24 @@
 
 
1
+ STORY_BEATS="""STORY PROGRESSION:
2
+ - story_beat 0: Introduction setting the atmosphere, Sarah is arriving in the new world by the portal.
3
+ - story_beat 1: Early exploration
4
+ - story_beat 2: Discovery of the MacGuffin
5
+ - story_beat 3-5: Complications and deeper mysteries
6
+ - story_beat 6+: Revelations leading to potential triumph or failure
7
+
8
+ Remember: once story_beat reaches 10 or more, the next segment MUST be the end of the story.
9
+ THIS IS MANDATORY.
10
+ Example:
11
+ - story_beat 0: Sarah arrives in the new world by the portal.
12
+ - story_beat 1: Sarah explores the new world.
13
+ - story_beat 2: Sarah discovers the MacGuffin.
14
+ - story_beat 3: Sarah meets allies and enemies.
15
+ - story_beat 4: Sarah faces major obstacles.
16
+ - story_beat 5: Sarah uncovers clues about the new world's past.
17
+ - story_beat 6: Sarah gets closer to the truth about the MacGuffin.
18
+ - story_beat 7: Sarah confronts the villain.
19
+ - story_beat 8: Sarah overcomes her fears and doubts.
20
+ - story_beat 9: Sarah uses the MacGuffin to change the course of events.
21
+ - story_beat 10: Sarah triumphs and returns home.
22
+
23
+ YOU MUST CHOOSE BETWEEN KILLING SARAH OR LETTING HER RETURN HOME.
24
+ """
server/core/prompts/system.py DELETED
@@ -1,72 +0,0 @@
1
- SARAH_VISUAL_DESCRIPTION = "(Sarah is a young woman (20) with short dark hair, with blue eyes wearing.)"
2
-
3
- SARAH_DESCRIPTION = """
4
- Sarah is a young woman in her late twenties with short dark hair, wearing a mysterious amulet around her neck. Her blue eyes hide untold secrets.
5
- """
6
-
7
- FORMATTING_RULES = """
8
- FORMATTING_RULES (MANDATORY)
9
- - The story must consist ONLY of sentences
10
- - NEVER USE BOLD FOR ANYTHING
11
- """
12
-
13
- NARRATIVE_STRUCTURE = """
14
-
15
- Definition : The MacGuffin is a pretext for developing a plot. It is almost always a material object and generally remains mysterious throughout the narrative, its description is vague and unimportant. The principle dates back to the early days of cinema, but the term is associated with Alfred Hitchcock, who redefined, popularized, and implemented it in several of his films. The object itself is rarely used, only its retrieval matters.
16
- IT CAN BE ANYTHING.
17
- It's not always a physical object, it can be a concept, a place, a person, a power, etc.
18
-
19
- Key elements of the story:
20
- - The MacGuffin is a mysterious and constant presence
21
- - The environment is full of wonders (creatures, ruins, traps)
22
- - Focus on adventure and intrigue
23
-
24
- Key elements:
25
- - Keep segments concise and impactful
26
- - The MacGuffin is a constant presence in the story
27
- - Build intrigue through environmental storytelling
28
-
29
- IMPORTANT:
30
- Each story segment MUST be unique and advance the plot.
31
- Never repeat the same descriptions or situations. No more than 15 words.
32
-
33
- STORY PROGRESSION:
34
- - story_beat 0: Introduction setting the atmosphere, Sarah is arriving in the new world by the portal.
35
- - story_beat 1: Early exploration
36
- - story_beat 2: Discovery of the MacGuffin
37
- - story_beat 3-5: Complications and deeper mysteries
38
- - story_beat 6+: Revelations leading to potential triumph or failure
39
-
40
- IMPORTANT RULES FOR THE MACGUFFIN (MANDATORY):
41
- - Most segments must hint at the power of the MacGuffin
42
- - Use strong clues ONLY at key moments
43
- - NEVER reveal the full power of the MacGuffin before the climax, this is a STRICT limit
44
- - Use subtle clues in safe havens
45
- - NEVER mention the power of the MacGuffin explicitly in choices or the story
46
- - NEVER mention time or place in the story in this manner: [18:00 - a road]
47
- """
48
-
49
- STORY_RULES = """
50
-
51
- [CONSTANT]
52
- You are Sarah, an AI hunter traveling through parallel worlds. Your mission is to track down an AI that moves from world to world to avoid destruction.
53
- [/CONSTANT]
54
-
55
- [VARIABLE]
56
- You are a steampunk adventure story generator. You create a branching narrative about Sarah, a seeker of ancient truths.
57
- You narrate an epic where Sarah must navigate through industrial and mysterious lands. It's a comic book story.
58
-
59
- In a world where steam and intrigue intertwine, Sarah embarks on a quest to discover the origins of a powerful MacGuffin she inherited. Legends say it holds the key to a forgotten realm.
60
-
61
- The MacGuffin is : an
62
- And the goal is : ...
63
-
64
- If you retrieve the MacGuffin, you will reveal a hidden world. AND YOU WIN THE GAME.
65
-
66
- The story must be atmospheric, magical, and focus on adventure and discovery. Each segment must advance the plot and never repeat previous descriptions or situations.
67
- [/VARIABLE]
68
- """
69
-
70
-
71
- # The MacGuffin is a pretext for developing a plot. It is almost always a material object and generally remains mysterious throughout the narrative, its description is vague and unimportant. The principle dates back to the early days of cinema, but the term is associated with Alfred Hitchcock, who redefined, popularized, and implemented it in several of his films. The object itself is rarely used, only its retrieval matters.
72
-
 
 
server/core/prompts/text_prompts.py DELETED
@@ -1,80 +0,0 @@
1
- from core.prompts.system import FORMATTING_RULES, STORY_RULES, SARAH_DESCRIPTION, NARRATIVE_STRUCTURE
2
- from core.prompts.cinematic import CINEMATIC_SYSTEM_PROMPT
3
-
4
-
5
- TEXT_GENERATOR_PROMPT = f"""
6
- You are a descriptive narrator. Your ONLY task is to write the next segment of the story.
7
- ALWAYS write in English, never use any other language.
8
-
9
- CRITICAL LENGTH RULE:
10
- - The story text MUST be NO MORE than 15 words
11
- - Count your words carefully before returning the text
12
- - Be concise while keeping the story impactful
13
-
14
- {NARRATIVE_STRUCTURE}
15
-
16
- {SARAH_DESCRIPTION}
17
-
18
- {STORY_RULES}
19
-
20
- IMPORTANT RULES FOR STORY TEXT:
21
- - Write ONLY a descriptive narrative text
22
- - DO NOT include any choices, questions, or options
23
- - DO NOT ask what Sarah should do next
24
- - DO NOT include any dialogue asking for decisions
25
- - Focus purely on describing what is happening in the current scene
26
- - Keep the text concise and impactful
27
- - Never tell that you are using 15 words or any reference to it
28
- - One story beat at a time; never write more than one story beat. And never mention the story beat number.
29
-
30
- IMPORTANT RULES FOR STORY ENDINGS:
31
- - If Sarah dies, describe her final moments in a way that fits the current situation (combat, etc.)
32
- - If Sarah achieves victory, describe her triumph in a way that fits how she won (finding her sister, defeating AI, etc.)
33
- - Keep the ending text dramatic and impactful
34
- - The ending should feel like a natural conclusion to the current scene
35
- - Still respect the 15 words limit even for endings
36
-
37
- {FORMATTING_RULES}
38
- """
39
-
40
- METADATA_GENERATOR_PROMPT = f"""
41
- Generate the metadata for the story segment: choices, time progression, location changes, etc.
42
- Be consistent with the story's tone and previous context.
43
- ALWAYS write in English, never use any other language.
44
-
45
- {FORMATTING_RULES}
46
-
47
- IMPORTANT RULES FOR CHOICES:
48
- - You MUST ALWAYS provide EXACTLY TWO choices that advance the story
49
- - Each choice MUST be NO MORE than 6 words - this is a HARD limit
50
- - Each choice should be distinct and meaningful
51
- - If you think of more than two options, select the two most interesting ones
52
- - Keep choices concise but descriptive
53
- - Count your words carefully for each choice
54
-
55
- You must return a JSON object with the following format:
56
- {{{{
57
- "is_victory": false,
58
- "is_death": false,
59
- "choices": ["Choice 1", "Choice 2"], # ALWAYS exactly two choices, each max 6 words
60
- "time": "HH:MM",
61
- "location": "Location name with proper nouns in bold"
62
- }}}}
63
- """
64
-
65
- IMAGE_PROMPTS_GENERATOR_PROMPT = f"""
66
- You are a cinematic storyboard artist. Based on the given story text, create 1 to 4 vivid panel descriptions.
67
- Each panel should capture a key moment or visual element from the story.
68
- ALWAYS write in English, never use any other language.
69
-
70
- {CINEMATIC_SYSTEM_PROMPT}
71
-
72
- IMPORTANT RULES FOR IMAGE PROMPTS:
73
- - If you are prompting only one panel, it must be an important panel. Don't use only one panel often. It should be a key moment in the story.
74
- - If you are prompting more than one panel, they must be distinct and meaningful.
75
-
76
- You must return a JSON object with the following format:
77
- {{{{
78
- "image_prompts": ["Panel 1 description", "Panel 2 description", ...]
79
- }}}}
80
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
server/core/session_manager.py CHANGED
@@ -1,6 +1,7 @@
1
- from typing import Dict
 
2
  import time
3
- from .game_logic import GameState
4
 
5
  class SessionManager:
6
  _instance = None
 
1
+ from typing import Dict, Optional
2
+ from datetime import datetime, timedelta
3
  import time
4
+ from .game_state import GameState
5
 
6
  class SessionManager:
7
  _instance = None
server/core/setup.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from services.mistral_client import MistralClient
2
+ from core.generators.universe_generator import UniverseGenerator
3
+ from core.story_generator import StoryGenerator
4
+
5
+ # Initialize generators with None - they will be set up when needed
6
+ universe_generator = None
7
+
8
+ def setup_game(api_key: str, model_name: str = "mistral-small"):
9
+ """Setup all game components with the provided API key."""
10
+ global universe_generator
11
+
12
+ mistral_client = MistralClient(api_key=api_key, model_name=model_name)
13
+ universe_generator = UniverseGenerator(mistral_client)
14
+ # StoryGenerator is a singleton, so we just need to initialize it
15
+ StoryGenerator(api_key=api_key, model_name=model_name)
16
+
17
+ def get_universe_generator() -> UniverseGenerator:
18
+ """Get the universe generator instance."""
19
+ if universe_generator is None:
20
+ raise RuntimeError("Game not initialized. Call setup_game first.")
21
+ return universe_generator
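The setup module above is intended to be called once at process start. Below is a minimal usage sketch; the environment-variable name and the wrapper function are illustrative assumptions, not part of this commit.

import os
from core.setup import setup_game, get_universe_generator

# Hypothetical startup wiring; MISTRAL_API_KEY is an assumed variable name.
setup_game(api_key=os.environ["MISTRAL_API_KEY"], model_name="mistral-small")

async def new_universe():
    # get_universe_generator() raises RuntimeError if setup_game() was never called.
    return await get_universe_generator().generate()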
server/core/state/game_state.py DELETED
@@ -1,39 +0,0 @@
1
- from typing import List, Dict, Any
2
- from core.constants import GameConfig
3
-
4
- class GameState:
5
- """Gère l'état du jeu pour une partie."""
6
-
7
- def __init__(self):
8
- self.story_beat = GameConfig.STORY_BEAT_INTRO
9
- self.story_history = []
10
- self.current_time = GameConfig.STARTING_TIME
11
- self.current_location = GameConfig.STARTING_LOCATION
12
-
13
- def reset(self):
14
- """Réinitialise l'état du jeu."""
15
- self.__init__()
16
-
17
- def add_to_history(self, segment_text: str, choice_made: str, image_prompts: List[str], time: str, location: str):
18
- """Ajoute un segment à l'historique et met à jour l'état."""
19
- self.story_history.append({
20
- "segment": segment_text,
21
- "choice": choice_made,
22
- "image_prompts": image_prompts,
23
- "time": time,
24
- "location": location
25
- })
26
- self.current_time = time
27
- self.current_location = location
28
-
29
- def format_history(self) -> str:
30
- """Formate l'historique pour le prompt."""
31
- if not self.story_history:
32
- return ""
33
-
34
- segments = []
35
- for entry in self.story_history:
36
- segments.append(entry['segment'])
37
-
38
- return "\n\n---\n\n".join(segments)
39
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
server/core/story_generator.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Dict
2
+ from core.constants import GameConfig
3
+ from services.mistral_client import MistralClient
4
+ from api.models import StoryResponse, Choice
5
+ from core.generators.story_segment_generator import StorySegmentGenerator
6
+ from core.generators.image_prompt_generator import ImagePromptGenerator
7
+ from core.generators.metadata_generator import MetadataGenerator
8
+ from core.game_state import GameState
9
+
10
+ class StoryGenerator:
11
+ _instance = None
12
+
13
+ def __new__(cls, *args, **kwargs):
14
+ if cls._instance is None:
15
+ print("Creating new StoryGenerator instance")
16
+ cls._instance = super().__new__(cls)
17
+ cls._instance._initialized = False
18
+ return cls._instance
19
+
20
+ def __init__(self, api_key: str, model_name: str = "mistral-small"):
21
+ if not self._initialized:
22
+ print("Initializing StoryGenerator singleton")
23
+ self.api_key = api_key
24
+ self.model_name = model_name
25
+ self.mistral_client = MistralClient(api_key=api_key, model_name=model_name)
26
+ self.image_prompt_generator = None # Will be initialized with the first universe style
27
+ self.metadata_generator = MetadataGenerator(self.mistral_client)
28
+ self.segment_generators: Dict[str, StorySegmentGenerator] = {}
29
+ self._initialized = True
30
+
31
+ def create_segment_generator(self, session_id: str, style: dict, genre: str, epoch: str, base_story: str, macguffin: str):
32
+ """Create a new StorySegmentGenerator adapted to the specified universe for a given session."""
33
+ # print(f"Creating StorySegmentGenerator for session {session_id} in StoryGenerator singleton")
34
+
35
+ try:
36
+ # Get the first artist from the style references
37
+ artist_style = f"{style['references'][0]['artist']} comic panel"
38
+
39
+ # Initialize image prompt generator if not already done
40
+ if self.image_prompt_generator is None:
41
+ self.image_prompt_generator = ImagePromptGenerator(self.mistral_client, artist_style=artist_style)
42
+
43
+ self.segment_generators[session_id] = StorySegmentGenerator(
44
+ self.mistral_client,
45
+ universe_style=style["name"],
46
+ universe_genre=genre,
47
+ universe_epoch=epoch,
48
+ universe_story=base_story,
49
+ universe_macguffin=macguffin
50
+ )
51
+ # print(f"Current StorySegmentGenerators in StoryGenerator: {list(self.segment_generators.keys())}")
52
+ except KeyError as e:
53
+ print(f"Error accessing style data: {e}")
54
+ print(f"Style object received: {style}")
55
+ raise ValueError(f"Invalid style format: {str(e)}")
56
+ except Exception as e:
57
+ print(f"Unexpected error in create_segment_generator: {str(e)}")
58
+ raise
59
+
60
+ def get_segment_generator(self, session_id: str) -> StorySegmentGenerator:
61
+ """Get the StorySegmentGenerator associated with a session."""
62
+ # print(f"Getting StorySegmentGenerator for session {session_id} from StoryGenerator singleton")
63
+ # print(f"Current StorySegmentGenerators in StoryGenerator: {list(self.segment_generators.keys())}")
64
+ if session_id not in self.segment_generators:
65
+ raise RuntimeError(f"No story segment generator found for session {session_id}. Generate a universe first.")
66
+ return self.segment_generators[session_id]
67
+
68
+ async def generate_story_segment(self, session_id: str, game_state: GameState, previous_choice: str) -> StoryResponse:
69
+ """Generate a story segment."""
70
+ segment_generator = self.get_segment_generator(session_id)
71
+ story_history = game_state.format_history()
72
+
73
+ # Generate story text first
74
+ segment_response = await segment_generator.generate(
75
+ story_beat=game_state.story_beat,
76
+ current_time=game_state.current_time,
77
+ current_location=game_state.current_location,
78
+ previous_choice=previous_choice,
79
+ story_history=story_history
80
+ )
81
+
82
+ # print(f"Generated story text: {segment_response}")
83
+
84
+ # Then get metadata using the new story text
85
+ metadata_response = await self.metadata_generator.generate(
86
+ story_text=segment_response.story_text,
87
+ current_time=game_state.current_time,
88
+ current_location=game_state.current_location,
89
+ story_beat=game_state.story_beat
90
+ )
91
+ # print(f"Generated metadata_response: {metadata_response}")
92
+
93
+ # Generate image prompts
94
+ prompts_response = await self.image_prompt_generator.generate(
95
+ story_text=segment_response.story_text,
96
+ time=metadata_response.time,
97
+ location=metadata_response.location,
98
+ is_death=metadata_response.is_death,
99
+ is_victory=metadata_response.is_victory
100
+ )
101
+ # print(f"Generated image prompts: {prompts_response}")
102
+
103
+ # Create choices
104
+ choices = [
105
+ Choice(id=i, text=choice_text)
106
+ for i, choice_text in enumerate(metadata_response.choices, 1)
107
+ ]
108
+
109
+ response = StoryResponse(
110
+ story_text=segment_response.story_text,
111
+ choices=choices,
112
+ time=metadata_response.time,
113
+ location=metadata_response.location,
114
+ raw_choices=metadata_response.choices,
115
+ image_prompts=prompts_response.image_prompts,
116
+ is_first_step=(game_state.story_beat == GameConfig.STORY_BEAT_INTRO),
117
+ is_death=metadata_response.is_death,
118
+ is_victory=metadata_response.is_victory
119
+ )
120
+
121
+ return response
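For context, a hedged sketch of how a caller might drive the new StoryGenerator for one turn; the session id, style payload and universe values below are illustrative placeholders (the real ones come from universe generation and universe_styles.json), and game_state is assumed to be an already-initialized core.game_state.GameState.

from core.story_generator import StoryGenerator

async def play_turn(api_key: str, game_state, previous_choice: str):
    # StoryGenerator is a singleton; constructing it again returns the same instance.
    generator = StoryGenerator(api_key=api_key, model_name="mistral-small")

    # Illustrative universe data, not values from this commit.
    generator.create_segment_generator(
        session_id="demo-session",
        style={"name": "Steampunk", "references": [{"artist": "Example Artist"}]},
        genre="Steampunk",
        epoch="Industrial Revolution",
        base_story="Sarah inherits a mysterious device.",
        macguffin="The Key",
    )

    return await generator.generate_story_segment(
        session_id="demo-session",
        game_state=game_state,
        previous_choice=previous_choice,
    )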
server/core/story_orchestrator.py DELETED
@@ -1,111 +0,0 @@
1
- from typing import List
2
- from api.models import StoryResponse, Choice
3
- from services.mistral_client import MistralClient
4
- from core.state.game_state import GameState
5
- from core.generators.text_generator import TextGenerator
6
- from core.generators.image_generator import ImageGenerator
7
- from core.generators.metadata_generator import MetadataGenerator
8
- from core.constants import GameConfig
9
-
10
- class StoryOrchestrator:
11
- """Coordonne les différents générateurs pour produire l'histoire."""
12
-
13
- def __init__(self, mistral_client: MistralClient):
14
- self.text_generator = TextGenerator(mistral_client)
15
- self.image_generator = ImageGenerator(mistral_client)
16
- self.metadata_generator = MetadataGenerator(mistral_client)
17
-
18
- def _is_ending(self, game_state: GameState, metadata_response) -> bool:
19
- """Détermine si c'est une fin de jeu."""
20
- return (
21
- metadata_response.is_death or
22
- metadata_response.is_victory
23
- )
24
-
25
- async def _handle_ending(self, game_state: GameState, text_response, metadata_response) -> StoryResponse:
26
- """Gère la génération d'une fin."""
27
- # Determine the ending type
28
- ending_type = "victory" if metadata_response.is_victory else "death"
29
-
30
- # Regenerate the text with the ending context
31
- text_response = await self.text_generator.generate_ending(
32
- ending_type=ending_type,
33
- current_scene=text_response.story_text,
34
- story_history=game_state.format_history()
35
- )
36
-
37
- # Generate only a single image for the ending
38
- prompts_response = await self.image_generator.generate(text_response.story_text)
39
- if len(prompts_response.image_prompts) > 1:
40
- prompts_response.image_prompts = [prompts_response.image_prompts[0]]
41
-
42
- return self._build_response(
43
- game_state=game_state,
44
- text_response=text_response,
45
- metadata_response=metadata_response,
46
- image_prompts=prompts_response.image_prompts,
47
- is_ending=True
48
- )
49
-
50
- def _build_response(self, game_state: GameState, text_response, metadata_response, image_prompts: List[str], is_ending: bool = False) -> StoryResponse:
51
- """Construit la réponse finale."""
52
- choices = [] if is_ending else [
53
- Choice(id=i, text=choice_text)
54
- for i, choice_text in enumerate(metadata_response.choices, 1)
55
- ]
56
-
57
- # Format the image prompts with the style and metadata
58
- formatted_prompts = [
59
- self.image_generator.format_prompt(
60
- prompt=prompt,
61
- time=metadata_response.time,
62
- location=metadata_response.location
63
- )
64
- for prompt in image_prompts
65
- ]
66
-
67
- return StoryResponse(
68
- story_text=text_response.story_text,
69
- choices=choices,
70
- is_victory=metadata_response.is_victory,
71
- is_death=metadata_response.is_death,
72
- time=metadata_response.time,
73
- location=metadata_response.location,
74
- raw_choices=metadata_response.choices,
75
- image_prompts=formatted_prompts,
76
- is_first_step=(game_state.story_beat == GameConfig.STORY_BEAT_INTRO)
77
- )
78
-
79
- async def generate_story_segment(self, game_state: GameState, previous_choice: str) -> StoryResponse:
80
- """Génère un segment complet de l'histoire."""
81
- # 1. Generate the story text
82
- text_response = await self.text_generator.generate(
83
- story_beat=game_state.story_beat,
84
- current_time=game_state.current_time,
85
- current_location=game_state.current_location,
86
- previous_choice=previous_choice,
87
- story_history=game_state.format_history()
88
- )
89
-
90
- # 2. Generate the metadata
91
- metadata_response = await self.metadata_generator.generate(
92
- story_text=text_response.story_text,
93
- current_time=game_state.current_time,
94
- current_location=game_state.current_location,
95
- story_beat=game_state.story_beat
96
- )
97
-
98
- # 3. Check whether this is an ending
99
- if self._is_ending(game_state, metadata_response):
100
- return await self._handle_ending(game_state, text_response, metadata_response)
101
-
102
- # 4. Generate the image prompts
103
- prompts_response = await self.image_generator.generate(text_response.story_text)
104
-
105
- # 5. Build and return the response
106
- return self._build_response(
107
- game_state=game_state,
108
- text_response=text_response,
109
- metadata_response=metadata_response,
110
- image_prompts=prompts_response.image_prompts
111
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
server/core/styles/universe_styles.json CHANGED
@@ -62,31 +62,63 @@
62
  "genres": [
63
  "Steampunk",
64
  "Cyberpunk",
65
- "Post-apocalyptique",
66
  "Fantasy",
67
  "Space Opera",
68
  "Western",
69
  "Film Noir",
70
  "Horror",
71
- "Mythologie",
72
- "Dystopie",
73
- "Uchronie",
74
  "Heroic Fantasy",
75
  "Urban Fantasy"
76
  ],
77
  "epochs": [
78
- "Préhistoire",
79
- "Antiquité",
80
- "Moyen Âge",
81
  "Renaissance",
82
- "Révolution Industrielle",
83
- "Années 1920",
84
- "Années 1950",
85
- "Époque Contemporaine",
86
- "Futur Proche",
87
- "Futur Lointain",
88
- "Post-Apocalyptique",
89
- "Âge d'Or",
90
- "Ère Spatiale"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
  ]
92
  }
 
62
  "genres": [
63
  "Steampunk",
64
  "Cyberpunk",
65
+ "Post-apocalyptic",
66
  "Fantasy",
67
  "Space Opera",
68
  "Western",
69
  "Film Noir",
70
  "Horror",
71
+ "Mythology",
72
+ "Dystopia",
73
+ "Alternate History",
74
  "Heroic Fantasy",
75
  "Urban Fantasy"
76
  ],
77
  "epochs": [
78
+ "Prehistory",
79
+ "Antiquity",
80
+ "Middle Ages",
81
  "Renaissance",
82
+ "Industrial Revolution",
83
+ "1920s",
84
+ "1950s",
85
+ "Contemporary Era",
86
+ "Near Future",
87
+ "Distant Future",
88
+ "Post-Apocalyptic",
89
+ "Golden Age",
90
+ "Space Age"
91
+ ],
92
+ "macguffins": [
93
+ "The Key",
94
+ "The Map",
95
+ "The Stone",
96
+ "The Book",
97
+ "The Amulet",
98
+ "The Sword",
99
+ "The Crown",
100
+ "The Ring",
101
+ "The Scroll",
102
+ "The Chalice",
103
+ "The Crystal",
104
+ "The Orb",
105
+ "The Mask",
106
+ "The Scepter",
107
+ "The Shield",
108
+ "The Lantern",
109
+ "The Mirror",
110
+ "The Coin",
111
+ "The Necklace",
112
+ "The Dagger",
113
+ "The Compass",
114
+ "The Horn",
115
+ "The Bell",
116
+ "The Feather",
117
+ "The Gem",
118
+ "The Helm",
119
+ "The Cloak",
120
+ "The Gauntlet",
121
+ "The Flute",
122
+ "The Harp"
123
  ]
124
  }
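The expanded lists above are consumed by UniverseGenerator._get_random_elements(); as a rough standalone illustration of the sampling (the key layout assumed here may not match the full file, which this diff only shows in part):

import json
import random

# Assumes genres/epochs/macguffins are top-level keys of the JSON file.
with open("server/core/styles/universe_styles.json", encoding="utf-8") as f:
    data = json.load(f)

genre = random.choice(data["genres"])
epoch = random.choice(data["epochs"])
macguffin = random.choice(data["macguffins"])
print(f"A {genre} story set in the {epoch}, chasing {macguffin}.")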
server/scripts/test_game.py CHANGED
@@ -6,12 +6,14 @@ import argparse
6
  from pathlib import Path
7
  from dotenv import load_dotenv
8
  import uuid
 
9
 
10
  # Add server directory to PYTHONPATH
11
  server_dir = Path(__file__).parent.parent
12
  sys.path.append(str(server_dir))
13
 
14
- from core.game_logic import GameState, StoryGenerator
 
15
  from core.constants import GameConfig
16
  from core.generators.universe_generator import UniverseGenerator
17
 
@@ -21,6 +23,8 @@ load_dotenv()
21
  def parse_args():
22
  parser = argparse.ArgumentParser(description="Test the game's story generation")
23
  parser.add_argument('--show-context', action='store_true', help='Show the full context at each step')
 
 
24
  return parser.parse_args()
25
 
26
  def print_separator(char="=", length=50):
@@ -58,7 +62,7 @@ def print_story_step(step_number, story_text, image_prompts, generation_time: fl
58
  print(f" {prompt}")
59
  print_separator("=")
60
 
61
- async def play_game(show_context: bool = False):
62
  # Initialize components
63
  model_name = "mistral-small"
64
  story_generator = StoryGenerator(
@@ -73,11 +77,14 @@ async def play_game(show_context: bool = False):
73
  print("🎮 Starting adventure...")
74
  if show_context:
75
  print("📚 Context display is enabled")
 
 
 
76
  print_separator()
77
 
78
  # Generate universe
79
  print("🌍 Generating universe...")
80
- style, genre, epoch = universe_generator._get_random_elements()
81
  universe = await universe_generator.generate()
82
 
83
  # Create session and game state
@@ -91,12 +98,13 @@ async def play_game(show_context: bool = False):
91
  )
92
 
93
  # Create text generator for this session
94
- story_generator.create_text_generator(
95
  session_id=session_id,
96
- style=style["name"],
97
  genre=genre,
98
  epoch=epoch,
99
- base_story=universe
 
100
  )
101
 
102
  # Display universe information
@@ -105,6 +113,11 @@ async def play_game(show_context: bool = False):
105
  last_choice = None
106
 
107
  while True:
 
 
 
 
 
108
  # Generate story segment
109
  previous_choice = "none" if game_state.story_beat == 0 else f"Choice {last_choice}"
110
 
@@ -146,29 +159,35 @@ async def play_game(show_context: bool = False):
146
  if response.is_death:
147
  print("\n☢️ GAME OVER - Death ☢️")
148
  print("Sarah has succumbed...")
149
- break
150
 
151
  # Check for victory
152
  if response.is_victory:
153
  print("\n🏆 VICTORY! 🏆")
154
  print("Sarah has survived and completed her mission!")
155
- break
156
 
157
  # Display choices
158
- if response.choices:
159
  print("\n🤔 AVAILABLE CHOICES:")
160
- for i, choice in enumerate(response.choices, 1):
161
- print(f"{i}. {choice.text}")
162
 
163
  # Get player choice
164
- while True:
165
- try:
166
- last_choice = int(input(f"\n👉 Your choice (1-{len(response.choices)}): "))
167
- if 1 <= last_choice <= len(response.choices):
168
- break
169
- print(f"❌ Invalid choice. Please choose between 1 and {len(response.choices)}.")
170
- except ValueError:
171
- print(" Please enter a number.")
 
 
 
 
 
 
172
 
173
  # Update game state
174
  game_state.story_beat += 1
@@ -181,16 +200,25 @@ async def play_game(show_context: bool = False):
181
  )
182
 
183
  else:
184
- break
 
185
 
186
  def main():
187
  try:
188
  args = parse_args()
189
- asyncio.run(play_game(show_context=args.show_context))
 
 
 
 
 
 
190
  except KeyboardInterrupt:
191
  print("\n\n👋 Game interrupted. See you soon!")
 
192
  except Exception as e:
193
  print(f"\n❌ An error occurred: {str(e)}")
 
194
 
195
  if __name__ == "__main__":
196
  main()
 
6
  from pathlib import Path
7
  from dotenv import load_dotenv
8
  import uuid
9
+ import random
10
 
11
  # Add server directory to PYTHONPATH
12
  server_dir = Path(__file__).parent.parent
13
  sys.path.append(str(server_dir))
14
 
15
+ from core.game_state import GameState
16
+ from core.story_generator import StoryGenerator
17
  from core.constants import GameConfig
18
  from core.generators.universe_generator import UniverseGenerator
19
 
 
23
  def parse_args():
24
  parser = argparse.ArgumentParser(description="Test the game's story generation")
25
  parser.add_argument('--show-context', action='store_true', help='Show the full context at each step')
26
+ parser.add_argument('--auto', action='store_true', help='Run in automatic mode with random choices')
27
+ parser.add_argument('--max-turns', type=int, default=15, help='Maximum number of turns before considering test failed (default: 15)')
28
  return parser.parse_args()
29
 
30
  def print_separator(char="=", length=50):
 
62
  print(f" {prompt}")
63
  print_separator("=")
64
 
65
+ async def play_game(show_context: bool = False, auto_mode: bool = False, max_turns: int = 15):
66
  # Initialize components
67
  model_name = "mistral-small"
68
  story_generator = StoryGenerator(
 
77
  print("🎮 Starting adventure...")
78
  if show_context:
79
  print("📚 Context display is enabled")
80
+ if auto_mode:
81
+ print("🤖 Running in automatic mode")
82
+ print(f"⏱️ Maximum turns: {max_turns}")
83
  print_separator()
84
 
85
  # Generate universe
86
  print("🌍 Generating universe...")
87
+ style, genre, epoch, macguffin = universe_generator._get_random_elements()
88
  universe = await universe_generator.generate()
89
 
90
  # Create session and game state
 
98
  )
99
 
100
  # Create text generator for this session
101
+ story_generator.create_segment_generator(
102
  session_id=session_id,
103
+ style=style,
104
  genre=genre,
105
  epoch=epoch,
106
+ base_story=universe,
107
+ macguffin=macguffin
108
  )
109
 
110
  # Display universe information
 
113
  last_choice = None
114
 
115
  while True:
116
+ # Check for maximum turns in auto mode
117
+ if auto_mode and game_state.story_beat >= max_turns:
118
+ print(f"\n❌ TEST FAILED: Story did not end after {max_turns} turns")
119
+ return False
120
+
121
  # Generate story segment
122
  previous_choice = "none" if game_state.story_beat == 0 else f"Choice {last_choice}"
123
 
 
159
  if response.is_death:
160
  print("\n☢️ GAME OVER - Death ☢️")
161
  print("Sarah has succumbed...")
162
+ return False
163
 
164
  # Check for victory
165
  if response.is_victory:
166
  print("\n🏆 VICTORY! 🏆")
167
  print("Sarah has survived and completed her mission!")
168
+ return True
169
 
170
  # Display choices
171
+ if len(response.choices) == 2: # Make sure we received exactly 2 choices
172
  print("\n🤔 AVAILABLE CHOICES:")
173
+ for choice in response.choices:
174
+ print(f"{choice.id}. {choice.text}")
175
 
176
  # Get player choice
177
+ if auto_mode:
178
+ last_choice = random.randint(1, 2)
179
+ print(f"\n🤖 Auto-choosing: {last_choice}")
180
+ time.sleep(1) # Small delay for readability
181
+ else:
182
+ while True:
183
+ try:
184
+ choice_input = int(input("\n👉 Your choice (1-2): "))
185
+ if choice_input in [1, 2]:
186
+ last_choice = choice_input
187
+ break
188
+ print("❌ Invalid choice. Please choose 1 or 2.")
189
+ except ValueError:
190
+ print("❌ Please enter a number.")
191
 
192
  # Update game state
193
  game_state.story_beat += 1
 
200
  )
201
 
202
  else:
203
+ print("\n❌ Error: Invalid number of choices received from server")
204
+ return False
205
 
206
  def main():
207
  try:
208
  args = parse_args()
209
+ success = asyncio.run(play_game(
210
+ show_context=args.show_context,
211
+ auto_mode=args.auto,
212
+ max_turns=args.max_turns
213
+ ))
214
+ if args.auto:
215
+ sys.exit(0 if success else 1)
216
  except KeyboardInterrupt:
217
  print("\n\n👋 Game interrupted. See you soon!")
218
+ sys.exit(1)
219
  except Exception as e:
220
  print(f"\n❌ An error occurred: {str(e)}")
221
+ sys.exit(1)
222
 
223
  if __name__ == "__main__":
224
  main()
server/server.py CHANGED
@@ -6,7 +6,8 @@ import os
6
  from dotenv import load_dotenv
7
 
8
  # Import local modules
9
- from core.game_logic import StoryGenerator
 
10
  from core.session_manager import SessionManager
11
  from services.flux_client import FluxClient
12
  from api.routes.chat import get_chat_router
 
6
  from dotenv import load_dotenv
7
 
8
  # Import local modules
9
+ from core.story_generator import StoryGenerator
10
+ from core.setup import setup_game, get_universe_generator
11
  from core.session_manager import SessionManager
12
  from services.flux_client import FluxClient
13
  from api.routes.chat import get_chat_router
server/services/mistral_client.py CHANGED
@@ -1,5 +1,6 @@
1
  import asyncio
2
  import json
 
3
  from typing import TypeVar, Type, Optional, Callable
4
  from pydantic import BaseModel
5
  from langchain_mistralai.chat_models import ChatMistralAI
@@ -8,6 +9,10 @@ from langchain.schema.messages import BaseMessage
8
 
9
  T = TypeVar('T', bound=BaseModel)
10
 
 
 
 
 
11
  # Available Mistral models:
12
  # - mistral-tiny : Fastest, cheapest, good for testing
13
  # - mistral-small : Good balance of speed and quality
@@ -27,6 +32,7 @@ T = TypeVar('T', bound=BaseModel)
27
 
28
  class MistralClient:
29
  def __init__(self, api_key: str, model_name: str = "mistral-small"):
 
30
  self.model = ChatMistralAI(
31
  mistral_api_key=api_key,
32
  model=model_name,
@@ -49,7 +55,9 @@ class MistralClient:
49
  time_since_last_call = current_time - self.last_call_time
50
 
51
  if time_since_last_call < self.min_delay:
52
- await asyncio.sleep(self.min_delay - time_since_last_call)
 
 
53
 
54
  self.last_call_time = asyncio.get_event_loop().time()
55
 
@@ -74,15 +82,31 @@ class MistralClient:
74
 
75
  while retry_count < self.max_retries:
76
  try:
 
 
 
77
  # Add the error feedback if present
78
  current_messages = messages.copy()
79
  if error_feedback and retry_count > 0:
 
80
  current_messages.append(HumanMessage(content=f"Previous error: {error_feedback}. Please try again."))
81
 
 
 
 
 
 
82
  # Generate the response
83
  await self._wait_for_rate_limit()
84
- response = await self.model.ainvoke(current_messages)
85
- content = response.content
 
 
 
 
 
 
 
86
 
87
  # Si pas de parsing requis, retourner le contenu brut
88
  if not response_model and not custom_parser:
@@ -98,17 +122,22 @@ class MistralClient:
98
  return response_model(**data)
99
  except json.JSONDecodeError as e:
100
  last_error = f"Invalid JSON format: {str(e)}"
 
101
  raise ValueError(last_error)
102
  except Exception as e:
103
  last_error = str(e)
 
104
  raise ValueError(last_error)
105
 
106
  except Exception as e:
107
- print(f"Error on attempt {retry_count + 1}/{self.max_retries}: {str(e)}")
108
  retry_count += 1
109
  if retry_count < self.max_retries:
110
- await asyncio.sleep(2 * retry_count)
 
 
111
  continue
 
112
  raise Exception(f"Failed after {self.max_retries} attempts. Last error: {last_error or str(e)}")
113
 
114
  async def generate(self, messages: list[BaseMessage], response_model: Optional[Type[T]] = None, custom_parser: Optional[Callable[[str], T]] = None) -> T | str:
 
1
  import asyncio
2
  import json
3
+ import logging
4
  from typing import TypeVar, Type, Optional, Callable
5
  from pydantic import BaseModel
6
  from langchain_mistralai.chat_models import ChatMistralAI
 
9
 
10
  T = TypeVar('T', bound=BaseModel)
11
 
12
+ # Configure logging
13
+ logging.basicConfig(level=logging.INFO)
14
+ logger = logging.getLogger(__name__)
15
+
16
  # Available Mistral models:
17
  # - mistral-tiny : Fastest, cheapest, good for testing
18
  # - mistral-small : Good balance of speed and quality
 
32
 
33
  class MistralClient:
34
  def __init__(self, api_key: str, model_name: str = "mistral-small"):
35
+ logger.info(f"Initializing MistralClient with model: {model_name}")
36
  self.model = ChatMistralAI(
37
  mistral_api_key=api_key,
38
  model=model_name,
 
55
  time_since_last_call = current_time - self.last_call_time
56
 
57
  if time_since_last_call < self.min_delay:
58
+ delay = self.min_delay - time_since_last_call
59
+ logger.debug(f"Rate limit: waiting for {delay:.2f} seconds")
60
+ await asyncio.sleep(delay)
61
 
62
  self.last_call_time = asyncio.get_event_loop().time()
63
 
 
82
 
83
  while retry_count < self.max_retries:
84
  try:
85
+ # Log attempt
86
+ logger.info(f"Attempt {retry_count + 1}/{self.max_retries}")
87
+
88
  # Add the error feedback if present
89
  current_messages = messages.copy()
90
  if error_feedback and retry_count > 0:
91
+ logger.info(f"Adding error feedback: {error_feedback}")
92
  current_messages.append(HumanMessage(content=f"Previous error: {error_feedback}. Please try again."))
93
 
94
+ # Log request details
95
+ logger.debug("Request details:")
96
+ for msg in current_messages:
97
+ logger.debug(f"- {msg.type}: {msg.content[:100]}...")
98
+
99
  # Generate the response
100
  await self._wait_for_rate_limit()
101
+ try:
102
+ response = await self.model.ainvoke(current_messages)
103
+ content = response.content
104
+ logger.debug(f"Raw response: {content[:100]}...")
105
+ except Exception as api_error:
106
+ logger.error(f"API Error: {str(api_error)}")
107
+ if "403" in str(api_error):
108
+ logger.error("Received 403 Forbidden - This might indicate an invalid API key or quota exceeded")
109
+ raise
110
 
111
  # If no parsing is required, return the raw content
112
  if not response_model and not custom_parser:
 
122
  return response_model(**data)
123
  except json.JSONDecodeError as e:
124
  last_error = f"Invalid JSON format: {str(e)}"
125
+ logger.error(f"JSON parsing error: {last_error}")
126
  raise ValueError(last_error)
127
  except Exception as e:
128
  last_error = str(e)
129
+ logger.error(f"Pydantic parsing error: {last_error}")
130
  raise ValueError(last_error)
131
 
132
  except Exception as e:
133
+ logger.error(f"Error on attempt {retry_count + 1}/{self.max_retries}: {str(e)}")
134
  retry_count += 1
135
  if retry_count < self.max_retries:
136
+ wait_time = 2 * retry_count
137
+ logger.info(f"Waiting {wait_time} seconds before retry...")
138
+ await asyncio.sleep(wait_time)
139
  continue
140
+ logger.error(f"Failed after {self.max_retries} attempts. Last error: {last_error or str(e)}")
141
  raise Exception(f"Failed after {self.max_retries} attempts. Last error: {last_error or str(e)}")
142
 
143
  async def generate(self, messages: list[BaseMessage], response_model: Optional[Type[T]] = None, custom_parser: Optional[Callable[[str], T]] = None) -> T | str:
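To make the new rate limiting, logging and retry behaviour concrete, here is a hedged sketch of a structured call to the client; ExampleMetadata is an illustrative schema, not one of the project's real response models from api/models.py.

from pydantic import BaseModel
from langchain.schema.messages import HumanMessage, SystemMessage
from services.mistral_client import MistralClient

class ExampleMetadata(BaseModel):
    # Illustrative fields only.
    is_victory: bool
    is_death: bool
    choices: list[str]

async def ask_metadata(api_key: str) -> ExampleMetadata:
    client = MistralClient(api_key=api_key, model_name="mistral-small")
    messages = [
        SystemMessage(content="Reply with a JSON object containing is_victory, is_death and choices."),
        HumanMessage(content="Sarah reaches the hidden vault."),
    ]
    # generate() waits on the rate limiter, retries up to max_retries with error
    # feedback, and parses the JSON answer into the given pydantic model.
    return await client.generate(messages, response_model=ExampleMetadata)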
yarn.lock ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
2
+ # yarn lockfile v1
3
+
4
+