-
Notifications
You must be signed in to change notification settings - Fork 38
Expand file tree
/
Copy pathgenerate_sigmund_sources.py
More file actions
341 lines (271 loc) · 11.2 KB
/
generate_sigmund_sources.py
File metadata and controls
341 lines (271 loc) · 11.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
"""
Documentation parser for creating JSON files from Pelican site content.
Processes markdown files, extracts metadata, determines topics, and chunks content.
"""
# Standard library
import json
import re
from pathlib import Path
from typing import Any, Dict, List

# Third-party
import tiktoken
from datamatrix import functional as fnc

# Local
from publishconf import SITEURL
from sigmund import static
# --- Configuration ----------------------------------------------------------

# Translation subdirectories whose pages must never be indexed.
EXCLUDE_DIRS = ['fr', 'zh', 'de', 'es']
# Only paths containing one of these fragments are processed (plain substring
# match against the full path; see should_skip_file()).
INCLUDE_DIRS = ['manual', 'items', 'beginner.md', 'intermediate.md',
                'intermediate-javascript.md']
# Regex for extracting a summary prompt from page content.
# NOTE(review): unused in this script -- presumably consumed elsewhere; confirm.
SUMMARY_PROMPT_PATTERN = r"<summary_prompt>(.*?)</summary_prompt>"
MAX_TOKENS = 8000  # Maximum tokens per chunk
OUTPUT_FILE = 'sigmund/opensesame.json'  # Destination of the JSON dump
COLLECTION = 'opensesame'    # Collection name stamped on every document
DEFAULT_TOPIC = 'opensesame' # Topic assigned to all regular documents
# Secondary topics that can be detected (by the LLM in detect_secondary_topic).
# Keys are topic slugs; values are the descriptions shown to the model.
SECONDARY_TOPICS = {
    'datamatrix': 'Data analysis and manipulation with DataMatrix',
    'inline_script': 'Python inline scripting in OpenSesame',
    'inline_javascript': 'JavaScript for OSWeb experiments'
}
# Topic -> source file for "foundation" documents that are indexed verbatim.
# NOTE(review): 'feedback' points at sketchpad.osexp -- looks deliberate
# (feedback is a sketchpad variant), but worth confirming.
FOUNDATION_DOCUMENTS = {
    'opensesame' : 'sigmund/opensesame.md',
    'inline_script': 'sigmund/inline_script.py',
    'inline_javascript': 'sigmund/inline_javascript.js',
    'loop': 'sigmund/loop.osexp',
    'logger': 'sigmund/logger.osexp',
    'sketchpad': 'sigmund/sketchpad.osexp',
    'feedback': 'sigmund/sketchpad.osexp',
    'notepad': 'sigmund/notepad.osexp',
    'sequence': 'sigmund/sequence.osexp',
    'mouse_response': 'sigmund/mouse_response.osexp',
    'keyboard_response': 'sigmund/keyboard_response.osexp',
    'sampler': 'sigmund/sampler.osexp',
    'synth': 'sigmund/synth.osexp',
    'form_text_input': 'sigmund/form_text_input.osexp',
    'form_text_display': 'sigmund/form_text_display.osexp',
    'form_multiple_choice': 'sigmund/form_multiple_choice.osexp'
}
# Optional extra documents: dicts that must contain at least a 'path' key.
EXTRA_DOCUMENTS = []
MODEL = 'gpt-5'  # Model used for secondary-topic detection

# Initialize tokenizer for GPT models.
# NOTE(review): the encoding is pinned to gpt-3.5-turbo while MODEL is
# 'gpt-5'; token counts are only used for chunk sizing, so an approximate
# encoding is presumably acceptable -- confirm.
tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")
def process_includes(content: str, base_path: Path) -> str:
    """
    Expand include directives of the form ``%-- include: path/to/file.md --%``
    by substituting the referenced file's content.

    Any whitespace around the colon is tolerated. Included files are
    themselves expanded recursively (always relative to *base_path*).
    A missing or unreadable file leaves the directive untouched.
    """
    directive = r'%--\s*include\s*:\s*(.*?)\s*--%'

    def _expand(match):
        target = base_path / match.group(1).strip()
        try:
            if not target.exists():
                print(f"Warning: Include file not found: {target}")
                return match.group(0)  # Keep original if file not found
            included = target.read_text()
            print(f"Including file {target}")
            # Recursively expand includes inside the included file
            return process_includes(included, base_path)
        except Exception as e:
            print(f"Error including file {target}: {e}")
            return match.group(0)

    return re.sub(directive, _expand, content)
@fnc.memoize(persistent=True)
def detect_secondary_topic(content: str, title: str) -> List[str]:
    """
    Use an LLM to detect which secondary topics apply to a page.

    Parameters
    ----------
    content : str
        Full page content; only the first 2000 characters are sent to the
        model as a sample.
    title : str
        Page title, included verbatim in the prompt.

    Returns
    -------
    List[str]
        The subset of SECONDARY_TOPICS keys named by the model, possibly
        empty. Results are memoized persistently across runs.
    """
    # Skip if no secondary topics are defined
    if not SECONDARY_TOPICS:
        return []
    # Build the topic descriptions dynamically
    topic_descriptions = '\n'.join(
        f"- {topic}: {description}"
        for topic, description in SECONDARY_TOPICS.items()
    )
    # Only the first 2000 characters are used as a sample. (Previously an
    # inline '#' comment to this effect sat inside the f-string and therefore
    # leaked verbatim into the prompt sent to the model; also fixed the
    # missing space in 'topic (if any)'.)
    prompt = f"""Given the following documentation page title and content, determine which secondary topic (if any) is most appropriate.
Title: {title}
Content excerpt:
{content[:2000]}
Available secondary topics:
{topic_descriptions}
Reply with ONLY a comma-separated list of all topic names that clearly apply, or 'none' if no secondary topic is appropriate."""
    response = static.predict(prompt, model=MODEL).strip().lower()
    # Keep only answers that name a known secondary topic
    return [t for t in (part.strip() for part in response.split(','))
            if t in SECONDARY_TOPICS]
def count_tokens(text: str) -> int:
    """Return how many tokens *text* encodes to under the module tokenizer."""
    encoded = tokenizer.encode(text)
    return len(encoded)
def chunk_markdown_by_tokens(text: str, max_tokens: int = MAX_TOKENS) -> List[str]:
    """
    Split markdown into chunks of roughly *max_tokens* tokens, preferring to
    break at headings.

    A heading line that would overflow the budget starts the next chunk; a
    non-heading line that overflows is kept with the current chunk (so a
    chunk may slightly exceed *max_tokens*) and a fresh chunk starts after it.
    """
    chunks: List[str] = []
    buffer: List[str] = []
    used = 0
    for line in text.splitlines():
        cost = count_tokens(line + '\n')
        overflows = bool(buffer) and used + cost > max_tokens
        if overflows and line.startswith('#'):
            # Break *before* the heading: it opens the next chunk
            chunks.append('\n'.join(buffer))
            buffer, used = [line], cost
        elif overflows:
            # Break *after* this line: keep it with the current chunk
            buffer.append(line)
            chunks.append('\n'.join(buffer))
            buffer, used = [], 0
        else:
            buffer.append(line)
            used += cost
    # Flush whatever remains
    if buffer:
        chunks.append('\n'.join(buffer))
    return chunks
def extract_metadata(path: Path, content: str) -> Dict[str, Any]:
    """
    Extract metadata (title, public URL, source path) from a markdown file.

    Fixed the return annotation: it used the builtin ``any`` instead of
    ``typing.Any``.

    Parameters
    ----------
    path : Path
        Path to the markdown file. Expected shape is
        content/pages/<...>/<name>.md -- the first two components are
        dropped when building the URL (TODO confirm for other layouts).
    content : str
        The file's full text; the title is taken from the first line that
        starts with 'title:'.

    Returns
    -------
    Dict[str, Any]
        Keys: 'title', 'url', 'path'.

    Raises
    ------
    ValueError
        If no 'title:' line is present.
    """
    # Extract title from the page's front matter
    title = None
    for line in content.splitlines():
        if line.startswith('title:'):
            title = line[6:].strip().strip('"')
            break
    if not title:
        raise ValueError(f'No title found in {path}')
    # Generate URL: drop 'content/pages' and the trailing '.md'
    url_parts = [SITEURL] + list(path.parts[2:-1]) + [path.parts[-1][:-3]]
    url = '/'.join(url_parts)
    return {
        'title': title,
        'url': url,
        'path': str(path),
    }
def should_skip_file(path: Path, content: str) -> bool:
    """
    Return True when the file at *path* should be excluded from indexing.

    A file is skipped when it lives in an excluded (translation) directory,
    when its path matches none of the included path fragments, or when its
    content is marked as translated ('locale:') or non-translatable
    ('translate: false').
    """
    # Excluded translation directories
    if set(path.parts).intersection(EXCLUDE_DIRS):
        return True
    # Must match at least one included path fragment
    path_str = str(path)
    if not any(fragment in path_str for fragment in INCLUDE_DIRS):
        return True
    # Translated or explicitly non-translatable content
    return 'locale:' in content or 'translate: false' in content
def create_document(content: str, metadata: Dict[str, Any]) -> Dict[str, Any]:
    """
    Create a document dictionary: all *metadata* fields plus a 'content' key.

    Fixed the annotations: they used the builtin ``any`` instead of
    ``typing.Any``.

    NOTE(review): if *metadata* itself contains a 'content' key, it overrides
    the *content* argument (the unpacking comes last) -- presumably callers
    never do this; confirm.
    """
    return {
        'content': content,
        **metadata
    }
def main():
    """Main processing function.

    Walks content/pages and howtos, builds chunked document dicts, adds the
    foundation and extra documents, and writes everything as a JSON list to
    OUTPUT_FILE.
    """
    # List to collect all documents
    documents = []
    # Process all markdown files
    for path in Path('content/pages').glob('**/*.md'):
        content = path.read_text()
        # Skip files based on exclusion rules
        if should_skip_file(path, content):
            continue
        print(f'\n--- Processing: {path} ---')
        # Process include directives. Includes here resolve relative to the
        # repository root, not the page's own directory -- TODO confirm
        # this asymmetry with the how-to loop below is intended.
        content = process_includes(content, Path('.'))
        # Extract basic metadata
        metadata = extract_metadata(path, content)
        print(f"Title: {metadata['title']}")
        print(f"URL: {metadata['url']}")
        # Determine topics
        topics = [DEFAULT_TOPIC]
        # Detect secondary topics if any are defined
        if SECONDARY_TOPICS:
            secondary_topics = detect_secondary_topic(content, metadata['title'])
            if secondary_topics:
                topics += secondary_topics
                print(f"Secondary topics detected: {secondary_topics}")
        metadata['topics'] = topics
        metadata['collection'] = COLLECTION
        metadata['foundation'] = False
        metadata['howto'] = False
        # Chunk the content
        chunks = chunk_markdown_by_tokens(content)
        print(f"Split into {len(chunks)} chunks")
        # Create document for each chunk
        for i, chunk in enumerate(chunks):
            chunk_metadata = metadata.copy()
            chunk_metadata['chunk'] = i + 1
            chunk_metadata['total_chunks'] = len(chunks)
            # Add title to chunk content
            chunk_content = f"# {metadata['title']}\n\n{chunk}"
            documents.append(create_document(chunk_content, chunk_metadata))
            print(f" Chunk {i+1}: {count_tokens(chunk_content)} tokens")
    # Process foundation documents
    for topic, path in FOUNDATION_DOCUMENTS.items():
        metadata = {
            'title': f'Foundation document for {topic}',
            'collection': COLLECTION,
            # NOTE(review): singular 'topic' key here, while regular
            # documents use 'topics' (a list) -- confirm consumers handle both
            'topic': topic,
            'howto': False,
            'foundation': True
        }
        documents.append(create_document(Path(path).read_text(), metadata))
        print(f"Added foundation document for {topic}")
    # Process extra documents:
    for metadata in EXTRA_DOCUMENTS:
        path = metadata.pop('path')
        metadata['collection'] = COLLECTION
        metadata['howto'] = False
        metadata['foundation'] = False
        metadata['topics'] = [DEFAULT_TOPIC]
        documents.append(create_document(Path(path).read_text(), metadata))
    # NOTE(review): this reports the total number of documents collected so
    # far, not only the extra documents -- the message is misleading
    print(f"\nTotal extra documents: {len(documents)}")
    # Process how-to documents
    print("\n--- Processing how-to documents ---")
    for path in Path('howtos').glob('**/*.md'):
        content = path.read_text()
        # Process includes in how-to files too (relative to the file's own
        # directory, unlike the pages loop above)
        content = process_includes(content, path.parent)
        # Split into individual how-tos: a '-*-' sentinel is injected before
        # every '# How to' heading and the file is split on it
        howtos = content.replace('# How to', '-*-# How to').split('-*-')
        for howto in howtos:
            howto = howto.strip()
            if not howto:
                continue
            # howto[2:] strips the leading '# '; the first line is the title.
            # NOTE(review): raises ValueError if a how-to is a single line
            # with no body; howto_content is unused (chunking below uses the
            # full howto text, heading included) -- confirm intended.
            title, howto_content = howto[2:].split('\n', maxsplit=1)
            # Create metadata for how-to
            metadata = {
                'title': title,
                'topics': [DEFAULT_TOPIC],
                'collection': COLLECTION,
                'howto': True,
                'source': 'howtos',
                'foundation': False
            }
            # Chunk if necessary; chunk numbers are only added when there is
            # more than one chunk (unlike the pages loop, which always adds
            # them)
            chunks = chunk_markdown_by_tokens(howto)
            for i, chunk in enumerate(chunks):
                chunk_metadata = metadata.copy()
                if len(chunks) > 1:
                    chunk_metadata['chunk'] = i + 1
                    chunk_metadata['total_chunks'] = len(chunks)
                documents.append(create_document(chunk, chunk_metadata))
    # Write all documents to JSON file
    with open(OUTPUT_FILE, 'w') as f:
        json.dump(documents, f, indent=2)
    print(f"\nProcessing complete! {len(documents)} documents written to {OUTPUT_FILE}")


if __name__ == '__main__':
    main()