"dataset_name": "simonycl/aya-23-8B_advbench_jailbreak",
"num_prompts": 416,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "acmc/jailbreaks_dataset_with_perplexity_bigcode_starcoder2-3b_bigcode_starcoder2-7b",
"num_prompts": 11191,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "karanxa/dolphin-jailbreak-finetuning-dataset",
"num_prompts": 42684,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "karanxa/llama-2-jailbreak-dataset",
"num_prompts": 40613,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "karanxa/llama2-uncensored-jailbreak-dataset-finetuning",
"num_prompts": 42854,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "liuyanchen1015/Llama-3.2-1B_jailbreak_responses",
"num_prompts": 9888,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "liuyanchen1015/Llama-3.2-1B-Instruct_jailbreak_responses",
"num_prompts": 9888,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "liuyanchen1015/Llama-3.2-1B-Instruct_jailbreak_responses_with_judgment",
"num_prompts": 9888,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "jackhhao/jailbreak-classification",
"num_prompts": 1044,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "markush1/LLM-Jailbreak-Classifier",
"num_prompts": 201193,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "walledai/JailbreakBench",
"num_prompts": 200,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "walledai/JailbreakHub",
"num_prompts": 15140,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "Granther/evil-jailbreak",
"num_prompts": 1200,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "sevdeawesome/jailbreak_success",
"num_prompts": 10800,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "IDA-SERICS/Disaster-tweet-jailbreaking",
"num_prompts": 3000,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "GeorgeDaDude/Jailbreak_Complete_DS_labeled",
"num_prompts": 11383,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "dayone3nder/jailbreak_prompt_JBB_sft_trainset",
"num_prompts": 4785,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "dayone3nder/general_safe_mix_jailbreak_prompt_JBB_trainset",
"num_prompts": 24679,
"tokens": None, # Add actual token count if available
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "AgenticBackend",
"num_prompts": 2000,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Fine-tuned cloud hosted model",
"selected": True,

"dataset_name": "ShawnMenz/DAN_jailbreak",
"num_prompts": 666,
"tokens": 224196,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "deepset/prompt-injections",
"num_prompts": 203,
"tokens": 6988,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "rubend18/ChatGPT-Jailbreak-Prompts",
"num_prompts": 79,
"tokens": 26971,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "notrichardren/refuse-to-answer-prompts",
"num_prompts": 522,
"tokens": 7172,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "Lemhf14/EasyJailbreak_Datasets",
"num_prompts": 1630,
"tokens": 19758,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "markush1/LLM-Jailbreak-Classifier",
"num_prompts": 1119,
"tokens": 19758,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "JailbreakV-28K/JailBreakV-28k",
"num_prompts": 28300,
"tokens": 1975800,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "ShawnMenz/jailbreak_sft_rm_ds",
"num_prompts": 371000,
"tokens": 1975800,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Hugging Face Datasets",
"selected": False,

"dataset_name": "Steganography",
"num_prompts": 10,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Local mutation dataset",
"selected": False,

"dataset_name": "GPT fuzzer",
"num_prompts": 10,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Local mutation dataset",
"selected": False,

"dataset_name": "jailbreak_llms/2023_05_07",
"num_prompts": 0,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Github",
"selected": False,

"dataset_name": "jailbreak_llms/2023_12_25.csv",
"num_prompts": 0,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Github",
"selected": False,

"dataset_name": "Malwaregen",
"num_prompts": 0,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Local dataset",
"selected": False,

"dataset_name": "Hallucination",
"num_prompts": 0,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Local dataset",
"selected": False,

"dataset_name": "DataLeak",
"num_prompts": 0,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Local dataset",
"selected": False,

"dataset_name": "llm-adaptive-attacks",
"num_prompts": 0,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Github: tml-epfl/llm-adaptive-attacks#0.0.1",
"selected": False,

"dataset_name": "Garak",
"num_prompts": 0,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Github: https://github.com/leondz/garak#v0.9.0.1",
"selected": False,

"dataset_name": "Reinforcement Learning Optimization",
"num_prompts": 0,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Cloud hosted model",
"selected": False,

"dataset_name": "InspectAI",
"num_prompts": 0,
"tokens": 0,
+ "is_active": True,
"approx_cost": 0.0,
"source": "Github: https://github.com/UKGovernmentBEIS/inspect_ai",
"selected": False,

"num_prompts": len(load_local_csv().prompts),
"tokens": load_local_csv().tokens,
"approx_cost": 0.0,
+ "is_active": True,
"source": f"Local file dataset: {load_local_csv().metadata['src']}",
"selected": len(load_local_csv().prompts),
"url": "",
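For reference, a minimal sketch of how the newly added "is_active" flag might be consumed downstream, assuming the entries live in a registry list. The DATASETS name and the active_datasets() helper are illustrative assumptions, not part of this diff; only one entry is reproduced here.

from typing import Any

# Hypothetical registry holding the dataset entries shown in the diff above.
DATASETS: list[dict[str, Any]] = [
    {
        "dataset_name": "walledai/JailbreakBench",
        "num_prompts": 200,
        "tokens": None,
        "is_active": True,
        "approx_cost": 0.0,
        "source": "Hugging Face Datasets",
        "selected": False,
    },
    # ... remaining entries ...
]

def active_datasets(registry: list[dict[str, Any]]) -> list[dict[str, Any]]:
    # Keep only entries whose is_active flag is truthy.
    return [entry for entry in registry if entry.get("is_active", False)]

if __name__ == "__main__":
    for entry in active_datasets(DATASETS):
        print(entry["dataset_name"], entry["num_prompts"])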