document.addEventListener('DOMContentLoaded', () => {
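  // Quiz data: each entry pairs a question with its answer options and the index of the correct option.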
  const questions = [
    {
      question: "What is the primary goal of Intern-S1 as described in the abstract?",
      options: [
        "To create a general-purpose language model for everyday tasks",
        "To develop a specialized multimodal model for scientific research with expertise in analyzing multiple science modalities",
        "To compete with closed-source models only in mathematical problem-solving",
        "To build a vision-language model focused on natural images"
      ],
      correct: 1
    },
    {
      question: "How many activated parameters and total parameters does Intern-S1 have?",
      options: [
        "14 billion activated, 120 billion total",
        "28 billion activated, 241 billion total",
        "56 billion activated, 482 billion total",
        "10 billion activated, 80 billion total"
      ],
      correct: 1
    },
    {
      question: "What percentage of the 5T training tokens came from scientific domains?",
      options: [
        "25%",
        "50%",
        "75%",
        "Over 50% (specifically >2.5T tokens)"
      ],
      correct: 3
    },
    {
      question: "What innovative technique did Intern-S1 use during its post-training reinforcement learning phase?",
      options: [
        "Mixture-of-Losses (MoL)",
        "Mixture-of-Rewards (MoR)",
        "Dynamic Reward Shaping (DRS)",
        "Multi-task Gradient Aggregation (MGA)"
      ],
      correct: 1
    },
    {
      question: "Which of the following scientific tasks does the paper claim Intern-S1 surpasses closed-source state-of-the-art models in?",
      options: [
        "Image captioning and object detection",
        "Molecular synthesis planning, reaction condition prediction, predicting thermodynamic stabilities for crystals",
        "Real-time video translation and voice cloning",
        "Web navigation and e-commerce recommendation"
      ],
      correct: 1
    },
    {
      question: "According to the introduction, why is scientific research considered important for AGI development?",
      options: [
        "Because it requires large amounts of labeled data",
        "Because it drives fundamental breakthroughs in human society and demands rigorous reasoning across diverse scientific modalities",
        "Because it relies heavily on social media data",
        "Because it can be solved with simple rule-based systems"
      ],
      correct: 1
    },
    {
      question: "What type of model is Intern-S1 described as?",
      options: [
        "A unimodal transformer model",
        "A multimodal Mixture-of-Experts (MoE) model",
        "A convolutional neural network for image classification",
        "A recurrent neural network for time-series analysis"
      ],
      correct: 1
    },
    {
      question: "What is the key limitation of current open-source models in scientific domains, according to the paper?",
      options: [
        "They are too slow to train",
        "They lack sufficient computational resources",
        "Their progress lags significantly behind popular domains like math and code, and there's a substantial gap compared to closed-source models",
        "They don't support English"
      ],
      correct: 2
    },
    {
      question: "Which of these benchmarks is mentioned as part of the scientific evaluation suite for Intern-S1?",
      options: [
        "COCO and ImageNet",
        "SmolInstruct, ChemBench, MatBench, SFE, Physics",
        "GLUE and SuperGLUE",
        "MNIST and CIFAR-10"
      ],
      correct: 1
    },
    {
      question: "Where can the Intern-S1 models be accessed?",
      options: [
        "GitHub.com/intern-s1",
        "Hugging Face at https://huggingface.co/internlm/Intern-S1",
        "OpenAI's model zoo",
        "Google AI Hub"
      ],
      correct: 1
    }
  ];
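  // Cache DOM element references and set up quiz state.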
  const questionDisplay = document.getElementById('question-display');
  const prevBtn = document.getElementById('prev-btn');
  const nextBtn = document.getElementById('next-btn');
  const submitBtn = document.getElementById('submit-btn');
  const resultsSection = document.getElementById('results');
  const scoreDisplay = document.getElementById('score-display');
  const feedbackDiv = document.getElementById('feedback');
  const restartBtn = document.getElementById('restart-btn');

  let currentQuestion = 0;
  let userAnswers = new Array(questions.length).fill(null);
  let quizCompleted = false;
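  // Render the current question, restore any previously selected answer, and listen for option changes.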
  function renderQuestion() {
    const q = questions[currentQuestion];
    questionDisplay.innerHTML = `
      <div class="question">
        <h3>Question ${currentQuestion + 1} of ${questions.length}</h3>
        <p>${q.question}</p>
        <div class="options">
          ${q.options.map((option, index) => `
            <label class="option">
              <input type="radio" name="answer" value="${index}" ${userAnswers[currentQuestion] === index ? 'checked' : ''}>
              ${option}
            </label>
          `).join('')}
        </div>
      </div>
    `;

    // Update button states
    prevBtn.disabled = currentQuestion === 0;
    nextBtn.textContent = currentQuestion === questions.length - 1 ? 'Submit' : 'Next';
    submitBtn.style.display = 'none';

    // Add event listeners for answer selection
    const radios = document.querySelectorAll('input[name="answer"]');
    radios.forEach(radio => {
      radio.addEventListener('change', () => {
        userAnswers[currentQuestion] = parseInt(radio.value);
      });
    });
  }
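  // Tally the score and build per-question feedback once the quiz is submitted.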
  function showResults() {
    const correctCount = userAnswers.filter((answer, index) => answer === questions[index].correct).length;
    const scorePercentage = Math.round((correctCount / questions.length) * 100);
    scoreDisplay.innerHTML = `You scored ${correctCount} out of ${questions.length} (${scorePercentage}%)`;

    let feedbackHTML = '<h3>Feedback:</h3>';
    userAnswers.forEach((answer, index) => {
      if (answer === null) {
        feedbackHTML += `<p><strong>Q${index + 1}</strong>: Not answered</p>`;
      } else if (answer === questions[index].correct) {
        feedbackHTML += `<p><strong>Q${index + 1}</strong>: ✅ Correct!</p>`;
      } else {
        feedbackHTML += `<p><strong>Q${index + 1}</strong>: ❌ Incorrect. The correct answer was: "${questions[index].options[questions[index].correct]}"</p>`;
      }
    });
    feedbackDiv.innerHTML = feedbackHTML;

    resultsSection.style.display = 'block';
    questionDisplay.style.display = 'none';
    document.getElementById('controls').style.display = 'none';
  }
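  // "Next" advances through the questions and acts as "Submit" on the last one.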
  nextBtn.addEventListener('click', () => {
    const selectedAnswer = document.querySelector('input[name="answer"]:checked');
    if (!selectedAnswer && !quizCompleted) {
      alert('Please select an answer before continuing.');
      return;
    }
    if (currentQuestion === questions.length - 1) {
      quizCompleted = true;
      showResults();
    } else {
      currentQuestion++;
      renderQuestion();
    }
  });
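  // Step back to the previous question.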
  prevBtn.addEventListener('click', () => {
    if (currentQuestion > 0) {
      currentQuestion--;
      renderQuestion();
    }
  });
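  // Reset all state and restart the quiz from the first question.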
  restartBtn.addEventListener('click', () => {
    currentQuestion = 0;
    userAnswers = new Array(questions.length).fill(null);
    quizCompleted = false;
    resultsSection.style.display = 'none';
    questionDisplay.style.display = 'block';
    document.getElementById('controls').style.display = 'flex';
    renderQuestion();
  });

  // Initialize
  renderQuestion();
});