|
4 | 4 | name: 'OpenMOSS', |
5 | 5 | tagline: '开放、可信赖的基础模型研究' |
6 | 6 | }, |
| 7 | + highlights: [ |
| 8 | + { |
| 9 | + title: { zh: 'MOSS-Speech: 真语音到语音生成', en: 'MOSS-Speech: True Speech-to-Speech Generation' }, |
| 10 | + desc: { zh: '原生端到端语音交互,无需任何中间文本引导', en: 'Native end-to-end speech interaction without any intermediate text guidance' }, |
| 11 | + date: '2025.10.1', |
| 12 | + url: 'https://arxiv.org/abs/2510.00499' |
| 13 | + }, |
| 14 | + { |
| 15 | + title: { zh: 'XY-Tokenizer: 低码率声学语义统一编码', en: 'XY-Tokenizer: Low-Bitrate Unified Acoustic-Semantic Encoding' }, |
| 16 | + desc: { zh: '1kbps最强声学语义统一编码及离散化工具', en: 'State-of-the-art 1kbps unified acoustic-semantic encoding and discretization tool' }, |
| 17 | + date: '2025.6.28', |
| 18 | + url: 'https://arxiv.org/abs/2506.23325' |
| 19 | + }, |
| 20 | + { |
| 21 | + title: { zh: 'MOSS-TTSD: 文本到对话语音生成', en: 'MOSS-TTSD: Text-to-Spoken Dialogue Generation' }, |
| 22 | + desc: { zh: '开源对话语音生成模型,高表现力,多说话人,超长语音生成', en: 'Open-source dialogue speech generation model with high expressiveness, multi-speaker support, and long-form speech generation' }, 
| 23 | + date: '2025.6.20', |
| 24 | + url: 'https://www.open-moss.com/en/moss-ttsd/' |
| 25 | + } |
| 26 | + ], |
| 27 | + courses: [ |
| 28 | + { titleKey: 'resources.course.prml', descKey: 'resources.course.prml.desc', url: 'https://mooc1.chaoxing.com/course/224348208.html', labelKey: 'resources.btn.course' }, |
| 29 | + { titleKey: 'resources.course.exercises', descKey: 'resources.course.exercises.desc', url: 'https://fudan-nlp.feishu.cn/wiki/WFifwXxfQiI3PKkn9FEcy0wKnjh', labelKey: 'resources.btn.exercise' }, |
| 30 | + { titleKey: 'resources.course.community', descKey: 'resources.course.community.desc', url: 'https://github.com/WillQvQ/SummerQuest-2025', labelKey: 'resources.btn.summer' } |
| 31 | + ], |
| 32 | + projects: [ |
| 33 | + { name: 'MOSS', descKey: 'resources.project.moss', stars: '12k+ ⭐', stack: 'Python', url: 'https://github.com/OpenMOSS/MOSS' }, |
| 34 | + { name: 'AnyGPT', descKey: 'resources.project.anygpt', stars: '500+ ⭐', stack: 'Python', url: 'https://github.com/OpenMOSS/AnyGPT' }, |
| 35 | + { name: 'MOSS-TTSD', descKey: 'resources.project.ttsd', stars: '200+ ⭐', stack: 'Python', url: 'https://github.com/OpenMOSS/MOSS-TTSD' }, |
| 36 | + { name: 'SpeechGPT-2.0', descKey: 'resources.project.speechgpt', stars: '360+ ⭐', stack: 'Python', url: 'https://github.com/OpenMOSS/SpeechGPT-2.0-preview' }, |
| 37 | + { name: 'DiRL', descKey: 'resources.project.dirl', stars: '100+ ⭐', stack: 'Python', url: 'https://github.com/OpenMOSS/DiRL' }, |
| 38 | + { name: 'Language-Model-SAEs', descKey: 'resources.project.saes', stars: '164+ ⭐', stack: 'Python', url: 'https://github.com/OpenMOSS/Language-Model-SAEs' } |
| 39 | + ], |
| 40 | + positionCards: [ |
| 41 | + { id: 'graduate', titleKey: 'positions.card.phd', descKey: 'positions.card.phd.desc' }, |
| 42 | + { id: 'graduate', titleKey: 'positions.card.master', descKey: 'positions.card.master.desc' }, |
| 43 | + { id: 'intern', titleKey: 'positions.card.intern', descKey: 'positions.card.intern.desc' }, |
| 44 | + { id: 'postdoc', titleKey: 'positions.card.postdoc', descKey: 'positions.card.postdoc.desc' }, |
| 45 | + { id: 'engineer', titleKey: 'positions.card.engineer', descKey: 'positions.card.engineer.desc' }, |
| 46 | + { id: 'visiting', titleKey: 'positions.card.visiting', descKey: 'positions.card.visiting.desc' } |
| 47 | + ], |
| 48 | + whyUs: [ |
| 49 | + { icon: '✨', titleKey: 'positions.why.research', descKey: 'positions.why.research.desc' }, |
| 50 | + { icon: '🚀', titleKey: 'positions.why.resources', descKey: 'positions.why.resources.desc' }, |
| 51 | + { icon: '👥', titleKey: 'positions.why.team', descKey: 'positions.why.team.desc' }, |
| 52 | + { icon: '💡', titleKey: 'positions.why.opensource', descKey: 'positions.why.opensource.desc' }, |
| 53 | + { icon: '🌏', titleKey: 'positions.why.collaboration', descKey: 'positions.why.collaboration.desc' }, |
| 54 | + { icon: '📈', titleKey: 'positions.why.career', descKey: 'positions.why.career.desc' } |
| 55 | + ], |
7 | 56 | positions: { |
8 | 57 | applyUrl: 'https://fudannlp.feishu.cn/share/base/form/shrcn29UYq1MCpTH0GBZh3AWPPg', |
9 | 58 | // Position details (bilingual support) 
10 | 59 | details: [ |
11 | 60 | { |
12 | | - id: 'phd', |
13 | | - title: { zh: '博士研究生', en: 'PhD Students' }, |
| 61 | + id: 'graduate', |
| 62 | + title: { zh: '博士/硕士研究生', en: 'PhD/Master\'s Students' }, |
14 | 63 | blocks: [ |
15 | 64 | { |
16 | 65 | subtitle: { zh: '招收对象', en: 'Target Candidates' }, |
|
47 | 96 | '(3)招生途径:夏令营、秋季保研、考研面试均需通过学院初审后联系,我们会安排组内面试。', |
48 | 97 | '(4)本组面试:重视科研潜质、工程能力和团队协作。高质量完成 nlp-beginner 的学生优先。', |
49 | 98 | '(5)提前进组:希望确定来本组的同学提前进组学习,表现优秀可推荐字节、华为等合作单位实习。', |
50 | | - '(6)研究生待遇:在学校补贴基础上提供有竞争力的补助,并对专硕给予一定租房补贴。' |
| 99 | + '(6)研究生待遇:在学校补贴基础上提供有竞争力的补助,并对专硕给予一定住房补贴。' |
51 | 100 | ], |
52 | 101 | en: [ |
53 | 102 | '(1) Master\'s positions: Subject to research funding, we admit approximately one academic master\'s student, 1-2 professional master\'s students (via recommendation), and ~10 professional master\'s students (via entrance exam) per year. We do not distinguish between academic and professional master\'s students in research, and all are eligible to convert to the PhD program.', 
|
62 | 111 | ] |
63 | 112 | }, |
64 | 113 | { |
65 | | - id: 'master', |
66 | | - title: { zh: '硕士研究生', en: 'Master\'s Students' }, |
| 114 | + id: 'intern', |
| 115 | + title: { zh: '实习生', en: 'Interns' }, |
67 | 116 | blocks: [ |
68 | 117 | { |
69 | | - subtitle: { zh: '说明', en: 'Note' }, |
| 118 | + subtitle: { zh: '职位介绍', en: 'Position Description' }, |
70 | 119 | paragraphs: { |
71 | | - zh: ['硕士研究生的招生要求与说明请参考上方"博士研究生"部分。'], |
72 | | - en: ['Please refer to the "PhD Students" section above for master\'s admission requirements and information.'] |
| 120 | + zh: ['请通过邮件([email protected])或 <a href="https://fudannlp.feishu.cn/share/base/form/shrcn29UYq1MCpTH0GBZh3AWPPg" target="_blank" style="color: var(--fudan-blue); text-decoration: underline;">问卷</a> 进行投递,对于非本地学生提供 2000 元/月的住房补贴。'], 
| 121 | + en: ['Please apply via email ([email protected]) or through the <a href="https://fudannlp.feishu.cn/share/base/form/shrcn29UYq1MCpTH0GBZh3AWPPg" target="_blank" style="color: var(--fudan-blue); text-decoration: underline;">application form</a>. Non-local students receive a housing allowance of 2000 CNY/month.'] 
73 | 122 | } |
74 | 123 | } |
75 | 124 | ] |
|
132 | 181 | } |
133 | 182 | ] |
134 | 183 | }, |
135 | | - { |
136 | | - id: 'visiting', |
137 | | - title: { zh: '访问学者', en: 'Visiting Scholars' }, |
138 | | - blocks: [ |
139 | | - { |
140 | | - subtitle: { zh: '说明', en: 'Note' }, |
141 | | - paragraphs: { |
142 | | - zh: ['访问学者详细内容持续更新中,如有兴趣请邮件咨询。'], |
143 | | - en: ['Details for visiting scholars are being updated. Please contact us via email if interested.'] |
144 | | - } |
145 | | - } |
146 | | - ] |
147 | | - }, |
148 | 184 | { |
149 | 185 | id: 'engineer', |
150 | 186 | title: { zh: '研究工程师', en: 'Research Engineers' }, |
151 | 187 | blocks: [ |
152 | 188 | { |
153 | 189 | subtitle: { zh: '关于岗位', en: 'About the Position' }, |
154 | 190 | paragraphs: { |
155 | | - zh: ['复旦大学自然语言实验室因科研工作需要,长期招聘科研工程助理,待遇面议。'], |
156 | | - en: ['Fudan NLP Lab is recruiting research engineering assistants for long-term positions. Compensation is negotiable.'] |
| 191 | + zh: ['OpenMOSS 团队因科研工作需要,长期招聘科研工程助理,待遇面议。'], |
| 192 | + en: ['The OpenMOSS team is recruiting research engineering assistants for long-term positions to support its research work. Compensation is negotiable.'] 
157 | 193 | } |
158 | 194 | }, |
159 | 195 | { |
160 | 196 | subtitle: { zh: '招聘说明', en: 'Job Description' }, |
161 | 197 | paragraphs: { |
162 | 198 | zh: [ |
163 | | - '参与实验室的 LLM 工程开发项目。', |
| 199 | + '参与实验室的大语言模型工程开发项目。', |
164 | 200 | '具备良好的工程经验,熟练掌握 Python,熟悉 PyTorch 并有 NLP 项目经验者优先。', |
165 | 201 | '具有钻研精神,工作踏实认真。' 
166 | 202 | ], |
|
187 | 223 | ] |
188 | 224 | }, |
189 | 225 | { |
190 | | - id: 'intern', |
191 | | - title: { zh: '实习生', en: 'Interns' }, |
| 226 | + id: 'visiting', |
| 227 | + title: { zh: '访问学者', en: 'Visiting Scholars' }, |
192 | 228 | blocks: [ |
193 | 229 | { |
194 | | - subtitle: { zh: '职位介绍', en: 'Position Description' }, |
| 230 | + subtitle: { zh: '说明', en: 'Note' }, |
195 | 231 | paragraphs: { |
196 | | - zh: ['实习生岗位内容将于近期补充,欢迎提前投递意向。'], |
197 | | - en: ['Internship details will be updated soon. Early applications are welcome.'] |
| 232 | + zh: ['请邮件 [email protected] 咨询。'], |
| 233 | + en: ['Please contact [email protected] for inquiries.'] |
198 | 234 | } |
199 | 235 | } |
200 | 236 | ] |
|
223 | 259 | year: '2025', |
224 | 260 | support: true, |
225 | 261 | alphabetical: true, |
226 | | - links: [{ type: 'GitHub', url: 'https://github.com/OpenMOSS/SpeechGPT-2.0-preview' }] |
| 262 | + links: [ |
| 263 | + { type: 'GitHub', url: 'https://github.com/OpenMOSS/SpeechGPT-2.0-preview' }, |
| 264 | + { type: 'Blog', url: 'https://www.open-moss.com/en/speechgpt2-preview/' } |
| 265 | + ] |
227 | 266 | }, |
228 | 267 | { |
229 | 268 | title: 'MOSS-TTSD: Zero-Shot Multi-Speaker Dialogue Speech Synthesis', |
|
232 | 271 | year: '2025', |
233 | 272 | support: true, |
234 | 273 | alphabetical: true, |
235 | | - links: [{ type: 'GitHub', url: 'https://github.com/OpenMOSS/MOSS-TTSD' }] |
| 274 | + links: [ |
| 275 | + { type: 'GitHub', url: 'https://github.com/OpenMOSS/MOSS-TTSD' }, |
| 276 | + { type: 'Blog', url: 'https://www.open-moss.com/en/moss-ttsd/' } |
| 277 | + ] |
236 | 278 | }, |
237 | 279 | { |
238 | 280 | title: 'MOSS-Speech: Towards True Speech-to-Speech Models Without Text Guidance', |
|
258 | 300 | venue: '', |
259 | 301 | year: '2025', |
260 | 302 | alphabetical: true, |
261 | | - links: [{ type: 'GitHub', url: 'https://github.com/OpenMOSS/SpeechGPT-2.0-preview' }] |
| 303 | + links: [ |
| 304 | + { type: 'GitHub', url: 'https://github.com/OpenMOSS/SpeechGPT-2.0-preview' }, |
| 305 | + { type: 'Blog', url: 'https://www.open-moss.com/en/speechgpt2-preview/' } |
| 306 | + ] |
262 | 307 | }, |
263 | 308 | { |
264 | 309 | title: 'MOSS-TTSD: Zero-Shot Multi-Speaker Dialogue Speech Synthesis', |
265 | 310 | authors: 'Cheng Chang, Ke Chen, Mingshu Chen, Qinyuan Cheng, Ruifan Deng, Liwei Fan, Zhaoye Fei, Qinghui Gao, Yitian Gong, Kexin Huang, Botian Jiang, Yaozhou Jiang, Luozhijie Jin, Ruixiao Li, Shimin Li, Zhengyuan Lin, Xipeng Qiu, Qian Tu, Jin Wang, Ruiming Wang, Wenxuan Wang, Yang Wang, Chenchen Yang, Zhe Xu, Yucheng Yuan, Donghua Yu, Jun Zhan, Dong Zhang, Wenbo Zhang, Xin Zhang, Yuqian Zhang, Yiwei Zhao, Xingjian Zhao', |
266 | 311 | venue: '', |
267 | 312 | year: '2025', |
268 | 313 | alphabetical: true, |
269 | | - links: [{ type: 'GitHub', url: 'https://github.com/OpenMOSS/MOSS-TTSD' }] |
| 314 | + links: [ |
| 315 | + { type: 'GitHub', url: 'https://github.com/OpenMOSS/MOSS-TTSD' }, |
| 316 | + { type: 'Blog', url: 'https://www.open-moss.com/en/moss-ttsd/' } |
| 317 | + ] |
270 | 318 | }, |
271 | 319 | { |
272 | 320 | title: 'MOSS-Speech: Towards True Speech-to-Speech Models Without Text Guidance', |
|