[{"data":1,"prerenderedAt":191},["ShallowReactive",2],{"\u002Fcn\u002Fopen_source\u002Fmodules\u002Fmemories\u002Fparametric_memory":3,"surround-\u002Fcn\u002Fopen_source\u002Fmodules\u002Fmemories\u002Fparametric_memory":176},{"id":4,"title":5,"avatar":6,"banner":6,"body":7,"category":6,"desc":6,"description":163,"extension":170,"links":6,"meta":171,"navigation":6,"path":172,"seo":173,"stem":174,"__hash__":175},"docs\u002Fcn\u002Fopen_source\u002Fmodules\u002Fmemories\u002Fparametric_memory.md","参数记忆 (正在开发中)",null,{"type":8,"value":9,"toc":162},"minimark",[10,21,32,39,46,50,81,84,90,93,96,99,129,132,139,159],[11,12,13],"note",{},[14,15,16,20],"p",{},[17,18,19],"strong",{},"正在开发中","\n该功能仍在积极开发中，敬请期待更新！",[14,22,23,27,28,31],{},[24,25,26],"code",{},"参数记忆 (Parametric Memory)"," 是 MemOS 架构中核心的",[17,29,30],{},"长期知识与能力载体","。与明文记忆或激活记忆不同，参数化记忆将语言结构、世界知识以及通用推理能力的深度表征进行编码，直接嵌入在模型的权重之中。",[14,33,34,35,38],{},"在 MemOS 的架构设计中，参数化记忆不仅仅局限于静态的预训练权重，它还包含了",[17,36,37],{},"LoRA 适配器","和专家插件模块等模块化的权重组件。这使得你能够在无需重新训练整个模型的情况下，逐步扩展或定制 LLM 的能力。",[14,40,41,42,45],{},"例如，你可以将结构化或固定的知识提炼为参数形式，将其保存为独立的",[17,43,44],{},"能力模块 (Capability Blocks)","，并在推理过程中动态加载或卸载。这使得为法律推理、财务分析或特定领域摘要等任务创建“专家子模型”变得轻而易举——而这一切都由 MemOS 统一管理。",[47,48,49],"h2",{"id":49},"设计目标",[51,52,54],"list",{"icon":53},"ph:check-circle-duotone",[55,56,57,64,70],"ul",{},[58,59,60,63],"li",{},[17,61,62],{},"可控性"," — 支持按需生成、加载、切换或组合参数模块。",[58,65,66,69],{},[17,67,68],{},"可塑性"," — 与明文记忆和激活记忆协同演进；支持知识的提炼与回滚。",[58,71,72,75,76,80],{},[17,73,74],{},"可追溯性"," ",[77,78,79],"em",{},"(开发中)"," — 提供参数模块的版本控制与管理功能。",[47,82,83],{"id":83},"当前状态",[14,85,86,89],{},[24,87,88],{},"参数化记忆 (Parametric Memory)"," 目前仍处于设计和原型验证阶段。我们计划在未来的版本中发布用于生成、压缩以及热插拔参数模块的 API，旨在更好地支持多任务、多角色及多智能体架构。",[14,91,92],{},"请持续关注我们的更新！",[47,94,95],{"id":95},"相关模块",[14,97,98],{},"虽然参数记忆正在开发中，但今天你已经可以尝试这些:",[55,100,101,111,120],{},[58,102,103,110],{},[17,104,105],{},[106,107,109],"a",{"href":108},"\u002Fopen_source\u002Fmodules\u002Fmemories\u002Fgeneral_textual_memory","GeneralTextMemory",": 基于向量的灵活语义存储",[58,112,113,119],{},[17,114,115],{},[106,116,118],{"href":117},"\u002Fopen_source\u002Fmodules\u002Fmemories\u002Ftree_textual_memory","TreeTextMemory",": 结构化、层次化和知识图谱",[58,121,122,128],{},[17,123,124],{},[106,125,127],{"href":126},"\u002Fopen_source\u002Fmodules\u002Fmemories\u002Fkv_cache_memory","Activation Memory",": 高效的运行时状态缓存",[47,130,131],{"id":131},"开发者注意事项",[14,133,134,135,138],{},"参数化记忆将补全 MemOS 关于统一 ",[17,136,137],{},"Memory³"," 架构的愿景：",[55,140,141,147,153],{},[58,142,143,146],{},[17,144,145],{},"参数化记忆",": 内化与嵌入的隐式知识",[58,148,149,152],{},[17,150,151],{},"激活记忆",": 短暂的运行时状态",[58,154,155,158],{},[17,156,157],{},"明文记忆",": 结构化、可追溯的显式外部记忆",[14,160,161],{},"三者有机结合，将构建出一个适应性强、可持续进化且具备可解释性的智能系统。",{"title":163,"searchDepth":164,"depth":164,"links":165},"",2,[166,167,168,169],{"id":49,"depth":164,"text":49},{"id":83,"depth":164,"text":83},{"id":95,"depth":164,"text":95},{"id":131,"depth":164,"text":131},"md",{},"\u002Fcn\u002Fopen_source\u002Fmodules\u002Fmemories\u002Fparametric_memory",{"title":5,"description":163},"cn\u002Fopen_source\u002Fmodules\u002Fmemories\u002Fparametric_memory","5gAM_SR36KD_T4KfaeUoQAQjhcieyy_PLusIlOVX6sA",[177,185],{"title":178,"path":179,"stem":180,"icon":181,"framework":6,"module":6,"class":182,"target":-1,"active":183,"defaultOpen":183,"children":-1,"description":184},"KVCache 记忆","\u002Fcn\u002Fopen_source\u002Fmodules\u002Fmemories\u002Fkv_cache_memory","open_source\u002Fmodules\u002Fmemories\u002Fkv_cache_memory","i-ri-database-2-line",[],false,"KVCacheMemory 是MemOS中用于存储和管理KV cache的专用记忆模块，主要用于加速大语言模型（LLMs）推理并支持有效的上下文复用。作为激活记忆，它有助于提升会话式和生成式人工智能系统的性能。",{"title":186,"path":187,"stem":188,"icon":189,"framework":6,"module":6,"class":190,"target":-1,"active":183,"defaultOpen":183,"children":-1,"description":-1},"性能调优","\u002Fcn\u002Fopen_source\u002Fbest_practice\u002Fperformance_tuning","open_source\u002Fbest_practice\u002Fperformance_tuning","i-ri-speed-line",[],1774339752586]