{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"web-llm","owner":"mlc-ai","isFork":false,"description":"High-performance In-browser LLM Inference Engine ","allTopics":["deep-learning","language-model","webgpu","tvm","webml","llm","chatgpt"],"primaryLanguage":{"name":"TypeScript","color":"#3178c6"},"pullRequestCount":1,"issueCount":43,"starsCount":11120,"forksCount":692,"license":"Apache License 2.0","participation":[2,0,5,4,2,8,1,1,1,1,3,4,6,0,0,0,0,1,0,0,0,3,0,0,3,10,1,8,1,2,1,7,1,3,0,4,9,4,1,8,3,4,5,6,13,0,0,0,9,31,24,16],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T20:52:13.748Z"}},{"type":"Public","name":"web-llm-chat","owner":"mlc-ai","isFork":false,"description":"Chat with AI large language models running natively in your browser. Enjoy private, server-free, seamless AI conversations.","allTopics":["chat","privacy","ai","nextjs","chatbot","llama","hermes","chat-application","gemma","webgpu","mistral","phi2","large-language-models","llm","generative-ai","chatgpt","redpajama","qwen","tinyllama"],"primaryLanguage":{"name":"TypeScript","color":"#3178c6"},"pullRequestCount":5,"issueCount":5,"starsCount":70,"forksCount":16,"license":"Apache License 2.0","participation":[69,56,54,79,32,11,6,21,27,23,36,2,24,33,9,11,17,20,5,2,3,72,21,19,4,4,16,56,15,5,6,3,34,6,12,0,10,18,10,19,11,26,18,38,6,6,6,14,59,42,19,21],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T20:50:10.008Z"}}],"repositoryCount":2,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}