// Script to export trained model for Hugging Face
import fs from 'fs';
import path from 'path';

// Create model export directory (no-op if it already exists)
const exportDir = './model-export';
fs.mkdirSync(exportDir, { recursive: true });

// Create model card (README.md for the model repo)
const modelCard = `---
license: mit
base_model: Xenova/clap-htsat-unfused
tags:
- audio-classification
- transformers.js
- clap
- audio-tagging
library_name: transformers.js
---

# clip-tagger Model

This is a personalized audio tagging model based on CLAP (Contrastive Language-Audio Pre-training). It extends the base Xenova/clap-htsat-unfused model with user feedback and custom tags.

## Model Description

- **Base Model**: [Xenova/clap-htsat-unfused](https://huggingface.co/Xenova/clap-htsat-unfused)
- **Framework**: Transformers.js compatible
- **Training**: User feedback and custom tag integration
- **Use Case**: Personalized audio content tagging

## Usage

\`\`\`javascript
import { CLAPProcessor } from './clapProcessor.js';
import { LocalClassifier } from './localClassifier.js';

// Load the model
const processor = new CLAPProcessor();
const classifier = new LocalClassifier();
classifier.loadModel(); // Restores learned weights from localStorage or model files

// Process audio
const tags = await processor.processAudio(audioBuffer);
// \`features\` are the CLAP audio embeddings and \`candidateTags\` the label list
// (see clapProcessor.js and model-config.json for both)
const personalizedTags = classifier.predictAll(features, candidateTags);
\`\`\`

## Files

- \`localClassifier.js\` - Local classifier implementation
- \`clapProcessor.js\` - CLAP model wrapper
- \`userFeedbackStore.js\` - User feedback storage system
- \`model-config.json\` - Model configuration
- \`example-usage.html\` - Usage example

## Demo

Try the live demo: [clip-tagger Space](https://huggingface.co/spaces/sohei1l/clip-tagger)

## Training Data

This model learns from user corrections and custom tags. The base CLAP model provides initial audio understanding, while the local classifier adapts to user preferences.
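A hypothetical sketch of that feedback loop (the method names below are illustrative assumptions, not the actual API; see \`localClassifier.js\` and \`userFeedbackStore.js\` for the real interface):

\`\`\`javascript
// Illustrative only: method names are assumptions, not the real API.
// Confirm or reject predicted tags, then fold the feedback into the weights.
classifier.addFeedback(features, 'guitar', true);   // user confirmed "guitar"
classifier.addFeedback(features, 'speech', false);  // user rejected "speech"
classifier.train();                                 // update from stored feedback
\`\`\`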
`;

fs.writeFileSync(path.join(exportDir, 'README.md'), modelCard);

// Create model configuration
const modelConfig = {
  "model_type": "clip-tagger",
  "base_model": "Xenova/clap-htsat-unfused",
  "version": "1.0.0",
  "framework": "transformers.js",
  "feature_dim": 512,
  "learning_rate": 0.01,
  "supported_formats": ["wav", "mp3", "m4a", "ogg"],
  "default_labels": [
    "speech", "music", "singing", "guitar", "piano", "drums", "violin",
    "trumpet", "saxophone", "flute", "classical music", "rock music",
    "pop music", "jazz", "electronic music", "ambient", "nature sounds",
    "rain", "wind", "ocean waves", "birds chirping", "dog barking",
    "cat meowing", "car engine", "traffic", "footsteps", "door closing",
    "applause", "laughter", "crying", "coughing", "sneezing",
    "telephone ringing", "alarm clock", "typing", "water running",
    "fire crackling", "thunder", "helicopter", "airplane", "train",
    "motorcycle", "bell ringing", "whistle", "horn", "siren",
    "explosion", "gunshot", "silence", "noise", "distortion"
  ]
};

fs.writeFileSync(path.join(exportDir, 'model-config.json'), JSON.stringify(modelConfig, null, 2));
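
// Sketch (an assumption, not used by the export itself): how a consumer might
// read the exported config back and sanity-check it. Field names match
// modelConfig above; the specific checks are illustrative.
function validateModelConfig(configPath) {
  const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
  if (typeof config.feature_dim !== 'number' || config.feature_dim <= 0) {
    throw new Error(`Invalid feature_dim: ${config.feature_dim}`);
  }
  if (!Array.isArray(config.default_labels) || config.default_labels.length === 0) {
    throw new Error('default_labels must be a non-empty array');
  }
  return config;
}
// Example: validateModelConfig(path.join(exportDir, 'model-config.json'));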

// Copy the main model files
const filesToCopy = [
  'src/clapProcessor.js',
  'src/localClassifier.js',
  'src/userFeedbackStore.js'
];

filesToCopy.forEach(file => {
  if (fs.existsSync(file)) {
    const fileName = path.basename(file);
    fs.copyFileSync(file, path.join(exportDir, fileName));
  } else {
    console.warn('Skipping missing file:', file);
  }
});

// Create example usage file
const exampleUsage = `<!DOCTYPE html>
<html>
<head>
    <title>clip-tagger Model Usage Example</title>
    <script type="module">
        import { CLAPProcessor } from './clapProcessor.js';
        import { LocalClassifier } from './localClassifier.js';
        
        async function loadModel() {
            const processor = new CLAPProcessor();
            const classifier = new LocalClassifier();
            
            // Initialize
            await processor.initialize();
            classifier.loadModel();
            
            console.log('Model loaded successfully!');
            console.log('Model stats:', classifier.getModelStats());
        }
        
        // Load when the page loads; surface any failure in the console
        loadModel().catch(console.error);
    </script>
</head>
<body>
    <h1>clip-tagger Model</h1>
    <p>Check the browser console for model loading status.</p>
    <p>See the full demo at: <a href="https://huggingface.co/spaces/sohei1l/clip-tagger">clip-tagger Space</a></p>
</body>
</html>`;

fs.writeFileSync(path.join(exportDir, 'example-usage.html'), exampleUsage);

console.log('Model export created in:', exportDir);
console.log('Files exported:');
fs.readdirSync(exportDir).forEach(file => {
  console.log('-', file);
});
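
// Next step (manual, not performed by this script): push ./model-export to the
// Hub. One option, assuming the huggingface_hub CLI is installed:
//   huggingface-cli upload <your-username>/clip-tagger ./model-export .
// The repo id above is a placeholder; replace it with your own namespace.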