Make scripts better
This commit is contained in:
parent
ba56147e95
commit
e8ad7df469
12 changed files with 614 additions and 432 deletions
|
|
@ -2,7 +2,7 @@
|
|||
# Concatenate multiple WAV/audio files into a single mp3 using ffmpeg concat demuxer
|
||||
#
|
||||
# Usage: ./concat_wav.sh <output.mp3> <input1.WAV> <input2.WAV> ...
|
||||
# Example: ./concat_wav.sh transcription/saramonic.mp3 20260325-091912.WAV 20260325-095007.WAV
|
||||
# Example: ./concat_wav.sh transcription/saramonic.mp3 20260325-091912.WAV 20260325-095007.WAV 20260325-102102.WAV
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
|
|
@ -14,6 +14,7 @@ fi
|
|||
OUTPUT="$1"
|
||||
shift
|
||||
|
||||
# Build concat list file
|
||||
LISTFILE=$(mktemp /tmp/ffmpeg_concat_XXXXXX.txt)
|
||||
trap "rm -f '$LISTFILE'" EXIT
|
||||
|
||||
|
|
@ -23,8 +24,10 @@ for f in "$@"; do
|
|||
done
|
||||
|
||||
echo "Concatenating $# files -> $OUTPUT"
|
||||
cat "$LISTFILE"
|
||||
|
||||
ffmpeg -y -f concat -safe 0 -i "$LISTFILE" -ac 1 -ar 16000 -b:a 64k "$OUTPUT" 2>/dev/null
|
||||
|
||||
DUR=$(ffprobe -v error -show_entries format=duration -of csv=p=0 "$OUTPUT" | cut -d. -f1)
|
||||
SIZE=$(du -h "$OUTPUT" | cut -f1)
|
||||
echo "Done: ${DUR}s ($(( DUR / 60 ))m$(( DUR % 60 ))s), $SIZE"
|
||||
echo "Done: ${DUR}s ($(( DUR / 60 ))m$(( DUR % 60 ))s), $SIZE"
|
||||
|
|
|
|||
|
|
@ -1,45 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Генерация PDF из Markdown через weasyprint.
|
||||
|
||||
УСТАНОВКА: pip install weasyprint
|
||||
ИСПОЛЬЗОВАНИЕ: python3 generate_pdf.py report.md report.pdf
|
||||
"""
|
||||
|
||||
import sys
|
||||
from weasyprint import HTML
|
||||
|
||||
def markdown_to_pdf(md_path, pdf_path):
    """Convert a Markdown file to a PDF via an intermediate HTML document.

    Only heading markup (``# `` and ``## `` at the start of a line) is
    translated; all other Markdown text is embedded verbatim in the HTML body.

    Args:
        md_path: path to the input Markdown file (read as UTF-8).
        pdf_path: path where the rendered PDF is written.
    """
    with open(md_path, 'r', encoding='utf-8') as f:
        md_content = f.read()

    # Convert headings line by line so tags are properly closed.
    # (The previous str.replace() approach emitted <h1>/<h2> without
    # closing tags and also matched '# ' in the middle of a line.)
    converted = []
    for line in md_content.split('\n'):
        if line.startswith('## '):
            converted.append(f"<h2>{line[3:]}</h2>")
        elif line.startswith('# '):
            converted.append(f"<h1>{line[2:]}</h1>")
        else:
            converted.append(line)
    body = '\n'.join(converted)

    html_content = f"""
<html lang="ru">
<head>
<meta charset="UTF-8">
<style>
@page {{ margin: 2cm; }}
body {{ font-family: sans-serif; line-height: 1.6; }}
h1 {{ color: #2c3e50; border-bottom: 2px solid #3498db; }}
h2 {{ color: #34495e; border-bottom: 1px solid #bdc3c7; }}
table {{ border-collapse: collapse; width: 100%; }}
th, td {{ border: 1px solid #ddd; padding: 8px; }}
</style>
</head>
<body>
{body}
</body>
</html>
"""

    HTML(string=html_content).write_pdf(pdf_path)
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: require the input Markdown path and output PDF path.
    args = sys.argv[1:]
    if len(args) < 2:
        print("Usage: python3 generate_pdf.py <report.md> <output.pdf>")
        sys.exit(1)

    md_file, pdf_file = args[0], args[1]
    markdown_to_pdf(md_file, pdf_file)
    print(f"PDF created: {pdf_file}")
|
||||
|
|
@ -1,60 +1,73 @@
|
|||
#!/bin/bash
|
||||
# generate_report.sh — Full pipeline for generating meeting report (without diagrams)
|
||||
# Usage: ./generate_report.sh /absolute/path/to/meeting_folder
|
||||
# Example: ./generate_report.sh /app/hermes_data/meetings/2026-04-15
|
||||
# generate_report.sh — Simplified pipeline: transcription + merge + PDF
|
||||
# Usage: ./generate_report.sh <meeting-date-dir>
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
|
||||
# Load .env if exists (for hotwords etc.)
|
||||
if [ -f "$SCRIPT_DIR/.env" ]; then
|
||||
set -a
|
||||
source "$SCRIPT_DIR/.env"
|
||||
set +a
|
||||
fi
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
echo "Usage: $0 <absolute_path_to_meeting_folder>"
|
||||
echo "Example: $0 /app/hermes_data/meetings/2026-04-15"
|
||||
echo "Usage: $0 <meeting-date-dir>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
MEETING_DIR="$1"
|
||||
|
||||
# If relative path provided, convert to absolute
|
||||
if [[ "$MEETING_DIR" != /* ]]; then
|
||||
MEETING_DIR="$SCRIPT_DIR/$MEETING_DIR"
|
||||
fi
|
||||
|
||||
# Resolve absolute path
|
||||
MEETING_DIR="$(realpath "$MEETING_DIR")"
|
||||
MEETING_DIR="$SCRIPT_DIR/$1"
|
||||
TRANSCRIPTION_DIR="$MEETING_DIR/transcription"
|
||||
DIAGRAMS_DIR="$MEETING_DIR/diagrams"
|
||||
|
||||
if [ ! -d "$MEETING_DIR" ]; then
|
||||
echo "Error: Meeting directory not found: $MEETING_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# ============================================================
|
||||
# ------------------------------------------------------------
|
||||
# Step 1: Transcription (skip if already done)
|
||||
# ============================================================
|
||||
if [ -d "$MEETING_DIR/transcription" ] && [ -f "$MEETING_DIR/transcription/plain_text.txt" ]; then
|
||||
echo "[1/2] Transcription already exists, skipping."
|
||||
# ------------------------------------------------------------
|
||||
if [ -d "$TRANSCRIPTION_DIR" ] && [ -f "$TRANSCRIPTION_DIR/plain_text.txt" ]; then
|
||||
echo "[1/3] Transcription already exists, skipping."
|
||||
else
|
||||
echo "[1/2] Running transcription..."
|
||||
echo "[1/3] Running transcription..."
|
||||
bash "$SCRIPT_DIR/transcribe.sh" "$MEETING_DIR"
|
||||
fi
|
||||
|
||||
# ============================================================
|
||||
# Step 2: Generate PDF from markdown
|
||||
# ============================================================
|
||||
echo "[2/2] Generating PDF..."
|
||||
# ------------------------------------------------------------
|
||||
# Step 2: Merge transcriptions (if both saramonic and h2n exist)
|
||||
# ------------------------------------------------------------
|
||||
echo "[2/3] Merging transcriptions where possible..."
|
||||
if [ -f "$TRANSCRIPTION_DIR/saramonic.json" ] && [ -f "$TRANSCRIPTION_DIR/h2n_xy.json" ]; then
|
||||
echo " Merging saramonic (primary) + h2n_xy (secondary) → merged.json"
|
||||
python3 "$SCRIPT_DIR/merge_transcriptions.py" \
|
||||
"$TRANSCRIPTION_DIR/saramonic.json" \
|
||||
"$TRANSCRIPTION_DIR/h2n_xy.json" \
|
||||
"$MEETING_DIR"
|
||||
elif [ -f "$TRANSCRIPTION_DIR/saramonic.json" ] && [ -f "$TRANSCRIPTION_DIR/h2n_ms.json" ]; then
|
||||
echo " Merging saramonic (primary) + h2n_ms (secondary) → merged.json"
|
||||
python3 "$SCRIPT_DIR/merge_transcriptions.py" \
|
||||
"$TRANSCRIPTION_DIR/saramonic.json" \
|
||||
"$TRANSCRIPTION_DIR/h2n_ms.json" \
|
||||
"$MEETING_DIR"
|
||||
else
|
||||
echo " Only one transcription source found or none — copying plain text to merged_plain.txt"
|
||||
# Try to find any existing plain text
|
||||
PLAIN_SRC=$(find "$TRANSCRIPTION_DIR" -name "*_plain.txt" | head -1)
|
||||
if [ -n "$PLAIN_SRC" ]; then
|
||||
cp "$PLAIN_SRC" "$MEETING_DIR/merged_plain.txt"
|
||||
echo " Copied $PLAIN_SRC -> merged_plain.txt"
|
||||
else
|
||||
echo " No plain text found yet — will be created after transcription finishes."
|
||||
fi
|
||||
fi
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Step 3: Generate PDF from report.md (if exists)
|
||||
# ------------------------------------------------------------
|
||||
echo "[3/3] Generating PDF from report.md (if present)..."
|
||||
REPORT_MD="$MEETING_DIR/report.md"
|
||||
REPORT_PDF="$MEETING_DIR/report.pdf"
|
||||
|
||||
if [ ! -f "$REPORT_MD" ]; then
|
||||
echo " Error: report.md not found at $REPORT_MD"
|
||||
exit 1
|
||||
echo " No report.md found. Agent must write this file first."
|
||||
echo " After writing the report, run: pandoc $REPORT_MD -o $REPORT_PDF ..."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
cd "$MEETING_DIR"
|
||||
|
|
|
|||
|
|
@ -35,4 +35,4 @@ Wildberries, Хабр
|
|||
тимлид, календарный план, конгломерат, квиз
|
||||
стартап, деплой, инфраструктура, безопасность
|
||||
IT-льготы, GitHub, open source, VPS
|
||||
петличка, Saramonic, диктофон, скрипт
|
||||
петличка, Saramonic, диктофон
|
||||
|
|
|
|||
|
|
@ -1,44 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Транскрипция аудио через faster-whisper с исправлением MKL ошибки.
|
||||
|
||||
ИСПОЛЬЗОВАНИЕ:
|
||||
import os
|
||||
os.environ["MKL_SERVICE_FORCE_INTEL"] = "1"
|
||||
os.environ["OMP_NUM_THREADS"] = "2"
|
||||
|
||||
from faster_whisper import WhisperModel
|
||||
model = WhisperModel("small")
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
# CRITICAL: Must be set BEFORE importing faster_whisper
|
||||
os.environ["MKL_SERVICE_FORCE_INTEL"] = "1"
|
||||
os.environ["OMP_NUM_THREADS"] = "2"
|
||||
|
||||
from faster_whisper import WhisperModel
|
||||
|
||||
def transcribe_audio(audio_path, model_size="small", language="ru"):
    """Transcribe an audio file with faster-whisper and return its segments.

    Args:
        audio_path: path to the audio file to transcribe.
        model_size: Whisper model size identifier (default ``"small"``).
        language: language code passed to the model (default ``"ru"``).

    Returns:
        A list of segment objects produced by the model.
    """
    print(f"Loading model {model_size}...")
    whisper = WhisperModel(model_size)

    print(f"Transcribing {audio_path}...")
    result, _ = whisper.transcribe(audio_path, language=language)

    # Materialize into a list before returning so callers can iterate
    # over the segments more than once.
    return list(result)
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: transcribe the given file and dump each segment's text.
    if len(sys.argv) < 2:
        print("Usage: python3 local_whisper.py <audio.wav>")
        sys.exit(1)

    for seg in transcribe_audio(sys.argv[1]):
        print(seg.text)
|
||||
|
|
@ -1,25 +1,241 @@
|
|||
#!/usr/bin/env python3
|
||||
"""Объединение транскрипций из нескольких файлов."""
|
||||
"""
|
||||
Merge two transcription sources by timestamps.
|
||||
|
||||
Primary source (e.g., lavalier mic / Saramonic) — better quality for main speaker.
|
||||
Secondary source (e.g., room mic / H2n XY) — captures audience/student voices.
|
||||
|
||||
Strategy:
|
||||
1. Both sources have timestamped segments from Whisper.
|
||||
2. For each secondary segment, check if primary has a similar segment at the same time.
|
||||
3. If primary has coverage (overlapping segment exists) — keep primary's version.
|
||||
4. If primary has NO coverage (gap) — insert secondary segment, tagged as [audience].
|
||||
5. Time alignment: the two recordings may have different start times.
|
||||
We detect the offset by cross-correlating the first few segments' text.
|
||||
|
||||
Usage:
|
||||
python3 merge_transcriptions.py <primary.json> <secondary.json> <output_dir> [--offset SECONDS]
|
||||
|
||||
Output:
|
||||
<output_dir>/merged.json — combined segments with source tags
|
||||
<output_dir>/merged.txt — timestamped text
|
||||
<output_dir>/merged_plain.txt — plain text for LLM processing
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
import argparse
|
||||
from difflib import SequenceMatcher
|
||||
|
||||
def merge_transcriptions(timeline_dir, output_path="merged_plain.txt"):
|
||||
"""Собирает все .txt файлы в один."""
|
||||
txt_files = sorted([f for f in os.listdir(timeline_dir) if f.endswith('.txt') and 'merged' not in f])
|
||||
|
||||
|
||||
def load_segments(json_path):
    """Read a whisper JSON file and normalize its segment list.

    Args:
        json_path: path to a JSON file with a top-level ``"segments"`` array
            (and, optionally, a ``"duration"`` field).

    Returns:
        Tuple ``(segments, duration)`` where ``segments`` is a list of dicts
        holding ``start``, ``end`` and whitespace-stripped ``text``.
    """
    with open(json_path) as fh:
        payload = json.load(fh)

    # Missing fields default to 0 / empty string so downstream math is safe.
    normalized = [
        {
            "start": entry.get("start", 0),
            "end": entry.get("end", 0),
            "text": entry.get("text", "").strip(),
        }
        for entry in payload.get("segments", [])
    ]
    return normalized, payload.get("duration", 0)
|
||||
|
||||
|
||||
def estimate_offset(primary_segs, secondary_segs, search_window=120):
    """Estimate the time offset between two recordings.

    Returns an offset such that ``secondary_time + offset ~= primary_time``,
    chosen by scanning integer offsets within ``search_window`` seconds and
    scoring each candidate by the text similarity of time-aligned segments.
    """
    if not primary_segs or not secondary_segs:
        return 0.0

    # Only the opening portions of each recording are compared.
    probes = primary_segs[:30]
    candidates = secondary_segs[:40]

    best_offset = 0.0
    best_score = 0.0

    # Scan candidate offsets in 1-second steps.
    for step in range(-search_window, search_window + 1):
        offset = float(step)
        total_sim = 0.0
        hits = 0

        for p_seg in probes:
            anchor = (p_seg["start"] + p_seg["end"]) / 2 - offset
            # Nearest secondary segment (by midpoint) to the shifted anchor.
            nearest = min(
                candidates,
                key=lambda s: abs((s["start"] + s["end"]) / 2 - anchor),
            )
            gap = abs((nearest["start"] + nearest["end"]) / 2 - anchor)

            if gap < 15:  # only compare text when within 15 seconds
                total_sim += SequenceMatcher(
                    None, p_seg["text"].lower(), nearest["text"].lower()
                ).ratio()
                hits += 1

        score = total_sim / hits if hits else 0.0
        if score > best_score:
            best_score = score
            best_offset = offset

    return best_offset
|
||||
|
||||
|
||||
def merge(primary_segs, secondary_segs, offset=0.0, gap_threshold=3.0, sim_threshold=0.3):
    """
    Merge primary and secondary transcription segments.

    Primary segments are always kept (tagged source="primary"). A secondary
    segment is inserted (tagged source="secondary") only when the primary
    source has no overlapping segment at that time, or the overlapping primary
    text is too dissimilar (below sim_threshold) — i.e. the content is unique
    to the secondary recording (typically audience voices).

    Args:
        primary_segs: segments from primary source (lavalier)
        secondary_segs: segments from secondary source (room mic)
        offset: time offset to add to secondary timestamps to align with primary
        gap_threshold: minimum gap (seconds) in primary to consider inserting
            secondary. NOTE(review): accepted but currently unused; kept for
            interface compatibility.
        sim_threshold: below this similarity, secondary segment is considered
            unique content

    Returns:
        (merged, inserted): merged segment list sorted by start time, and the
        number of secondary segments inserted.
    """
    # NOTE: stray statements left over from the removed legacy
    # merge_transcriptions() (referencing undefined txt_files/timeline_dir/
    # output_path, which raised NameError at call time) have been deleted.
    merged = []

    # Add source tag to primary segments
    for seg in primary_segs:
        merged.append({
            **seg,
            "source": "primary",
        })

    # Build primary timeline: list of (start, end) intervals
    primary_intervals = [(s["start"], s["end"]) for s in primary_segs]

    def primary_covers(t_start, t_end):
        """Check if primary has any segment overlapping [t_start, t_end]."""
        for p_start, p_end in primary_intervals:
            if p_start <= t_end and p_end >= t_start:
                return True
        return False

    def find_similar_primary(text, t_start, t_end, window=10):
        """Return similarity of the most similar primary segment near the given time."""
        best_sim = 0.0
        for seg in primary_segs:
            # Skip primary segments far from the candidate on both ends.
            if abs(seg["start"] - t_start) > window and abs(seg["end"] - t_end) > window:
                continue
            sim = SequenceMatcher(None, text.lower(), seg["text"].lower()).ratio()
            if sim > best_sim:
                best_sim = sim
        return best_sim

    # Check each secondary segment
    inserted = 0
    for seg in secondary_segs:
        adj_start = seg["start"] + offset
        adj_end = seg["end"] + offset
        text = seg["text"]

        if not text:
            continue

        # Check if primary already covers this time range
        if primary_covers(adj_start, adj_end):
            # Primary has something here — check if it's the same content
            sim = find_similar_primary(text, adj_start, adj_end)
            if sim >= sim_threshold:
                continue  # primary already has this, skip

        # This segment is unique to secondary (likely audience voice)
        merged.append({
            "start": round(adj_start, 2),
            "end": round(adj_end, 2),
            "text": text,
            "source": "secondary",
        })
        inserted += 1

    # Sort by start time
    merged.sort(key=lambda s: s["start"])

    return merged, inserted
|
||||
|
||||
|
||||
def format_timestamp(seconds):
    """Format a second count as a zero-padded HH:MM:SS string."""
    hours = int(seconds // 3600)
    within_hour = seconds % 3600
    return f"{hours:02d}:{int(within_hour // 60):02d}:{int(seconds % 60):02d}"
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, load both transcriptions, align and
    merge them, and write merged.json / merged.txt / merged_plain.txt into
    the output directory."""
    parser = argparse.ArgumentParser(description="Merge two transcription sources")
    parser.add_argument("primary", help="Primary source JSON (lavalier/Saramonic)")
    parser.add_argument("secondary", help="Secondary source JSON (room mic/H2n)")
    parser.add_argument("output_dir", help="Output directory")
    parser.add_argument("--offset", type=float, default=None,
                        help="Time offset (seconds) to add to secondary timestamps. "
                             "Auto-detected if not specified.")
    parser.add_argument("--gap-threshold", type=float, default=3.0,
                        help="Minimum gap in primary to insert secondary (default: 3.0)")
    parser.add_argument("--sim-threshold", type=float, default=0.3,
                        help="Similarity threshold below which secondary is unique (default: 0.3)")
    args = parser.parse_args()

    print(f"Primary: {args.primary}")
    print(f"Secondary: {args.secondary}")

    primary_segs, primary_dur = load_segments(args.primary)
    secondary_segs, secondary_dur = load_segments(args.secondary)
    print(f"Primary: {len(primary_segs)} segments, {primary_dur:.0f}s")
    print(f"Secondary: {len(secondary_segs)} segments, {secondary_dur:.0f}s")

    # Estimate or use provided offset
    if args.offset is not None:
        offset = args.offset
        print(f"Using provided offset: {offset:+.1f}s")
    else:
        print("Estimating time offset...")
        offset = estimate_offset(primary_segs, secondary_segs)
        print(f"Estimated offset: {offset:+.1f}s (secondary is {abs(offset):.0f}s "
              f"{'behind' if offset > 0 else 'ahead of'} primary)")

    # Merge
    merged, inserted = merge(
        primary_segs, secondary_segs,
        offset=offset,
        gap_threshold=args.gap_threshold,
        sim_threshold=args.sim_threshold,
    )
    print(f"Merged: {len(merged)} segments ({len(primary_segs)} primary + {inserted} from secondary)")

    # Write outputs
    os.makedirs(args.output_dir, exist_ok=True)

    # JSON: full segment list plus the offset that was applied
    json_path = os.path.join(args.output_dir, "merged.json")
    with open(json_path, "w") as f:
        json.dump({"segments": merged, "offset": offset}, f, ensure_ascii=False, indent=2)
    print(f"Written: {json_path}")

    # Timestamped text; secondary-source lines are tagged as audience speech
    txt_path = os.path.join(args.output_dir, "merged.txt")
    with open(txt_path, "w") as f:
        for seg in merged:
            tag = "" if seg["source"] == "primary" else " [аудитория]"
            f.write(f"[{format_timestamp(seg['start'])}]{tag} {seg['text']}\n")
    print(f"Written: {txt_path}")

    # Plain text (untimestamped, for LLM processing)
    plain_path = os.path.join(args.output_dir, "merged_plain.txt")
    with open(plain_path, "w") as f:
        f.write(" ".join(seg["text"] for seg in merged))
    print(f"Written: {plain_path}")
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point — all argument handling lives in main().
    # (Removed stale legacy lines that called the deleted
    # merge_transcriptions(dir_path) API before main(), which raised
    # NameError and bypassed argparse.)
    main()
|
||||
|
|
|
|||
|
|
@ -1,30 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Обёртка для запуска транскрипции с правильной настройкой окружения
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# CRITICAL: переменные для Intel oneMKL
|
||||
export MKL_SERVICE_FORCE_INTEL=1
|
||||
export OMP_NUM_THREADS=2
|
||||
|
||||
MEETING_DIR="${1:-.}"
|
||||
|
||||
if [ ! -f "$MEETING_DIR"/*.wav ] && [ ! -f "$MEETING_DIR"/*.WAV ]; then
|
||||
echo "Error: No WAV file found in $MEETING_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# Check audio duration
|
||||
DURATION=$(ffprobe -i "$MEETING_DIR"/*.wav -show_entries format=duration -v quiet -of csv="p=0" 2>/dev/null | cut -d. -f1)
|
||||
|
||||
if [ $DURATION -gt 1800 ]; then # >30 минут
|
||||
echo "Audio is $DURATION seconds. Using chunked transcription..."
|
||||
bash "$SCRIPT_DIR/transcribe_chunked.sh" "$MEETING_DIR"
|
||||
else
|
||||
echo "Audio is $DURATION seconds. Using standard transcription..."
|
||||
bash "$SCRIPT_DIR/transcribe.sh" "$MEETING_DIR"
|
||||
fi
|
||||
|
||||
echo "Transcription complete. Check $MEETING_DIR/transcription/"
|
||||
|
|
@ -1,59 +1,42 @@
|
|||
#!/bin/bash
|
||||
# Transcribe audio recordings using local faster-whisper
|
||||
# Supports multiple sources: Zoom H2n (4ch WAV), Saramonic (mono WAV), etc.
|
||||
#
|
||||
# Usage:
|
||||
# ./transcribe.sh /absolute/path/to/meeting_folder
|
||||
# ./transcribe.sh /absolute/path/to/meeting_folder specific.WAV output_name
|
||||
#
|
||||
# Examples:
|
||||
# ./transcribe.sh /app/hermes_data/meetings/2026-02-18
|
||||
# ./transcribe.sh /app/hermes_data/meetings/2026-02-18 SR003XY.WAV h2n_xy
|
||||
# Transcribe audio recordings using local whisper server (with API key)
|
||||
# Usage: ./transcribe.sh <meeting_dir> [<file.WAV> <output_name>]
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
WHISPER_MODEL="base"
|
||||
: ${WHISPER_URL:?ERROR: WHISPER_URL not set (e.g., https://llm.lambda.coredump.ru/v1)}
|
||||
: ${WHISPER_API_KEY:?ERROR: WHISPER_API_KEY not set}
|
||||
MODEL="whisper-1" # Изменено с конкретной модели на общее название
|
||||
LANGUAGE="ru"
|
||||
|
||||
# Load hotwords
|
||||
HOTWORDS_FILE="$SCRIPT_DIR/hotwords.txt"
|
||||
# Hotwords
|
||||
HOTWORDS_FILE="${HOTWORKS_PATH:-$SCRIPT_DIR/hotwords.txt}"
|
||||
HOTWORDS=""
|
||||
if [ -f "$HOTWORDS_FILE" ]; then
|
||||
HOTWORDS=$(grep -v '^#' "$HOTWORDS_FILE" | grep -v '^$' | tr '\n' ',' | sed 's/,,*/,/g; s/^,//; s/,$//')
|
||||
echo "Loaded hotwords from $HOTWORDS_FILE"
|
||||
else
|
||||
HOTWORDS=""
|
||||
echo "Warning: hotwords.txt not found, proceeding without hotwords"
|
||||
fi
|
||||
|
||||
# ---------- argument parsing ----------
|
||||
if [ $# -lt 1 ]; then
|
||||
echo "Usage: $0 <absolute_meeting_dir> [<file.WAV> <output_name>]"
|
||||
echo "Example: $0 /app/hermes_data/meetings/2026-02-18"
|
||||
echo "Usage: $0 <meeting_dir> [<file.WAV> <output_name>]"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
MEETING_DIR="$1"
|
||||
if [[ "$MEETING_DIR" != /* ]]; then
|
||||
MEETING_DIR="$(realpath "$MEETING_DIR")"
|
||||
else
|
||||
MEETING_DIR="$(realpath "$MEETING_DIR")"
|
||||
fi
|
||||
|
||||
WORK_DIR="$MEETING_DIR"
|
||||
WORK_DIR="$(cd "$SCRIPT_DIR/$MEETING_DIR" && pwd)"
|
||||
OUTPUT_DIR="$WORK_DIR/transcription"
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
|
||||
# Function: convert WAV(s) to mono mp3
|
||||
convert_to_mp3() {
|
||||
local output_mp3="$1"
|
||||
shift
|
||||
local inputs=("$@")
|
||||
|
||||
if [ -f "$output_mp3" ]; then
|
||||
echo " $output_mp3 already exists, skipping conversion"
|
||||
return
|
||||
fi
|
||||
|
||||
if [ ${#inputs[@]} -eq 1 ]; then
|
||||
echo " Converting ${inputs[0]} -> $output_mp3"
|
||||
ffmpeg -y -i "${inputs[0]}" -ac 1 -ar 16000 -b:a 64k "$output_mp3" 2>/dev/null
|
||||
|
|
@ -67,13 +50,8 @@ convert_to_mp3() {
|
|||
ffmpeg -y -f concat -safe 0 -i "$listfile" -ac 1 -ar 16000 -b:a 64k "$output_mp3" 2>/dev/null
|
||||
rm -f "$listfile"
|
||||
fi
|
||||
|
||||
local dur
|
||||
dur=$(ffprobe -v error -show_entries format=duration -of csv=p=0 "$output_mp3" | cut -d. -f1)
|
||||
echo " Duration: ${dur}s ($(( dur / 60 ))m$(( dur % 60 ))s)"
|
||||
}
|
||||
|
||||
# Function: transcribe using local faster-whisper (with chunking if needed)
|
||||
transcribe_file() {
|
||||
local mp3_file="$1"
|
||||
local name="$2"
|
||||
|
|
@ -84,21 +62,42 @@ transcribe_file() {
|
|||
return
|
||||
fi
|
||||
|
||||
# Check duration of mp3
|
||||
local duration=$(ffprobe -v error -show_entries format=duration -of csv=p=0 "$mp3_file" | cut -d. -f1)
|
||||
if [ "$duration" -gt 1800 ]; then # > 30 minutes
|
||||
echo " Audio is ${duration}s long (>30 min), using chunked transcription..."
|
||||
bash "$SCRIPT_DIR/transcribe_chunked.sh" "$mp3_file" "$name" "$OUTPUT_DIR"
|
||||
return
|
||||
fi
|
||||
|
||||
echo " Transcribing $name (local faster-whisper)..."
|
||||
echo " Transcribing $name..."
|
||||
local started
|
||||
started=$(date +%s)
|
||||
|
||||
MKL_SERVICE_FORCE_INTEL=1 OMP_NUM_THREADS=2 python3 "$SCRIPT_DIR/local_whisper.py" "$mp3_file" "$json_file" "$WHISPER_MODEL" "$HOTWORDS"
|
||||
# Form the full URL for transcription
|
||||
local full_url="${WHISPER_URL}/audio/transcriptions"
|
||||
|
||||
local curl_args=(
|
||||
-s -w "%{http_code}" -o "$json_file"
|
||||
-X POST "$full_url"
|
||||
-H "Authorization: Bearer $WHISPER_API_KEY"
|
||||
-F "file=@${mp3_file}"
|
||||
-F "model=${MODEL}"
|
||||
-F "language=${LANGUAGE}"
|
||||
-F "response_format=verbose_json"
|
||||
-F "temperature=0.0"
|
||||
--max-time 3600
|
||||
)
|
||||
if [ -n "$HOTWORDS" ]; then
|
||||
curl_args+=(-F "hotwords=${HOTWORDS}")
|
||||
fi
|
||||
|
||||
local http_code
|
||||
http_code=$(curl "${curl_args[@]}")
|
||||
local elapsed=$(( $(date +%s) - started ))
|
||||
|
||||
if [ "$http_code" != "200" ]; then
|
||||
echo " ERROR: HTTP $http_code"
|
||||
# Display error response body for debugging
|
||||
if [ -f "$json_file" ]; then
|
||||
cat "$json_file"
|
||||
fi
|
||||
rm -f "$json_file"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo " Done in ${elapsed}s"
|
||||
|
||||
# Extract plain text and timestamped text
|
||||
|
|
@ -135,7 +134,7 @@ print(f" {len(segs)} segments, {len(plain)} chars")
|
|||
PYEOF
|
||||
}
|
||||
|
||||
# Manual mode: specific file
|
||||
# ---------- manual mode (specific file) ----------
|
||||
if [ $# -ge 3 ]; then
|
||||
WAV_FILE="$WORK_DIR/$2"
|
||||
NAME="$3"
|
||||
|
|
@ -148,14 +147,11 @@ if [ $# -ge 3 ]; then
|
|||
exit 0
|
||||
fi
|
||||
|
||||
# Auto mode: detect and transcribe all sources
|
||||
# ---------- auto mode ----------
|
||||
echo "=== Auto-detecting audio sources in $WORK_DIR ==="
|
||||
|
||||
# Detect H2n files (SR*XY.WAV, SR*MS.WAV)
|
||||
H2N_XY=$(find "$WORK_DIR" -maxdepth 1 -name "SR*XY.WAV" | head -1)
|
||||
H2N_MS=$(find "$WORK_DIR" -maxdepth 1 -name "SR*MS.WAV" | head -1)
|
||||
|
||||
# Detect Saramonic / other timestamped WAV files (not SR*)
|
||||
mapfile -t SARAMONIC_FILES < <(find "$WORK_DIR" -maxdepth 1 -name "*.WAV" ! -name "SR*" | sort)
|
||||
|
||||
SOURCES=()
|
||||
|
|
@ -201,10 +197,4 @@ done
|
|||
|
||||
echo ""
|
||||
echo "=== Done! ==="
|
||||
echo "Results in: $OUTPUT_DIR/"
|
||||
for entry in "${SOURCES[@]}"; do
|
||||
name="${entry%%:*}"
|
||||
echo " ${name}.json - whisper JSON with segments"
|
||||
echo " ${name}.txt - timestamped transcription"
|
||||
echo " ${name}_plain.txt - plain text"
|
||||
done
|
||||
echo "Results in: $OUTPUT_DIR/"
|
||||
|
|
@ -1,64 +1,197 @@
|
|||
#!/bin/bash
|
||||
# Транскрипция с разбивкой на чанки для длинных аудио
|
||||
# Transcribe long audio by splitting at silence boundaries
|
||||
# Uses Whisper API with authentication
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
MEETING_DIR="${1:-.}"
|
||||
CHUNKS_DIR="$MEETING_DIR/transcription"
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
: ${WHISPER_URL:?ERROR: WHISPER_URL not set}
|
||||
: ${WHISPER_API_KEY:?ERROR: WHISPER_API_KEY not set}
|
||||
MODEL="whisper-1" # Изменено с конкретной модели на общее название
|
||||
LANGUAGE="ru"
|
||||
TARGET_CHUNK=600
|
||||
|
||||
HOTWORDS_FILE="${HOTWORKS_PATH:-$SCRIPT_DIR/hotwords.txt}"
|
||||
HOTWORDS=""
|
||||
if [ -f "$HOTWORDS_FILE" ]; then
|
||||
HOTWORDS=$(grep -v '^#' "$HOTWORDS_FILE" | grep -v '^$' | tr '\n' ',' | sed 's/,,*/,/g; s/^,//; s/,$//')
|
||||
fi
|
||||
|
||||
MP3_FILE="$1"
|
||||
NAME="$2"
|
||||
OUTPUT_DIR="$3"
|
||||
CHUNKS_DIR="$OUTPUT_DIR/chunks_${NAME}"
|
||||
|
||||
if [ -f "$OUTPUT_DIR/${NAME}.json" ]; then
|
||||
echo "$NAME already transcribed, skipping"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
mkdir -p "$CHUNKS_DIR"
|
||||
|
||||
# Get audio file
|
||||
WAV_FILE=$(ls "$MEETING_DIR"/*.wav 2>/dev/null || ls "$MEETING_DIR"/*.WAV 2>/dev/null)
|
||||
if [ -z "$WAV_FILE" ] || [ ! -f "$WAV_FILE" ]; then
|
||||
echo "Error: No WAV file found"
|
||||
exit 1
|
||||
DURATION=$(ffprobe -v error -show_entries format=duration -of csv=p=0 "$MP3_FILE" | cut -d. -f1)
|
||||
echo "=== Chunked transcription: $NAME ($DURATION s / $((DURATION/60))m) ==="
|
||||
|
||||
# Find silence gaps
|
||||
SILENCES_FILE="$CHUNKS_DIR/silences.txt"
|
||||
if [ ! -f "$SILENCES_FILE" ]; then
|
||||
ffmpeg -i "$MP3_FILE" -af "silencedetect=noise=-35dB:d=0.5" -f null - 2>&1 \
|
||||
| grep "silence_end" \
|
||||
| sed 's/.*silence_end: \([0-9.]*\).*/\1/' \
|
||||
> "$SILENCES_FILE"
|
||||
fi
|
||||
echo " Found $(wc -l < "$SILENCES_FILE") silence gaps"
|
||||
|
||||
# Duration
|
||||
DURATION=$(ffprobe -i "$WAV_FILE" -show_entries format=duration -v quiet -of csv="p=0")
|
||||
# Compute split points
|
||||
SPLIT_POINTS=$(python3 - "$SILENCES_FILE" "$TARGET_CHUNK" "$DURATION" <<'PYEOF'
|
||||
import sys
|
||||
silences_file = sys.argv[1]
|
||||
target = float(sys.argv[2])
|
||||
duration = float(sys.argv[3])
|
||||
|
||||
echo "Audio duration: $DURATION seconds"
|
||||
with open(silences_file) as f:
|
||||
silences = [float(line.strip()) for line in f if line.strip()]
|
||||
|
||||
# Chunk settings
|
||||
chunk_duration=600
|
||||
offset=0
|
||||
chunk_num=0
|
||||
if not silences:
|
||||
n = max(2, int(duration / target))
|
||||
splits = [duration * i / n for i in range(1, n)]
|
||||
else:
|
||||
splits = []
|
||||
t = target
|
||||
while t < duration - 30:
|
||||
best = min(silences, key=lambda s: abs(s - t))
|
||||
if not splits or best > splits[-1] + 30:
|
||||
splits.append(best)
|
||||
t += target
|
||||
|
||||
echo "Extracting chunks..."
|
||||
print(" ".join(f"{s:.2f}" for s in splits))
|
||||
PYEOF
|
||||
)
|
||||
|
||||
while (( $(echo "$offset < $DURATION" | bc -l) )); do
|
||||
chunk_file="$CHUNKS_DIR/chunk_${chunk_num}.wav"
|
||||
echo "Extracting chunk $chunk_num at offset $offset..."
|
||||
|
||||
# Retry logic
|
||||
for attempt in 1 2 3; do
|
||||
if ffmpeg -i "$WAV_FILE" -ss "$offset" -t "$chunk_duration" -acodec pcm_s16le -ar 16000 "$chunk_file" -y 2>/dev/null; then
|
||||
break
|
||||
elif [ $attempt -eq 3 ]; then
|
||||
echo "Error: Failed to extract chunk $chunk_num"
|
||||
exit 1
|
||||
IFS=' ' read -ra POINTS <<< "$SPLIT_POINTS"
|
||||
N_CHUNKS=$((${#POINTS[@]} + 1))
|
||||
echo " Will create $N_CHUNKS chunks"
|
||||
|
||||
# Split audio
|
||||
PREV=0
|
||||
for i in $(seq 0 $((N_CHUNKS - 1))); do
|
||||
CHUNK_FILE="$CHUNKS_DIR/chunk_$(printf '%03d' $i).mp3"
|
||||
if [ -f "$CHUNK_FILE" ] && [ $(stat -c%s "$CHUNK_FILE") -gt 1000 ]; then
|
||||
if [ $i -lt ${#POINTS[@]} ]; then
|
||||
PREV="${POINTS[$i]}"
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
offset=$((offset + chunk_duration))
|
||||
((chunk_num++))
|
||||
done
|
||||
echo " chunk_$(printf '%03d' $i): exists, skipping"
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "Transcribing $chunk_num chunks..."
|
||||
if [ $i -lt ${#POINTS[@]} ]; then
|
||||
END="${POINTS[$i]}"
|
||||
DUR=$(python3 -c "print(f'{$END - $PREV:.2f}')")
|
||||
ffmpeg -y -i "$MP3_FILE" -ss "$PREV" -t "$DUR" -c copy "$CHUNK_FILE" 2>/dev/null
|
||||
PREV="$END"
|
||||
else
|
||||
ffmpeg -y -i "$MP3_FILE" -ss "$PREV" -c copy "$CHUNK_FILE" 2>/dev/null
|
||||
fi
|
||||
CHUNK_DUR=$(ffprobe -v error -show_entries format=duration -of csv=p=0 "$CHUNK_FILE" | cut -d. -f1)
|
||||
echo " chunk_$(printf '%03d' $i): ${CHUNK_DUR}s"
|
||||
done
|
||||
|
||||
# Transcribe each chunk
|
||||
for i in $(seq 0 $((chunk_num - 1))); do
|
||||
chunk_file="$CHUNKS_DIR/chunk_${i}.wav"
|
||||
output_file="$CHUNKS_DIR/chunk_${i}.txt"
|
||||
|
||||
echo "Transcribing chunk $i..."
|
||||
MKL_SERVICE_FORCE_INTEL=1 OMP_NUM_THREADS=2 python3 "${BASH_SOURCE[0]%/*}/local_whisper.py" "$chunk_file" > "$output_file"
|
||||
echo "Transcribing chunks..."
|
||||
for i in $(seq 0 $((N_CHUNKS - 1))); do
|
||||
CHUNK_FILE="$CHUNKS_DIR/chunk_$(printf '%03d' $i).mp3"
|
||||
CHUNK_JSON="$CHUNKS_DIR/chunk_$(printf '%03d' $i).json"
|
||||
|
||||
if [ -f "$CHUNK_JSON" ]; then
|
||||
echo " chunk_$(printf '%03d' $i): already transcribed"
|
||||
continue
|
||||
fi
|
||||
|
||||
echo -n " chunk_$(printf '%03d' $i): transcribing... "
|
||||
STARTED=$(date +%s)
|
||||
|
||||
full_url="${WHISPER_URL}/audio/transcriptions"
|
||||
|
||||
CURL_ARGS=(
|
||||
-s -w "%{http_code}" -o "$CHUNK_JSON"
|
||||
-X POST "$full_url"
|
||||
-H "Authorization: Bearer $WHISPER_API_KEY"
|
||||
-F "file=@${CHUNK_FILE}"
|
||||
-F "model=${MODEL}"
|
||||
-F "language=${LANGUAGE}"
|
||||
-F "response_format=verbose_json"
|
||||
-F "temperature=0.0"
|
||||
--max-time 600
|
||||
)
|
||||
[ -n "$HOTWORDS" ] && CURL_ARGS+=(-F "hotwords=${HOTWORDS}")
|
||||
|
||||
HTTP_CODE=$(curl "${CURL_ARGS[@]}")
|
||||
ELAPSED=$(( $(date +%s) - STARTED ))
|
||||
|
||||
if [ "$HTTP_CODE" != "200" ]; then
|
||||
echo "ERROR (HTTP $HTTP_CODE)"
|
||||
if [ -f "$CHUNK_JSON" ]; then
|
||||
cat "$CHUNK_JSON"
|
||||
fi
|
||||
rm -f "$CHUNK_JSON"
|
||||
exit 1
|
||||
fi
|
||||
echo "done in ${ELAPSED}s"
|
||||
done
|
||||
|
||||
# Merge
|
||||
echo "Merging transcriptions..."
|
||||
cat "$CHUNKS_DIR"/chunk_*.txt > "$CHUNKS_DIR/merged_raw.txt"
|
||||
# Merge chunks into final JSON
|
||||
echo "Merging chunks..."
|
||||
python3 - "$CHUNKS_DIR" "$OUTPUT_DIR" "$NAME" "$SPLIT_POINTS" <<'PYEOF'
|
||||
import json, sys, os, glob
|
||||
|
||||
echo "Done. Output: $CHUNKS_DIR/merged_raw.txt"
|
||||
chunks_dir = sys.argv[1]
|
||||
output_dir = sys.argv[2]
|
||||
name = sys.argv[3]
|
||||
split_points_str = sys.argv[4] if len(sys.argv) > 4 else ""
|
||||
|
||||
if split_points_str.strip():
|
||||
split_points = [float(x) for x in split_points_str.strip().split()]
|
||||
else:
|
||||
split_points = []
|
||||
offsets = [0.0] + split_points
|
||||
|
||||
chunk_files = sorted(glob.glob(os.path.join(chunks_dir, "chunk_*.json")))
|
||||
all_segments = []
|
||||
total_duration = 0
|
||||
|
||||
for idx, cf in enumerate(chunk_files):
|
||||
with open(cf) as f:
|
||||
data = json.load(f)
|
||||
offset = offsets[idx] if idx < len(offsets) else offsets[-1]
|
||||
for seg in data.get("segments", []):
|
||||
all_segments.append({
|
||||
"start": round(seg.get("start", 0) + offset, 2),
|
||||
"end": round(seg.get("end", 0) + offset, 2),
|
||||
"text": seg.get("text", "").strip(),
|
||||
})
|
||||
chunk_dur = data.get("duration", 0)
|
||||
total_duration = max(total_duration, offset + chunk_dur)
|
||||
|
||||
all_segments.sort(key=lambda s: s["start"])
|
||||
merged = {"segments": all_segments, "duration": total_duration}
|
||||
json_path = os.path.join(output_dir, f"{name}.json")
|
||||
with open(json_path, "w") as f:
|
||||
json.dump(merged, f, ensure_ascii=False, indent=2)
|
||||
|
||||
txt_path = os.path.join(output_dir, f"{name}.txt")
|
||||
with open(txt_path, "w") as f:
|
||||
for seg in all_segments:
|
||||
start = seg["start"]
|
||||
h, m, s = int(start // 3600), int((start % 3600) // 60), int(start % 60)
|
||||
f.write(f"[{h:02d}:{m:02d}:{s:02d}] {seg['text']}\n")
|
||||
|
||||
plain_path = os.path.join(output_dir, f"{name}_plain.txt")
|
||||
with open(plain_path, "w") as f:
|
||||
f.write(" ".join(seg["text"] for seg in all_segments))
|
||||
|
||||
print(f" {len(all_segments)} segments total")
|
||||
print(f" Written: {json_path}, {txt_path}, {plain_path}")
|
||||
PYEOF
|
||||
|
||||
echo "=== Done: $NAME ==="
|
||||
echo "=== Done: $NAME ==="
|
||||
Loading…
Add table
Add a link
Reference in a new issue