@NEHANIPHADKAR/ChirpDetection

Python

Detecting where the played chirp signal occurs in a recorded file.

Files
  • main.py
  • played.wav
  • recorded.wav
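
main.py below finds the chirp by onset detection on the recording alone. Since the project also contains played.wav, another option is a matched filter: cross-correlate the recording against the reference chirp and take the peak lag as the chirp's start. The following is only a sketch of that idea, not part of main.py; it assumes played.wav holds the clean reference chirp.

import numpy as np
import librosa
from scipy.signal import correlate

rec, sr = librosa.load("recorded.wav", sr=None)   # recording, at its native rate
ref, _ = librosa.load("played.wav", sr=sr)        # reference chirp, resampled to match

# Full cross-correlation; the strongest peak marks the most likely chirp start
xc = correlate(rec, ref, mode="full")
lag = np.argmax(np.abs(xc)) - (len(ref) - 1)
print("estimated chirp start: sample %d (%.3f s)" % (lag, lag / sr))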

main.py
import matplotlib.pyplot as plt
import librosa
import numpy as np

# I played around with this but ultimately kept the default value
hoplen=512
y, sr = librosa.load("recorded.wav")
# Note that the first ~2240 samples (0.1 seconds) are anomalously low noise,
# so cut out this section from processing
start = 2240
y = y[start:]
idx = np.arange(len(y))

# Calculate the onset frames in the usual way
onset_frames = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hoplen)
onstm = librosa.frames_to_time(onset_frames, sr=sr, hop_length=hoplen)

# Calculate RMS energy per frame.  I shortened the frame length from the
# default value in order to avoid ending up with too much smoothing
# (librosa.feature.rmse was renamed to librosa.feature.rms in newer librosa)
rmse = librosa.feature.rms(y=y, frame_length=512, hop_length=hoplen)[0]
envtm = librosa.frames_to_time(np.arange(len(rmse)), sr=sr, hop_length=hoplen)
# Use final 3 seconds of recording in order to estimate median noise level
# and typical variation
noiseidx = envtm > envtm[-1] - 3.0
noisemedian = np.percentile(rmse[noiseidx], 50)
sigma = np.percentile(rmse[noiseidx], 84.1) - noisemedian
# Set the minimum RMS energy threshold that is needed in order to declare
# an "onset" event to be equal to 5 sigma above the median
threshold = noisemedian + 5*sigma
threshidx = rmse > threshold
# Choose the corrected onset times as only those which meet the RMS energy
# minimum threshold requirement
correctedonstm = onstm[np.isin(onstm, envtm[threshidx])]

# Print both in units of actual time (seconds) and sample ID number
print(correctedonstm+start/sr)
print(correctedonstm*sr+start)

fg = plt.figure(figsize=[12, 8])

# Plot the waveform together with onset times superimposed in red
ax1 = fg.add_subplot(2,1,1)
ax1.plot(idx+start, y)
for ii in correctedonstm*sr+start:
    ax1.axvline(ii, color='r')
ax1.set_ylabel('Amplitude', fontsize=16)

# Plot the RMSE together with onset times superimposed in red
ax2 = fg.add_subplot(2,1,2, sharex=ax1)
ax2.plot(envtm*sr+start, rmse)
for ii in correctedonstm*sr+start:
    ax2.axvline(ii, color='r')
# Plot threshold value superimposed as a black dotted line
ax2.axhline(threshold, linestyle=':', color='k')
ax2.set_ylabel("RMSE", fontsize=16)
ax2.set_xlabel("Sample Number", fontsize=16)

plt.show()
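
A quick sanity check on the result (not part of the script above): a real chirp should show up as a frequency sweep in a spectrogram just after the first detected onset, whereas a noise burst will not. A minimal follow-up sketch, assuming correctedonstm is non-empty and reusing y, sr, and plt from main.py:

if len(correctedonstm) > 0:
    first = int(correctedonstm[0] * sr)        # first onset, as a sample index in the trimmed signal
    seg = y[first:first + int(0.5 * sr)]       # half a second starting at the onset
    plt.figure(figsize=[8, 4])
    plt.specgram(seg, NFFT=512, Fs=sr, noverlap=256)
    plt.xlabel("Time since onset (s)")
    plt.ylabel("Frequency (Hz)")
    plt.show()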