diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml new file mode 100644 index 000000000..c91710a46 --- /dev/null +++ b/.github/workflows/python-package.yml @@ -0,0 +1,36 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Python package + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + matrix: + python-version: [3.7, 3.8, 3.9] + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install flake8 pytest + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + - name: Lint with flake8 + run: | + # stop the build if there are Python syntax errors or undefined names + flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics diff --git a/pyAudioAnalysis/ShortTermFeatures.py b/pyAudioAnalysis/ShortTermFeatures.py index b892634d0..090f95d14 100644 --- a/pyAudioAnalysis/ShortTermFeatures.py +++ b/pyAudioAnalysis/ShortTermFeatures.py @@ -6,6 +6,7 @@ import matplotlib.pyplot as plt from scipy.signal import lfilter from scipy.fftpack.realtransforms import dct +from scikits.talkbox import lpc from tqdm import tqdm eps = sys.float_info.epsilon @@ -604,6 +605,7 @@ def feature_extraction(signal, sampling_rate, window, step, deltas=True): feature_names = feature_names_2 features = [] + feature_vector_prev = None # for each short-term window to end of signal while current_position + window - 1 < number_of_samples: count_fr += 1 @@ -669,7 +671,7 @@ def feature_extraction(signal, sampling_rate, window, step, deltas=True): features.append(feature_vector) else: # delta features - if count_fr > 1: + if count_fr > 1 and feature_vector_prev is not None: delta = feature_vector - feature_vector_prev feature_vector_2 = np.concatenate((feature_vector, delta)) else: diff --git a/pyAudioAnalysis/data/recordRadio.py b/pyAudioAnalysis/data/recordRadio.py index 6c7bf48a3..417e44139 100755 --- a/pyAudioAnalysis/data/recordRadio.py +++ b/pyAudioAnalysis/data/recordRadio.py @@ -110,9 +110,9 @@ def recordStation(stationName, outputName, sleepTime = -1, Listen = False): r.listen = Listen r.start() - print r.bus() + print(r.bus()) if sleepTime<=0: - raw_input('Press [Enter] to stop') + input('Press [Enter] to stop') else: time.sleep(sleepTime) r.stop() diff --git a/pyAudioAnalysis/data/testComputational.py b/pyAudioAnalysis/data/testComputational.py index f665e306c..9f1b19f90 100755 --- a/pyAudioAnalysis/data/testComputational.py +++ b/pyAudioAnalysis/data/testComputational.py @@ -16,7 +16,8 @@ def main(argv): t1 = time.time() F = MidTermFeatures.short_term_feature_extraction(x, Fs, 0.050 * Fs, 0.050 * Fs); t2 = time.time() - perTime1 = duration / (t2-t1); 
print "short-term feature extraction: {0:.1f} x realtime".format(perTime1) + perTime1 = duration / (t2-t1) + print("short-term feature extraction: {0:.1f} x realtime".format(perTime1)) elif argv[1] == "-classifyFile": for i in range(nExp): [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav"); @@ -24,7 +25,8 @@ def main(argv): t1 = time.time() aT.file_classification("diarizationExample.wav", "svmSM", "svm") t2 = time.time() - perTime1 = duration / (t2-t1); print "Mid-term feature extraction + classification \t {0:.1f} x realtime".format(perTime1) + perTime1 = duration / (t2-t1) + print("Mid-term feature extraction + classification \t {0:.1f} x realtime".format(perTime1)) elif argv[1] == "-mtClassify": for i in range(nExp): [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav"); @@ -32,7 +34,8 @@ def main(argv): t1 = time.time() [flagsInd, classesAll, acc] = aS.mid_term_file_classification("diarizationExample.wav", "svmSM", "svm", False, '') t2 = time.time() - perTime1 = duration / (t2-t1); print "Fix-sized classification - segmentation \t {0:.1f} x realtime".format(perTime1) + perTime1 = duration / (t2-t1) + print("Fix-sized classification - segmentation \t {0:.1f} x realtime".format(perTime1)) elif argv[1] == "-hmmSegmentation": for i in range(nExp): [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav"); @@ -40,7 +43,8 @@ def main(argv): t1 = time.time() aS.hmm_segmentation('diarizationExample.wav', 'hmmRadioSM', False, '') t2 = time.time() - perTime1 = duration / (t2-t1); print "HMM-based classification - segmentation \t {0:.1f} x realtime".format(perTime1) + perTime1 = duration / (t2-t1) + print("HMM-based classification - segmentation \t {0:.1f} x realtime".format(perTime1)) elif argv[1] == "-silenceRemoval": for i in range(nExp): [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav"); @@ -49,7 +53,8 @@ def main(argv): [Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav"); segments = aS.silence_removal(x, Fs, 
0.050, 0.050, smooth_window= 1.0, Weight = 0.3, plot = False) t2 = time.time() - perTime1 = duration / (t2-t1); print "Silence removal \t {0:.1f} x realtime".format(perTime1) + perTime1 = duration / (t2-t1) + print("Silence removal \t {0:.1f} x realtime".format(perTime1)) elif argv[1] == "-thumbnailing": for i in range(nExp): [Fs1, x1] = audioBasicIO.read_audio_file("scottish.wav") @@ -57,7 +62,8 @@ def main(argv): t1 = time.time() [A1, A2, B1, B2, Smatrix] = aS.music_thumbnailing(x1, Fs1, 1.0, 1.0, 15.0) # find thumbnail endpoints t2 = time.time() - perTime1 = duration1 / (t2-t1); print "Thumbnail \t {0:.1f} x realtime".format(perTime1) + perTime1 = duration1 / (t2-t1) + print("Thumbnail \t {0:.1f} x realtime".format(perTime1)) elif argv[1] == "-diarization-noLDA": for i in range(nExp): [Fs1, x1] = audioBasicIO.read_audio_file("diarizationExample.wav") @@ -65,7 +71,8 @@ def main(argv): t1 = time.time() aS.speaker_diarization("diarizationExample.wav", 4, LDAdim = 0, PLOT = False) t2 = time.time() - perTime1 = duration1 / (t2-t1); print "Diarization \t {0:.1f} x realtime".format(perTime1) + perTime1 = duration1 / (t2-t1) + print("Diarization \t {0:.1f} x realtime".format(perTime1)) elif argv[1] == "-diarization-LDA": for i in range(nExp): [Fs1, x1] = audioBasicIO.read_audio_file("diarizationExample.wav") @@ -73,7 +80,8 @@ def main(argv): t1 = time.time() aS.speaker_diarization("diarizationExample.wav", 4, PLOT = False) t2 = time.time() - perTime1 = duration1 / (t2-t1); print "Diarization \t {0:.1f} x realtime".format(perTime1) + perTime1 = duration1 / (t2-t1) + print("Diarization \t {0:.1f} x realtime".format(perTime1)) if __name__ == '__main__': main(sys.argv)