
Play a Wave File Using DirectSound and Display Its Spectrum in Real Time


September 16, 2008

CPOL

2 min read


This article shows how to play a Wave file using DirectSound and display its spectrum in real time.

DSound_Spectrum.gif

Introduction

Do you want to play a Wave file with DirectSound and display its spectrum in real time? This article uses DirectSound and the Win32 GDI API to do exactly that. It has two parts: the first plays the Wave file and is based on the article Play Audio Stream Data with DirectSound; the second, which is the main work here, shows how to display the sound spectrum in real time.

Contents

Maintaining the Circular Buffer While Playing
Computing the Left and Right Channel Values and Running the FFT
Drawing the Spectrum with the Win32 GDI API
The Position I Get Is Not Exactly That of a Circular Buffer

Maintaining the Circular Buffer While Playing

In this section, I add some code to maintain a circular buffer (the added lines are marked with jacky_zz comments below); the underlying player is based on the article Play Audio Stream Data with DirectSound.

class CMyDirectSound  
{
public:

    CMyDirectSound();
    virtual ~CMyDirectSound();

    void SetFormat(WAVEFORMATEX WFE);
    void SetCallback(LPGETAUDIOSAMPLES_PROGRESS Function_Callback, 
                     LPVOID lpData);
    void Play();
    void Pause();
    void Stop();
    DWORD GetSamplesPlayed(DWORD* pCurPlayPos);
    void TimerCallback();
    LPDIRECTSOUNDBUFFER GetSoundBuffer() { return m_lpDSB; }
    WAVEFORMATEX GetWaveFormateEx() { return m_WFE; } // jacky_zz[2008-09-04]
    LPDIRECTSOUNDBUFFER GetDirectSoundBuffer() { return m_lpDSB; } // jacky_zz[2008-09-04]
    LPBYTE GetSampleDataBuffer() { return m_lpSampleDataBuffer; } // jacky_zz[2008-09-04]

private:

    //<DirectSound>
    WAVEFORMATEX m_WFE;
    LPDIRECTSOUND m_lpDS;
    LPDIRECTSOUNDBUFFER m_lpDSB;
    HANDLE m_pHEvent[2];
    //</DirectSound>

    //<Audio Buffer>
    LPBYTE m_lpAudioBuf;
    LPGETAUDIOSAMPLES_PROGRESS m_lpGETAUDIOSAMPLES;
    LPVOID m_lpData;
    //</Audio Buffer>

    //<SampleData> jacky_zz[2008-09-04]
    LPBYTE m_lpSampleDataBuffer;
    //</SampleData> jacky_zz[2008-09-04]

    //<Playing>
    MMRESULT m_timerID;
    DWORD m_dwCircles1;
    DWORD m_dwCircles2;
    int m_iDB;    
    //</Playing>

    //<Error Information>
    CString m_strLastError;
    //</Error Information>

    DWORD m_dwThreadID;
    HANDLE m_hThread;
};
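For orientation, here is a minimal usage sketch of this class. The callback GetAudioSamples and the user-data pointer are hypothetical placeholders for the pull-model callback described in the base article:

CMyDirectSound ds;
ds.SetFormat(wfe);                          //WAVEFORMATEX read from the Wave file header
ds.SetCallback(GetAudioSamples, &waveFile); //callback that supplies PCM data on demand (hypothetical)
ds.Play();                                  //starts streaming into the secondary buffer
//...
ds.Stop();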

The member m_lpSampleDataBuffer holds two seconds of audio, the same length as the secondary buffer m_lpDSB in the CMyDirectSound class. CMyDirectSound reads the Wave file's sound data and fills it into the secondary buffer; here I add some code to copy that sound data into m_lpSampleDataBuffer as well.
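The article does not show where m_lpSampleDataBuffer is allocated; a minimal sketch, assuming it happens in SetFormat once the wave format is known:

//Two seconds of audio, matching the size of the secondary DirectSound buffer.
m_lpSampleDataBuffer = new BYTE[m_WFE.nAvgBytesPerSec * 2];
memset(m_lpSampleDataBuffer, 0, m_WFE.nAvgBytesPerSec * 2);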

void CMyDirectSound::Play()
{
    ...

    //Copy Audio Buffer to DirectSoundBuffer
    if (NULL == lpvAudio2)
    {
        memcpy(lpvAudio1, m_lpAudioBuf, dwRetBytes);

        //jacky_zz
        memcpy(m_lpSampleDataBuffer, m_lpAudioBuf, dwRetBytes);
        //jacky_zz
    }
    else
    {
        memcpy(lpvAudio1, m_lpAudioBuf, dwBytesAudio1);
        memcpy(lpvAudio2, m_lpAudioBuf + dwBytesAudio1, dwBytesAudio2);

        //jacky_zz
        memcpy(m_lpSampleDataBuffer, m_lpAudioBuf, 
               dwBytesAudio1 + dwBytesAudio2);
        //jacky_zz
    }

    ...
}
void CMyDirectSound::TimerCallback()
{
    ...

    //If near the end of the audio data
    if (dwRetSamples < m_WFE.nSamplesPerSec)
    {
        DWORD dwRetBytes = dwRetSamples*m_WFE.nBlockAlign;
        memset(m_lpAudioBuf+dwRetBytes, 0, 
               m_WFE.nAvgBytesPerSec - dwRetBytes);

        //jacky_zz
        memset(m_lpSampleDataBuffer+dwRetBytes, 0, 
               m_WFE.nAvgBytesPerSec*2 - dwRetBytes);
        //jacky_zz
    }

    //Copy AudioBuffer to DirectSoundBuffer
    if (NULL == lpvAudio2)
    {
        memcpy(lpvAudio1, m_lpAudioBuf, dwBytesAudio1);

        //jacky_zz
        memcpy(m_lpSampleDataBuffer+dwOffset, 
               m_lpAudioBuf, dwBytesAudio1);
        //jacky_zz
    }
    else
    {
        memcpy(lpvAudio1, m_lpAudioBuf, dwBytesAudio1);
        memcpy(lpvAudio2, m_lpAudioBuf + dwBytesAudio1, dwBytesAudio2);

        //jacky_zz
        memcpy(m_lpSampleDataBuffer+dwOffset, m_lpAudioBuf, 
               dwBytesAudio1+dwBytesAudio2);
        //jacky_zz
    }

    ...
}
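Both snippets elide the surrounding IDirectSoundBuffer::Lock/Unlock calls that produce lpvAudio1 and lpvAudio2. For completeness, here is a minimal sketch of that pairing; the write offset and lock size are illustrative, not the article's exact values:

LPVOID lpvAudio1 = NULL, lpvAudio2 = NULL;
DWORD  dwBytesAudio1 = 0, dwBytesAudio2 = 0;

//Lock one second of the secondary buffer starting at dwOffset; two pointers
//come back when the locked region wraps past the end of the buffer.
if (SUCCEEDED(m_lpDSB->Lock(dwOffset, m_WFE.nAvgBytesPerSec,
                            &lpvAudio1, &dwBytesAudio1,
                            &lpvAudio2, &dwBytesAudio2, 0)))
{
    //...copy m_lpAudioBuf into lpvAudio1/lpvAudio2 as shown above...
    m_lpDSB->Unlock(lpvAudio1, dwBytesAudio1, lpvAudio2, dwBytesAudio2);
}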

Computing the Left and Right Channel Values and Running the FFT

In this section, I describe how to compute the left and right channel values and then run them through an FFT. Before that, I want to mention a few things. I referred to several articles, such as FFT of waveIn Audio Signals, A Simple Audio Out Oscilloscope and Spectrum Analyzer, and Multimedia PeakMeter Control. They all convert the sound data (bytes) into a short array with reinterpret_cast<short*>. That approach did not work in my program, so I borrowed the approach of a Java MP3 player that can display a spectrum.

LPBYTE lpAudioBuffer = playmod->pMyDS->GetSampleDataBuffer();
if(lpAudioBuffer == NULL)
    return;

float left, right;
//Walk the 2-second sample buffer from the current play position, assembling
//16-bit little-endian samples for each channel (16-bit stereo PCM assumed).
for(int i=0;i<FFT_SAMPLE_SIZE;i++) {
    //Wrap around when we run past the end of the circular buffer
    if(dwCurPlayPos > dw2SecondByteSize)
        dwCurPlayPos -= dw2SecondByteSize;

    left = (float)((lpAudioBuffer[dwCurPlayPos+1] << 8) + 
                    lpAudioBuffer[dwCurPlayPos+0])/32767;
    right = (float)((lpAudioBuffer[dwCurPlayPos+3] << 8) + 
                     lpAudioBuffer[dwCurPlayPos+2])/32767;
    floatSamples[i] = (left+right)/2;   //mix down to mono for the FFT
    dwCurPlayPos+=4;                    //advance one stereo frame (4 bytes)
}

FFT* fft = (FFT*)playmod->fft;
float* lpFloatFFTData = fft->calculate(floatSamples, FFT_SAMPLE_SIZE);
//memcpy takes a byte count, so the float count must be scaled by sizeof(float)
memcpy(floatMag, lpFloatFFTData, (FFT_SAMPLE_SIZE/2)*sizeof(float));
//Notify the main window that new spectrum data is ready (custom message)
SendMessage(playmod->hWndMain, WM_PAINT+913, (WPARAM)0, (LPARAM)13);
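The snippet relies on a few buffers and constants that the article does not list. The following declarations are assumptions that make the sizes explicit; the actual values in the download may differ:

//Assumed supporting declarations (sizes are illustrative).
#define FFT_SAMPLE_SIZE 1024                          //mono samples fed to the FFT per update

float floatSamples[FFT_SAMPLE_SIZE];                  //mono mix of the left and right channels
float floatMag[FFT_SAMPLE_SIZE/2];                    //magnitudes of the first N/2 FFT bins
float floatOldMag[FFT_SAMPLE_SIZE/2];                 //decayed magnitudes used by DrawSpectrum

WAVEFORMATEX wfe = playmod->pMyDS->GetWaveFormateEx();
DWORD dw2SecondByteSize = wfe.nAvgBytesPerSec*2;      //wrap point of the 2-second sample buffer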

Drawing the Spectrum with the Win32 GDI API

void DrawSpectrum(HWND hwnd, float* fftData)
{
    if(fftData == NULL)
        return;    

    HDC hdc = GetWindowDC(hwnd);
    SetBkMode(hdc, TRANSPARENT);

    HPEN hpen, hpenOld;
    HBRUSH hbrush, hbrushOld;
    HBRUSH hbrush1, hbrushOld1;
    RECT rect;

    rect.left = 4;
    rect.top = 23;
    rect.right = rect.left+SPECTRUM_WIDTH;
    rect.bottom = rect.top+SPECTRUM_HEIGHT;

    // Create a green pen.
    hpen = CreatePen(PS_SOLID, 1, RGB(0, 255, 0));
    // Create a black brush and a gray brush.
    hbrush = CreateSolidBrush(RGB(0, 0, 0));
    hbrush1 = CreateSolidBrush(RGB(125, 125, 125));

    // Select the new pen and brush, and then draw.
    hpenOld = (HPEN)SelectObject(hdc, hpen);
    hbrushOld = (HBRUSH)SelectObject(hdc, hbrush);
    hbrushOld1 = (HBRUSH)SelectObject(hdc, hbrush1);
    Rectangle(hdc, rect.left, rect.top, rect.right, rect.bottom);

    int maxFreq = FFT_SIZE / 2;
    int height = 0;
    int maxHeight = SPECTRUM_HEIGHT;

    float c = 0;
    float floatFrrh = 1.0;
    float floatDecay = (float)SPECTRUM_DECAY;
    float floatSadFrr = (floatFrrh*floatDecay);
    float floatBandWidth = ((float)SPECTRUM_WIDTH/(float)SPECTRUM_BANDS);
    float floatMultiplier = 2.0;

    //CString xx;
    RECT r;
    for(int a=0, band=0; band < SPECTRUM_BANDS; 
            a+=(int)floatMultiplier, band++)
    {
        float wFs = 0;

        // -- Average out nearest bands.
        for (int b = 0; b < floatMultiplier; b++) {
            wFs += fftData[a + b];
        }

        // -- Log filter.
        wFs = (wFs * (float) log((float)(band + 2)));
        //xx.Format(_T("%1.4f\n"), wFs);
        //OutputDebugString(xx);
        if (wFs > 1.0f) {
            wFs = 1.0f;
        }

        // -- Compute SA decay...
        if (wFs >= (floatOldMag[a] - floatSadFrr)) {
            floatOldMag[a] = wFs;
        } else {
            floatOldMag[a] -= floatSadFrr;
            if (floatOldMag[a] < 0) {
                floatOldMag[a] = 0;
            }
            wFs = floatOldMag[a];
        }

        r.left = rect.left + (int)c + 1;
        r.right = r.left + (int)(floatBandWidth-1);
        r.top = SPECTRUM_HEIGHT - (int)(wFs*SPECTRUM_HEIGHT);
        if(r.top < rect.top)
            r.top = rect.top + 2;

        r.top += 22;
        r.bottom = rect.bottom-2;        

        FillRect(hdc, &r, hbrushOld1);

        int height = HEIGHT(r);
        if(height > intPeaks[band])
        {
            intPeaks[band] = height;
            intPeaksDelay[band] = SPECTRUM_DELAY;
        }
        else
        {
            intPeaksDelay[band]--;
            if (intPeaksDelay[band] < 0) {
                intPeaks[band]--;
            }

            if (intPeaks[band] < 0) {
                intPeaks[band] = 0;
            }
        }

        r.top -= intPeaks[band];
        if(r.top < rect.top)
            r.top = rect.top + 2;

        r.top += 22;
        if(r.top >= rect.bottom)
            r.top = rect.bottom - 2;

        r.bottom = r.top + 1;
        FillRect(hdc, &r, hbrushOld1);

        c += floatBandWidth;
    }

    // Do not forget to clean up: restore the original pen and brush,
    // then delete the objects we created.
    SelectObject(hdc, hpenOld);
    DeleteObject(hpen);
    SelectObject(hdc, hbrushOld);
    DeleteObject(hbrush);
    DeleteObject(hbrush1);
    ReleaseDC(hwnd, hdc);

    Sleep(20);
}
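Finally, the custom WM_PAINT+913 notification sent after each FFT pass has to reach DrawSpectrum somewhere. A hypothetical window-procedure fragment (the procedure name and the global floatMag array are assumptions, not the article's exact code):

LRESULT CALLBACK MainWndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
    if (msg == WM_PAINT + 913)
    {
        //floatMag was filled right before the SendMessage call in the FFT step above
        DrawSpectrum(hWnd, floatMag);
        return 0;
    }
    return DefWindowProc(hWnd, msg, wParam, lParam);
}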

The Position I Get Is Not Exactly That of a Circular Buffer

This program is not mature yet, so some problems remain, and for a few of them I have not found the right way to solve them. I hope CodeProject members can help me. One of the problems is that the play position I get does not behave exactly like a circular buffer. I have tried approaches such as a separate thread and a timer. Do you think you can help me solve this?
