I am streaming audio through DatagramPackets using DatagramSockets for a telephonic conversation. I can hear audio from the other end, but the other end cannot hear my audio.
The data packets are fixed at 340 bytes. The first 20 bytes carry connection-establishment values (source address, destination address, etc.); the next 320 bytes carry audio data. The audio payload is always fixed at 320 bytes.
// Packetization interval in milliseconds (one UDP voice payload per 20 ms of audio).
private static final int SAMPLE_INTERVAL = 20;
// Bytes per PCM sample (16-bit audio => 2 bytes).
private static final int SAMPLE_SIZE = 2;
// NOTE(review): 20 * 20 * 2 * 2 = 1600 bytes, i.e. five 320-byte frames, but the
// SAMPLE_INTERVAL * SAMPLE_INTERVAL formula looks accidental — the conventional
// derivation would be (RECORDING_RATE / 1000) * SAMPLE_INTERVAL * SAMPLE_SIZE * 5.
// Confirm the intended meaning before changing it.
private static final int BUF_SIZE = SAMPLE_INTERVAL * SAMPLE_INTERVAL * SAMPLE_SIZE * 2; //1600Bytes
// the audio recording options
private static final int RECORDING_RATE = 8000; // Hertz
private static final int CHANNEL = AudioFormat.CHANNEL_IN_MONO;//16
private static final int FORMAT = AudioFormat.ENCODING_PCM_16BIT;//2
// Recorder with a 16000-byte (10 * BUF_SIZE) internal buffer to absorb scheduling jitter.
AudioRecord audioRecorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
RECORDING_RATE, CHANNEL, FORMAT, BUF_SIZE * 10);
//audio transmitting thread
@Override
public void run() {
    // Audio-transmitting thread: captures 320-byte PCM frames from the mic and
    // sends each one as a 340-byte WiPhone UDP packet (20-byte header + voice).
    long seq = 0;
    byte[] buffer = new byte[BUF_SIZE];   // 1600-byte ring of five 320-byte frames
    byte[] frame = new byte[320];         // reused payload buffer (was re-allocated every iteration)

    // Header fields that never change during the call: set them once.
    WiphoneProp callProp = new WiphoneProp();
    callProp.setOpCode(WiPhonePacket.OpCodes.Call.getValue());       // 1 byte
    callProp.setOperand(WiPhonePacket.OperandCodes.Talk.getValue()); // 1 byte
    callProp.setSelfState(0);                                        // 1 byte
    callProp.setCallId(1);                                           // 1 byte
    callProp.setCheckSum(BitConverter.checkSum(deviceSignature.getSignatureData(), 0,
            deviceSignature.getSignatureData().length));             // 4 bytes

    try {
        // Hoisted loop invariants: the addresses do not change while talking, so
        // resolving them once avoids per-packet work (and per-packet DNS lookups).
        callProp.setWiphoneId(ByteBuffer.wrap(getBroadcastQuadIp().getAddress())
                .order(ByteOrder.LITTLE_ENDIAN).getInt());           // 4 bytes
        callProp.setClientId(ByteBuffer.wrap(getLocalIp().getAddress())
                .order(ByteOrder.LITTLE_ENDIAN).getInt());           // 4 bytes
        InetAddress dest = InetAddress.getByName(deviceSignature.getDeviceIP());

        int offset = 0;
        audioRecorder.startRecording();
        while (mic) {
            // FIX: AudioRecord.read() may deliver FEWER bytes than requested and can
            // return a negative error code; the original ignored its return value, so
            // packets could carry stale or partial audio. Loop until the frame is full.
            int read = 0;
            while (read < 320) {
                int n = audioRecorder.read(buffer, offset + read, 320 - read);
                if (n < 0) { // ERROR_INVALID_OPERATION / ERROR_BAD_VALUE
                    Log.e(TAG, "AudioRecord.read failed: " + n);
                    mic = false;
                    break;
                }
                read += n;
            }
            if (!mic) {
                break;
            }

            System.arraycopy(buffer, offset, frame, 0, frame.length);
            callProp.setSequence(seq); // 4 bytes
            callProp.setVoice(frame);  // 320-byte payload
            byte[] voice = callProp.ToBuffer();
            commonSoc.send(new DatagramPacket(voice, voice.length, dest, WIPHONEPORT));

            // Advance through the 1600-byte ring, wrapping after five frames.
            offset += 320;
            if (offset >= BUF_SIZE) {
                offset = 0;
            }
            seq++;
        }
        // Stop recording and release resources
        audioRecorder.stop();
        audioRecorder.release();
        mic = false;
    } catch (SocketException e) {
        Log.e(TAG, "SocketException: " + e.toString());
        mic = false;
    } catch (UnknownHostException e) {
        Log.e(TAG, "UnknownHostException: " + e.toString());
        mic = false;
    } catch (IOException e) {
        Log.e(TAG, "IOException: " + e.toString());
        mic = false;
    }
}
Now my audio buffer size is 1600 bytes, and there is still no audio at the other end.
I referred to this. Below is my C# code that captures audio for streaming in 320-byte chunks:
// Select the first available capture (microphone) device.
CaptureDevicesCollection captureDeviceCollection = new CaptureDevicesCollection();
DeviceInformation deviceInfo = captureDeviceCollection[0];
capture = new Capture(deviceInfo.DriverGuid);
short channels = 1; // Mono (the original comment said "Stereo", but 1 channel is mono).
short bitsPerSample = 16; //16Bit, alternatively use 8Bits.
int samplesPerSecond = 8000;//Default: 22050; //11KHz use 11025 , 22KHz use 22050, 44KHz use 44100 etc.
//Set up the wave format to be captured: 8 kHz, 16-bit, mono PCM => 16000 bytes/second.
waveFormat = new WaveFormat();
waveFormat.Channels = channels;
waveFormat.FormatTag = WaveFormatTag.Pcm;
waveFormat.SamplesPerSecond = samplesPerSecond;
waveFormat.BitsPerSample = bitsPerSample;
// BlockAlign = bytes per sample frame: 1 channel * (16 / 8) bytes = 2.
waveFormat.BlockAlign = (short)(channels * (bitsPerSample / (short)8));
// 2 * 8000 = 16000 bytes of PCM per second.
waveFormat.AverageBytesPerSecond = waveFormat.BlockAlign * samplesPerSecond;
captureBufferDescription = new CaptureBufferDescription();
captureBufferDescription.BufferBytes = waveFormat.AverageBytesPerSecond / 5; //Approx 200 milliseconds of PCM data (3200 bytes).
captureBufferDescription.Format = waveFormat;
// The playback buffer mirrors the capture format and size.
playbackBufferDescription = new BufferDescription();
playbackBufferDescription.BufferBytes = waveFormat.AverageBytesPerSecond / 5;
playbackBufferDescription.Format = waveFormat;
playbackBuffer = new SecondaryBuffer(playbackBufferDescription, device);
// 3200-byte circular capture buffer; later divided into 10 x 320-byte notification slices.
bufferSize = captureBufferDescription.BufferBytes;
//// Send loop: capture 320-byte PCM slices from the DirectSound circular buffer
//// and transmit each one as a WiPhone UDP packet (20-byte header + 320-byte voice).
try
{
//The following lines get audio from microphone and then send them
//across network.
captureBuffer = new CaptureBuffer(captureBufferDescription, capture);
// Registers 10 notification positions (one per bufferSize/10 slice) that pulse autoResetEvent.
CreateNotifyPositions();
int blockSize = bufferSize / 10;
int halfBuffer = bufferSize / 2;
// true => the capture buffer loops continuously (circular capture).
captureBuffer.Start(true);
//bool readFirstBufferPart = true;
int offset = 0;
// NOTE(review): memStream is created with capacity halfBuffer (1600 bytes) but only
// blockSize (320) bytes are read per iteration, and GetBuffer() below returns the
// ENTIRE underlying buffer, not just the bytes read — confirm SetVoice() consumes
// only the first 320 bytes.
MemoryStream memStream = new MemoryStream(halfBuffer);
bStop = false;
uint seq = 0;
while (!bStop)
{
// Block until the capture device has filled the next 320-byte slice.
autoResetEvent.WaitOne();
this.Invoke(new Action(() =>
{
listBox1.Items.Add(DateTime.Now.ToString("HH:mm:ss.fff"));
}));
// Read the slice 5 positions behind the sequence counter (modulo the 10 slices)
// to stay away from the region the hardware is currently writing.
offset = (int)(((seq + 5) % 10) * blockSize);
halfBuffer = blockSize;
memStream.Seek(0, SeekOrigin.Begin);
captureBuffer.Read(offset, memStream, halfBuffer, LockFlag.None);
//readFirstBufferPart = !readFirstBufferPart;
//offset = readFirstBufferPart ? 0 : halfBuffer;
byte[] dataToWrite = memStream.GetBuffer();
// Assemble the 340-byte WiPhone packet around the captured voice payload.
var voicePacket = new WiPhonePacket
{
OpCode = (byte)WiPhonePacket.OpCodes.Call,
Operand = (byte)WiPhonePacket.OperandCodes.Talk,
SelfState = 0,
CallId = 1,
WiPhoneId = broadcastIp,
ClientId = localIp,
CheckSum = Helpers.General.CalculateChecksum(Configurations.Signature.SignatureData, 0, Configurations.Signature.SignatureData.Length),
Sequence = seq++
};
voicePacket.SetVoice(dataToWrite);
var voiceBuffer = voicePacket.ToBuffer();
IPEndPoint ep = new IPEndPoint(IPAddress.Parse(Configurations.Signature.DeviceIP), 2739);
clientSocket.SendTo(voiceBuffer, ep);
//udpClient.Send(dataToWrite, dataToWrite.Length, otherPartyIP.Address.ToString(), 2739);
}
}
catch (Exception ex)
{
MessageBox.Show(ex.Message, "VoiceChat-Send ()", MessageBoxButtons.OK, MessageBoxIcon.Error);
}
finally
{
// Tear down capture and notification resources regardless of how the loop exited.
captureBuffer.Stop();
captureBuffer.Dispose();
captureBuffer = null;
autoResetEvent.Dispose();
autoResetEvent = null;
notify.Dispose();
notify = null;
//Increment flag by one.
nUdpClientFlag += 1;
//When flag is two then it means we have got out of loops in Send and Receive.
//while (nUdpClientFlag != 2)
//{ }
//Clear the flag.
nUdpClientFlag = 0;
//Close the socket.
//udpClient.Close();
}
// Divides the circular capture buffer into 10 equal slices and registers one
// notification position at the last byte of each slice; every time the capture
// cursor crosses a position, autoResetEvent is signalled so the send loop can
// read the freshly filled slice.
private void CreateNotifyPositions()
{
    try
    {
        autoResetEvent = new AutoResetEvent(false);
        notify = new Notify(captureBuffer);

        int sliceSize = bufferSize / 10;
        var positions = new BufferPositionNotify[10];
        for (int slice = 0; slice < positions.Length; slice++)
        {
            // Fire at the final byte of each slice; all slices share one event.
            positions[slice] = new BufferPositionNotify
            {
                Offset = (slice + 1) * sliceSize - 1,
                EventNotifyHandle = autoResetEvent.SafeWaitHandle.DangerousGetHandle()
            };
        }
        notify.SetNotificationPositions(positions);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message, "VoiceChat-CreateNotifyPositions ()", MessageBoxButtons.OK, MessageBoxIcon.Error);
    }
}
Thank you for adding the missing part of your source. This was very useful, as it contains the most obvious bug:
// (Buggy code quoted from the question.) BUG: the 3rd argument of
// AudioRecord.read() is the NUMBER of bytes to read, not the index of the
// last byte to write — 319 and 639 are therefore wrong here.
if(i==0){
bytes_read = audioRecorder.read(buffer, 0, 319);
System.arraycopy(buffer,0,buff,0,buff.length);
callProp.setVoice(buff);//320 byte
}else{
bytes_read = audioRecorder.read(buffer, 320, 639);
System.arraycopy(buffer,320,buff,0,buff.length);
callProp.setVoice(buff);//320 byte
}
audioRecorder.read() takes the number of bytes to read as its 3rd argument (not the index of the last element of the array to be written), so you should change the code to:
// Corrected version: request exactly 320 bytes into the first or second
// half of the capture buffer, then copy that half into the packet payload.
if(i==0){
bytes_read = audioRecorder.read(buffer, 0, 320);
System.arraycopy(buffer,0,buff,0,buff.length);
callProp.setVoice(buff);//320 byte
}else{
bytes_read = audioRecorder.read(buffer, 320, 320);
System.arraycopy(buffer,320,buff,0,buff.length);
callProp.setVoice(buff);//320 byte
}
After that the sound quality should be much better, but don't expect too much: your sample rate is only 8 kHz, which means the highest possible frequencies are below 4 kHz — roughly old-telephone quality.