C   116

shaper++.c

Guest on 8th May 2022 07:45:29 AM

  1. /*
  2.  * Copyright (c) Washington University in St. Louis.
  3.  * All rights reserved
  4.  *
  5.  *  Redistribution and use in source and binary forms, with or without
  6.  *  modification, are permitted provided that the following conditions
  7.  *  are met:
  8.  *    1. Redistributions of source code must retain the above copyright
  9.  *       notice, this list of conditions and the following disclaimer.
  10.  *    2. Redistributions in binary form must reproduce the above copyright
  11.  *       notice, this list of conditions and the following disclaimer in the
  12.  *       documentation and/or other materials provided with the distribution.
  13.  *    3. The name of the author or Washington University may not be used
  14.  *       to endorse or promote products derived from this source code
  15.  *       without specific prior written permission.
  16.  *    4. Conditions of any other entities that contributed to this are also
  17.  *       met. If a copyright notice is present from another entity, it must
  18.  *       be maintained in redistributions of the source code.
  19.  *
  20.  * THIS INTELLECTUAL PROPERTY (WHICH MAY INCLUDE BUT IS NOT LIMITED TO SOFTWARE,
  21.  * FIRMWARE, VHDL, etc) IS PROVIDED BY THE AUTHOR AND WASHINGTON UNIVERSITY
  22.  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  23.  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  24.  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR WASHINGTON UNIVERSITY
  25.  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  26.  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  27.  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  28.  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  29.  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  30.  * ARISING IN ANY WAY OUT OF THE USE OF THIS INTELLECTUAL PROPERTY, EVEN IF
  31.  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  32.  *
  33.  * */
  34. /*
  35.  * File: shaper++.c  
  36.  * Organization: Applied Research Laboratory
  37.  *
  38.  * Derived from: priq.c and setNxtBlk.c
  39.  *
  40.  *      Modified control msgs; changed queueing to be more like delay
  41.  *      plugin queueing.
  42.  *
  43.  * Description: traffic shaper using token bucket
  44.  *
  45.  * Modification History:
  46.  *              version v0
  47.  *
  48.  */
  49.  
  50.  
  51. #include <memory.h>
  52. #include "plugin_api.h"
  53. #include "plugin_dl.h"
  54.  
  55. #include "scratch_rings_WU.h"
  56. #include "sram_rings_WU.h"
  57.  
  58.  
  59. //-----------------------------------------------------------
  60. // constants
  61. //-----------------------------------------------------------
  62.  
  63.                         // defaults
  64. #ifndef TEST_MODE
  65.     #define DEF_RATE    1000    // 1 Mbps
  66. #else
  67.     #define DEF_RATE    100     // 100 Kbps (when in test mode)
  68. #endif
  69. #define DEF_BUCKETSZ    3000    // Bytes
  70.  
  71.                         // plugin counters
  72. #define PKT_COUNT       0       // #pkts received by handle_pkt_user()
  73. #define CB_COUNT        1       // #pkts sent by callback()
  74. #define ERR_COUNT       3       // #errors
  75.  
  76. //-----------------------------------------------------------
  77. // typedefs, unions, enums
  78. //-----------------------------------------------------------
// 64-bit timestamp, readable either as one long long (for arithmetic
// such as diff_nsec()) or as two 32-bit halves (for loading the
// timestamp CSR pair in callback()).
// NOTE(review): tm2.hi is declared before tm2.lo, so the overlay onto
// tm depends on this toolchain's endianness/layout -- confirm against
// the microengine C compiler docs before relying on tm arithmetic.
union tm_tag {
    long long   tm;
    struct {
        unsigned long   hi;
        unsigned long   lo;
    }   tm2;
};
  86.  
  87. // sizeof(struct item_tag) = 32 ==> 32,768 items in 1 MB
  88. struct item_tag {
  89.     plugin_out_data     metapkt;;
  90.     unsigned int        iplen;
  91.     struct item_tag     *next;
  92. };
  93.  
// Queue descriptor: FIFO of item_tag's plus the item free list and
// per-queue statistics.  Shared by all contexts of this microengine and
// serialized with the cooperative queue_lock.
// NOTE(review): nbytes is never updated anywhere in this file -- it
// stays 0 after queue_init().
struct queue_tag {
    unsigned long       npkts;          // #pkts in queue
    unsigned long       nbytes;         // #bytes in queue
    unsigned long       maxinq;         // max #pkts in queue
    unsigned long       ndrops;         // #overflows from queue
    unsigned long       nerrs;          // #errors other than drops
    struct item_tag     *hd;            // head ptr
    struct item_tag     *tl;            // tail ptr
    struct item_tag *free_hd;           // free list
};
  104.  
  105.  
  106. //-----------------------------------------------------------
  107. // Global variables/Registers
  108. //-----------------------------------------------------------
  109. //
  110. // >> thread-specific globals <<
  111. //
__declspec(gp_reg) int dlNextBlock;  // where to send packets to next
__declspec(gp_reg) int dlFromBlock;  // where to get packets from
__declspec(gp_reg) int msgNextBlock; // where to send control messages to next
__declspec(gp_reg) int msgFromBlock; // where to get control messages from

// see ring_formats.h for struct definitions
volatile __declspec(gp_reg) plc_plugin_data ring_in;    // ring data from PLC
volatile __declspec(gp_reg) plugin_out_data ring_out;   // ring data to nxt blk

const unsigned int SLEEP_CYCLES = 14000;        // cycles between
                                                //   callbacks (10 usec)
__declspec(gp_reg) unsigned int pluginId;       // plugin id (0...7)

// >> user globals << (shared by all contexts of this microengine)
__declspec(shared gp_reg) unsigned int npkts;   // total #pkts
__declspec(shared gp_reg) unsigned int nsent;   // total #pkts sent by callback()
__declspec(shared gp_reg) unsigned int maxinq;  // max #pkts queued
__declspec(shared gp_reg) unsigned int ndrops;  // total #pkts dropped
__declspec(shared gp_reg) unsigned int debug_on; // toggled by "debug" ctrl msg

// token-bucket shaper state (see callback() for the token arithmetic)
__declspec(shared sram) unsigned int rate_Kbps; // average target rate
__declspec(shared gp_reg) unsigned int bucketsz; // bucket size (bytes)
__declspec(shared gp_reg) union tm_tag told;    // last callback time
__declspec(shared gp_reg) unsigned int token_cnt; // 1 token = 0.0001 bit
                                                  // (TOKENS_PER_BYTE = 80000)

//------
// cooperative lock serializing queue_enq()/queue_pop(); waiters
// ctx_swap() until it is released
#define UNLOCKED 0
#define LOCKED   1
__declspec(shared gp_reg) unsigned int queue_lock;

#define MAX_QUEUE_SZ    32000
__declspec(shared sram) struct queue_tag queue;
  144.  
  145.  
  146. //------
  147.  
  148.  
  149. #include "plugin_helpers.h"
  150.  
  151. //-----------------------------------------------------------
  152. // Function prototypes
  153. //-----------------------------------------------------------
  154. void handle_pkt_user();
  155. void handle_msg_user();
  156. void plugin_init_user();
  157. int queue_init( __declspec(shared, sram) struct queue_tag *qptr );
  158.  
  159. static void wait_packet_signal(SIGNAL *);
  160. static void send_packet_signal(SIGNAL *);
  161.  
  162. // forward reference
  163. static __forceinline int
  164. helper_send_from_queue_to_x(
  165.                         __declspec(shared sram) struct queue_tag *qptr,
  166.                         __declspec(gp_reg) int dlNextBlock );
  167. static __forceinline int
  168. helper_send_from_queue( __declspec(shared sram) struct queue_tag *qptr,
  169.                         __declspec(gp_reg) int dlNextBlock );
  170. struct item_tag * queue_alloc( __declspec(shared, sram) struct queue_tag *qptr );
  171. void queue_free( __declspec(shared, sram) struct queue_tag *qptr, struct item_tag *item );
  172. //struct item_tag * queue_alloc( struct queue_tag *queue );
  173. //void queue_free( struct queue_tag *queue, struct item_tag *item );
  174. int queue_enq(  __declspec(shared, sram) struct queue_tag *qptr,
  175.                 volatile __declspec(gp_reg) plc_plugin_data ring_in,
  176.                 __declspec(local_mem) unsigned int iplen );
  177. int queue_pop( __declspec(shared sram) struct queue_tag *qptr );
  178.  
  179.  
  180. //-----------------------------------------------------------
  181. // New helper functions
  182. //-----------------------------------------------------------
  183.  
  184. #define FormRawQid(out_port,qid)        ((((out_port)+1) << 13) | qid)
  185.  
  186. // handle errors
  187. #define BAD_QUEUE_INIT_ERR      1       // bad queue_init()
  188. #define BAD_ENQ_ERR             2       // bad queue_enq()
  189. #define BAD_POP_EMPTY_ERR       3       // bad queue_pop() - empty queue
  190. #define BAD_POP_FREE_ERR        4       // bad queue_pop() - free() failed
  191.  
  192. __declspec(shared gp_reg) unsigned int nerrs;           // #errors
  193. volatile __declspec(shared sram) unsigned int errno[5]; // 1st 5 errors
  194.  
  195. // record error number
  196. //                                                                      <<<<<
  197. static __forceinline void
  198. helper_set_errno( __declspec(local_mem) unsigned int n ) {
  199.     if( nerrs < 5 )     errno[nerrs] = n;
  200.     ++nerrs;
  201.     onl_api_plugin_cntr_inc(pluginId, 0);       // external error counter
  202. }
  203.  
  204. // set ring_out qid given output port# and external qid
  205. //                                                                      <<<<<
  206. static __forceinline void
  207. helper_set_meta_qid(    __declspec(gp_reg) unsigned int out_port,
  208.                         __declspec(gp_reg) unsigned int xqid ) {
  209.     ring_out.plugin_qm_data_out.qid = (out_port+1 << 13) | xqid;
  210. }
  211.  
  212. // reset global counters
  213. //                                                                      <<<<<
// Zero every software counter (shared registers plus the errno[] log)
// and the three external plugin counters.  Called from
// plugin_init_user() and on the "reset" control message.
static __forceinline void
reset_counters( void ) {
    npkts = 0;
    nsent = 0;
    maxinq = 0;
    ndrops = 0;
    token_cnt = 0;

    nerrs = 0;
    // errno[] clearing is unrolled, not looped (see the register-variable
    // compiler-error note in handle_msg() for why loops are avoided here)
    errno[0] = 0;       errno[1] = 0;   errno[2] = 0;   errno[3] = 0;
    errno[4] = 0;

    sleep( SLEEP_CYCLES );              // original author: "not sure if
                                        //   I need this"
    helper_plugin_cntr_zero( PKT_COUNT );
    helper_plugin_cntr_zero( CB_COUNT );
    helper_plugin_cntr_zero( ERR_COUNT );
}
  231.  
  232.  
  233. // Hard coded to send meta-pkts to plugin ME 4
  234. // Used in place of helper_send_from_queue() while debugging
  235. //
// Hard coded to send meta-pkts to plugin ME 4
// Used in place of helper_send_from_queue() while debugging
//
// Pushes the 6-word meta-packet at the head of the queue onto plugin
// 4's SRAM input ring, then pops the head.  Always returns 0 -- a pop
// failure is only recorded via helper_set_errno()/ERR_COUNT.
static __forceinline int
helper_send_from_queue_to_x(
                        __declspec(shared sram) struct queue_tag *qptr,
                        __declspec(gp_reg) int dlNextBlock ) {
    int                 rc;

    // ASSUME dlNextBlock == plugin ME 4
    {
        sram_ring_put_buffer_6word( PLC_TO_PLUGIN_4_SRAM_RING,
                                                qptr->hd->metapkt.i, 0 );
    }

    rc = queue_pop( qptr );
    if( rc == -1 ) {
        helper_set_errno( BAD_POP_EMPTY_ERR );
        onl_api_plugin_cntr_inc(pluginId, ERR_COUNT);
    } else if( rc == -2 ) {
        helper_set_errno( BAD_POP_FREE_ERR );
        onl_api_plugin_cntr_inc(pluginId, ERR_COUNT);
    }

    return 0;
}
  259.  
  260.  
  261. // Same as dl_sink_packet() but don't do any signalling
  262. //
  263. // set ring_out data from item in queue data.
  264. //      return 0 if OK; -1 otherwise
  265. //
// Forward the meta-packet at the head of the queue to dlNextBlock, then
// pop the head.
//
//   QM                  -> build a 3-word QM message, push on the
//                          PLUGIN_TO_QM scratch ring
//   PACKET_IN_RING_1..4 -> push the full 6-word meta-packet on that
//                          plugin's SRAM input ring
//   PACKET_IN_RING_0 or anything else -> return -1; the packet stays
//                          queued
//
// Returns 0 once the ring put is done, even if the subsequent pop fails
// (the pop failure is recorded via helper_set_errno()/ERR_COUNT).
static __forceinline int
helper_send_from_queue( __declspec(shared sram) struct queue_tag *qptr,
                        __declspec(gp_reg) int dlNextBlock ) {
    int                 rc;

    if( dlNextBlock == QM ) {
        plugin_out_data my_ring_out;    // ring data to next block
        __declspec(gp_reg) int  out_port;

        // output port is packed in bits 5:3 of uc_mc_bits
        out_port = (qptr->hd->metapkt.plugin_plugin_data_out.uc_mc_bits >> 3)
                        & 0x7;
        my_ring_out.plugin_qm_data_out.out_port         = out_port;
        my_ring_out.plugin_qm_data_out.qid              =
                qptr->hd->metapkt.plugin_plugin_data_out.qid;
        my_ring_out.plugin_qm_data_out.l3_pkt_len       =
                qptr->hd->metapkt.plugin_plugin_data_out.l3_pkt_len;
        my_ring_out.plugin_qm_data_out.buf_handle_lo24  =
                qptr->hd->metapkt.plugin_plugin_data_out.buf_handle_lo24;
        scr_ring_put_buffer_3word( PLUGIN_TO_QM_RING, my_ring_out.i, 0 );
    } else if( dlNextBlock == PACKET_IN_RING_0 )        return -1;
    else if(    (dlNextBlock == PACKET_IN_RING_1)  ||
                (dlNextBlock == PACKET_IN_RING_2)  ||
                (dlNextBlock == PACKET_IN_RING_3)  ||
                (dlNextBlock == PACKET_IN_RING_4)  ) {
        if( dlNextBlock == PACKET_IN_RING_1 )
            sram_ring_put_buffer_6word( PLC_TO_PLUGIN_1_SRAM_RING,
                                                qptr->hd->metapkt.i, 0 );
        else if( dlNextBlock == PACKET_IN_RING_2 )
            sram_ring_put_buffer_6word( PLC_TO_PLUGIN_2_SRAM_RING,
                                                qptr->hd->metapkt.i, 0 );
        else if( dlNextBlock == PACKET_IN_RING_3 )
            sram_ring_put_buffer_6word( PLC_TO_PLUGIN_3_SRAM_RING,
                                                qptr->hd->metapkt.i, 0 );
        else if( dlNextBlock == PACKET_IN_RING_4 )
            sram_ring_put_buffer_6word( PLC_TO_PLUGIN_4_SRAM_RING,
                                                qptr->hd->metapkt.i, 0 );
    } else {                                    // all other options
        return -1;
    }

    rc = queue_pop( qptr );
    if( rc == -1 ) {
        helper_set_errno( BAD_POP_EMPTY_ERR );
        onl_api_plugin_cntr_inc(pluginId, ERR_COUNT);
    } else if( rc == -2 ) {
        helper_set_errno( BAD_POP_FREE_ERR );
        onl_api_plugin_cntr_inc(pluginId, ERR_COUNT);
    }

    return 0;
}
  317.  
  318.  
  319. // Same as dl_sink_packet() but don't do any signalling and assume pkt
  320. // goes to QM
  321. // Used during debugging
  322. //
// Debug variant of helper_send_from_queue() that assumes the packet
// goes to the QM: builds the 3-word QM message from the head of the
// queue, pops the head, then pushes the message on the PLUGIN_TO_QM
// scratch ring.
static __forceinline void
helper_send_from_queue_to_QM( __declspec(shared,sram) struct queue_tag *qptr ) {
    plugin_out_data     my_ring_out;    // ring data to next block
    __declspec(gp_reg) int      out_port;
    int                 rc;

    // output port is packed in bits 5:3 of uc_mc_bits
    out_port = (qptr->hd->metapkt.plugin_plugin_data_out.uc_mc_bits >> 3) & 0x7;
    my_ring_out.plugin_qm_data_out.out_port     = out_port;
    my_ring_out.plugin_qm_data_out.qid          =
                qptr->hd->metapkt.plugin_plugin_data_out.qid;
    my_ring_out.plugin_qm_data_out.l3_pkt_len   =
                qptr->hd->metapkt.plugin_plugin_data_out.l3_pkt_len;
    my_ring_out.plugin_qm_data_out.buf_handle_lo24      =
                qptr->hd->metapkt.plugin_plugin_data_out.buf_handle_lo24;

    rc = queue_pop( qptr );
    if( rc == -1 ) {
        helper_set_errno( BAD_POP_EMPTY_ERR );
        onl_api_plugin_cntr_inc(pluginId, ERR_COUNT);
    } else if( rc == -2 ) {
        helper_set_errno( BAD_POP_FREE_ERR );
        onl_api_plugin_cntr_inc(pluginId, ERR_COUNT);
    }

#ifdef DEBUG3
    if( debug_on )      helper_check_meta( my_ring_out );       // DEBUG3
#endif

    scr_ring_put_buffer_3word( PLUGIN_TO_QM_RING, my_ring_out.i, 0 );
}
  353.  
  354. //-----------------------------------------------------------
  355. // Begin Normal Functions
  356. //-----------------------------------------------------------
  357. //                                                                      <<<<<
  358. void handle_pkt_user( )  {
  359.     __declspec(gp_reg) buf_handle_t buf_handle;
  360.     __declspec(gp_reg) onl_api_buf_desc bufDescriptor;
  361.     __declspec(local_mem) unsigned int  bufDescPtr;
  362.     __declspec(local_mem) unsigned int  ipv4HdrPtr;
  363.     __declspec(local_mem) unsigned int  dramBufferPtr;
  364.     __declspec(gp_reg) onl_api_ip_hdr   ipv4_hdr;
  365.     unsigned long       ninq;           // #pkts queued
  366.  
  367.     // accounting
  368.     ++npkts;
  369.     onl_api_plugin_cntr_inc(pluginId, PKT_COUNT);
  370.  
  371.     // prepare to read IPv4 header
  372.     onl_api_get_buf_handle(&buf_handle);                        // rd handle
  373.     bufDescPtr = onl_api_getBufferDescriptorPtr(buf_handle);    // descr addr
  374.     onl_api_readBufferDescriptor(bufDescPtr, &bufDescriptor);   // rd descriptor
  375.     dramBufferPtr = onl_api_getBufferPtr(buf_handle);           // dram addr
  376.     ipv4HdrPtr = onl_api_getIpv4HdrPtr(dramBufferPtr, bufDescriptor.offset);
  377.     onl_api_readIpv4Hdr(ipv4HdrPtr, &ipv4_hdr);
  378.  
  379.     ninq = queue_enq( &queue, ring_in, ipv4_hdr.ip_len );
  380.  
  381.     if( ninq == -1 ) {
  382.         helper_set_errno( BAD_ENQ_ERR );
  383.         onl_api_plugin_cntr_inc(pluginId, ERR_COUNT);
  384.         ++ndrops;
  385.         helper_set_out_to_DROP( );
  386.         return;
  387.     } else {
  388.         if ( ninq > maxinq )    maxinq = ninq;
  389.         helper_set_out_to_DO_NOTHING( );
  390.     }
  391. }
  392.  
  393. //                                                                      <<<<<
// Control messages are parsed directly in handle_msg(); this user hook
// is intentionally empty.
void handle_msg_user(){}                                // NOT USED
  395.  
  396. //                                                                      <<<<<
  397. void plugin_init_user()
  398. {
  399.     if(ctx() == 0)
  400.     {
  401.         rate_Kbps = DEF_RATE;
  402.         bucketsz  = DEF_BUCKETSZ;
  403.         reset_counters( );
  404.         debug_on = 0;
  405.  
  406.         queue_lock = UNLOCKED;
  407.         if( queue_init( &queue ) != 0 ) {
  408.             helper_set_errno( BAD_QUEUE_INIT_ERR );
  409.         }
  410.  
  411. #ifdef DEBUGX
  412.         __set_timestamp( 0 );
  413. #endif
  414.     }
  415.  
  416.     // plugin chain
  417.     if( pluginId == 0 )         dlNextBlock = PACKET_IN_RING_1;
  418.     else if( pluginId == 1 )    dlNextBlock = PACKET_IN_RING_2;
  419.     else if( pluginId == 2 )    dlNextBlock = PACKET_IN_RING_3;
  420.     else if( pluginId == 3 )    dlNextBlock = PACKET_IN_RING_4;
  421.     else                        dlNextBlock = QM;
  422. }
  423.  
  424.  
  425.  
  426. /**
  427.         ----------------------------------------------------------------
  428.  @User: YOU SHOULD NOT NEED TO MAKE ANY CHANGES TO THE REST OF THIS FILE
  429.         ----------------------------------------------------------------
  430. */
  431.  
  432.  
  433. /* handle packets */
  434. //                                                                      <<<<<
// Packet-thread loop body: pull one meta-packet from this plugin's
// input ring, let handle_pkt_user() queue or drop it, then sink the
// result.  handle_pkt_user() always sets the outcome to DROP or
// DO_NOTHING, so dl_sink_packet() presumably does not forward the
// packet here -- forwarding happens from callback().  (TODO confirm
// against dl_sink_packet() in plugin_helpers.h.)
void handle_pkt()
{
    dl_source_packet( dlFromBlock );

    handle_pkt_user( );

    dl_sink_packet( dlNextBlock );
}
  443.  
  444.  
  445. /* handle control messages */
  446. //                                                                      <<<<<
  447. // op codes:
  448. //   set:
  449. //      params= rate_Kbps bucketsz(bytes)
  450. //              set traffic shaper parameters (rate_Kbps, bucketsz)
  451. //   get:
  452. //      =vers   display version number
  453. //      =params display parameters (rate_Kbps, bucketsz, counter)
  454. //      =counts display counts (npkts, maxinq, nerrs)
  455. //      =errno  display errno[0], ... , errno[4]
  456. //   miscellaneous:
  457. //      reset   reset npkts[], ndrops[], errno[] counters, etc.
  458. //      debug   toggle debug_on
  459. //
// Message-thread loop body: read one control message, dispatch on its
// command string (see the op-code list above), and send the response.
//
// Strings arrive packed in message[1..7]; they are unpacked into local
// memory, copied to SRAM for the sram string helpers, and the reply is
// re-packed into the outgoing message words.
void handle_msg()
{
    // assume messages are at most 8 words for now
    __declspec(gp_reg) unsigned int i;                  // (unused here)
    __declspec(gp_reg) unsigned int message[8];
    __declspec(gp_reg) onl_api_ctrl_msg_hdr hdr;
    __declspec(local_mem) char inmsgstr[28];                    // inbound
    __declspec(local_mem) char outmsgstr[28];                   // outbound
    __declspec(local_mem) char lmem_tmpstr[8];          // (unused here)
    __declspec(sram) char sram_inmsgstr[28];
    __declspec(sram) char vers[4] = "1.0";

    char SET_params[8]  = "params=";
    char GET_params[8]  = "=params";
    char GET_vers[8]    = "=vers";
    char GET_counts[8]  = "=counts";
    char GET_errno[8]   = "=errno";
    char RESET[8]       = "reset";
    char DEBUG_op[8]    = "debug";

    char OK_msg[4]      = "OK";                         // (unused here)
    char BAD_OP_msg[8]  = "BAD OP";
    char NEED_ARG_msg[12]= "NEED ARG";

    // expand for-loop to get rid of the compiler error:
    //                                  "Incorrect use of register variable
    message[0] = 0;
    message[1] = 0;
    message[2] = 0;
    message[3] = 0;
    message[4] = 0;
    message[5] = 0;
    message[6] = 0;
    message[7] = 0;

    dl_source_message(msgFromBlock, message);

    // word 0 is the header; ignore anything that is not a control
    // message wanting a response
    hdr.value = message[0];
    if( hdr.type != CM_CONTROLMSG )     return;
    if( hdr.response_requested != 1 )   return;

    onl_api_intarr2str( &message[1], inmsgstr );

    outmsgstr[0] = '\0';
    memcpy_sram_lmem( sram_inmsgstr, inmsgstr, 28 );

    // dispatch on the command prefix
    if( strncmp_sram(sram_inmsgstr, GET_vers, 5) == 0 ) {
        memcpy_lmem_sram( outmsgstr, (void *)vers, 4 );
    } else if( strncmp_sram(sram_inmsgstr, GET_params, 7) == 0 ) {
        helper_sram_outmsg_3ul( rate_Kbps, bucketsz, 0, outmsgstr );
    } else if( strncmp_sram(sram_inmsgstr, GET_counts, 7) == 0 ) {
        helper_sram_outmsg_3ul( npkts, maxinq, nerrs, outmsgstr );
    } else if( strncmp_sram(sram_inmsgstr, GET_errno, 6) == 0 ) {
        helper_sram_outmsg_5ul( errno[0], errno[1], errno[2], errno[3],
                                                        errno[4], outmsgstr );
    } else if( strncmp_sram(sram_inmsgstr, SET_params, 7) == 0 ) {
        // "params= <rate_Kbps> <bucketsz>"
        char    *cmnd_word;             // points to input command field
        char    *rate_word;             // points to input rate(Kbps) field
        char    *bucketsz_word;         // points to input bucketsz(bytes) field
        unsigned int    nwords;

        nwords = helper_count_words( sram_inmsgstr );
        if( nwords != 3 ) {
            memcpy_lmem_sram( outmsgstr, NEED_ARG_msg, 12 );
        } else {
            cmnd_word = helper_tokenize( sram_inmsgstr );       // get command
            rate_word = helper_tokenize( cmnd_word+strlen(cmnd_word)+1 );
            bucketsz_word = helper_tokenize( rate_word+strlen(rate_word)+1 );

            rate_Kbps = helper_atou_sram( rate_word );
            bucketsz = helper_atou_sram( bucketsz_word );
            helper_sram_outmsg_2ul( rate_Kbps, bucketsz, outmsgstr );
        }
    } else if( strncmp_sram(sram_inmsgstr, RESET, 5) == 0 ) {
        reset_counters( );
    } else if( strncmp_sram(sram_inmsgstr, DEBUG_op, 5) == 0 ) {
        debug_on = (debug_on+1) & 0x1;  // toggle 0 <-> 1
        helper_sram_outmsg_1ul( debug_on, outmsgstr );
    } else {
        memcpy_lmem_sram( outmsgstr, BAD_OP_msg, 8 );
    }

    // pack the reply string and send the response message
    if( onl_api_str2intarr(outmsgstr, &message[1]) < 0 )        return;

    hdr.type = CM_CONTROLMSGRSP;
    hdr.response_requested = 0;
    hdr.num_words = 7;
    message[0] = hdr.value;

    dl_sink_message(msgNextBlock, message);
}
  551.  
  552. // handle periodic functionality
  553. //                                                                      <<<<<
  554. // Called ABOUT every 10 usec
  555. //    - We can only guarantee that this thread will not get control sooner
  556. //      than 10 usec.
  557. // Min rate of 1 Kbps and 1 token = 0.0001 bits ==>
  558. //      Add about 100 tokens every callback.  Although we could have chosen
  559. //      1 token = 0.01 bits, our choice allows greater accuracy.
  560. // Computing number of tokens to add:
  561. //      Let     T = elapsed time in nsec since last token update
  562. //              R = avg rate of regulator (Kbps)
  563. //
  564. //      token_cnt' = token_cnt + T*R/100
  565. //
  566. //      For R = 1 Mbps and T about 10 usec,
  567. //
  568. //      token_cnt' = token_cnt + (10,000,000*1)/100 = token_cnt + 100,000
  569. //
  570. #define TOKENS_PER_BYTE 80000           // 1 token = 0.0001 bits
// Token-bucket drain loop (see the math in the comment block above):
// add T*R/100 tokens for the elapsed time, clamp to the bucket size,
// then forward queued packets while enough tokens remain.
void callback()
{
    __declspec(gp_reg) unsigned int     pktlen_tokens;
    union tm_tag        tnow;
    long long           tdiff_nsec;
    int                 rc;

        // update token counter: tokens += T(nsec) * R(Kbps) / 100
    tnow.tm2.lo = local_csr_read( local_csr_timestamp_low );
    tnow.tm2.hi = local_csr_read( local_csr_timestamp_high );
    tdiff_nsec = diff_nsec( tnow.tm, told.tm );
    token_cnt = token_cnt + (tdiff_nsec*rate_Kbps)/100;
    // clamp to bucket capacity
    // NOTE(review): TOKENS_PER_BYTE*bucketsz overflows 32 bits when
    // bucketsz > ~53,687 bytes -- confirm the allowed bucketsz range
    if( token_cnt > TOKENS_PER_BYTE*bucketsz )
        { token_cnt = TOKENS_PER_BYTE*bucketsz; }
    told.tm = tnow.tm;
        // forward pkts as long as there are enough tokens
    while( queue.npkts > 0 ) {
        pktlen_tokens = TOKENS_PER_BYTE*queue.hd->iplen;
        if( token_cnt >= pktlen_tokens ) {      // fwd first pkt
            onl_api_plugin_cntr_inc(pluginId, CB_COUNT);
#ifdef DEBUG1
helper_sram_dbgmsg_3ul( token_cnt, pktlen_tokens, nsent );
helper_sram_dbgmsg_3ul( queue.npkts, dlNextBlock, 0 );
#endif
// used in early testing
//XXX       helper_send_from_queue_to_QM( &queue );
//XXX       rc = helper_send_from_queue_to_x( &queue, dlNextBlock );
            rc = helper_send_from_queue( &queue, dlNextBlock );
            token_cnt -= pktlen_tokens;
            if( rc == 0 ) {
                ++nsent;
#ifdef DEBUG1
helper_sram_dbgmsg_3ul( token_cnt, pktlen_tokens, nsent );
helper_sram_dbgmsg_3ul( queue.npkts, 0, 0 );
#endif
            } else {
                // bad dlNextBlock: stop draining this round
                onl_api_plugin_cntr_inc(pluginId, ERR_COUNT);
                break;
            }
        } else  break;                  // not enough tokens yet
    }

    sleep( SLEEP_CYCLES );              // run again in ~10 usec
}
  615.  
  616.  
  617. /* take care of any setup that needs to be done before processing begins */
  618. //                                                                      <<<<<
// Framework initialization: map this microengine number to a plugin id
// and its packet/message rings, then run the user hook.
void plugin_init()
{
  /* set the default next block to be the Queue Manager */
  dlNextBlock = QM;

  /* by default, get packets and get and put control messages from input rings
   * based on which microengine we are currently running on; this assumes a
   * default one to one mapping */
  switch(__ME())
  {
    case 0x7:
      pluginId = 0;
      dlFromBlock  = PACKET_IN_RING_0;
      msgFromBlock = MESSAGE_IN_RING_0;
      msgNextBlock = MESSAGE_OUT_RING_0;
      break;
    case 0x10:
      pluginId = 1;
      dlFromBlock  = PACKET_IN_RING_1;
      msgFromBlock = MESSAGE_IN_RING_1;
      msgNextBlock = MESSAGE_OUT_RING_1;

      break;
    case 0x11:
      pluginId = 2;
      dlFromBlock  = PACKET_IN_RING_2;
      msgFromBlock = MESSAGE_IN_RING_2;
      msgNextBlock = MESSAGE_OUT_RING_2;  
      break;
    case 0x12:
      pluginId = 3;
      dlFromBlock  = PACKET_IN_RING_3;
      msgFromBlock = MESSAGE_IN_RING_3;
      msgNextBlock = MESSAGE_OUT_RING_3;    
      break;
    case 0x13:
      pluginId = 4;
      dlFromBlock  = PACKET_IN_RING_4;
      msgFromBlock = MESSAGE_IN_RING_4;
      msgNextBlock = MESSAGE_OUT_RING_4;
      break;
    default:  // keep the compiler happy
      pluginId = 0;
      dlFromBlock  = PACKET_IN_RING_0;
      msgFromBlock = MESSAGE_IN_RING_0;
      msgNextBlock = MESSAGE_OUT_RING_0;
      break;
  }

  plugin_init_user(); // user hook
}
  670.  
  671.  
  672. /* entry point */
  673. //                                                                      <<<<<
// Thread dispatch: after initialization, packet threads loop on
// handle_pkt(), the (optional) message thread on handle_msg(), and the
// (optional) callback thread on callback(), which performs the actual
// token-bucket forwarding.
void main()
{
  int c;

  /* do initialization */
  plugin_init();
  dl_sink_init();
  dl_source_init();

  /* get the current thread's context number (0-7) */
  c = ctx();

  if(c >= FIRST_PACKET_THREAD && c <= LAST_PACKET_THREAD)
  {
    while(1)
    {
      handle_pkt();
    }
  }
#ifdef MESSAGE_THREAD
  else if(c == MESSAGE_THREAD)
  {
    while(1)
    {
      handle_msg();
    }
  }
#endif
#ifdef CALLBACK_THREAD
  else if(c == CALLBACK_THREAD)
  {
    while(1)
    {
      callback();
    }
  }
#endif
}
  712.  
  713. // --------------------------------------------------------------------------
  714. // queueing functions
  715. //
  716. //      queue_init              initialize free list and queue descriptor
  717. //      queue_enq               enqueue an item onto a queue
  718. //      queue_pop               pop an item from a queue
  719. //      queue_alloc             allocate space for an item from the free list
  720. //      queue_free              put an item back onto the free list
  721. //
  722. // --------------------------------------------------------------------------
  723.  
  724. // initialize queue
  725. //                                                                      <<<<<
  726. int
  727. queue_init( __declspec(shared, sram) struct queue_tag *qptr ) {
  728.     int         i;
  729.     int         K = MAX_QUEUE_SZ-1;
  730.     struct item_tag *item_ptr;
  731.  
  732.     if ( pluginId == 0)         item_ptr = (struct item_tag *) 0xC0100000;
  733.     else if ( pluginId == 1)    item_ptr = (struct item_tag *) 0xC0200000;
  734.     else if ( pluginId == 2)    item_ptr = (struct item_tag *) 0xC0300000;
  735.     else if ( pluginId == 3)    item_ptr = (struct item_tag *) 0xC0400000;
  736.     else if ( pluginId == 4)    item_ptr = (struct item_tag *) 0xC0500000;
  737.     else        return -1;
  738.  
  739.     qptr->free_hd = item_ptr;
  740.     qptr->hd = qptr->tl = 0;
  741.     qptr->npkts = 0;
  742.     qptr->nbytes = 0;
  743.     qptr->maxinq = 0;
  744.     qptr->ndrops = 0;
  745.     qptr->nerrs = 0;
  746.  
  747.     (item_ptr+K)->next = 0;
  748.  
  749.     for (i=0; i<K; i++) {
  750.         item_ptr->next = item_ptr+1;
  751.         ++item_ptr;
  752.     }
  753.  
  754.     return 0;
  755. }
  756.  
  757. // insert item at end of queue
  758. //      return number of items if OK; else -1
  759. int
  760. queue_enq(      __declspec(shared, sram) struct queue_tag *qptr,
  761.                 volatile __declspec(gp_reg) plc_plugin_data ring_in,
  762.                 __declspec(local_mem) unsigned int iplen ) {
  763.     struct item_tag     *item;
  764.  
  765.     while( queue_lock == LOCKED )       ctx_swap();
  766.     queue_lock = LOCKED;
  767.  
  768.         item = queue_alloc( &queue );
  769.         if( item == 0 ) {
  770.             ++qptr->ndrops;
  771.             return -1;
  772.         }
  773.  
  774.         item->metapkt.i[0] = ring_in.i[0];
  775.         item->metapkt.i[1] = ring_in.i[1];
  776.         item->metapkt.i[2] = ring_in.i[2];
  777.         item->metapkt.i[3] = ring_in.i[3];
  778.         item->metapkt.i[4] = ring_in.i[4];
  779.         item->metapkt.i[5] = ring_in.i[5];
  780.         item->iplen = iplen;
  781.  
  782.         if( qptr->npkts == 0 )  qptr->hd = item;
  783.         else                    qptr->tl->next = item;
  784.         qptr->tl = item;
  785.  
  786.         ++(qptr->npkts);
  787.         if( qptr->npkts > qptr->maxinq )        qptr->maxinq = qptr->npkts;
  788.  
  789.     queue_lock = UNLOCKED;
  790.  
  791.     return qptr->npkts;
  792. }
  793.  
  794. // pop front of list
  795. int
  796. queue_pop( __declspec(shared, sram) struct queue_tag *qptr ) {
  797.     struct item_tag     *item;
  798.  
  799.     while( queue_lock == LOCKED )       ctx_swap();
  800.     queue_lock = LOCKED;
  801.  
  802.         if( qptr->npkts <= 0 ) {
  803.             ++qptr->nerrs;
  804.             return -1;
  805.         }
  806.  
  807.         item = qptr->hd;
  808.         qptr->hd = item->next;
  809.         --(qptr->npkts);
  810.         if( qptr->npkts == 0 )  qptr->tl = 0;
  811.         queue_free( qptr, item );
  812.  
  813.     queue_lock = UNLOCKED;
  814.     return 0;
  815. }
  816.  
  817. // allocate an item
  818. struct item_tag *
  819. queue_alloc( __declspec(shared, sram) struct queue_tag *qptr ) {
  820.     struct item_tag *item;
  821.  
  822.     if( qptr->free_hd == 0 )    return 0;
  823.  
  824.     item = qptr->free_hd;
  825.     qptr->free_hd = item->next;
  826.     return item;
  827. }
  828.  
  829. // free an item
  830. void
  831. queue_free(     __declspec(shared, sram) struct queue_tag *qptr,
  832.                 struct item_tag *item ) {
  833.     if( item == 0 )     return;
  834.     item->next = qptr->free_hd;
  835.     qptr->free_hd = item;
  836. }

Raw Paste


Login or Register to edit or fork this paste. It's free.